From eb56ce2e2ab2007616905c64d93aa456b8149065 Mon Sep 17 00:00:00 2001
From: Andrei Betlen
Date: Thu, 22 Feb 2024 11:33:05 -0500
Subject: [PATCH] docs: fix low-level api example

---
 README.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index b208d6f..505f4d2 100644
--- a/README.md
+++ b/README.md
@@ -561,7 +561,7 @@ Below is a short example demonstrating how to use the low-level API to tokenize
 ```python
 >>> import llama_cpp
 >>> import ctypes
->>> llama_cpp.llama_backend_init(numa=False) # Must be called once at the start of each program
+>>> llama_cpp.llama_backend_init(False) # Must be called once at the start of each program
 >>> params = llama_cpp.llama_context_default_params()
 # use bytes for char * params
 >>> model = llama_cpp.llama_load_model_from_file(b"./models/7b/llama-model.gguf", params)
@@ -569,7 +569,7 @@ Below is a short example demonstrating how to use the low-level API to tokenize
 >>> max_tokens = params.n_ctx
 # use ctypes arrays for array params
 >>> tokens = (llama_cpp.llama_token * int(max_tokens))()
->>> n_tokens = llama_cpp.llama_tokenize(ctx, b"Q: Name the planets in the solar system? A: ", tokens, max_tokens, add_bos=llama_cpp.c_bool(True))
+>>> n_tokens = llama_cpp.llama_tokenize(ctx, b"Q: Name the planets in the solar system? A: ", tokens, max_tokens, llama_cpp.c_bool(True))
 >>> llama_cpp.llama_free(ctx)
 ```
 
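
Why the keywords were dropped (a plausible reading, not stated in the commit message): the README example drives the library through ctypes-based bindings (note the `ctypes` import and `llama_cpp.c_bool` in the snippet), and plain ctypes foreign functions do not map Python keyword arguments to C parameters unless prototypes are built with paramflags; the keywords are ignored and the call fails as though a positional argument were missing. Below is a minimal, self-contained sketch of that ctypes behavior, using libc's `abs` as a stand-in (the library and function here are illustrative assumptions, not part of this patch):

```python
import ctypes
import ctypes.util

# Load the C runtime (illustrative stand-in for any ctypes-bound library;
# assumes a POSIX system where find_library can locate libc).
libc = ctypes.CDLL(ctypes.util.find_library("c"))

# Declare the C signature: int abs(int);
libc.abs.argtypes = [ctypes.c_int]
libc.abs.restype = ctypes.c_int

print(libc.abs(-5))  # positional call works as declared: prints 5

try:
    # Without paramflags, ctypes does not map keyword arguments to
    # parameters, so the required positional argument appears missing.
    libc.abs(n=-5)
except TypeError as exc:
    print(f"keyword call failed: {exc}")
```

This is consistent with the patch passing `False` and `llama_cpp.c_bool(True)` positionally, in the order of the underlying C signatures.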