From d410f12fae32bf77a8eedc05e7bef263dc6b7cfd Mon Sep 17 00:00:00 2001
From: Andrei Betlen
Date: Sat, 17 Jun 2023 13:38:48 -0400
Subject: [PATCH] Update docs. Closes #386

---
 llama_cpp/llama.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llama_cpp/llama.py b/llama_cpp/llama.py
index 366f050..a0b2030 100644
--- a/llama_cpp/llama.py
+++ b/llama_cpp/llama.py
@@ -228,7 +228,7 @@ class Llama:
             model_path: Path to the model.
             n_ctx: Maximum context size.
             n_parts: Number of parts to split the model into. If -1, the number of parts is automatically determined.
-            seed: Random seed. 0 for random.
+            seed: Random seed. -1 for random.
             f16_kv: Use half-precision for key/value cache.
             logits_all: Return logits for all tokens, not just the last token.
             vocab_only: Only load the vocabulary no weights.
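
For context, a minimal usage sketch of the documented behavior; the model path below is hypothetical and the call assumes the constructor and completion API as of this version of llama-cpp-python:

    from llama_cpp import Llama

    # seed=-1 requests a random seed, per the corrected docstring;
    # any fixed non-negative value makes sampling reproducible across runs.
    llm = Llama(model_path="./models/7B/ggml-model.bin", seed=-1)

    output = llm("Q: Name the planets in the solar system. A:", max_tokens=32)
    print(output["choices"][0]["text"])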