From 282698b6d383e216e129856f25b0ca41348ad525 Mon Sep 17 00:00:00 2001
From: Alexey
Date: Fri, 23 Jun 2023 00:19:24 +0400
Subject: [PATCH] server: pass seed param from command line to llama

---
 llama_cpp/server/app.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/llama_cpp/server/app.py b/llama_cpp/server/app.py
index 313e27d..ef319c7 100644
--- a/llama_cpp/server/app.py
+++ b/llama_cpp/server/app.py
@@ -30,6 +30,9 @@ class Settings(BaseSettings):
         ge=0,
         description="The number of layers to put on the GPU. The rest will be on the CPU.",
     )
+    seed: int = Field(
+        default=1337, description="Random seed. -1 for random."
+    )
     n_batch: int = Field(
         default=512, ge=1, description="The batch size to use per eval."
     )
@@ -109,6 +112,7 @@ def create_app(settings: Optional[Settings] = None):
     llama = llama_cpp.Llama(
         model_path=settings.model,
         n_gpu_layers=settings.n_gpu_layers,
+        seed=settings.seed,
         f16_kv=settings.f16_kv,
         use_mlock=settings.use_mlock,
         use_mmap=settings.use_mmap,
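
For context, `Settings` here extends pydantic's `BaseSettings`, so the new `seed` field can also be supplied at launch through an environment variable with no extra CLI plumbing. A minimal sketch of that lookup, assuming pydantic v1 (where `BaseSettings` still lives in the `pydantic` package):

    import os
    from pydantic import BaseSettings, Field

    class Settings(BaseSettings):
        # Mirrors the field added by this patch; BaseSettings maps field
        # names to environment variables case-insensitively ("SEED").
        seed: int = Field(default=1337, description="Random seed. -1 for random.")

    os.environ["SEED"] = "-1"   # e.g. `SEED=-1 python3 -m llama_cpp.server ...`
    print(Settings().seed)      # -> -1; with SEED unset, the default 1337 applies

With the second hunk applied, whatever value ends up in `settings.seed` is then forwarded to llama.cpp through the `seed=` argument of the `llama_cpp.Llama` constructor.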