From c471871d0bfe0ac8d9bf69f6cee6cff768776ad2 Mon Sep 17 00:00:00 2001
From: Billy Cao
Date: Sun, 13 Aug 2023 11:21:28 +0800
Subject: [PATCH] make n_gpu_layers=-1 offload all layers

---
 llama_cpp/llama.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llama_cpp/llama.py b/llama_cpp/llama.py
index 20a5e0c..8115d46 100644
--- a/llama_cpp/llama.py
+++ b/llama_cpp/llama.py
@@ -268,7 +268,7 @@ class Llama:
         self.params = llama_cpp.llama_context_default_params()
         self.params.n_ctx = n_ctx
-        self.params.n_gpu_layers = n_gpu_layers
+        self.params.n_gpu_layers = 0x7FFFFFFF if n_gpu_layers == -1 else n_gpu_layers  # 0x7FFFFFFF is INT32 max, will be auto set to all layers
         self.params.seed = seed
         self.params.f16_kv = f16_kv
         self.params.logits_all = logits_all
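
Note: a minimal standalone sketch of the sentinel mapping this patch introduces
(resolve_n_gpu_layers is a hypothetical helper used only for illustration; it
is not part of llama-cpp-python). Per the comment in the patch, llama.cpp
auto-sets any sufficiently large n_gpu_layers to the model's actual layer
count, so INT32 max is a safe stand-in for "all layers":

    INT32_MAX = 0x7FFFFFFF  # 2**31 - 1, larger than any model's layer count

    def resolve_n_gpu_layers(n_gpu_layers: int) -> int:
        # -1 is a convenience sentinel meaning "offload all layers"; any
        # other value is passed through to llama.cpp unchanged.
        return INT32_MAX if n_gpu_layers == -1 else n_gpu_layers

    assert resolve_n_gpu_layers(-1) == 0x7FFFFFFF  # sentinel expands to INT32 max
    assert resolve_n_gpu_layers(20) == 20          # explicit counts are untouched

With the patch applied, callers can write Llama(model_path=..., n_gpu_layers=-1)
instead of guessing a "large enough" layer count for each model.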