From 17dd7fa8e02b48c30c5a80f0225a9b93a6f5d4b8 Mon Sep 17 00:00:00 2001
From: Hannes Krumbiegel
Date: Fri, 11 Aug 2023 09:58:48 +0200
Subject: [PATCH 1/3] Add py.typed

---
 llama_cpp/py.typed | 0
 setup.py           | 1 +
 2 files changed, 1 insertion(+)
 create mode 100644 llama_cpp/py.typed

diff --git a/llama_cpp/py.typed b/llama_cpp/py.typed
new file mode 100644
index 0000000..e69de29
diff --git a/setup.py b/setup.py
index 8e6139d..74040d5 100644
--- a/setup.py
+++ b/setup.py
@@ -15,6 +15,7 @@ setup(
     author_email="abetlen@gmail.com",
     license="MIT",
     package_dir={"llama_cpp": "llama_cpp", "llama_cpp.server": "llama_cpp/server"},
+    package_data={"llama_cpp": ["py.typed"]},
     packages=["llama_cpp", "llama_cpp.server"],
     install_requires=["typing-extensions>=4.5.0", "numpy>=1.20.0", "diskcache>=5.6.1"],
     extras_require={

From d018c7b01dd08518b59ebcedc603111243a39391 Mon Sep 17 00:00:00 2001
From: Billy Cao
Date: Sat, 12 Aug 2023 18:41:47 +0800
Subject: [PATCH 2/3] Add doc string for n_gpu_layers argument

---
 llama_cpp/llama.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/llama_cpp/llama.py b/llama_cpp/llama.py
index a996d5c..20a5e0c 100644
--- a/llama_cpp/llama.py
+++ b/llama_cpp/llama.py
@@ -239,6 +239,7 @@ class Llama:
             n_ctx: Maximum context size.
             n_parts: Number of parts to split the model into. If -1, the number of parts is automatically determined.
             seed: Random seed. -1 for random.
+            n_gpu_layers: Number of layers to offload to GPU (-ngl). If -1, all layers are offloaded.
             f16_kv: Use half-precision for key/value cache.
             logits_all: Return logits for all tokens, not just the last token.
             vocab_only: Only load the vocabulary no weights.

From c471871d0bfe0ac8d9bf69f6cee6cff768776ad2 Mon Sep 17 00:00:00 2001
From: Billy Cao
Date: Sun, 13 Aug 2023 11:21:28 +0800
Subject: [PATCH 3/3] make n_gpu_layers=-1 offload all layers

---
 llama_cpp/llama.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llama_cpp/llama.py b/llama_cpp/llama.py
index 20a5e0c..8115d46 100644
--- a/llama_cpp/llama.py
+++ b/llama_cpp/llama.py
@@ -268,7 +268,7 @@ class Llama:
 
         self.params = llama_cpp.llama_context_default_params()
         self.params.n_ctx = n_ctx
-        self.params.n_gpu_layers = n_gpu_layers
+        self.params.n_gpu_layers = 0x7FFFFFFF if n_gpu_layers == -1 else n_gpu_layers # 0x7FFFFFFF is INT32 max, will be auto set to all layers
         self.params.seed = seed
         self.params.f16_kv = f16_kv
         self.params.logits_all = logits_all
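
Note on patch 1: the empty llama_cpp/py.typed file is the PEP 561 marker that tells type checkers the package ships usable inline type annotations, and the package_data entry is what gets that marker included in built wheels and sdists. A minimal sketch of the downstream effect (the helper function and model path are hypothetical, not part of the patch):

    from llama_cpp import Llama

    def load_model(path: str) -> Llama:
        # With py.typed installed alongside the package, mypy resolves
        # Llama's real inline annotations instead of treating the
        # `llama_cpp` import as untyped (PEP 561).
        return Llama(model_path=path)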
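
Note on patches 2 and 3: together they document and implement the -1 sentinel. A minimal usage sketch (the model path is hypothetical): constructing with n_gpu_layers=-1 now maps to 0x7FFFFFFF (INT32 max) before the value reaches llama.cpp, which offloads at most the model's real layer count, so the sentinel offloads every layer regardless of model size.

    from llama_cpp import Llama

    # -1 becomes 0x7FFFFFFF internally; llama.cpp caps offloading at the
    # model's actual number of layers, so this offloads all of them.
    llm = Llama(model_path="./models/7B/model.bin", n_gpu_layers=-1)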