From f6a7850e1a316c5168ba51cbdbb669d774cd0c15 Mon Sep 17 00:00:00 2001
From: Andrei Betlen
Date: Tue, 8 Aug 2023 14:30:58 -0400
Subject: [PATCH] Update llama.cpp

---
 llama_cpp/llama_cpp.py | 2 ++
 vendor/llama.cpp       | 2 +-
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/llama_cpp/llama_cpp.py b/llama_cpp/llama_cpp.py
index 423a4a0..bbb2a1e 100644
--- a/llama_cpp/llama_cpp.py
+++ b/llama_cpp/llama_cpp.py
@@ -181,6 +181,7 @@ llama_progress_callback = ctypes.CFUNCTYPE(None, c_float, c_void_p)
 #     // Keep the booleans together to avoid misalignment during copy-by-value.
 #     bool low_vram;   // if true, reduce VRAM usage at the cost of performance
+#     bool mul_mat_q;  // if true, use experimental mul_mat_q kernels
 #     bool f16_kv;     // use fp16 for KV cache
 #     bool logits_all; // the llama_eval() call computes all logits, not just the last one
 #     bool vocab_only; // only load the vocabulary, no weights
@@ -203,6 +204,7 @@ class llama_context_params(Structure):
         ("progress_callback", llama_progress_callback),
         ("progress_callback_user_data", c_void_p),
         ("low_vram", c_bool),
+        ("mul_mat_q", c_bool),
         ("f16_kv", c_bool),
         ("logits_all", c_bool),
         ("vocab_only", c_bool),
diff --git a/vendor/llama.cpp b/vendor/llama.cpp
index 41c6741..f5bfea0 160000
--- a/vendor/llama.cpp
+++ b/vendor/llama.cpp
@@ -1 +1 @@
-Subproject commit 41c674161fb2459bdf7806d1eebead15bc5d046e
+Subproject commit f5bfea0580e417f99850d5456ca541d871a3e48c
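
Usage note (not part of the patch): a minimal sketch of how the new flag could be
set through the updated bindings, assuming the standard llama_cpp module layout;
llama_context_default_params() is the existing ctypes binding for the C function
of the same name.

    import llama_cpp

    # Fetch the library's default context parameters, then opt in to the
    # experimental mul_mat_q kernels exposed by this patch.
    params = llama_cpp.llama_context_default_params()
    params.mul_mat_q = True

    # The surrounding booleans are unchanged; the order of _fields_ must
    # match the C struct exactly, which is why the new member is inserted
    # between low_vram and f16_kv rather than appended at the end.
    print(params.low_vram, params.mul_mat_q, params.f16_kv)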