llm: patch to fix qwen 2 temporarily on nvidia (#4897)
parent 78f81fc0e5
commit ce0dc33cb8
1 changed file with 13 additions and 0 deletions
13  llm/patches/06-qwen2.diff  Normal file
@@ -0,0 +1,13 @@
+diff --git a/llama.cpp b/llama.cpp
+index 40d2ec2c..f34eb79a 100644
+--- a/llama.cpp
++++ b/llama.cpp
+@@ -6943,7 +6943,7 @@ static struct ggml_tensor * llm_build_kqv(
+     struct ggml_tensor * kq = ggml_mul_mat(ctx, k, q);
+     cb(kq, "kq", il);
+
+-    if (model.arch == LLM_ARCH_PHI2 || model.arch == LLM_ARCH_PHI3 || model.arch == LLM_ARCH_GPTNEOX) {
++    if (model.arch == LLM_ARCH_PHI2 || model.arch == LLM_ARCH_PHI3 || model.arch == LLM_ARCH_GPTNEOX || model.arch == LLM_ARCH_QWEN2) {
+         // for this arch, we need to perform the KQ multiplication with F32 precision, otherwise we get NaNs
+         // ref: https://github.com/ggerganov/llama.cpp/pull/4490#issuecomment-1859055847
+         ggml_mul_mat_set_prec(kq, GGML_PREC_F32);
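The one-line change adds Qwen2 to the set of architectures (Phi-2, Phi-3, GPT-NeoX) whose K*Q matrix multiplication must accumulate in F32: with the default F16 accumulation on NVIDIA backends, the attention scores overflow to NaN. Below is a minimal sketch of the pattern the patch relies on, assuming a local ggml checkout; the build_kq_f32 helper is illustrative and not code from llama.cpp, while ggml_mul_mat, ggml_mul_mat_set_prec, and GGML_PREC_F32 are the real ggml API used in the diff.

#include "ggml.h"

/* Build the pre-softmax attention scores K^T * Q, forcing F32 precision
 * for this one op so backends that default to F16 accumulation (e.g. the
 * CUDA backend) do not produce NaNs. */
static struct ggml_tensor * build_kq_f32(struct ggml_context * ctx,
                                         struct ggml_tensor  * k,
                                         struct ggml_tensor  * q) {
    struct ggml_tensor * kq = ggml_mul_mat(ctx, k, q);

    /* Precision is requested per tensor/op, not globally, so the rest of
     * the graph keeps its faster default precision. */
    ggml_mul_mat_set_prec(kq, GGML_PREC_F32);
    return kq;
}

Because the precision override is attached to the single kq tensor, the fix costs extra work only on the one matmul that actually overflows, which is why llama.cpp gates it per architecture rather than enabling F32 everywhere.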