From 5534f2cc6a3f29022998950472741d16e7a66b40 Mon Sep 17 00:00:00 2001
From: Jeffrey Morgan
Date: Sat, 20 Jul 2024 21:48:12 -0400
Subject: [PATCH] llm: consider `head_dim` in llama arch (#5817)

---
 llm/patches/11-embd_kv.diff | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)
 create mode 100644 llm/patches/11-embd_kv.diff

diff --git a/llm/patches/11-embd_kv.diff b/llm/patches/11-embd_kv.diff
new file mode 100644
index 00000000..ad17a700
--- /dev/null
+++ b/llm/patches/11-embd_kv.diff
@@ -0,0 +1,19 @@
+diff --git a/src/llama.cpp b/src/llama.cpp
+index 2b9ace28..e60d3d8d 100644
+--- a/src/llama.cpp
++++ b/src/llama.cpp
+@@ -6052,10 +6052,10 @@ static bool llm_load_tensors(
+ 
+         layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
+ 
+-        layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
+-        layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
+-        layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
+-        layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd});
++        layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * n_head});
++        layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa});
++        layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa});
++        layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * n_head, n_embd});
+ 
+         // optional bias tensors
+         layer.bq = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
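
For context, a minimal sketch of the dimension arithmetic behind this change, assuming a model whose explicit head_dim does not equal n_embd / n_head. The variable names mirror llama.cpp conventions, but the standalone program and its hyperparameter values are illustrative assumptions, not part of the patch or of llama.cpp.

// Illustrative sketch: how the attention projection widths differ once
// head_dim comes from model metadata instead of being derived as
// n_embd / n_head. All numbers below are hypothetical.
#include <cstdio>

int main() {
    const int n_embd        = 5120; // hidden size
    const int n_head        = 32;   // attention heads
    const int n_head_kv     = 8;    // KV heads (grouped-query attention)
    const int n_embd_head_k = 128;  // explicit head_dim; n_embd / n_head would be 160

    // Shapes the old code assumed: head_dim is implicitly n_embd / n_head.
    const int old_q_cols  = n_embd;                        // wq: {n_embd, n_embd}
    const int old_kv_cols = (n_embd / n_head) * n_head_kv; // wk/wv: {n_embd, n_embd_gqa}

    // Shapes after the patch: derived from the explicit head_dim.
    const int new_q_cols  = n_embd_head_k * n_head;        // wq: {n_embd, n_embd_head_k * n_head}
    const int new_kv_cols = n_embd_head_k * n_head_kv;     // wk/wv: {n_embd, n_embd_k_gqa}

    std::printf("wq/wo width:  old=%d new=%d\n", old_q_cols, new_q_cols);   // 5120 vs 4096
    std::printf("wk/wv width:  old=%d new=%d\n", old_kv_cols, new_kv_cols); // 1280 vs 1024
    return 0;
}

With such a model, sizing wq, wk, wv, and wo from n_embd alone would not match the tensor shapes stored in the GGUF file, which is why the hunk switches to n_embd_head_k * n_head and the per-projection GQA widths.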