diff --git a/src/llama.cpp b/src/llama.cpp
index a207451f..fba6b175 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -4969,6 +4969,7 @@ static void llm_load_hparams(
                 hparams.attn_soft_cap = true;
 
                 switch (hparams.n_layer) {
+                    case 26: model.type = e_model::MODEL_2B; break;
                     case 42: model.type = e_model::MODEL_9B; break;
                     case 46: model.type = e_model::MODEL_27B; break;
                     default: model.type = e_model::MODEL_UNKNOWN;
@@ -11736,6 +11737,7 @@ struct llm_build_context {
 
                 // ref: https://github.com/google/gemma_pytorch/commit/03e657582d17cb5a8617ebf333c1c16f3694670e
                 switch (model.type) {
+                    case e_model::MODEL_2B:  Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd_head_k)));   break;
                     case e_model::MODEL_9B:  Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd_head_k)));   break;
                     case e_model::MODEL_27B: Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd / n_head))); break;
                     default: GGML_ABORT("fatal error");
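The first hunk maps a 26-layer Gemma 2 checkpoint to MODEL_2B (the 9B and 27B are identified by 42 and 46 layers, respectively). The second hunk gives the 2B the same query pre-attention scaling as the 9B, 1/sqrt(n_embd_head_k), while the 27B keeps its distinct 1/sqrt(n_embd / n_head) per the referenced gemma_pytorch commit. Below is a minimal standalone sketch of the resulting scale factors, assuming the published Gemma 2 shapes (2B/9B head_dim 256; 27B hidden size 4608 with 32 heads); these constants are assumptions for illustration, not values read from a GGUF file:

#include <cmath>
#include <cstdio>

int main() {
    // 2B and 9B: queries are scaled by 1/sqrt(head_dim).
    // Assumed head_dim = 256 (Gemma 2 2B/9B configs).
    const float scale_2b_9b = 1.0f / sqrtf(256.0f);          // = 1/16

    // 27B: queries are scaled by 1/sqrt(hidden_size / num_heads),
    // which differs from head_dim on this model.
    // Assumed hidden_size = 4608, num_heads = 32 (Gemma 2 27B config).
    const float scale_27b   = 1.0f / sqrtf(4608.0f / 32.0f); // = 1/12

    printf("2B/9B scale: %.6f  27B scale: %.6f\n", scale_2b_9b, scale_27b);
    return 0;
}

This is why the 2B case reuses the 9B branch of the switch rather than the 27B one: only the 27B's query_pre_attn_scalar diverges from its head dimension.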