fixes for gguf (#3863)

This commit is contained in:
Patrick Devine 2024-04-23 20:57:20 -07:00 committed by GitHub
parent ce8ce82567
commit 14476d48cc
No known key found for this signature in database
GPG key ID: B5690EEEBB952194

View file

@@ -190,8 +190,6 @@ func (llm *gguf) Decode(rs io.ReadSeeker) error {
 		llm.kv[k] = v
 	}
 
-	slog.Debug(fmt.Sprintf("general.architecture = %s", llm.kv["general.architecture"]))
-
 	// decode tensors
 	for i := 0; uint64(i) < llm.numTensor(); i++ {
 		name, err := readGGUFString(llm, rs)
@@ -465,11 +463,13 @@ var ggufKVOrder = map[string][]string{
 		"llama.embedding_length",
 		"llama.block_count",
 		"llama.feed_forward_length",
-		"llama.rope.dimension_count",
 		"llama.attention.head_count",
 		"llama.attention.head_count_kv",
 		"llama.attention.layer_norm_rms_epsilon",
 		"llama.rope.freq_base",
+		"llama.rope.dimension_count",
+		"llama.expert_count",
+		"llama.expert_used_count",
 		"gemma.context_length",
 		"gemma.embedding_length",
 		"gemma.block_count",
@@ -577,6 +577,8 @@ func (llm *gguf) Encode(ws io.WriteSeeker, kv KV, tensors []Tensor) error {
 				return err
 			}
 		}
+	default:
+		return fmt.Errorf("improper type for '%s'", k)
 	}
 
 	if err != nil {
 		return err
@@ -598,9 +600,11 @@ func (llm *gguf) Encode(ws io.WriteSeeker, kv KV, tensors []Tensor) error {
 			return err
 		}
 
-		dims := 1
-		if tensor.Shape[1] > 0 {
-			dims = 2
+		dims := 0
+		for cnt := 0; cnt < len(tensor.Shape); cnt++ {
+			if tensor.Shape[cnt] > 0 {
+				dims++
+			}
 		}
 
 		if err := binary.Write(ws, llm.ByteOrder, uint32(dims)); err != nil {