fix: quant err message (#5616)

Author: Josh, 2024-07-11 17:24:29 -07:00 (committed by GitHub)
parent c4cf8ad559
commit 10e768826c


@@ -33,7 +33,7 @@ func Quantize(infile, outfile string, ftype fileType) error {
 	params.ftype = ftype.Value()
 	if rc := C.llama_model_quantize(cinfile, coutfile, &params); rc != 0 {
-		return fmt.Errorf("llama_model_quantize: %d", rc)
+		return fmt.Errorf("failed to quantize model. This model architecture may not be supported, or you may need to upgrade Ollama to the latest version")
 	}
 	return nil
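
For context, a rough sketch of how the full Quantize wrapper could read after this change. Only the lines in the hunk above come from the commit; the package name, cgo preamble, the C.CString/C.free handling, and the llama_model_quantize_default_params call are assumptions about the surrounding code, and fileType is a type assumed to be defined elsewhere in the package. llama_model_quantize and llama_model_quantize_default_params are the standard llama.cpp C API calls.

package llm

/*
#include <stdlib.h>
#include "llama.h"
*/
import "C"

import (
	"fmt"
	"unsafe"
)

func Quantize(infile, outfile string, ftype fileType) error {
	// Convert the Go paths to C strings for the llama.cpp call.
	cinfile := C.CString(infile)
	defer C.free(unsafe.Pointer(cinfile))
	coutfile := C.CString(outfile)
	defer C.free(unsafe.Pointer(coutfile))

	// Start from llama.cpp's default quantization params, then set the
	// requested target file type, as in the hunk above.
	params := C.llama_model_quantize_default_params()
	params.ftype = ftype.Value()

	// A non-zero return code now surfaces as the user-facing message from
	// this commit instead of the raw "llama_model_quantize: %d" error.
	if rc := C.llama_model_quantize(cinfile, coutfile, &params); rc != 0 {
		return fmt.Errorf("failed to quantize model. This model architecture may not be supported, or you may need to upgrade Ollama to the latest version")
	}

	return nil
}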