diff --git a/llm/server.go b/llm/server.go
index 61346069..8b63cfbd 100644
--- a/llm/server.go
+++ b/llm/server.go
@@ -560,6 +560,9 @@ func (s *llmServer) WaitUntilRunning(ctx context.Context) error {
 			if s.status != nil && s.status.LastErrMsg != "" {
 				msg = s.status.LastErrMsg
 			}
+			if strings.Contains(msg, "unknown model") {
+				return fmt.Errorf("this model is not supported by your version of Ollama. You may need to upgrade")
+			}
 			return fmt.Errorf("llama runner process has terminated: %v %s", err, msg)
 		default:
 		}
diff --git a/llm/status.go b/llm/status.go
index 8a49bd55..0f56b7f9 100644
--- a/llm/status.go
+++ b/llm/status.go
@@ -25,6 +25,7 @@ var errorPrefixes = []string{
 	"CUDA error",
 	"cudaMalloc failed",
 	"\"ERR\"",
+	"architecture",
 }
 
 func (w *StatusWriter) Write(b []byte) (int, error) {