From 8be7d67f7e649196b63210408bd7bb54ef1cf791 Mon Sep 17 00:00:00 2001
From: bretello
Date: Mon, 24 Jul 2023 14:42:37 +0200
Subject: [PATCH] raise exception when `llama_load_model_from_file` fails

---
 llama_cpp/llama_cpp.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/llama_cpp/llama_cpp.py b/llama_cpp/llama_cpp.py
index eea26ac..949c6af 100644
--- a/llama_cpp/llama_cpp.py
+++ b/llama_cpp/llama_cpp.py
@@ -367,7 +367,10 @@ _lib.llama_backend_free.restype = None
 def llama_load_model_from_file(
     path_model: bytes, params: llama_context_params
 ) -> llama_model_p:
-    return _lib.llama_load_model_from_file(path_model, params)
+    result = _lib.llama_load_model_from_file(path_model, params)
+    if result is None:
+        raise Exception(f"Failed to load model from {path_model}")
+    return result
 
 
 _lib.llama_load_model_from_file.argtypes = [c_char_p, llama_context_params]