From 401309d11c3eccc1ef491e37dd0cb454874b455f Mon Sep 17 00:00:00 2001
From: Andrei Betlen
Date: Mon, 24 Jul 2023 13:11:10 -0400
Subject: [PATCH] Revert "Merge pull request #521 from bretello/main"

This reverts commit 07f0f3a3860aca25682d1088f0da93b4a894fd1d, reversing
changes made to d8a3ddbb1cf4d3a9051f778351caf44550b9caed.
---
 llama_cpp/llama_cpp.py | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/llama_cpp/llama_cpp.py b/llama_cpp/llama_cpp.py
index 87a3b9a..423a4a0 100644
--- a/llama_cpp/llama_cpp.py
+++ b/llama_cpp/llama_cpp.py
@@ -423,10 +423,7 @@ _lib.llama_backend_free.restype = None
 def llama_load_model_from_file(
     path_model: bytes, params: llama_context_params
 ) -> llama_model_p:
-    result = _lib.llama_load_model_from_file(path_model, params)
-    if result is None:
-        raise Exception(f"Failed to load model from {path_model}")
-    return result
+    return _lib.llama_load_model_from_file(path_model, params)
 
 
 _lib.llama_load_model_from_file.argtypes = [c_char_p, llama_context_params]
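
With the NULL check reverted, llama_load_model_from_file passes back whatever the C library returns, so detecting a failed load becomes the caller's responsibility. Below is a minimal caller-side sketch, not part of the patch: the model path is hypothetical, and the surrounding llama_backend_init / llama_free_model / llama_backend_free calls are assumed to be present in this version of llama_cpp.py.

    from llama_cpp import llama_cpp

    # One-time backend setup; numa=False is just an assumption for this sketch.
    llama_cpp.llama_backend_init(False)

    params = llama_cpp.llama_context_default_params()
    path_model = b"./models/7B/ggml-model.bin"  # hypothetical path

    model = llama_cpp.llama_load_model_from_file(path_model, params)
    if model is None:
        # After this revert a failed load surfaces as a NULL (None) pointer
        # rather than an exception, so the caller must check it explicitly.
        raise RuntimeError(f"Failed to load model from {path_model!r}")

    # ... use the model ...
    llama_cpp.llama_free_model(model)
    llama_cpp.llama_backend_free()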