From 7b136bb5b1fa8d1505724dc1a5a41e780f785b9f Mon Sep 17 00:00:00 2001
From: Sujeendran Menon <30560413+sujeendran@users.noreply.github.com>
Date: Thu, 2 Nov 2023 04:25:57 +0530
Subject: [PATCH] Fix for shared library not found and compile issues in Windows (#848)

* fix windows library dll name issue

* Updated README.md Windows instructions

* Update llama_cpp.py to handle different windows dll file versions
---
 README.md              | 6 ++++++
 llama_cpp/llama_cpp.py | 1 +
 2 files changed, 7 insertions(+)

diff --git a/README.md b/README.md
index 1881af6..aee2d8a 100644
--- a/README.md
+++ b/README.md
@@ -92,6 +92,12 @@ Then, call `pip` after setting the variables:
 pip install llama-cpp-python
 ```
 
+If you run into issues where the build complains that it can't find `'nmake'`, `'?'`, or `CMAKE_C_COMPILER`, you can extract w64devkit as [mentioned in the llama.cpp repo](https://github.com/ggerganov/llama.cpp#openblas) and add those paths manually to CMAKE_ARGS before running `pip install`:
+```ps
+$env:CMAKE_GENERATOR = "MinGW Makefiles"
+$env:CMAKE_ARGS = "-DLLAMA_OPENBLAS=on -DCMAKE_C_COMPILER=C:/w64devkit/bin/gcc.exe -DCMAKE_CXX_COMPILER=C:/w64devkit/bin/g++.exe"
+```
+
 See the above instructions and set `CMAKE_ARGS` to the BLAS backend you want to use.
 
 #### MacOS remarks
diff --git a/llama_cpp/llama_cpp.py b/llama_cpp/llama_cpp.py
index 594c57b..ba4e26b 100644
--- a/llama_cpp/llama_cpp.py
+++ b/llama_cpp/llama_cpp.py
@@ -42,6 +42,7 @@ def _load_shared_library(lib_base_name: str):
     elif sys.platform == "win32":
         _lib_paths += [
             _base_path / f"{lib_base_name}.dll",
+            _base_path / f"lib{lib_base_name}.dll",
         ]
     else:
         raise RuntimeError("Unsupported platform")