diff --git a/.gitignore b/.gitignore
index d07b694..d09b209 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,5 @@
+_skbuild/
+
 .envrc
 
 models/
diff --git a/CMakeLists.txt b/CMakeLists.txt
new file mode 100644
index 0000000..1f7a9fe
--- /dev/null
+++ b/CMakeLists.txt
@@ -0,0 +1,12 @@
+cmake_minimum_required(VERSION 3.4...3.22)
+
+project(llama_cpp)
+
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC")
+set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fPIC")
+set(LLAMA_STATIC "Off")
+set(BUILD_SHARED_LIBS "On")
+
+add_subdirectory(vendor/llama.cpp)
+
+install(TARGETS llama LIBRARY DESTINATION llama_cpp)
\ No newline at end of file
diff --git a/llama_cpp/llama_cpp.py b/llama_cpp/llama_cpp.py
index e64eb77..e53c704 100644
--- a/llama_cpp/llama_cpp.py
+++ b/llama_cpp/llama_cpp.py
@@ -5,7 +5,7 @@ from ctypes import c_int, c_float, c_double, c_char_p, c_void_p, c_bool, POINTER
 import pathlib
 
 # Load the library
-libfile = pathlib.Path(__file__).parent.parent / "libllama.so"
+libfile = pathlib.Path(__file__).parent / "libllama.so"
 lib = ctypes.CDLL(str(libfile))
 
 
diff --git a/pyproject.toml b/pyproject.toml
index 94dc13f..2fece07 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -20,5 +20,10 @@ python = "^3.8.1"
 black = "^23.1.0"
 
 [build-system]
-requires = ["poetry-core"]
-build-backend = "poetry.core.masonry.api"
+requires = [
+    "setuptools>=42",
+    "scikit-build>=0.13",
+    "cmake>=3.18",
+    "ninja",
+]
+build-backend = "setuptools.build_meta"
diff --git a/setup.py b/setup.py
index fa6a013..b52a10f 100644
--- a/setup.py
+++ b/setup.py
@@ -1,42 +1,11 @@
-import os
-import subprocess
-from setuptools import setup, Extension
-
-from distutils.command.build_ext import build_ext
-
-
-class build_ext_custom(build_ext):
-    def run(self):
-        build_dir = os.path.join(os.getcwd(), "build")
-        src_dir = os.path.join(os.getcwd(), "vendor", "llama.cpp")
-
-        os.makedirs(build_dir, exist_ok=True)
-
-        cmake_flags = [
-            "-DLLAMA_STATIC=Off",
-            "-DBUILD_SHARED_LIBS=On",
-            "-DCMAKE_CXX_FLAGS=-fPIC",
-            "-DCMAKE_C_FLAGS=-fPIC",
-        ]
-        subprocess.check_call(["cmake", src_dir, *cmake_flags], cwd=build_dir)
-        subprocess.check_call(["cmake", "--build", "."], cwd=build_dir)
-
-        # Move the shared library to the root directory
-        lib_path = os.path.join(build_dir, "libllama.so")
-        target_path = os.path.join(os.getcwd(), "libllama.so")
-        os.rename(lib_path, target_path)
-
+from skbuild import setup
 
 setup(
     name="llama_cpp",
     description="A Python wrapper for llama.cpp",
-    version="0.0.1",
+    version="0.1.1",
     author="Andrei Betlen",
     author_email="abetlen@gmail.com",
     license="MIT",
-    py_modules=["llama_cpp"],
-    ext_modules=[
-        Extension("libllama", ["vendor/llama.cpp"]),
-    ],
-    cmdclass={"build_ext": build_ext_custom},
+    packages=['llama_cpp'],
 )
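
Note (not part of the patch, just an illustrative sketch): the reason the ctypes path in llama_cpp/llama_cpp.py drops one ".parent" is the new install(TARGETS llama LIBRARY DESTINATION llama_cpp) rule, which has scikit-build place libllama.so inside the installed llama_cpp package rather than at the project root. Assuming the package has been installed (for example via "pip install ." with the vendor/llama.cpp sources checked out) and that the platform produces a library named libllama.so as in the diff, the resulting layout can be checked like this:

    import ctypes
    import importlib.util
    import pathlib

    # Find the directory of the installed llama_cpp package.
    spec = importlib.util.find_spec("llama_cpp")
    package_dir = pathlib.Path(spec.origin).parent

    # The CMake install() rule places the shared library next to llama_cpp.py,
    # which is why the module now resolves it with .parent instead of .parent.parent.
    lib = ctypes.CDLL(str(package_dir / "libllama.so"))
    print("loaded", lib)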