# Makefile for llama-cpp-python — builds the package against the vendored
# llama.cpp submodule with various optional GPU/BLAS backends.
# Install Python dependencies and sync the vendored llama.cpp submodule.
update:
	poetry install
	git submodule update --init --recursive

# Pull the latest vendored llama.cpp from upstream master.
update.vendor:
	cd vendor/llama.cpp && git pull origin master

# Upgrade pip and install this package (editable) with every optional extra.
deps:
	python3 -m pip install --upgrade pip
	python3 -m pip install -e ".[all]"
# Default editable build (CPU backend).
build:
	python3 -m pip install --verbose -e .

# Debug build: keep symbols, verbose CMake and scikit-build output.
build.debug:
	CMAKE_ARGS="-DCMAKE_BUILD_TYPE=Debug" python3 -m pip install --verbose --config-settings=cmake.verbose=true --config-settings=logging.level=INFO --config-settings=install.strip=false --editable .

# Backend-specific builds: each one forwards the matching llama.cpp
# CMake option through CMAKE_ARGS to the pip/scikit-build invocation.
build.cuda:
	CMAKE_ARGS="-DLLAMA_CUBLAS=on" python3 -m pip install --verbose -e .

build.opencl:
	CMAKE_ARGS="-DLLAMA_CLBLAST=on" python3 -m pip install --verbose -e .

build.openblas:
	CMAKE_ARGS="-DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS" python3 -m pip install --verbose -e .

build.blis:
	CMAKE_ARGS="-DLLAMA_BLAS=on -DLLAMA_BLAS_VENDOR=FLAME" python3 -m pip install --verbose -e .

build.metal:
	CMAKE_ARGS="-DLLAMA_METAL=on" python3 -m pip install --verbose -e .

build.vulkan:
	CMAKE_ARGS="-DLLAMA_VULKAN=on" python3 -m pip install --verbose -e .

build.kompute:
	CMAKE_ARGS="-DLLAMA_KOMPUTE=on" python3 -m pip install --verbose -e .

build.sycl:
	CMAKE_ARGS="-DLLAMA_SYCL=on" python3 -m pip install --verbose -e .
# Build a source distribution into dist/.
build.sdist:
	python3 -m build --sdist

# Upload everything in dist/ to PyPI (requires twine credentials).
deploy.pypi:
	python3 -m twine upload dist/*

# Build the mkdocs site and publish it to GitHub Pages.
deploy.gh-docs:
	mkdocs build
	mkdocs gh-deploy
# Run the pytest suite.
test:
	python3 -m pytest

# Build the simple Docker image for the package.
docker:
	docker build -t llama-cpp-python:latest -f docker/simple/Dockerfile .

# Run the API server via uvicorn. HOST and PORT must be supplied,
# e.g. `make run-server HOST=127.0.0.1 PORT=8000` — there are no defaults.
run-server:
	uvicorn --factory llama.server:app --host ${HOST} --port ${PORT}
# Remove build artifacts. Steps are best-effort: the recursive clean is
# `-`-prefixed in case the vendor tree has no Makefile yet, recursive make
# uses $(MAKE) so -n/-j propagate, and `rm -f` tolerates missing files.
clean:
	-$(MAKE) -C vendor/llama.cpp clean
	rm -f vendor/llama.cpp/libllama.so
	rm -rf _skbuild
	rm -f llama_cpp/*.so
	rm -f llama_cpp/*.dylib
	rm -f llama_cpp/*.metal
	rm -f llama_cpp/*.dll
	rm -f llama_cpp/*.lib
# Every target in this file is a command, not a file — declare them all
# phony so a stray file with the same name never makes one a no-op.
# (Previously deps, build.debug, build.blis, build.metal, build.vulkan,
# build.kompute, build.sycl, test and run-server were missing.)
.PHONY: \
	update \
	update.vendor \
	deps \
	build \
	build.debug \
	build.cuda \
	build.opencl \
	build.openblas \
	build.blis \
	build.metal \
	build.vulkan \
	build.kompute \
	build.sycl \
	build.sdist \
	deploy.pypi \
	deploy.gh-docs \
	test \
	docker \
	run-server \
	clean