# llama.cpp/Makefile

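# Typical usage (assumes GNU make and a POSIX shell):
#   make deps && make build      # editable CPU-only install
#   make build.cuda              # reinstall with the CUDA backend enabled

# Install dev dependencies via Poetry and sync the vendored llama.cpp submodule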
update:
	poetry install
	git submodule update --init --recursive

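# Pull the latest upstream llama.cpp into the vendor checkout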
update.vendor:
	cd vendor/llama.cpp && git pull origin master

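# Upgrade pip and install the package with all optional extras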
deps:
	python3 -m pip install --upgrade pip
	python3 -m pip install -e ".[all]"

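# Editable install; llama.cpp is compiled with its default (CPU) CMake settings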
build:
	python3 -m pip install --verbose -e .

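# Debug build: verbose CMake output, unstripped binaries, -ggdb -O0 for C and C++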
build.debug:
	python3 -m pip install \
		--verbose \
		--config-settings=cmake.verbose=true \
		--config-settings=logging.level=INFO \
		--config-settings=install.strip=false \
		--config-settings=cmake.args="-DCMAKE_BUILD_TYPE=Debug;-DCMAKE_C_FLAGS='-ggdb -O0';-DCMAKE_CXX_FLAGS='-ggdb -O0'" \
		--editable .

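# Backend-specific builds: each target below reinstalls the package with the
# corresponding llama.cpp CMake option. CMAKE_ARGS is read by the build
# backend (presumably scikit-build-core) and forwarded to CMake, so the same
# pattern works for any -DLLAMA_* flag.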
build.cuda:
	CMAKE_ARGS="-DLLAMA_CUDA=on" python3 -m pip install --verbose -e .

build.opencl:
	CMAKE_ARGS="-DLLAMA_CLBLAST=on" python3 -m pip install --verbose -e .

build.openblas:
	CMAKE_ARGS="-DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS" python3 -m pip install --verbose -e .

build.blis:
	CMAKE_ARGS="-DLLAMA_BLAS=on -DLLAMA_BLAS_VENDOR=FLAME" python3 -m pip install --verbose -e .

build.metal:
	CMAKE_ARGS="-DLLAMA_METAL=on" python3 -m pip install --verbose -e .

build.vulkan:
	CMAKE_ARGS="-DLLAMA_VULKAN=on" python3 -m pip install --verbose -e .

build.kompute:
	CMAKE_ARGS="-DLLAMA_KOMPUTE=on" python3 -m pip install --verbose -e .

build.sycl:
	CMAKE_ARGS="-DLLAMA_SYCL=on" python3 -m pip install --verbose -e .

build.rpc:
	CMAKE_ARGS="-DLLAMA_RPC=on" python3 -m pip install --verbose -e .

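# Build a source distribution with PyPA build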
build.sdist:
	python3 -m build --sdist

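# Upload the contents of dist/ to PyPI via twine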
deploy.pypi:
	python3 -m twine upload dist/*

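# Build the MkDocs site and publish it to GitHub Pages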
deploy.gh-docs:
	mkdocs build
	mkdocs gh-deploy

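# Run the test suite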
test:
	python3 -m pytest

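# Build the example Docker image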
docker:
	docker build -t llama-cpp-python:latest -f docker/simple/Dockerfile .

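# Launch the API server with uvicorn; HOST and PORT must be supplied,
# e.g. `make run-server HOST=127.0.0.1 PORT=8000` (example values)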
run-server:
	uvicorn --factory llama_cpp.server.app:create_app --host ${HOST} --port ${PORT}

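# Remove build artifacts; the leading '-' tells make to keep going
# even if a step fails (e.g. the file is already gone)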
clean:
	- cd vendor/llama.cpp && make clean
	- cd vendor/llama.cpp && rm libllama.so
	- rm -rf _skbuild
	- rm llama_cpp/*.so
	- rm llama_cpp/*.dylib
	- rm llama_cpp/*.metal
	- rm llama_cpp/*.dll
	- rm llama_cpp/*.lib

.PHONY: \
	update \
	update.vendor \
	deps \
	build \
	build.debug \
	build.cuda \
	build.opencl \
	build.openblas \
	build.blis \
	build.metal \
	build.vulkan \
	build.kompute \
	build.sycl \
	build.rpc \
	build.sdist \
	deploy.pypi \
	deploy.gh-docs \
	test \
	docker \
	run-server \
	clean