From 1dd3f473c09399e116d40cd843541d6225dbafb6 Mon Sep 17 00:00:00 2001
From: Andrei Betlen
Date: Tue, 12 Sep 2023 19:01:16 -0400
Subject: [PATCH] Remove references to FORCE_CMAKE

---
 Makefile                      | 10 +++++-----
 README.md                     | 11 +++++------
 docker/cuda_simple/Dockerfile |  2 +-
 docs/install/macos.md         |  2 +-
 4 files changed, 12 insertions(+), 13 deletions(-)

diff --git a/Makefile b/Makefile
index 559853b..e293b2d 100644
--- a/Makefile
+++ b/Makefile
@@ -13,19 +13,19 @@ build:
 	python3 -m pip install -e .
 
 build.cuda:
-	CMAKE_ARGS="-DLLAMA_CUBLAS=on" FORCE_CMAKE=1 python3 -m pip install -e .
+	CMAKE_ARGS="-DLLAMA_CUBLAS=on" python3 -m pip install -e .
 
 build.opencl:
-	CMAKE_ARGS="-DLLAMA_CLBLAST=on" FORCE_CMAKE=1 python3 -m pip install -e .
+	CMAKE_ARGS="-DLLAMA_CLBLAST=on" python3 -m pip install -e .
 
 build.openblas:
-	CMAKE_ARGS="-DLLAMA_CLBLAST=on" FORCE_CMAKE=1 python3 -m pip install -e .
+	CMAKE_ARGS="-DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS" python3 -m pip install -e .
 
 build.blis:
-	CMAKE_ARGS="-DLLAMA_OPENBLAS=on -DLLAMA_OPENBLAS_VENDOR=blis" FORCE_CMAKE=1 python3 -m pip install -e .
+	CMAKE_ARGS="-DLLAMA_OPENBLAS=on -DLLAMA_OPENBLAS_VENDOR=blis" python3 -m pip install -e .
 
 build.metal:
-	CMAKE_ARGS="-DLLAMA_METAL=on" FORCE_CMAKE=1 python3 -m pip install -e .
+	CMAKE_ARGS="-DLLAMA_METAL=on" python3 -m pip install -e .
 
 build.sdist:
 	python3 -m build --sdist

diff --git a/README.md b/README.md
index 10c02af..3d4a5bc 100644
--- a/README.md
+++ b/README.md
@@ -48,36 +48,35 @@ Otherwise, while installing it will build the llama.cpp x86 version which will b
 ### Installation with Hardware Acceleration
 
 `llama.cpp` supports multiple BLAS backends for faster processing.
-Use the `FORCE_CMAKE=1` environment variable to force the use of `cmake` and install the pip package for the desired BLAS backend.
 
 To install with OpenBLAS, set the `LLAMA_BLAS` and `LLAMA_BLAS_VENDOR` CMake options before installing:
 
 ```bash
-CMAKE_ARGS="-DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS" FORCE_CMAKE=1 pip install llama-cpp-python
+CMAKE_ARGS="-DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS" pip install llama-cpp-python
 ```
 
 To install with cuBLAS, set the `LLAMA_CUBLAS=on` CMake option before installing:
 
 ```bash
-CMAKE_ARGS="-DLLAMA_CUBLAS=on" FORCE_CMAKE=1 pip install llama-cpp-python
+CMAKE_ARGS="-DLLAMA_CUBLAS=on" pip install llama-cpp-python
 ```
 
 To install with CLBlast, set the `LLAMA_CLBLAST=on` CMake option before installing:
 
 ```bash
-CMAKE_ARGS="-DLLAMA_CLBLAST=on" FORCE_CMAKE=1 pip install llama-cpp-python
+CMAKE_ARGS="-DLLAMA_CLBLAST=on" pip install llama-cpp-python
 ```
 
 To install with Metal (MPS), set the `LLAMA_METAL=on` CMake option before installing:
 
 ```bash
-CMAKE_ARGS="-DLLAMA_METAL=on" FORCE_CMAKE=1 pip install llama-cpp-python
+CMAKE_ARGS="-DLLAMA_METAL=on" pip install llama-cpp-python
 ```
 
 To install with hipBLAS / ROCm support for AMD cards, set the `LLAMA_HIPBLAS=on` CMake option before installing:
 
 ```bash
-CMAKE_ARGS="-DLLAMA_HIPBLAS=on" FORCE_CMAKE=1 pip install llama-cpp-python
+CMAKE_ARGS="-DLLAMA_HIPBLAS=on" pip install llama-cpp-python
 ```
 
 #### Windows remarks

diff --git a/docker/cuda_simple/Dockerfile b/docker/cuda_simple/Dockerfile
index e5aaf17..51350af 100644
--- a/docker/cuda_simple/Dockerfile
+++ b/docker/cuda_simple/Dockerfile
@@ -21,7 +21,7 @@ ENV LLAMA_CUBLAS=1
 RUN python3 -m pip install --upgrade pip pytest cmake scikit-build setuptools fastapi uvicorn sse-starlette pydantic-settings
 
 # Install llama-cpp-python (build with CUDA)
-RUN CMAKE_ARGS="-DLLAMA_CUBLAS=on" FORCE_CMAKE=1 pip install llama-cpp-python
+RUN CMAKE_ARGS="-DLLAMA_CUBLAS=on" pip install llama-cpp-python
 
 # Run the server
 CMD python3 -m llama_cpp.server

diff --git a/docs/install/macos.md b/docs/install/macos.md
index 3330396..8f0e9db 100644
--- a/docs/install/macos.md
+++ b/docs/install/macos.md
@@ -30,7 +30,7 @@ conda activate llama
 *(you need Xcode installed in order for pip to build/compile the C++ code)*
 ```
 pip uninstall llama-cpp-python -y
-CMAKE_ARGS="-DLLAMA_METAL=on" FORCE_CMAKE=1 pip install -U llama-cpp-python --no-cache-dir
+CMAKE_ARGS="-DLLAMA_METAL=on" pip install -U llama-cpp-python --no-cache-dir
 pip install 'llama-cpp-python[server]'
 # you should now have llama-cpp-python v0.1.62 or higher installed
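
A minimal smoke test for the new invocation style (a sketch, not part of the diff above; Metal is used only as an example backend, and any of the `CMAKE_ARGS` values from the README hunk can be substituted):

```bash
# Build against a chosen backend; with this patch applied, CMAKE_ARGS alone
# selects the backend and the FORCE_CMAKE=1 prefix is no longer needed.
CMAKE_ARGS="-DLLAMA_METAL=on" pip install --no-cache-dir llama-cpp-python

# Verify: pip reports the installed version, and the import confirms that
# the compiled extension loads.
pip show llama-cpp-python
python3 -c "import llama_cpp"
```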