Remove references to FORCE_CMAKE

parent 6e89775759
commit 1dd3f473c0

4 changed files with 12 additions and 13 deletions
Makefile | 10 +++++-----

@@ -13,19 +13,19 @@ build:
 	python3 -m pip install -e .
 
 build.cuda:
-	CMAKE_ARGS="-DLLAMA_CUBLAS=on" FORCE_CMAKE=1 python3 -m pip install -e .
+	CMAKE_ARGS="-DLLAMA_CUBLAS=on" python3 -m pip install -e .
 
 build.opencl:
-	CMAKE_ARGS="-DLLAMA_CLBLAST=on" FORCE_CMAKE=1 python3 -m pip install -e .
+	CMAKE_ARGS="-DLLAMA_CLBLAST=on" python3 -m pip install -e .
 
 build.openblas:
-	CMAKE_ARGS="-DLLAMA_CLBLAST=on" FORCE_CMAKE=1 python3 -m pip install -e .
+	CMAKE_ARGS="-DLLAMA_CLBLAST=on" python3 -m pip install -e .
 
 build.blis:
-	CMAKE_ARGS="-DLLAMA_OPENBLAS=on -DLLAMA_OPENBLAS_VENDOR=blis" FORCE_CMAKE=1 python3 -m pip install -e .
+	CMAKE_ARGS="-DLLAMA_OPENBLAS=on -DLLAMA_OPENBLAS_VENDOR=blis" python3 -m pip install -e .
 
 build.metal:
-	CMAKE_ARGS="-DLLAMA_METAL=on" FORCE_CMAKE=1 python3 -m pip install -e .
+	CMAKE_ARGS="-DLLAMA_METAL=on" python3 -m pip install -e .
 
 build.sdist:
 	python3 -m build --sdist
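These targets are invoked in the usual way for Make; a minimal sketch, assuming a checkout of the repository root with a working C/C++ toolchain (and the CUDA toolkit present for `build.cuda`):

```bash
# Plain CPU build: editable install into the active Python environment
make build

# CUDA-accelerated build; the recipe forwards CMAKE_ARGS to cmake
make build.cuda
```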
README.md | 11 +++++------

@@ -48,36 +48,35 @@ Otherwise, while installing it will build the llama.ccp x86 version which will b
 ### Installation with Hardware Acceleration
 
 `llama.cpp` supports multiple BLAS backends for faster processing.
-Use the `FORCE_CMAKE=1` environment variable to force the use of `cmake` and install the pip package for the desired BLAS backend.
 
 To install with OpenBLAS, set the `LLAMA_BLAS and LLAMA_BLAS_VENDOR` environment variables before installing:
 
 ```bash
-CMAKE_ARGS="-DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS" FORCE_CMAKE=1 pip install llama-cpp-python
+CMAKE_ARGS="-DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS" pip install llama-cpp-python
 ```
 
 To install with cuBLAS, set the `LLAMA_CUBLAS=1` environment variable before installing:
 
 ```bash
-CMAKE_ARGS="-DLLAMA_CUBLAS=on" FORCE_CMAKE=1 pip install llama-cpp-python
+CMAKE_ARGS="-DLLAMA_CUBLAS=on" pip install llama-cpp-python
 ```
 
 To install with CLBlast, set the `LLAMA_CLBLAST=1` environment variable before installing:
 
 ```bash
-CMAKE_ARGS="-DLLAMA_CLBLAST=on" FORCE_CMAKE=1 pip install llama-cpp-python
+CMAKE_ARGS="-DLLAMA_CLBLAST=on" pip install llama-cpp-python
 ```
 
 To install with Metal (MPS), set the `LLAMA_METAL=on` environment variable before installing:
 
 ```bash
-CMAKE_ARGS="-DLLAMA_METAL=on" FORCE_CMAKE=1 pip install llama-cpp-python
+CMAKE_ARGS="-DLLAMA_METAL=on" pip install llama-cpp-python
 ```
 
 To install with hipBLAS / ROCm support for AMD cards, set the `LLAMA_HIPBLAS=on` environment variable before installing:
 
 ```bash
-CMAKE_ARGS="-DLLAMA_HIPBLAS=on" FORCE_CMAKE=1 pip install llama-cpp-python
+CMAKE_ARGS="-DLLAMA_HIPBLAS=on" pip install llama-cpp-python
 ```
 
 #### Windows remarks
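If a CPU-only wheel is already installed, pip may reuse it instead of rebuilding with the new `CMAKE_ARGS`. A minimal sketch of forcing a source rebuild, shown with the cuBLAS backend as the example (the flags are standard pip options):

```bash
# Force a fresh source build so the CMAKE_ARGS take effect,
# bypassing any previously cached wheel
CMAKE_ARGS="-DLLAMA_CUBLAS=on" pip install llama-cpp-python \
    --upgrade --force-reinstall --no-cache-dir
```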
Dockerfile

@@ -21,7 +21,7 @@ ENV LLAMA_CUBLAS=1
 RUN python3 -m pip install --upgrade pip pytest cmake scikit-build setuptools fastapi uvicorn sse-starlette pydantic-settings
 
 # Install llama-cpp-python (build with cuda)
-RUN CMAKE_ARGS="-DLLAMA_CUBLAS=on" FORCE_CMAKE=1 pip install llama-cpp-python
+RUN CMAKE_ARGS="-DLLAMA_CUBLAS=on" pip install llama-cpp-python
 
 # Run the server
 CMD python3 -m llama_cpp.server
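The image is built and run with the standard Docker workflow; a sketch, where the tag, mount path, and model filename are illustrative assumptions, and `--gpus all` requires the NVIDIA Container Toolkit on the host:

```bash
# Build the CUDA image from the directory containing this Dockerfile
docker build -t llama-cpp-server-cuda .

# Run with GPU access; a model still has to be supplied, here assumed to be
# mounted at /models and pointed to via the server's MODEL setting
docker run --gpus all -p 8000:8000 \
    -v /path/to/models:/models -e MODEL=/models/model.gguf \
    llama-cpp-server-cuda
```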
@@ -30,7 +30,7 @@ conda activate llama
 *(you needed xcode installed in order pip to build/compile the C++ code)*
 ```
 pip uninstall llama-cpp-python -y
-CMAKE_ARGS="-DLLAMA_METAL=on" FORCE_CMAKE=1 pip install -U llama-cpp-python --no-cache-dir
+CMAKE_ARGS="-DLLAMA_METAL=on" pip install -U llama-cpp-python --no-cache-dir
 pip install 'llama-cpp-python[server]'
 
 # you should now have llama-cpp-python v0.1.62 or higher installed
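A quick way to confirm the version floor mentioned in that comment, using standard pip tooling:

```bash
# Show the installed llama-cpp-python version; it should be 0.1.62 or newer
python3 -m pip show llama-cpp-python | grep -i '^version'
```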