diff --git a/Makefile b/Makefile
index d8fb0cc..90f562f 100644
--- a/Makefile
+++ b/Makefile
@@ -24,9 +24,6 @@ build.debug:
 build.cuda:
 	CMAKE_ARGS="-DLLAMA_CUDA=on" python3 -m pip install --verbose -e .
 
-build.opencl:
-	CMAKE_ARGS="-DLLAMA_CLBLAST=on" python3 -m pip install --verbose -e .
-
 build.openblas:
 	CMAKE_ARGS="-DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS" python3 -m pip install --verbose -e .
 
diff --git a/README.md b/README.md
index 0f7abfb..4c37ba3 100644
--- a/README.md
+++ b/README.md
@@ -165,17 +165,6 @@ pip install llama-cpp-python \
   --extra-index-url https://abetlen.github.io/llama-cpp-python/whl/metal
 ```
 
-
-
-
-CLBlast (OpenCL)
-
-To install with CLBlast, set the `LLAMA_CLBLAST=on` environment variable before installing:
-
-```bash
-CMAKE_ARGS="-DLLAMA_CLBLAST=on" pip install llama-cpp-python
-```
-