diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml
index 6731f8b..69e571d 100644
--- a/.github/workflows/test.yaml
+++ b/.github/workflows/test.yaml
@@ -73,7 +73,7 @@ jobs:
     - name: Install dependencies
      run: |
        python3 -m pip install --upgrade pip
-        python3 -m pip install .[all] --verbose --config-settings=cmake.verbose=true --config-settings=logging.level=INFO
+        CMAKE_ARGS="-DLLAMA_AVX=OFF -DLLAMA_AVX2=OFF -DLLAMA_AVX512=OFF -DLLAMA_AVX512_VBMI=OFF -DLLAMA_AVX512_VNNI=OFF -DLLAMA_FMA=OFF -DLLAMA_F16C=OFF -DLLAMA_ACCELERATE=OFF -DLLAMA_METAL=OFF" python3 -m pip install .[all] --verbose --config-settings=cmake.verbose=true --config-settings=logging.level=INFO
     - name: Test with pytest
      run: |
        python3 -m pytest
\ No newline at end of file
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 2ed552c..2b50da3 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -4,17 +4,6 @@ project(llama_cpp)
 
 option(BUILD_LLAMA_CPP "Build llama.cpp shared library and install alongside python package" ON)
 
-if (APPLE)
-    set(LLAMA_AVX OFF)
-    set(LLAMA_AVX2 OFF)
-    set(LLAMA_AVX512 OFF)
-    set(LLAMA_AVX512_VBMI OFF)
-    set(LLAMA_AVX512_VNNI OFF)
-    set(LLAMA_FMA OFF)
-    set(LLAMA_F16C OFF)
-    set(LLAMA_ACCELERATE OFF)
-    set(LLAMA_METAL OFF)
-endif()
 
 if (BUILD_LLAMA_CPP)
     set(BUILD_SHARED_LIBS "On")