f8ef4439e9
The build tags rocm or cuda must be specified to both go generate and go build. ROCm builds require ROCM_PATH to be set (and the ROCm SDK to be present), as well as CLBlast installed (for GGML) and CLBlast_DIR set in the environment to the CLBlast cmake directory (likely /usr/lib/cmake/CLBlast). Build tags are also used to switch VRAM detection between the cuda and rocm implementations, via added "accelerator_foo.go" files which contain architecture-specific functions and variables. accelerator_none is used when no tags are set, and the helper function addRunner will ignore it if it is the chosen accelerator. Fix go generate commands; thanks @deadmeu for testing.
24 lines
1.3 KiB
Go
24 lines
1.3 KiB
Go
//go:build cuda
|
|
|
|
package llm
|
|
|
|
//go:generate git submodule init
|
|
|
|
//go:generate git submodule update --force ggml
|
|
//go:generate git -C ggml apply ../patches/0001-add-detokenize-endpoint.patch
|
|
//go:generate git -C ggml apply ../patches/0002-34B-model-support.patch
|
|
//go:generate git -C ggml apply ../patches/0005-ggml-support-CUDA-s-half-type-for-aarch64-1455-2670.patch
|
|
//go:generate git -C ggml apply ../patches/0001-copy-cuda-runtime-libraries.patch
|
|
|
|
//go:generate rm -rf ggml/build/cuda
|
|
//go:generate cmake -S ggml -B ggml/build/cuda -DLLAMA_CUBLAS=on -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on
|
|
//go:generate cmake --build ggml/build/cuda --target server --config Release
|
|
//go:generate mv ggml/build/cuda/bin/server ggml/build/cuda/bin/ollama-runner
|
|
|
|
//go:generate git -C gguf apply ../patches/0001-copy-cuda-runtime-libraries.patch
|
|
//go:generate git -C gguf apply ../patches/0001-update-default-log-target.patch
|
|
|
|
//go:generate rm -rf gguf/build/cuda
|
|
//go:generate cmake -S gguf -B gguf/build/cuda -DLLAMA_CUBLAS=on -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on -DLLAMA_NATIVE=off -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off
|
|
//go:generate cmake --build gguf/build/cuda --target server --config Release
|
|
//go:generate mv gguf/build/cuda/bin/server gguf/build/cuda/bin/ollama-runner
|