package llm
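
// Sync the vendored gguf (llama.cpp) submodule and apply the local patches in
// ../patches before building.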
//go:generate git submodule init
//go:generate git submodule update --force gguf
//go:generate git -C gguf apply ../patches/0001-copy-cuda-runtime-libraries.patch
//go:generate git -C gguf apply ../patches/0001-update-default-log-target.patch
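
// CPU runner: configure and build the llama.cpp server target with K-quants
// enabled and CPU features pinned for portability (AVX on; AVX2, AVX512, FMA,
// and F16C off), then rename the binary to ollama-runner.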
//go:generate cmake -S gguf -B gguf/build/cpu -DLLAMA_K_QUANTS=on -DLLAMA_NATIVE=off -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off
//go:generate cmake --build gguf/build/cpu --target server --config Release
//go:generate mv gguf/build/cpu/bin/server gguf/build/cpu/bin/ollama-runner
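
// CUDA runner: the same configuration with cuBLAS and Accelerate enabled and
// LLAMA_CUDA_PEER_MAX_BATCH_SIZE set to 0, built and renamed the same way.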
//go:generate cmake -S gguf -B gguf/build/cuda -DLLAMA_CUBLAS=on -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on -DLLAMA_NATIVE=off -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off -DLLAMA_CUDA_PEER_MAX_BATCH_SIZE=0
//go:generate cmake --build gguf/build/cuda --target server --config Release
//go:generate mv gguf/build/cuda/bin/server gguf/build/cuda/bin/ollama-runner
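
// Note: these directives run via `go generate` (for example, `go generate ./...`
// from the repository root); git, cmake, and the CUDA toolkit are assumed to be
// available on PATH.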