windows CUDA support (#1262)
* Support CUDA build in Windows
* Enable dynamic NumGPU allocation for Windows
parent 12e8c12d2b
commit 82b9b329ff

3 changed files with 11 additions and 1 deletion
.gitignore (vendored)
@@ -7,3 +7,4 @@ dist
 ollama
 ggml-metal.metal
 .cache
+*.exe

@@ -14,3 +14,11 @@ package llm
 //go:generate cmake -S gguf -B gguf/build/cpu -DLLAMA_K_QUANTS=on -DLLAMA_NATIVE=off -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off
 //go:generate cmake --build gguf/build/cpu --target server --config Release
 //go:generate cmd /c move gguf\build\cpu\bin\Release\server.exe gguf\build\cpu\bin\Release\ollama-runner.exe
+
+//go:generate cmake -S ggml -B ggml/build/cuda -DLLAMA_CUBLAS=on -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on
+//go:generate cmake --build ggml/build/cuda --target server --config Release
+//go:generate cmd /c move ggml\build\cuda\bin\Release\server.exe ggml\build\cuda\bin\Release\ollama-runner.exe
+
+//go:generate cmake -S gguf -B gguf/build/cuda -DLLAMA_CUBLAS=on -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on -DLLAMA_NATIVE=off -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off
+//go:generate cmake --build gguf/build/cuda --target server --config Release
+//go:generate cmd /c move gguf\build\cuda\bin\Release\server.exe gguf\build\cuda\bin\Release\ollama-runner.exe
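For context: the chooseRunners hunk below fills a []ModelRunner with a Path and an Accelerated flag. A minimal sketch of that descriptor, assuming only the two fields visible in this diff (the real struct in package llm may carry more):

type ModelRunner struct {
	Path        string // location of the ollama-runner executable
	Accelerated bool   // set for the CUDA build, unset for the CPU fallback
}
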
@@ -84,6 +84,7 @@ func chooseRunners(workDir, runnerType string) []ModelRunner {
 	case "windows":
 		// TODO: select windows GPU runner here when available
 		runners = []ModelRunner{
+			{Path: path.Join(buildPath, "cuda", "bin", "Release", "ollama-runner.exe"), Accelerated: true},
 			{Path: path.Join(buildPath, "cpu", "bin", "Release", "ollama-runner.exe")},
 		}
 	default:
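The CUDA runner is listed ahead of the CPU runner, which only pays off if callers try the entries in order and fall back when a binary is absent. A sketch of that selection, using a hypothetical pickRunner helper that is not part of this diff:

import (
	"errors"
	"os"
)

// pickRunner returns the first runner whose binary exists on disk, so a
// Windows machine without the CUDA build falls back to the CPU runner.
// Illustrative only; the actual selection logic lives elsewhere in llm.
func pickRunner(runners []ModelRunner) (ModelRunner, error) {
	for _, r := range runners {
		if _, err := os.Stat(r.Path); err == nil {
			return r, nil
		}
	}
	return ModelRunner{}, errors.New("no ollama-runner binary found")
}
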
@@ -269,7 +270,7 @@ func NumGPU(numLayer, fileSizeBytes int64, opts api.Options) int {
 	if opts.NumGPU != -1 {
 		return opts.NumGPU
 	}
-	if runtime.GOOS == "linux" {
+	if runtime.GOOS == "linux" || runtime.GOOS == "windows" {
 		freeBytes, err := CheckVRAM()
 		if err != nil {
 			if !errors.Is(err, errNvidiaSMI) {
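With this one-line change, Windows takes the dynamic path Linux already used: query free VRAM via CheckVRAM and size the GPU offload to fit. The rest of NumGPU falls outside this hunk; a sketch of the kind of calculation it implies, assuming a uniform bytes-per-layer estimate and an invented headroom factor:

// layersThatFit estimates how many layers fit in free VRAM, assuming each
// layer costs roughly fileSizeBytes/numLayer bytes. The 92% factor that
// leaves headroom is an assumption for illustration, not from this commit.
func layersThatFit(numLayer, fileSizeBytes, freeBytes int64) int {
	bytesPerLayer := fileSizeBytes / numLayer
	layers := freeBytes / bytesPerLayer * 92 / 100
	if layers > numLayer {
		layers = numLayer
	}
	return int(layers)
}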