ollama/llm/gpu_darwin.go
Daniel Hiltgen d4cd695759 Add cgo implementation for llama.cpp
Run the server.cpp directly inside the Go runtime via cgo
while retaining the LLM Go abstractions.
2023-12-19 09:05:46 -08:00

19 lines · 385 B · Go

//go:build darwin

package llm

import (
	"github.com/jmorganca/ollama/api"
)

// CheckVRAM returns the amount of free VRAM in bytes. It is not yet
// implemented on macOS, so it currently reports an error.
func CheckVRAM() (int64, error) {
	// TODO - assume metal, and return free memory?
	return 0, errNvidiaSMI
}

func NumGPU(numLayer, fileSizeBytes int64, opts api.Options) int {
	// Default to enabling Metal on macOS.
	return 1
}