7555ea44f8
This switches the default llama.cpp to be CPU based, and builds the GPU variants as dynamically loaded libraries which we can select at runtime. This also bumps the ROCm library to version 6 given 5.7 builds don't work on the latest ROCm library that just shipped.
41 lines
655 B
Go
41 lines
655 B
Go
//go:build darwin
|
|
|
|
package gpu
|
|
|
|
import "C"
|
|
import (
|
|
"runtime"
|
|
|
|
"github.com/jmorganca/ollama/api"
|
|
)
|
|
|
|
// CheckVRAM returns the free VRAM in bytes. On macOS this is not yet
// implemented and always reports zero with no error; callers treat the
// zero value as "unknown".
func CheckVRAM() (int64, error) {
	// TODO - assume metal, and return free memory?
	return 0, nil
}
|
|
|
|
func GetGPUInfo() GpuInfo {
|
|
// TODO - Metal vs. x86 macs...
|
|
|
|
return GpuInfo{
|
|
Driver: "METAL",
|
|
Library: "default",
|
|
TotalMemory: 0,
|
|
FreeMemory: 0,
|
|
}
|
|
}
|
|
|
|
func NumGPU(numLayer, fileSizeBytes int64, opts api.Options) int {
|
|
if runtime.GOARCH == "arm64" {
|
|
return 1
|
|
}
|
|
|
|
// metal only supported on arm64
|
|
return 0
|
|
}
|
|
|
|
// nativeInit performs any platform-specific GPU discovery setup.
// No initialization is needed on macOS, so it always returns nil.
func nativeInit() error {
	return nil
}
|