//go:build linux || windows

package gpu

/*
#cgo linux LDFLAGS: -lrt -lpthread -ldl -lstdc++ -lm
#cgo windows LDFLAGS: -lpthread
#include "gpu_info.h"
*/
import "C"
import (
	"fmt"
	"log/slog"
	"os"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"unsafe"
)

type handles struct {
	cuda *C.cuda_handle_t
}

var gpuMutex sync.Mutex
var gpuHandles *handles = nil

// With our current CUDA compile flags, GPUs with compute capability older than 5.0 will not work properly
var CudaComputeMin = [2]C.int{5, 0}
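
// The check in GetGPUInfo treats this as a (major, minor) pair: a device is
// accepted when cc.major > CudaComputeMin[0], or when cc.major equals
// CudaComputeMin[0] and cc.minor >= CudaComputeMin[1]. For example, compute
// capability 6.1 or 5.0 passes, while 3.7 falls back to CPU mode.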

// Possible locations for the nvidia-ml library
var CudaLinuxGlobs = []string{
	"/usr/local/cuda/lib64/libnvidia-ml.so*",
	"/usr/lib/x86_64-linux-gnu/nvidia/current/libnvidia-ml.so*",
	"/usr/lib/x86_64-linux-gnu/libnvidia-ml.so*",
	"/usr/lib/wsl/lib/libnvidia-ml.so*",
	"/usr/lib/wsl/drivers/*/libnvidia-ml.so*",
	"/opt/cuda/lib64/libnvidia-ml.so*",
	"/usr/lib*/libnvidia-ml.so*",
	"/usr/local/lib*/libnvidia-ml.so*",
	"/usr/lib/aarch64-linux-gnu/nvidia/current/libnvidia-ml.so*",
	"/usr/lib/aarch64-linux-gnu/libnvidia-ml.so*",

	// TODO: are these stubs ever valid?
	"/opt/cuda/targets/x86_64-linux/lib/stubs/libnvidia-ml.so*",
}

var CudaWindowsGlobs = []string{
	"c:\\Windows\\System32\\nvml.dll",
}
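
// These globs are expanded with filepath.Glob in FindGPULibs. As a rough
// illustration (actual file names depend on the installed driver), a pattern
// such as "/usr/lib/x86_64-linux-gnu/libnvidia-ml.so*" can match both the
// unversioned symlink and the versioned library, e.g.:
//
//	matches, _ := filepath.Glob("/usr/lib/x86_64-linux-gnu/libnvidia-ml.so*")
//	// e.g. [".../libnvidia-ml.so" ".../libnvidia-ml.so.1" ".../libnvidia-ml.so.535.154.05"]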

// Note: gpuMutex must already be held
func initGPUHandles() {
	// TODO - if the ollama build is CPU only, don't do these checks as they're irrelevant and confusing
	gpuHandles = &handles{nil}
	var cudaMgmtName string
	var cudaMgmtPatterns []string
	switch runtime.GOOS {
	case "windows":
		cudaMgmtName = "nvml.dll"
		cudaMgmtPatterns = make([]string, len(CudaWindowsGlobs))
		copy(cudaMgmtPatterns, CudaWindowsGlobs)
	case "linux":
		cudaMgmtName = "libnvidia-ml.so"
		cudaMgmtPatterns = make([]string, len(CudaLinuxGlobs))
		copy(cudaMgmtPatterns, CudaLinuxGlobs)
	default:
		return
	}

	slog.Info("Detecting GPU type")
	cudaLibPaths := FindGPULibs(cudaMgmtName, cudaMgmtPatterns)
	if len(cudaLibPaths) > 0 {
		cuda := LoadCUDAMgmt(cudaLibPaths)
		if cuda != nil {
			slog.Info("Nvidia GPU detected")
			gpuHandles.cuda = cuda
			return
		}
	}
}
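
// Callers must not invoke initGPUHandles without holding gpuMutex; the expected
// pattern is the one GetGPUInfo uses below (shown here only as a sketch of the
// locking contract):
//
//	gpuMutex.Lock()
//	defer gpuMutex.Unlock()
//	if gpuHandles == nil {
//		initGPUHandles()
//	}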

// GetGPUInfo probes the system and reports which acceleration library to use
// ("cuda", whatever AMDGetGPUInfo selects for AMD GPUs, or "cpu"), along with
// the detected device count and free/total memory.
func GetGPUInfo() GpuInfo {
	// TODO - consider exploring lspci (and equivalent on windows) to check for
	// GPUs so we can report warnings if we see Nvidia/AMD but fail to load the libraries
	gpuMutex.Lock()
	defer gpuMutex.Unlock()
	if gpuHandles == nil {
		initGPUHandles()
	}

	// All our GPU builds on x86 have AVX enabled, so fall back to CPU if we don't detect at least AVX
	cpuVariant := GetCPUVariant()
	if cpuVariant == "" && runtime.GOARCH == "amd64" {
		slog.Warn("CPU does not have AVX or AVX2, disabling GPU support.")
	}

	var memInfo C.mem_info_t
	resp := GpuInfo{}
	if gpuHandles.cuda != nil && (cpuVariant != "" || runtime.GOARCH != "amd64") {
		C.cuda_check_vram(*gpuHandles.cuda, &memInfo)
		if memInfo.err != nil {
			slog.Info(fmt.Sprintf("error looking up CUDA GPU memory: %s", C.GoString(memInfo.err)))
			C.free(unsafe.Pointer(memInfo.err))
		} else if memInfo.count > 0 {
			// Verify minimum compute capability
			var cc C.cuda_compute_capability_t
			C.cuda_compute_capability(*gpuHandles.cuda, &cc)
			if cc.err != nil {
				slog.Info(fmt.Sprintf("error looking up CUDA GPU compute capability: %s", C.GoString(cc.err)))
				C.free(unsafe.Pointer(cc.err))
			} else if cc.major > CudaComputeMin[0] || (cc.major == CudaComputeMin[0] && cc.minor >= CudaComputeMin[1]) {
				slog.Info(fmt.Sprintf("CUDA Compute Capability detected: %d.%d", cc.major, cc.minor))
				resp.Library = "cuda"
			} else {
				slog.Info(fmt.Sprintf("CUDA GPU is too old. Falling back to CPU mode. Compute Capability detected: %d.%d", cc.major, cc.minor))
			}
		}
	} else {
		AMDGetGPUInfo(&resp)
		if resp.Library != "" {
			return resp
		}
	}
	if resp.Library == "" {
		C.cpu_check_ram(&memInfo)
		resp.Library = "cpu"
		resp.Variant = cpuVariant
	}
	if memInfo.err != nil {
		slog.Info(fmt.Sprintf("error looking up CPU memory: %s", C.GoString(memInfo.err)))
		C.free(unsafe.Pointer(memInfo.err))
		return resp
	}

	resp.DeviceCount = uint32(memInfo.count)
	resp.FreeMemory = uint64(memInfo.free)
	resp.TotalMemory = uint64(memInfo.total)
	return resp
}
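
// Illustrative use of the result (a sketch, not an API defined in this file):
// callers typically branch on Library to pick a runner, e.g.
//
//	info := GetGPUInfo()
//	if info.Library == "cpu" {
//		slog.Info(fmt.Sprintf("no usable GPU, CPU variant %q, %d bytes free", info.Variant, info.FreeMemory))
//	}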

// getCPUMem reports free and total system memory via the C helper.
func getCPUMem() (memInfo, error) {
	var ret memInfo
	var info C.mem_info_t
	C.cpu_check_ram(&info)
	if info.err != nil {
		defer C.free(unsafe.Pointer(info.err))
		// use "%s" so a literal '%' in the C error text is not treated as a format verb
		return ret, fmt.Errorf("%s", C.GoString(info.err))
	}
	ret.FreeMemory = uint64(info.free)
	ret.TotalMemory = uint64(info.total)
	return ret, nil
}
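
// Minimal usage sketch for getCPUMem (illustrative only):
//
//	if mem, err := getCPUMem(); err == nil {
//		slog.Info(fmt.Sprintf("system memory: %d free of %d total bytes", mem.FreeMemory, mem.TotalMemory))
//	}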

// CheckVRAM returns the number of bytes of VRAM available for use, honoring an
// OLLAMA_MAX_VRAM override when set.
func CheckVRAM() (int64, error) {
	userLimit := os.Getenv("OLLAMA_MAX_VRAM")
	if userLimit != "" {
		avail, err := strconv.ParseInt(userLimit, 10, 64)
		if err != nil {
			return 0, fmt.Errorf("Invalid OLLAMA_MAX_VRAM setting %s: %s", userLimit, err)
		}
		slog.Info(fmt.Sprintf("user override OLLAMA_MAX_VRAM=%d", avail))
		return avail, nil
	}
	gpuInfo := GetGPUInfo()
	if gpuInfo.FreeMemory > 0 && (gpuInfo.Library == "cuda" || gpuInfo.Library == "rocm") {
		// leave 10% or 1024MiB of VRAM free per GPU to handle unaccounted-for overhead
		overhead := gpuInfo.FreeMemory / 10
		gpus := uint64(gpuInfo.DeviceCount)
		if overhead < gpus*1024*1024*1024 {
			overhead = gpus * 1024 * 1024 * 1024
		}
		avail := int64(gpuInfo.FreeMemory - overhead)
		slog.Debug(fmt.Sprintf("%s detected %d devices with %dM available memory", gpuInfo.Library, gpuInfo.DeviceCount, avail/1024/1024))
		return avail, nil
	}

	return 0, fmt.Errorf("no GPU detected") // TODO - better handling of CPU based memory determination
}
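
// Worked example of the reservation above (illustrative numbers): with a single
// GPU reporting 8 GiB free, 10% is about 0.8 GiB, which is below the 1 GiB
// per-GPU floor, so 1 GiB is held back and roughly 7 GiB is returned. With
// 64 GiB free, the 10% rule wins and about 57.6 GiB is returned.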

func FindGPULibs(baseLibName string, patterns []string) []string {
	// Multiple GPU libraries may exist, and some may not work, so keep trying until we exhaust them
	var ldPaths []string
	gpuLibPaths := []string{}
	slog.Info(fmt.Sprintf("Searching for GPU management library %s", baseLibName))

	switch runtime.GOOS {
	case "windows":
		ldPaths = strings.Split(os.Getenv("PATH"), ";")
	case "linux":
		ldPaths = strings.Split(os.Getenv("LD_LIBRARY_PATH"), ":")
	default:
		return gpuLibPaths
	}
	// Start with whatever we find in the PATH/LD_LIBRARY_PATH
	for _, ldPath := range ldPaths {
		d, err := filepath.Abs(ldPath)
		if err != nil {
			continue
		}
		patterns = append(patterns, filepath.Join(d, baseLibName+"*"))
	}
	slog.Debug(fmt.Sprintf("gpu management search paths: %v", patterns))
	for _, pattern := range patterns {
		// Ignore glob discovery errors
		matches, _ := filepath.Glob(pattern)
		for _, match := range matches {
			// Resolve any links so we don't try the same lib multiple times
			// and weed out any dups across globs
			libPath := match
			tmp := match
			var err error
			for ; err == nil; tmp, err = os.Readlink(libPath) {
				if !filepath.IsAbs(tmp) {
					tmp = filepath.Join(filepath.Dir(libPath), tmp)
				}
				libPath = tmp
			}
			new := true
			for _, cmp := range gpuLibPaths {
				if cmp == libPath {
					new = false
					break
				}
			}
			if new {
				gpuLibPaths = append(gpuLibPaths, libPath)
			}
		}
	}
	slog.Info(fmt.Sprintf("Discovered GPU libraries: %v", gpuLibPaths))
	return gpuLibPaths
}
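
// Illustrative trace of the symlink walk above (file names are examples only):
// if a glob matches /usr/lib/x86_64-linux-gnu/libnvidia-ml.so, which links to
// libnvidia-ml.so.1, which in turn links to libnvidia-ml.so.535.154.05, the loop
// keeps calling os.Readlink, resolving relative targets against the link's
// directory, until it reaches the real file, so the same driver library is only
// probed once even when several globs match different links to it.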

func LoadCUDAMgmt(cudaLibPaths []string) *C.cuda_handle_t {
	var resp C.cuda_init_resp_t
	resp.ch.verbose = getVerboseState()
	for _, libPath := range cudaLibPaths {
		lib := C.CString(libPath)
		defer C.free(unsafe.Pointer(lib))
		C.cuda_init(lib, &resp)
		if resp.err != nil {
			slog.Info(fmt.Sprintf("Unable to load CUDA management library %s: %s", libPath, C.GoString(resp.err)))
			C.free(unsafe.Pointer(resp.err))
		} else {
			return &resp.ch
		}
	}
	return nil
}

func getVerboseState() C.uint16_t {
	if debug := os.Getenv("OLLAMA_DEBUG"); debug != "" {
		return C.uint16_t(1)
	}
	return C.uint16_t(0)
}
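
// Any non-empty value (for example OLLAMA_DEBUG=1) enables verbose mode; the
// flag is passed to the C side via resp.ch.verbose in LoadCUDAMgmt above.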