llm: avoid loading model if system memory is too small (#5637)

* llm: avoid loading model if system memory is too small

* update log

* Instrument swap free space

  On linux and windows, expose how much swap space is available so we can take
  that into consideration when scheduling models

* use `systemSwapFreeMemory` in check

---------

Co-authored-by: Daniel Hiltgen <daniel@ollama.com>

commit c4cf8ad559 (parent 791650ddef)
6 changed files with 22 additions and 13 deletions
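The core of the change is the new guard in NewLlamaServer (last three hunks below): the memory usable for the CPU-resident part of a model is capped at min(total RAM, free RAM + free swap), and the load is refused up front when the estimate exceeds that. A minimal standalone sketch of just that decision, with made-up sample values standing in for gpu.GetCPUMem() and the real memory estimate (the built-in min needs Go 1.21 or newer):

    package main

    import "fmt"

    // Illustrative inputs standing in for gpu.GetCPUMem() and the model's
    // memory estimate; the real values come from the scheduler at load time.
    const (
        totalMemory  uint64 = 16 << 30 // 16 GiB of RAM
        freeMemory   uint64 = 6 << 30  // 6 GiB currently free
        freeSwap     uint64 = 4 << 30  // 4 GiB of free swap
        requiredSize uint64 = 12 << 30 // CPU-side share of the model
    )

    func main() {
        // Same shape as the guard added to NewLlamaServer: never count more
        // than physical RAM, but let free swap top up what is currently free.
        available := min(totalMemory, freeMemory+freeSwap)
        if requiredSize > available {
            fmt.Printf("refusing to load: need %d GiB, only %d GiB available\n",
                requiredSize>>30, available>>30)
            return
        }
        fmt.Println("ok to load")
    }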
@@ -360,14 +360,17 @@ func GetGPUInfo() GpuInfoList {
                 "before",
                 "total", format.HumanBytes2(cpus[0].TotalMemory),
                 "free", format.HumanBytes2(cpus[0].FreeMemory),
+                "free_swap", format.HumanBytes2(cpus[0].FreeSwap),
             ),
             slog.Group(
                 "now",
                 "total", format.HumanBytes2(mem.TotalMemory),
                 "free", format.HumanBytes2(mem.FreeMemory),
+                "free_swap", format.HumanBytes2(mem.FreeSwap),
             ),
         )
         cpus[0].FreeMemory = mem.FreeMemory
+        cpus[0].FreeSwap = mem.FreeSwap
     }

     var memInfo C.mem_info_t
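As a side note, here is a tiny self-contained illustration of the slog.Group pattern the hunk above extends; the byte figures are placeholders, not real measurements. Grouped attributes keep the "before" and "now" snapshots distinct within a single debug record:

    package main

    import (
        "log/slog"
        "os"
    )

    func main() {
        logger := slog.New(slog.NewTextHandler(os.Stderr,
            &slog.HandlerOptions{Level: slog.LevelDebug}))

        // Each slog.Group becomes a prefixed set of attributes, e.g.
        // before.free_swap=... now.free_swap=..., so the refresh is easy to grep.
        logger.Debug("updating system memory data",
            slog.Group("before",
                "total", "32.0 GiB",
                "free", "12.3 GiB",
                "free_swap", "8.0 GiB",
            ),
            slog.Group("now",
                "total", "32.0 GiB",
                "free", "11.7 GiB",
                "free_swap", "8.0 GiB",
            ),
        )
    }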
@@ -57,6 +57,7 @@ func GetCPUMem() (memInfo, error) {
     return memInfo{
         TotalMemory: uint64(C.getPhysicalMemory()),
         FreeMemory:  uint64(C.getFreeMemory()),
+        // FreeSwap omitted as Darwin uses dynamic paging
     }, nil
 }

@@ -50,7 +50,7 @@ var OneapiMgmtName = "libze_intel_gpu.so"

 func GetCPUMem() (memInfo, error) {
     var mem memInfo
-    var total, available, free, buffers, cached uint64
+    var total, available, free, buffers, cached, freeSwap uint64
     f, err := os.Open("/proc/meminfo")
     if err != nil {
         return mem, err
@@ -70,20 +70,21 @@ func GetCPUMem() (memInfo, error) {
             _, err = fmt.Sscanf(line, "Buffers:%d", &buffers)
         case strings.HasPrefix(line, "Cached:"):
             _, err = fmt.Sscanf(line, "Cached:%d", &cached)
+        case strings.HasPrefix(line, "SwapFree:"):
+            _, err = fmt.Sscanf(line, "SwapFree:%d", &freeSwap)
         default:
             continue
         }
         if err != nil {
             return mem, err
         }
-
-        if total > 0 && available > 0 {
-            mem.TotalMemory = total * format.KibiByte
-            mem.FreeMemory = available * format.KibiByte
-            return mem, nil
-        }
     }
     mem.TotalMemory = total * format.KibiByte
-    mem.FreeMemory = (free + buffers + cached) * format.KibiByte
+    mem.FreeSwap = freeSwap * format.KibiByte
+    if available > 0 {
+        mem.FreeMemory = available * format.KibiByte
+    } else {
+        mem.FreeMemory = (free + buffers + cached) * format.KibiByte
+    }
     return mem, nil
 }
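For reference, a cut-down, standalone version of the SwapFree parsing that the Linux hunk above adds; it only handles the swap line and spells out the KiB-to-bytes conversion instead of using format.KibiByte:

    package main

    import (
        "bufio"
        "fmt"
        "os"
        "strings"
    )

    // freeSwapBytes reads SwapFree from /proc/meminfo (reported in KiB) and
    // returns it in bytes. Linux-only, since it depends on procfs.
    func freeSwapBytes() (uint64, error) {
        f, err := os.Open("/proc/meminfo")
        if err != nil {
            return 0, err
        }
        defer f.Close()

        var freeSwapKiB uint64
        s := bufio.NewScanner(f)
        for s.Scan() {
            line := s.Text()
            if strings.HasPrefix(line, "SwapFree:") {
                if _, err := fmt.Sscanf(line, "SwapFree:%d", &freeSwapKiB); err != nil {
                    return 0, err
                }
                break
            }
        }
        if err := s.Err(); err != nil {
            return 0, err
        }
        return freeSwapKiB * 1024, nil
    }

    func main() {
        swap, err := freeSwapBytes()
        if err != nil {
            fmt.Fprintln(os.Stderr, "reading /proc/meminfo:", err)
            os.Exit(1)
        }
        fmt.Printf("free swap: %d bytes\n", swap)
    }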
@@ -51,5 +51,5 @@ func GetCPUMem() (memInfo, error) {
     if r1 == 0 {
         return memInfo{}, fmt.Errorf("GlobalMemoryStatusEx failed: %w", err)
     }
-    return memInfo{TotalMemory: memStatus.TotalPhys, FreeMemory: memStatus.AvailPhys}, nil
+    return memInfo{TotalMemory: memStatus.TotalPhys, FreeMemory: memStatus.AvailPhys, FreeSwap: memStatus.AvailPageFile}, nil
 }
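The memStatus value in the Windows hunk is filled by the Win32 GlobalMemoryStatusEx call; its ullAvailPageFile field is roughly the remaining commit headroom (free RAM plus unused page file), which the diff surfaces as FreeSwap. A Windows-only sketch of that call; the struct layout and proc wiring here are illustrative and not copied from the repository's own bindings:

    //go:build windows

    package main

    import (
        "fmt"
        "os"
        "syscall"
        "unsafe"
    )

    // memoryStatusEx mirrors the Win32 MEMORYSTATUSEX layout; the field names
    // are illustrative, not the ones the repository's bindings use.
    type memoryStatusEx struct {
        length               uint32
        memoryLoad           uint32
        totalPhys            uint64
        availPhys            uint64
        totalPageFile        uint64
        availPageFile        uint64
        totalVirtual         uint64
        availVirtual         uint64
        availExtendedVirtual uint64
    }

    var (
        kernel32                 = syscall.NewLazyDLL("kernel32.dll")
        procGlobalMemoryStatusEx = kernel32.NewProc("GlobalMemoryStatusEx")
    )

    func main() {
        var ms memoryStatusEx
        ms.length = uint32(unsafe.Sizeof(ms))
        r1, _, err := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(&ms)))
        if r1 == 0 {
            fmt.Fprintln(os.Stderr, "GlobalMemoryStatusEx failed:", err)
            os.Exit(1)
        }
        // availPageFile: how much more the system can commit; the rough
        // Windows analogue of "free RAM + free swap".
        fmt.Printf("free physical: %d bytes, available page file: %d bytes\n",
            ms.availPhys, ms.availPageFile)
    }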
@@ -10,6 +10,7 @@ import (
 type memInfo struct {
     TotalMemory uint64 `json:"total_memory,omitempty"`
     FreeMemory  uint64 `json:"free_memory,omitempty"`
+    FreeSwap    uint64 `json:"free_swap,omitempty"`
 }

 // Beginning of an `ollama info` command
@@ -88,6 +88,7 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr
     var estimate MemoryEstimate
     var systemTotalMemory uint64
     var systemFreeMemory uint64
+    var systemSwapFreeMemory uint64

     systemMemInfo, err := gpu.GetCPUMem()
     if err != nil {
@@ -95,7 +96,8 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr
     } else {
         systemTotalMemory = systemMemInfo.TotalMemory
         systemFreeMemory = systemMemInfo.FreeMemory
-        slog.Debug("system memory", "total", format.HumanBytes2(systemTotalMemory), "free", systemFreeMemory)
+        systemSwapFreeMemory = systemMemInfo.FreeSwap
+        slog.Debug("system memory", "total", format.HumanBytes2(systemTotalMemory), "free", format.HumanBytes2(systemFreeMemory), "free_swap", format.HumanBytes2(systemSwapFreeMemory))
     }

     // If the user wants zero GPU layers, reset the gpu list to be CPU/system ram info
@@ -125,9 +127,10 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr
     // On linux, over-allocating CPU memory will almost always result in an error
     if runtime.GOOS == "linux" {
         systemMemoryRequired := estimate.TotalSize - estimate.VRAMSize
-        if systemMemoryRequired > systemTotalMemory {
-            slog.Warn("model request too large for system", "requested", format.HumanBytes2(systemMemoryRequired), "system", format.HumanBytes2(systemTotalMemory))
-            return nil, fmt.Errorf("model requires more system memory (%s) than is available (%s)", format.HumanBytes2(systemMemoryRequired), format.HumanBytes2(systemTotalMemory))
+        available := min(systemTotalMemory, systemFreeMemory+systemSwapFreeMemory)
+        if systemMemoryRequired > available {
+            slog.Warn("model request too large for system", "requested", format.HumanBytes2(systemMemoryRequired), "available", available, "total", format.HumanBytes2(systemTotalMemory), "free", format.HumanBytes2(systemFreeMemory), "swap", format.HumanBytes2(systemSwapFreeMemory))
+            return nil, fmt.Errorf("model requires more system memory (%s) than is available (%s)", format.HumanBytes2(systemMemoryRequired), format.HumanBytes2(available))
         }
     }