package llm

import (
	"context"
	"fmt"
	"log"
	"os"
	"runtime"

	"github.com/jmorganca/ollama/api"
	"github.com/jmorganca/ollama/gpu"
)
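
// LLM is implemented by the model runners; it exposes prediction, embedding,
// tokenization (Encode), detokenization (Decode), and shutdown (Close) for a
// loaded model.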
type LLM interface {
	Predict(context.Context, PredictOpts, func(PredictResult)) error
	Embedding(context.Context, string) ([]float64, error)
	Encode(context.Context, string) ([]int, error)
	Decode(context.Context, []int) (string, error)
	Close()
}
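
// AvailableShims maps a GPU library name to its dynamically loadable runner
// shim; newLlmServer prefers one of these when the detected library matches.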
var AvailableShims = map[string]string{}
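
// New loads the GGML model at the given path, estimates its memory
// requirements, decides how many layers can be offloaded to the GPU, and
// starts the matching runner.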
func New(workDir, model string, adapters, projectors []string, opts api.Options) (LLM, error) {
	if _, err := os.Stat(model); err != nil {
		return nil, err
	}

	f, err := os.Open(model)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	ggml, err := DecodeGGML(f)
	if err != nil {
		return nil, err
	}
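
	// enforce a minimum context window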
	if opts.NumCtx < 4 {
		opts.NumCtx = 4
	}
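
	// print basic metadata decoded from the model file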
fmt.Println("size", ggml.Size)
|
|
|
|
fmt.Println("filetype", ggml.FileType())
|
|
|
|
fmt.Println("architecture", ggml.ModelFamily())
|
|
|
|
fmt.Println("type", ggml.ModelType())
|
|
|
|
fmt.Println("name", ggml.Name())
|
|
|
|
fmt.Println("embd", ggml.NumEmbed())
|
|
|
|
fmt.Println("head", ggml.NumHead())
|
|
|
|
fmt.Println("head_kv", ggml.NumHeadKv())
|
|
|
|
fmt.Println("gqa", ggml.NumGQA())
|
|
|
|
|
|
|
|
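
	// memory reported as available for offloading, in bytes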
	available, _ := gpu.CheckVRAM()

	// For now assume filesize = model size
	// TODO: use actual model size
	requiredModel := ggml.Size

	// the fp16 k,v matrices need n_ctx * n_layer * n_embd / n_head * n_head_kv elements, at 2 bytes each, times 2 for key and value
	requiredKv := 2 * 2 * int64(opts.NumCtx) * int64(ggml.NumLayers()) * int64(ggml.NumEmbed()) * int64(ggml.NumHeadKv()) / int64(ggml.NumHead())

	// this amount is the overhead + tensors in memory
	// TODO: get this from llama.cpp's graph calculations instead of
	// estimating it as 1/6 * kv_cache_size * num_gqa
	requiredAlloc := int64(ggml.NumGQA()) * requiredKv / 6

	requiredTotal := requiredModel + requiredKv + requiredAlloc
log.Println("system memory bytes:", available)
|
|
|
|
log.Println("required model bytes:", requiredModel)
|
|
|
|
log.Println("required kv bytes:", requiredKv)
|
|
|
|
log.Println("required alloc bytes:", requiredAlloc)
|
|
|
|
log.Println("required total bytes:", requiredTotal)
|
|
|
|
|
|
|
|
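
	// which GPU library was detected; "cpu" or "default" means no usable GPU acceleration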
	info := gpu.GetGPUInfo()
	library := info.Library

	if opts.NumGPU == -1 {
		// default to offloading all layers
		opts.NumGPU = int(ggml.NumLayers()) + 1
	}

	// decide how many layers to put on the GPU
	if opts.NumGPU > 0 {
		switch runtime.GOOS {
		case "darwin":
			if requiredTotal > available {
				log.Println("not enough vram available, falling back to CPU only")
				opts.NumGPU = 0
			}
		default:
			if library == "cpu" || library == "default" {
				opts.NumGPU = 0
				break
			}

			// everything fits; no need to limit the number of offloaded layers
			if requiredTotal <= available {
				break
			}

			// the CUDA runner always allocates requiredAlloc on the GPU, so fall back to CPU if even that doesn't fit
			if requiredAlloc > available {
				log.Printf("not enough vram available, falling back to CPU only")
				library = "cpu"
				opts.NumGPU = 0
				break
			}

			available -= requiredAlloc

			// fill remaining vram with layers
			log.Println("splitting", available, "of available memory bytes into layers")
			bytesPerLayer := int64((requiredModel + requiredKv) / int64(ggml.NumLayers()))
			log.Println("bytes per layer:", bytesPerLayer)
			layers := available / bytesPerLayer
			log.Println("total required with split:", requiredAlloc+(layers*bytesPerLayer))
			if layers < int64(opts.NumGPU) {
				opts.NumGPU = int(layers)
			}
		}
	}
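
	// zero values let the runner fall back to the model's own defaults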
	opts.NumGQA = 0
	opts.RopeFrequencyBase = 0.0
	opts.RopeFrequencyScale = 0.0
	return newLlmServer(library, model, adapters, projectors, opts)
}

// Init gives any native cgo implementations an opportunity to initialize.
func Init(workdir string) error {
	return nativeInit(workdir)
}
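
// newLlmServer starts the runner for the requested library, preferring a
// dynamically loaded GPU shim when one is available and falling back to the
// default (CPU) server if the shim fails to load.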
func newLlmServer(library, model string, adapters, projectors []string, opts api.Options) (extServer, error) {
	if _, libPresent := AvailableShims[library]; libPresent && library != "default" {
		srv, err := newDynamicShimExtServer(AvailableShims[library], model, adapters, projectors, opts)
		if err == nil {
			return srv, nil
		}
		log.Printf("Failed to load dynamic library %s - falling back to CPU mode %s", library, err)
		// TODO - update some state to indicate we were unable to load the GPU library for future "info" ux
	}

	return newDefaultExtServer(model, adapters, projectors, opts)
}