ollama/llama/llama.go
2023-07-09 10:25:07 -04:00

215 lines
7 KiB
Go

// MIT License
// Copyright (c) 2023 go-skynet authors
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
package llama
// #cgo LDFLAGS: -Lbuild -lbinding -lllama -lm -lggml_static -lstdc++
// #cgo CXXFLAGS: -std=c++11
// #cgo darwin LDFLAGS: -framework Accelerate -framework Foundation -framework Metal -framework MetalKit -framework MetalPerformanceShaders
// #include "binding/binding.h"
// #include <stdlib.h>
import "C"
import (
"fmt"
"strings"
"sync"
"unsafe"
)
// LLama wraps an opaque llama.cpp context allocated on the C side by
// load_model. It is not safe for concurrent use unless the underlying
// C binding is.
type LLama struct {
	ctx         unsafe.Pointer // opaque C context returned by load_model; released by Free
	embeddings  bool           // whether the model was loaded with embedding support (mo.Embeddings)
	contextSize int            // context window size passed at load time (mo.ContextSize)
}
// New loads the model file at the given path into a llama.cpp context
// configured by mo and returns a handle to it. It returns an error when
// the C loader reports failure (a nil context). The returned *LLama must
// be released with Free.
func New(model string, mo ModelOptions) (*LLama, error) {
	modelPath := C.CString(model)
	defer C.free(unsafe.Pointer(modelPath))

	// Fix: these C strings were previously allocated inline in the
	// load_model call and never freed, leaking on every New. Freeing them
	// after the call is safe because the C side copies its string
	// arguments — Eval/Predict already free theirs the same way.
	mainGPU := C.CString(mo.MainGPU)
	defer C.free(unsafe.Pointer(mainGPU))
	tensorSplit := C.CString(mo.TensorSplit)
	defer C.free(unsafe.Pointer(tensorSplit))

	ctx := C.load_model(modelPath, C.int(mo.ContextSize), C.int(mo.Seed), C.bool(mo.F16Memory), C.bool(mo.MLock), C.bool(mo.Embeddings), C.bool(mo.MMap), C.bool(mo.LowVRAM), C.bool(mo.VocabOnly), C.int(mo.NGPULayers), C.int(mo.NBatch), mainGPU, tensorSplit, C.bool(mo.NUMA))
	if ctx == nil {
		return nil, fmt.Errorf("failed loading model")
	}
	return &LLama{ctx: ctx, contextSize: mo.ContextSize, embeddings: mo.Embeddings}, nil
}
// Free releases the underlying C model/context. It also unregisters any
// token callback recorded for this context: previously the entry in the
// package-level callbacks map (keyed by the ctx pointer) was leaked, and a
// later context allocated at the same address would have inherited it.
func (l *LLama) Free() {
	setCallback(l.ctx, nil)
	C.llama_binding_free_model(l.ctx)
}
// Eval runs a forward pass over text using the options in po, without
// collecting generated output. It returns an error when the C side
// reports a non-zero status.
func (l *LLama) Eval(text string, po PredictOptions) error {
	// Zero means "no explicit limit"; use the binding's conventional cap.
	if po.Tokens == 0 {
		po.Tokens = 99999999
	}

	prompt := C.CString(text)
	defer C.free(unsafe.Pointer(prompt))

	// Marshal the stop prompts into a C string array; all frees are
	// deferred so the pointers stay valid for the duration of the call.
	stopCount := len(po.StopPrompts)
	stops := make([]*C.char, stopCount)
	for i, stop := range po.StopPrompts {
		stops[i] = C.CString(stop)
		defer C.free(unsafe.Pointer(stops[i]))
	}
	var stopsPtr **C.char
	if stopCount > 0 {
		stopsPtr = &stops[0]
	}

	logitBias := C.CString(po.LogitBias)
	defer C.free(unsafe.Pointer(logitBias))
	mainGPU := C.CString(po.MainGPU)
	defer C.free(unsafe.Pointer(mainGPU))
	tensorSplit := C.CString(po.TensorSplit)
	defer C.free(unsafe.Pointer(tensorSplit))

	params := C.llama_allocate_params(prompt, C.int(po.Seed), C.int(po.Threads), C.int(po.Tokens), C.int(po.TopK),
		C.float(po.TopP), C.float(po.Temperature), C.float(po.Penalty), C.int(po.Repeat),
		C.bool(po.IgnoreEOS), C.bool(po.F16KV),
		C.int(po.Batch), C.int(po.NKeep), stopsPtr, C.int(stopCount),
		C.float(po.TailFreeSamplingZ), C.float(po.TypicalP), C.float(po.FrequencyPenalty), C.float(po.PresencePenalty),
		C.int(po.Mirostat), C.float(po.MirostatETA), C.float(po.MirostatTAU), C.bool(po.PenalizeNL), cLogitBias(logitBias),
		C.bool(po.MLock), C.bool(po.MMap), mainGPU, tensorSplit,
	)
	defer C.llama_free_params(params)

	if C.eval(params, l.ctx, prompt) != 0 {
		return fmt.Errorf("inference failed")
	}
	return nil
}
// Predict generates a completion for text using the options in po and
// returns the generated string. When po.TokenCallback is set it is
// registered for this context before prediction and removed afterwards,
// receiving each token as it is produced. Returns an error when the C
// side reports a non-zero status.
func (l *LLama) Predict(text string, po PredictOptions) (string, error) {
	if po.TokenCallback != nil {
		setCallback(l.ctx, po.TokenCallback)
	}
	input := C.CString(text)
	// Zero means "no explicit limit"; use the binding's conventional cap.
	if po.Tokens == 0 {
		po.Tokens = 99999999
	}
	defer C.free(unsafe.Pointer(input))

	// Output buffer the C side writes the NUL-terminated result into.
	out := make([]byte, po.Tokens)

	// Marshal stop prompts; the deferred frees keep the pointers valid
	// for the duration of the C call.
	reverseCount := len(po.StopPrompts)
	reversePrompt := make([]*C.char, reverseCount)
	var pass **C.char
	for i, s := range po.StopPrompts {
		cs := C.CString(s)
		reversePrompt[i] = cs
		pass = &reversePrompt[0]
		defer C.free(unsafe.Pointer(cs))
	}
	cLogitBias := C.CString(po.LogitBias)
	defer C.free(unsafe.Pointer(cLogitBias))
	cMainGPU := C.CString(po.MainGPU)
	defer C.free(unsafe.Pointer(cMainGPU))
	cTensorSplit := C.CString(po.TensorSplit)
	defer C.free(unsafe.Pointer(cTensorSplit))
	params := C.llama_allocate_params(input, C.int(po.Seed), C.int(po.Threads), C.int(po.Tokens), C.int(po.TopK),
		C.float(po.TopP), C.float(po.Temperature), C.float(po.Penalty), C.int(po.Repeat),
		C.bool(po.IgnoreEOS), C.bool(po.F16KV),
		C.int(po.Batch), C.int(po.NKeep), pass, C.int(reverseCount),
		C.float(po.TailFreeSamplingZ), C.float(po.TypicalP), C.float(po.FrequencyPenalty), C.float(po.PresencePenalty),
		C.int(po.Mirostat), C.float(po.MirostatETA), C.float(po.MirostatTAU), C.bool(po.PenalizeNL), cLogitBias,
		C.bool(po.MLock), C.bool(po.MMap), cMainGPU, cTensorSplit,
	)
	defer C.llama_free_params(params)
	ret := C.llama_predict(params, l.ctx, (*C.char)(unsafe.Pointer(&out[0])), C.bool(po.DebugMode))
	if ret != 0 {
		return "", fmt.Errorf("inference failed")
	}
	res := C.GoString((*C.char)(unsafe.Pointer(&out[0])))
	res = strings.TrimPrefix(res, " ")
	res = strings.TrimPrefix(res, text)
	res = strings.TrimPrefix(res, "\n")
	// BUG FIX: the original used strings.TrimRight(res, s), which treats s
	// as a *set* of characters and would strip any trailing run of
	// characters that appear anywhere in the stop prompt (e.g. a stop
	// prompt of "User:" would eat a trailing "sure"). TrimSuffix removes
	// the stop prompt only when it is an exact trailing substring.
	for _, s := range po.StopPrompts {
		res = strings.TrimSuffix(res, s)
	}
	if po.TokenCallback != nil {
		setCallback(l.ctx, nil)
	}
	return res, nil
}
// CGo only allows us to use static calls from C to Go; we can't dynamically
// pass in funcs. The next best thing is to register callbacks in a
// package-level map keyed by the context pointer and have the C code call
// the exported tokenCallback, which dispatches to the registered func.

// SetTokenCallback registers a callback for the individual tokens created
// when running Predict. It will be called once for each token. The callback
// shall return true as long as the model should continue predicting the next
// token. When the callback returns false the predictor will return. The
// tokens are just converted into Go strings; they are not trimmed or
// otherwise changed. Also the tokens may not be valid UTF-8.
// Pass in nil to remove a callback.
//
// It is safe to call this method while a prediction is running.
func (l *LLama) SetTokenCallback(callback func(token string) bool) {
	setCallback(l.ctx, callback)
}
var (
	// m guards callbacks: registration via setCallback can race with
	// lookups from tokenCallback invoked on the C side during prediction.
	m sync.Mutex
	// callbacks maps a C context pointer (as uintptr) to the Go token
	// callback registered for it.
	callbacks = map[uintptr]func(string) bool{}
)
// tokenCallback is invoked from C once per generated token. It looks up the
// callback registered for the given context pointer and forwards the token;
// its boolean result tells the predictor whether to keep going. When no
// callback is registered, prediction always continues.
//
//export tokenCallback
func tokenCallback(statePtr unsafe.Pointer, token *C.char) bool {
	m.Lock()
	defer m.Unlock()

	cb, registered := callbacks[uintptr(statePtr)]
	if !registered {
		return true
	}
	return cb(C.GoString(token))
}
// setCallback registers the token callback associated with the given C
// context pointer; a nil callback removes any existing registration.
func setCallback(statePtr unsafe.Pointer, callback func(string) bool) {
	m.Lock()
	defer m.Unlock()

	key := uintptr(statePtr)
	if callback != nil {
		callbacks[key] = callback
		return
	}
	delete(callbacks, key)
}