package llm

import (
	"bytes"
	"context"
	_ "embed"
	"errors"
	"fmt"
	"os"
	"os/exec"
	"sync"
	"time"

	"github.com/jmorganca/ollama/api"
	"github.com/jmorganca/ollama/format"
)

// jsonGrammar is a GBNF grammar describing JSON documents; constraining
// sampling with it forces the model's output to be syntactically valid JSON.
const jsonGrammar = `
root   ::= object
value  ::= object | array | string | number | ("true" | "false" | "null") ws

object ::=
  "{" ws (
            string ":" ws value
    ("," ws string ":" ws value)*
  )? "}" ws

array  ::=
  "[" ws (
            value
    ("," ws value)*
  )? "]" ws

string ::=
  "\"" (
    [^"\\] |
    "\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) # escapes
  )* "\"" ws

number ::= ("-"? ([0-9] | [1-9] [0-9]*)) ("." [0-9]+)? ([eE] [-+]? [0-9]+)? ws

# Optional space: by convention, applied in this grammar after literal chars when allowed
ws ::= ([ \t\n] ws)?
`
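
// A minimal sketch of how jsonGrammar is intended to be consumed (an
// assumption for illustration; the request-building code lives elsewhere in
// this package): when a caller sets PredictOpts.Format (defined below) to
// "json", the grammar is attached to the completion request sent to the
// llama runner:
//
//	request := map[string]any{"prompt": predict.Prompt}
//	if predict.Format == "json" {
//		request["grammar"] = jsonGrammar
//	}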

type llamaModel struct {
	hyperparameters llamaHyperparameters
}

func (llm *llamaModel) ModelFamily() string {
	return "llama"
}

// llamaModelType maps the layer count from a model's hyperparameters to the
// parameter-count label of the known llama-family variant with that many
// layers (e.g. a 32-layer model is a 7B llama).
func llamaModelType(numLayer uint32) string {
	switch numLayer {
	case 26:
		return "3B"
	case 32:
		return "7B"
	case 40:
		return "13B"
	case 48:
		return "34B"
	case 60:
		return "30B"
	case 80:
		return "65B"
	default:
		return "unknown"
	}
}

func (llm *llamaModel) ModelType() string {
	return llamaModelType(llm.hyperparameters.NumLayer)
}

func (llm *llamaModel) FileType() string {
	return fileType(llm.hyperparameters.FileType)
}

func (llm *llamaModel) NumLayers() int64 {
	return int64(llm.hyperparameters.NumLayer)
}

type llamaHyperparameters struct {
	// NumVocab is the size of the model's vocabulary.
	NumVocab uint32

	// NumEmbd is the size of the model's embedding layer.
	NumEmbd uint32
	NumMult uint32
	// NumHead is the number of attention heads.
	NumHead uint32

	// NumLayer is the number of layers in the model.
	NumLayer uint32
	NumRot   uint32

	// FileType describes the quantization level of the model, e.g. Q4_0, Q5_K, etc.
	FileType uint32
}

type Running struct {
	Port          int
	Cmd           *exec.Cmd
	Cancel        context.CancelFunc
	exitOnce      sync.Once
	exitCh        chan error // channel to receive the exit status of the subprocess
	*StatusWriter            // captures error messages from the llama runner process
}
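
// A minimal sketch of the exit plumbing (field usage inferred from the
// declarations above; the actual monitor goroutine lives elsewhere in this
// package): exitOnce guards exitCh so the subprocess exit status is
// delivered exactly once, even if several paths observe the exit:
//
//	go func() {
//		err := llm.Cmd.Wait()
//		llm.exitOnce.Do(func() {
//			llm.exitCh <- err
//		})
//	}()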

type ImageData struct {
	Data []byte `json:"data"`
	ID   int    `json:"id"`
}

var (
	errNvidiaSMI     = errors.New("warning: gpu support may not be enabled, check that you have installed GPU drivers: nvidia-smi command failed")
	errAvailableVRAM = errors.New("not enough VRAM available, falling back to CPU only")
	payloadMissing   = errors.New("expected dynamic library payloads not included in this build of ollama")
)

// StatusWriter is a writer that captures error messages from the llama runner process
type StatusWriter struct {
	ErrCh      chan error
	LastErrMsg string
}

func NewStatusWriter() *StatusWriter {
	return &StatusWriter{
		ErrCh: make(chan error, 1),
	}
}

func (w *StatusWriter) Write(b []byte) (int, error) {
	var errMsg string
	if _, after, ok := bytes.Cut(b, []byte("error:")); ok {
		errMsg = string(bytes.TrimSpace(after))
	} else if _, after, ok := bytes.Cut(b, []byte("CUDA error")); ok {
		errMsg = string(bytes.TrimSpace(after))
	}

	if errMsg != "" {
		w.LastErrMsg = errMsg
		w.ErrCh <- fmt.Errorf("llama runner: %s", errMsg)
	}

	return os.Stderr.Write(b)
}
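
// A minimal usage sketch (the runner path and flags are hypothetical
// placeholders): attach a StatusWriter as the subprocess's stderr so error
// lines are surfaced on ErrCh while all output is still mirrored to this
// process's stderr:
//
//	statusWriter := NewStatusWriter()
//	cmd := exec.CommandContext(ctx, "/path/to/llama-runner", "--port", fmt.Sprint(port))
//	cmd.Stderr = statusWriter
//	if err := cmd.Start(); err != nil {
//		return err
//	}
//	select {
//	case err := <-statusWriter.ErrCh:
//		return err // the runner reported an error during startup
//	case <-time.After(retryDelay):
//		// no early error; proceed to health checks
//	}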

type prediction struct {
	Content string `json:"content"`
	Model   string `json:"model"`
	Prompt  string `json:"prompt"`
	Stop    bool   `json:"stop"`

	Timings struct {
		PredictedN  int     `json:"predicted_n"`
		PredictedMS float64 `json:"predicted_ms"`
		PromptN     int     `json:"prompt_n"`
		PromptMS    float64 `json:"prompt_ms"`
	}
}
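
// A minimal decoding sketch (an assumption based on the json tags above):
// each streamed line from the runner's completion endpoint unmarshals into
// a prediction, with Timings populated once Stop is true:
//
//	var p prediction
//	if err := json.Unmarshal(line, &p); err != nil {
//		return fmt.Errorf("unmarshalling llm prediction response: %w", err)
//	}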

const maxBufferSize = 512 * format.KiloByte
const maxRetries = 3
const retryDelay = 1 * time.Second
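
// A minimal sketch of how the two retry constants above compose (doRequest
// is a hypothetical stand-in for this package's request helpers): retry a
// transient failure up to maxRetries times, pausing retryDelay between
// attempts:
//
//	for retry := 0; retry < maxRetries; retry++ {
//		if err := doRequest(); err == nil {
//			break
//		}
//		time.Sleep(retryDelay)
//	}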

type PredictOpts struct {
	Prompt  string
	Format  string
	Images  []api.ImageData
	Options api.Options
}

type PredictResult struct {
	Content            string
	Done               bool
	PromptEvalCount    int
	PromptEvalDuration time.Duration
	EvalCount          int
	EvalDuration       time.Duration
}
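
// A minimal sketch of filling a PredictResult from a prediction (an
// assumption for illustration; parseDurationMs is a hypothetical helper
// converting float64 milliseconds to a time.Duration, e.g.
// time.Duration(ms * float64(time.Millisecond))):
//
//	result := PredictResult{
//		Content:            p.Content,
//		Done:               p.Stop,
//		PromptEvalCount:    p.Timings.PromptN,
//		PromptEvalDuration: parseDurationMs(p.Timings.PromptMS),
//		EvalCount:          p.Timings.PredictedN,
//		EvalDuration:       parseDurationMs(p.Timings.PredictedMS),
//	}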

type TokenizeRequest struct {
	Content string `json:"content"`
}

type TokenizeResponse struct {
	Tokens []int `json:"tokens"`
}

type DetokenizeRequest struct {
	Tokens []int `json:"tokens"`
}

type DetokenizeResponse struct {
	Content string `json:"content"`
}

type EmbeddingRequest struct {
	Content string `json:"content"`
}

type EmbeddingResponse struct {
	Embedding []float64 `json:"embedding"`
}
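
// A minimal round-trip sketch for the request/response types above (the
// /tokenize endpoint path is an assumption in this section; the HTTP client
// code lives elsewhere in this package):
//
//	body, err := json.Marshal(TokenizeRequest{Content: prompt})
//	if err != nil {
//		return nil, err
//	}
//
//	resp, err := http.Post(fmt.Sprintf("http://127.0.0.1:%d/tokenize", llm.Port), "application/json", bytes.NewBuffer(body))
//	if err != nil {
//		return nil, err
//	}
//	defer resp.Body.Close()
//
//	var tokenized TokenizeResponse
//	if err := json.NewDecoder(resp.Body).Decode(&tokenized); err != nil {
//		return nil, err
//	}
//	return tokenized.Tokens, nil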