ollama/server/routes.go

package server

import (
	"embed"
	"encoding/json"
	"io"
	"log"
	"math"
	"net"
	"net/http"
	"path"
	"runtime"
	"strings"
	"text/template"

	"github.com/gin-gonic/gin"
	"github.com/lithammer/fuzzysearch/fuzzy"

	"github.com/jmorganca/ollama/api"
	"github.com/jmorganca/ollama/llama"
)

//go:embed templates/*
var templatesFS embed.FS

var templates = template.Must(template.ParseFS(templatesFS, "templates/*.prompt"))
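// generate handles POST /api/generate. It loads the requested model,
// renders the prompt template whose name is the closest fuzzy match for
// the model name, and streams the predicted tokens back to the client as
// newline-delimited JSON objects.
//
// A request body has roughly this shape (placeholder values; the exact
// JSON field names are defined on api.GenerateRequest):
//
//	{"model": "<model name>", "prompt": "<prompt text>"}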
func generate(c *gin.Context) {
	var req api.GenerateRequest

	// apply the defaults first so that any fields present in the request
	// body override them
	req.ModelOptions = api.DefaultModelOptions
	req.PredictOptions = api.DefaultPredictOptions

	if err := c.ShouldBindJSON(&req); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"message": err.Error()})
		return
	}

	// resolve remote models to their fully qualified local name
	if remoteModel, _ := getRemote(req.Model); remoteModel != nil {
		req.Model = remoteModel.FullName()
	}

	modelOpts := getModelOpts(req)
	modelOpts.NGPULayers = 1 // hard-code this for now

	model, err := llama.New(req.Model, modelOpts)
	if err != nil {
		log.Printf("loading the model failed: %v", err)
		c.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()})
		return
	}
	defer model.Free()

	// pick the prompt template whose file name is the closest fuzzy match
	// for the requested model name
	templateNames := make([]string, 0, len(templates.Templates()))
	for _, template := range templates.Templates() {
		templateNames = append(templateNames, template.Name())
	}

	match, _ := matchRankOne(path.Base(req.Model), templateNames)
	if template := templates.Lookup(match); template != nil {
		var sb strings.Builder
		if err := template.Execute(&sb, req); err != nil {
			log.Printf("prompt template failed: %v", err)
			c.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()})
			return
		}
		req.Prompt = sb.String()
	}
	// predicted tokens are forwarded from the llama callback to the HTTP
	// stream through this channel
	ch := make(chan string)
	model.SetTokenCallback(func(token string) bool {
		ch <- token
		return true
	})

	predictOpts := getPredictOpts(req)

	go func() {
		defer close(ch)
		if _, err := model.Predict(req.Prompt, predictOpts); err != nil {
			// panicking here would take down the whole server; log the
			// error and end the stream instead
			log.Printf("predict failed: %v", err)
		}
	}()
	c.Stream(func(w io.Writer) bool {
		token, ok := <-ch
		if !ok {
			// the channel is closed once prediction finishes
			return false
		}

		resp := api.GenerateResponse{
			Response: token,
		}

		bts, err := json.Marshal(resp)
		if err != nil {
			return false
		}

		// newline-delimited JSON: one response object per line
		bts = append(bts, '\n')
		if _, err := w.Write(bts); err != nil {
			return false
		}

		return true
	})
}
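// Serve registers the API routes on a default gin engine and serves them
// on the provided listener. It blocks until the server stops.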
func Serve(ln net.Listener) error {
	r := gin.Default()

	r.POST("/api/pull", func(c *gin.Context) {
		var req api.PullRequest
		if err := c.ShouldBindJSON(&req); err != nil {
			c.JSON(http.StatusBadRequest, gin.H{"message": err.Error()})
			return
		}

		progressCh := make(chan api.PullProgress)
		go func() {
			defer close(progressCh)
			if err := pull(req.Model, progressCh); err != nil {
				// TODO: writing a response here races with the stream
				// below once headers have been sent; pull errors should be
				// surfaced through the channel instead
				c.JSON(http.StatusBadRequest, gin.H{"message": err.Error()})
				return
			}
		}()

		c.Stream(func(w io.Writer) bool {
			progress, ok := <-progressCh
			if !ok {
				return false
			}

			bts, err := json.Marshal(progress)
			if err != nil {
				return false
			}

			bts = append(bts, '\n')
			if _, err := w.Write(bts); err != nil {
				return false
			}

			return true
		})
	})

	r.POST("/api/generate", generate)

	log.Printf("Listening on %s", ln.Addr())
	s := &http.Server{
		Handler: r,
	}

	return s.Serve(ln)
}
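// matchRankOne returns the element of targets with the smallest
// Levenshtein distance to source, along with that distance. It is used to
// pick the prompt template that best matches a model name.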
func matchRankOne(source string, targets []string) (bestMatch string, bestRank int) {
	bestRank = math.MaxInt
	for _, target := range targets {
		if rank := fuzzy.LevenshteinDistance(source, target); bestRank > rank {
			bestRank = rank
			bestMatch = target
		}
	}
	return
}
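// getModelOpts maps the model options carried on the request onto
// llama.ModelOptions.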
func getModelOpts(req api.GenerateRequest) llama.ModelOptions {
	return llama.ModelOptions{
		ContextSize: req.ModelOptions.ContextSize,
		Seed:        req.ModelOptions.Seed,
		F16Memory:   req.ModelOptions.F16Memory,
		MLock:       req.ModelOptions.MLock,
		Embeddings:  req.ModelOptions.Embeddings,
		MMap:        req.ModelOptions.MMap,
		LowVRAM:     req.ModelOptions.LowVRAM,
		NBatch:      req.ModelOptions.NBatch,
		VocabOnly:   req.ModelOptions.VocabOnly,
		NUMA:        req.ModelOptions.NUMA,
		NGPULayers:  req.ModelOptions.NGPULayers,
		MainGPU:     req.ModelOptions.MainGPU,
		TensorSplit: req.ModelOptions.TensorSplit,
	}
}
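// getPredictOpts maps the prediction (sampling) options carried on the
// request onto llama.PredictOptions. A Threads value of -1 is resolved to
// the number of logical CPUs.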
func getPredictOpts(req api.GenerateRequest) llama.PredictOptions {
	threads := req.PredictOptions.Threads
	if threads == -1 {
		// -1 means "use all available CPUs"
		threads = runtime.NumCPU()
	}

	return llama.PredictOptions{
		Threads:           threads,
		Seed:              req.PredictOptions.Seed,
		Tokens:            req.PredictOptions.Tokens,
		Penalty:           req.PredictOptions.Penalty,
		Repeat:            req.PredictOptions.Repeat,
		Batch:             req.PredictOptions.Batch,
		NKeep:             req.PredictOptions.NKeep,
		TopK:              req.PredictOptions.TopK,
		TopP:              req.PredictOptions.TopP,
		TailFreeSamplingZ: req.PredictOptions.TailFreeSamplingZ,
		TypicalP:          req.PredictOptions.TypicalP,
		Temperature:       req.PredictOptions.Temperature,
		FrequencyPenalty:  req.PredictOptions.FrequencyPenalty,
		PresencePenalty:   req.PredictOptions.PresencePenalty,
		Mirostat:          req.PredictOptions.Mirostat,
		MirostatTAU:       req.PredictOptions.MirostatTAU,
		MirostatETA:       req.PredictOptions.MirostatETA,
		MMap:              req.PredictOptions.MMap,
	}
}