ollama/server/routes.go

package server

import (
	"fmt"
	"io"
	"log"
	"net"
	"net/http"
	"runtime"

	"github.com/gin-gonic/gin"

	"github.com/jmorganca/ollama/api"
	llama "github.com/jmorganca/ollama/llama"
)

// pull is a stub handler: it acknowledges the request without pulling
// anything yet.
func pull(c *gin.Context) {
	// TODO
	c.JSON(http.StatusOK, gin.H{"message": "ok"})
}

// generate loads the requested model and streams generated tokens back
// to the client.
func generate(c *gin.Context) {
	// TODO: these should be request parameters
	gpulayers := 0
	tokens := 512
	threads := runtime.NumCPU()

	// TODO: set prompt from template
	fmt.Println("Generating text...")

	var req api.GenerateRequest
	if err := c.ShouldBindJSON(&req); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"message": err.Error()})
		return
	}

	fmt.Println(req)

	l, err := llama.New(req.Model, llama.EnableF16Memory, llama.SetContext(128), llama.EnableEmbeddings, llama.SetGPULayers(gpulayers))
	if err != nil {
		fmt.Println("Loading the model failed:", err.Error())
		c.JSON(http.StatusInternalServerError, gin.H{"message": err.Error()})
		return
	}

	// Run prediction on its own goroutine and hand tokens to the HTTP
	// handler over a channel; closing the channel ends the stream below.
	ch := make(chan string)

	go func() {
		defer close(ch)
		_, err := l.Predict(req.Prompt, llama.Debug, llama.SetTokenCallback(func(token string) bool {
			ch <- token
			return true
		}), llama.SetTokens(tokens), llama.SetThreads(threads), llama.SetTopK(90), llama.SetTopP(0.86), llama.SetStopWords("llama"))
		if err != nil {
			// A panic here would take down the whole server; log instead.
			log.Printf("prediction failed: %v", err)
		}
	}()

	// Relay tokens to the client as server-sent events until the
	// channel is closed.
	c.Stream(func(w io.Writer) bool {
		tok, ok := <-ch
		if !ok {
			return false
		}
		c.SSEvent("token", tok)
		return true
	})
}
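
// Example of consuming the stream (illustrative; the JSON field names
// assume lowercased tags on api.GenerateRequest, which is defined
// elsewhere, and the address depends on the listener passed to Serve):
//
//	curl -N http://<addr>/api/generate \
//	     -d '{"model": "/path/to/model.bin", "prompt": "Hello"}'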

// Serve registers the API routes on a default gin engine and serves
// HTTP on the provided listener. Note the leading slash on route
// paths: gin panics at registration time without it.
func Serve(ln net.Listener) error {
	r := gin.Default()

	r.POST("/api/pull", pull)
	r.POST("/api/generate", generate)

	log.Printf("Listening on %s", ln.Addr())

	s := &http.Server{
		Handler: r,
	}

	return s.Serve(ln)
}
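
// A minimal sketch of wiring the server up from a main package (the
// address is an assumption; this file only requires a net.Listener):
//
//	ln, err := net.Listen("tcp", "127.0.0.1:8080")
//	if err != nil {
//		log.Fatal(err)
//	}
//	log.Fatal(server.Serve(ln))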