4340f8eba4
We can have the same examples here as in e.g. https://github.com/ollama/ollama-python/tree/main/examples, using consistent naming. The existing example is renamed to include -http- since it uses direct HTTP requests rather than the api/ package. Updates #2840
40 lines
867 B
Go
40 lines
867 B
Go
package main
|
|
|
|
import (
|
|
"context"
|
|
"fmt"
|
|
"log"
|
|
|
|
"github.com/jmorganca/ollama/api"
|
|
)
|
|
|
|
func main() {
|
|
client, err := api.ClientFromEnvironment()
|
|
if err != nil {
|
|
log.Fatal(err)
|
|
}
|
|
|
|
// By default, GenerateRequest is streaming.
|
|
req := &api.GenerateRequest{
|
|
Model: "gemma",
|
|
Prompt: "how many planets are there?",
|
|
}
|
|
|
|
ctx := context.Background()
|
|
respFunc := func(resp api.GenerateResponse) error {
|
|
// Only print the response here; GenerateResponse has a number of other
|
|
// interesting fields you want to examine.
|
|
|
|
// In streaming mode, responses are partial so we call fmt.Print (and not
|
|
// Println) in order to avoid spurious newlines being introduced. The
|
|
// model will insert its own newlines if it wants.
|
|
fmt.Print(resp.Response)
|
|
return nil
|
|
}
|
|
|
|
err = client.Generate(ctx, req, respFunc)
|
|
if err != nil {
|
|
log.Fatal(err)
|
|
}
|
|
fmt.Println()
|
|
}
|