34b9db5afc
This change adds support for multiple concurrent requests, as well as loading multiple models by spawning multiple runners. The defaults are currently 1 concurrent request per model and 1 loaded model at a time, but these can be adjusted with the OLLAMA_NUM_PARALLEL and OLLAMA_MAX_LOADED_MODELS environment variables.
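As a minimal sketch of how a server could resolve these limits at startup (an illustration only, not Ollama's actual configuration code; envInt is a hypothetical helper):

package main

import (
    "fmt"
    "os"
    "strconv"
)

// envInt returns the integer value of an environment variable,
// falling back to def when it is unset or not a positive integer.
func envInt(key string, def int) int {
    if v := os.Getenv(key); v != "" {
        if n, err := strconv.Atoi(v); err == nil && n > 0 {
            return n
        }
    }
    return def
}

func main() {
    numParallel := envInt("OLLAMA_NUM_PARALLEL", 1)          // concurrent requests per model
    maxLoadedModels := envInt("OLLAMA_MAX_LOADED_MODELS", 1) // models resident at once
    fmt.Printf("num_parallel=%d max_loaded_models=%d\n", numParallel, maxLoadedModels)
}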
//go:build integration

package integration

import (
    "context"
    "testing"
    "time"

    "github.com/ollama/ollama/api"
)

func TestContextExhaustion(t *testing.T) {
    ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) // TODO maybe shorter?
    defer cancel()
    // Set up the test data
    req := api.GenerateRequest{
        Model:  "llama2",
        Prompt: "Write me a story with a ton of emojis?",
        Stream: &stream,
        Options: map[string]interface{}{
            "temperature": 0,
            "seed":        123,
            "num_ctx":     128,
        },
    }
    GenerateTestHelper(ctx, t, req, []string{"once", "upon", "lived"})
}
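Note that stream and GenerateTestHelper are not defined in this file; they are presumably shared helpers defined elsewhere in the integration package. Because of the //go:build integration tag, this test only compiles and runs when invoked with go test -tags integration.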