//go:build integration

package integration

import (
	"context"
	"log/slog"
	"os"
	"strconv"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"github.com/ollama/ollama/api"
	"github.com/ollama/ollama/format"
)
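// TestMultiModelConcurrency generates against two different models at the same time,
// passing each response's expected keywords to DoGenerate for validation.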
func TestMultiModelConcurrency(t *testing.T) {
	var (
		req = [2]api.GenerateRequest{
			{
				Model:     "orca-mini",
				Prompt:    "why is the ocean blue?",
				Stream:    &stream,
				KeepAlive: &api.Duration{Duration: 10 * time.Second},
				Options: map[string]interface{}{
					"seed":        42,
					"temperature": 0.0,
				},
			}, {
				Model:     "tinydolphin",
				Prompt:    "what is the origin of the us thanksgiving holiday?",
				Stream:    &stream,
				KeepAlive: &api.Duration{Duration: 10 * time.Second},
				Options: map[string]interface{}{
					"seed":        42,
					"temperature": 0.0,
				},
			},
		}
		resp = [2][]string{
			[]string{"sunlight"},
			[]string{"england", "english", "massachusetts", "pilgrims", "british"},
		}
	)
	var wg sync.WaitGroup
	wg.Add(len(req))
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*240)
	defer cancel()

	client, _, cleanup := InitServerConnection(ctx, t)
	defer cleanup()

	for i := 0; i < len(req); i++ {
		require.NoError(t, PullIfMissing(ctx, client, req[i].Model))
	}
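	// Run both generate requests concurrently and wait for them to complete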
	for i := 0; i < len(req); i++ {
		go func(i int) {
			defer wg.Done()
			DoGenerate(ctx, t, client, req[i], resp[i], 60*time.Second, 10*time.Second)
		}(i)
	}
	wg.Wait()
}
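// TestIntegrationConcurrentPredictOrcaMini fans out concurrent generate requests over the
// standard request set, scaling the load down when OLLAMA_MAX_VRAM indicates a small card.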
func TestIntegrationConcurrentPredictOrcaMini(t *testing.T) {
	req, resp := GenerateRequests()
	reqLimit := len(req)
	iterLimit := 5

	vram := os.Getenv("OLLAMA_MAX_VRAM") // TODO - discover actual VRAM
	if vram != "" {
		maxVram, err := strconv.ParseUint(vram, 10, 64)
		require.NoError(t, err)
		// Don't hammer on small VRAM cards...
		if maxVram < 4*1024*1024*1024 {
			reqLimit = min(reqLimit, 2)
			iterLimit = 2
		}
	}

	ctx, cancel := context.WithTimeout(context.Background(), 9*time.Minute)
	defer cancel()
	client, _, cleanup := InitServerConnection(ctx, t)
	defer cleanup()

	// Get the server running (if applicable) and warm the model up with a single initial request
	DoGenerate(ctx, t, client, req[0], resp[0], 60*time.Second, 10*time.Second)
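	// Fan out reqLimit concurrent workers, each running iterLimit generations sequentially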
	var wg sync.WaitGroup
	wg.Add(reqLimit)
	for i := 0; i < reqLimit; i++ {
		go func(i int) {
			defer wg.Done()
			for j := 0; j < iterLimit; j++ {
				slog.Info("Starting", "req", i, "iter", j)
				// On slower GPUs it can take a while to process the concurrent requests
				// so we allow a much longer initial timeout
				DoGenerate(ctx, t, client, req[i], resp[i], 120*time.Second, 20*time.Second)
			}
		}(i)
	}
	wg.Wait()
}

// Stress the system if we know how much VRAM it has, and attempt to load more models than will fit
func TestMultiModelStress(t *testing.T) {
	s := os.Getenv("OLLAMA_MAX_VRAM") // TODO - discover actual VRAM
	if s == "" {
		t.Skip("OLLAMA_MAX_VRAM not specified, can't pick the right models for the stress test")
	}

	maxVram, err := strconv.ParseUint(s, 10, 64)
	if err != nil {
		t.Fatal(err)
	}

	type model struct {
		name string
		size uint64 // Approximate amount of VRAM they typically use when fully loaded in VRAM
	}

	smallModels := []model{
		{
			name: "orca-mini",
			size: 2992 * format.MebiByte,
		},
		{
			name: "phi",
			size: 2616 * format.MebiByte,
		},
		{
			name: "gemma:2b",
			size: 2364 * format.MebiByte,
		},
		{
			name: "stable-code:3b",
			size: 2608 * format.MebiByte,
		},
		{
			name: "starcoder2:3b",
			size: 2166 * format.MebiByte,
		},
	}
	mediumModels := []model{
		{
			name: "llama2",
			size: 5118 * format.MebiByte,
		},
		{
			name: "mistral",
			size: 4620 * format.MebiByte,
		},
		{
			name: "orca-mini:7b",
			size: 5118 * format.MebiByte,
		},
		{
			name: "dolphin-mistral",
			size: 4620 * format.MebiByte,
		},
		{
			name: "gemma:7b",
			size: 5000 * format.MebiByte,
		},
		{
			name: "codellama:7b",
			size: 5118 * format.MebiByte,
		},
	}

	// These seem to be too slow to be useful...
	// largeModels := []model{
	// 	{
	// 		name: "llama2:13b",
	// 		size: 7400 * format.MebiByte,
	// 	},
	// 	{
	// 		name: "codellama:13b",
	// 		size: 7400 * format.MebiByte,
	// 	},
	// 	{
	// 		name: "orca-mini:13b",
	// 		size: 7400 * format.MebiByte,
	// 	},
	// 	{
	// 		name: "gemma:7b",
	// 		size: 5000 * format.MebiByte,
	// 	},
	// 	{
	// 		name: "starcoder2:15b",
	// 		size: 9100 * format.MebiByte,
	// 	},
	// }

	var chosenModels []model
	switch {
	case maxVram < 10000*format.MebiByte:
		slog.Info("selecting small models")
		chosenModels = smallModels
	// case maxVram < 30000*format.MebiByte:
	default:
		slog.Info("selecting medium models")
		chosenModels = mediumModels
		// default:
		// 	slog.Info("selecting large models")
		// 	chosenModels = largeModels
	}
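	// Reuse the standard request set, but point each request at one of the chosen models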
	req, resp := GenerateRequests()
	for i := range req {
		if i >= len(chosenModels) {
			break
		}
		req[i].Model = chosenModels[i].name
	}

	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Minute) // TODO baseline -- 10m too short
	defer cancel()
	client, _, cleanup := InitServerConnection(ctx, t)
	defer cleanup()

	// Make sure all the models are pulled before we get started
	for _, r := range req {
		require.NoError(t, PullIfMissing(ctx, client, r.Model))
	}

	var wg sync.WaitGroup
	consumed := uint64(256 * format.MebiByte) // Assume some baseline usage
	for i := 0; i < len(req); i++ {
		// Always get at least 2 models, but don't overshoot VRAM too much or we'll take too long
		if i > 1 && consumed > maxVram {
			slog.Info("achieved target vram exhaustion", "count", i, "vram", format.HumanBytes2(maxVram), "models", format.HumanBytes2(consumed))
			break
		}
		consumed += chosenModels[i].size
		slog.Info("target vram", "count", i, "vram", format.HumanBytes2(maxVram), "models", format.HumanBytes2(consumed))

		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			for j := 0; j < 3; j++ {
				slog.Info("Starting", "req", i, "iter", j, "model", req[i].Model)
				DoGenerate(ctx, t, client, req[i], resp[i], 120*time.Second, 5*time.Second)
			}
		}(i)
	}
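	// Periodically log which models the server reports as loaded while the workers run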
	go func() {
		for {
			time.Sleep(2 * time.Second)
			select {
			case <-ctx.Done():
				return
			default:
				models, err := client.ListRunning(ctx)
				if err != nil {
					slog.Warn("failed to list running models", "error", err)
					continue
				}
				for _, m := range models.Models {
					slog.Info("loaded model snapshot", "model", m)
				}
			}
		}
	}()
	wg.Wait()
}