commit 415d9f0f15
Signed-off-by: baalajimaestro <me@baalajimaestro.me>

79 changed files with 1964 additions and 980 deletions
.github/workflows/release.yaml (vendored): 4 changes
@@ -304,6 +304,10 @@ jobs:
         write-host "Installing plugin"
         & "${env:RUNNER_TEMP}\plugin\*\kmscng.msi" /quiet
         write-host "plugin installed"
+    - name: remove unwanted mingw dll.a files
+      run: |
+        Remove-Item "C:\mingw64\x86_64-w64-mingw32\lib\libpthread.dll.a"
+        Remove-Item "C:\mingw64\x86_64-w64-mingw32\lib\libwinpthread.dll.a"
     - uses: actions/setup-go@v5
       with:
         go-version-file: go.mod
.github/workflows/test.yaml (vendored): 2 changes
@@ -58,6 +58,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     env:
       GOARCH: ${{ matrix.arch }}
+      CGO_ENABLED: '1'
     steps:
       - uses: actions/checkout@v4
       - uses: actions/setup-go@v5

@@ -79,6 +80,7 @@ jobs:
       - run: go generate -x ./...
        if: ${{ ! startsWith(matrix.os, 'windows-') }}
        name: 'Unix Go Generate'
+      - run: go build .
      - uses: actions/upload-artifact@v4
        with:
          name: ${{ matrix.os }}-${{ matrix.arch }}-libraries
api/types.go: 90 changes
@@ -168,42 +168,11 @@ type Runner struct {
     F16KV     bool `json:"f16_kv,omitempty"`
     LogitsAll bool `json:"logits_all,omitempty"`
     VocabOnly bool `json:"vocab_only,omitempty"`
-    UseMMap   TriState `json:"use_mmap,omitempty"`
+    UseMMap   *bool `json:"use_mmap,omitempty"`
     UseMLock  bool `json:"use_mlock,omitempty"`
     NumThread int  `json:"num_thread,omitempty"`
 }
 
-type TriState int
-
-const (
-    TriStateUndefined TriState = -1
-    TriStateFalse     TriState = 0
-    TriStateTrue      TriState = 1
-)
-
-func (b *TriState) UnmarshalJSON(data []byte) error {
-    var v bool
-    if err := json.Unmarshal(data, &v); err != nil {
-        return err
-    }
-    if v {
-        *b = TriStateTrue
-    }
-    *b = TriStateFalse
-    return nil
-}
-
-func (b *TriState) MarshalJSON() ([]byte, error) {
-    if *b == TriStateUndefined {
-        return nil, nil
-    }
-    var v bool
-    if *b == TriStateTrue {
-        v = true
-    }
-    return json.Marshal(v)
-}
-
 // EmbeddingRequest is the request passed to [Client.Embeddings].
 type EmbeddingRequest struct {
     // Model is the model name.
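Note: the hunk above swaps the custom TriState enum for a plain `*bool`. A minimal standalone Go sketch (not part of the commit; the struct name is illustrative) of why a pointer is enough to distinguish "unset" from an explicit true/false when round-tripping JSON:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// opts mirrors the shape of the UseMMap field after this change.
type opts struct {
	UseMMap *bool `json:"use_mmap,omitempty"`
}

func main() {
	for _, payload := range []string{`{}`, `{"use_mmap":true}`, `{"use_mmap":false}`} {
		var o opts
		if err := json.Unmarshal([]byte(payload), &o); err != nil {
			panic(err)
		}
		if o.UseMMap == nil {
			fmt.Println(payload, "-> unset (fall back to the server default)")
		} else {
			fmt.Println(payload, "->", *o.UseMMap)
		}
	}
}
```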
@@ -345,6 +314,13 @@ type ProcessModelResponse struct {
     SizeVRAM int64 `json:"size_vram"`
 }
 
+type RetrieveModelResponse struct {
+    Id      string `json:"id"`
+    Object  string `json:"object"`
+    Created int64  `json:"created"`
+    OwnedBy string `json:"owned_by"`
+}
+
 type TokenResponse struct {
     Token string `json:"token"`
 }
@@ -437,19 +413,6 @@ func (opts *Options) FromMap(m map[string]interface{}) error {
             continue
         }
 
-        if reflect.PointerTo(field.Type()) == reflect.TypeOf((*TriState)(nil)) {
-            val, ok := val.(bool)
-            if !ok {
-                return fmt.Errorf("option %q must be of type boolean", key)
-            }
-            if val {
-                field.SetInt(int64(TriStateTrue))
-            } else {
-                field.SetInt(int64(TriStateFalse))
-            }
-            continue
-        }
-
         switch field.Kind() {
         case reflect.Int:
             switch t := val.(type) {
@@ -496,6 +459,17 @@ func (opts *Options) FromMap(m map[string]interface{}) error {
                 slice[i] = str
             }
             field.Set(reflect.ValueOf(slice))
+        case reflect.Pointer:
+            var b bool
+            if field.Type() == reflect.TypeOf(&b) {
+                val, ok := val.(bool)
+                if !ok {
+                    return fmt.Errorf("option %q must be of type boolean", key)
+                }
+                field.Set(reflect.ValueOf(&val))
+            } else {
+                return fmt.Errorf("unknown type loading config params: %v %v", field.Kind(), field.Type())
+            }
         default:
             return fmt.Errorf("unknown type loading config params: %v", field.Kind())
         }
@@ -538,7 +512,7 @@ func DefaultOptions() Options {
             LowVRAM:  false,
             F16KV:    true,
             UseMLock: false,
-            UseMMap:  TriStateUndefined,
+            UseMMap:  nil,
             UseNUMA:  false,
         },
     }
@@ -608,19 +582,6 @@ func FormatParams(params map[string][]string) (map[string]interface{}, error) {
         } else {
             field := valueOpts.FieldByName(opt.Name)
             if field.IsValid() && field.CanSet() {
-                if reflect.PointerTo(field.Type()) == reflect.TypeOf((*TriState)(nil)) {
-                    boolVal, err := strconv.ParseBool(vals[0])
-                    if err != nil {
-                        return nil, fmt.Errorf("invalid bool value %s", vals)
-                    }
-                    if boolVal {
-                        out[key] = TriStateTrue
-                    } else {
-                        out[key] = TriStateFalse
-                    }
-                    continue
-                }
-
                 switch field.Kind() {
                 case reflect.Float32:
                     floatVal, err := strconv.ParseFloat(vals[0], 32)
@@ -648,6 +609,17 @@ func FormatParams(params map[string][]string) (map[string]interface{}, error) {
                 case reflect.Slice:
                     // TODO: only string slices are supported right now
                     out[key] = vals
+                case reflect.Pointer:
+                    var b bool
+                    if field.Type() == reflect.TypeOf(&b) {
+                        boolVal, err := strconv.ParseBool(vals[0])
+                        if err != nil {
+                            return nil, fmt.Errorf("invalid bool value %s", vals)
+                        }
+                        out[key] = &boolVal
+                    } else {
+                        return nil, fmt.Errorf("unknown type %s for %s", field.Kind(), key)
+                    }
                 default:
                     return nil, fmt.Errorf("unknown type %s for %s", field.Kind(), key)
                 }
@@ -108,25 +108,27 @@ func TestDurationMarshalUnmarshal(t *testing.T) {
 }
 
 func TestUseMmapParsingFromJSON(t *testing.T) {
+    tr := true
+    fa := false
     tests := []struct {
         name string
         req  string
-        exp  TriState
+        exp  *bool
     }{
         {
             name: "Undefined",
             req:  `{ }`,
-            exp:  TriStateUndefined,
+            exp:  nil,
         },
         {
             name: "True",
             req:  `{ "use_mmap": true }`,
-            exp:  TriStateTrue,
+            exp:  &tr,
         },
         {
             name: "False",
             req:  `{ "use_mmap": false }`,
-            exp:  TriStateFalse,
+            exp:  &fa,
         },
     }
@@ -144,50 +146,52 @@ func TestUseMmapParsingFromJSON(t *testing.T) {
 }
 
 func TestUseMmapFormatParams(t *testing.T) {
+    tr := true
+    fa := false
     tests := []struct {
         name string
         req  map[string][]string
-        exp  TriState
+        exp  *bool
         err  error
     }{
         {
             name: "True",
             req: map[string][]string{
-                "use_mmap": []string{"true"},
+                "use_mmap": {"true"},
             },
-            exp: TriStateTrue,
+            exp: &tr,
             err: nil,
         },
         {
             name: "False",
             req: map[string][]string{
-                "use_mmap": []string{"false"},
+                "use_mmap": {"false"},
             },
-            exp: TriStateFalse,
+            exp: &fa,
             err: nil,
         },
         {
             name: "Numeric True",
             req: map[string][]string{
-                "use_mmap": []string{"1"},
+                "use_mmap": {"1"},
             },
-            exp: TriStateTrue,
+            exp: &tr,
             err: nil,
         },
         {
             name: "Numeric False",
             req: map[string][]string{
-                "use_mmap": []string{"0"},
+                "use_mmap": {"0"},
             },
-            exp: TriStateFalse,
+            exp: &fa,
             err: nil,
         },
         {
             name: "invalid string",
             req: map[string][]string{
-                "use_mmap": []string{"foo"},
+                "use_mmap": {"foo"},
             },
-            exp: TriStateUndefined,
+            exp: nil,
             err: fmt.Errorf("invalid bool value [foo]"),
         },
     }
@@ -195,11 +199,11 @@ func TestUseMmapFormatParams(t *testing.T) {
     for _, test := range tests {
         t.Run(test.name, func(t *testing.T) {
             resp, err := FormatParams(test.req)
-            require.Equal(t, err, test.err)
+            require.Equal(t, test.err, err)
             respVal, ok := resp["use_mmap"]
-            if test.exp != TriStateUndefined {
+            if test.exp != nil {
                 assert.True(t, ok, "resp: %v", resp)
-                assert.Equal(t, test.exp, respVal)
+                assert.Equal(t, *test.exp, *respVal.(*bool))
             }
         })
     }
@@ -104,7 +104,7 @@ like to use. For example, to compile an optimized binary for an Intel i9-9880H,
 you might use:
 
 ```
-OLLAMA_CUSTOM_CPU_DEFS="-DLLAMA_AVX=on -DLLAMA_AVX2=on -DLLAMA_F16C=on -DLLAMA_FMA=on" go generate ./...
+OLLAMA_CUSTOM_CPU_DEFS="-DGGML_AVX=on -DGGML_AVX2=on -DGGML_F16C=on -DGGML_FMA=on" go generate ./...
 go build .
 ```
docs/faq.md: 16 changes
@@ -257,3 +257,19 @@ If you wish to override the `OLLAMA_KEEP_ALIVE` setting, use the `keep_alive` AP
 ## How do I manage the maximum number of requests the Ollama server can queue?
 
 If too many requests are sent to the server, it will respond with a 503 error indicating the server is overloaded. You can adjust how many requests may be queued by setting `OLLAMA_MAX_QUEUE`.
+
+## How does Ollama handle concurrent requests?
+
+Ollama supports two levels of concurrent processing. If your system has sufficient available memory (system memory when using CPU inference, or VRAM for GPU inference), then multiple models can be loaded at the same time. For a given model, if there is sufficient available memory when the model is loaded, it is configured to allow parallel request processing.
+
+If there is insufficient available memory to load a new model request while one or more models are already loaded, all new requests will be queued until the new model can be loaded. As prior models become idle, one or more will be unloaded to make room for the new model. Queued requests will be processed in order. When using GPU inference, new models must be able to fit completely in VRAM to allow concurrent model loads.
+
+Parallel request processing for a given model increases the context size by the number of parallel requests. For example, a 2K context with 4 parallel requests results in an 8K context and additional memory allocation.
+
+The following server settings may be used to adjust how Ollama handles concurrent requests on most platforms:
+
+- `OLLAMA_MAX_LOADED_MODELS` - The maximum number of models that can be loaded concurrently, provided they fit in available memory. The default is 3 * the number of GPUs, or 3 for CPU inference.
+- `OLLAMA_NUM_PARALLEL` - The maximum number of parallel requests each model will process at the same time. The default will auto-select either 4 or 1 based on available memory.
+- `OLLAMA_MAX_QUEUE` - The maximum number of requests Ollama will queue when busy before rejecting additional requests. The default is 512.
+
+Note: Windows with Radeon GPUs currently defaults to a maximum of 1 model due to limitations in ROCm v5.7 for available VRAM reporting. Once ROCm v6 is available, Windows Radeon will follow the defaults above. You may enable concurrent model loads on Radeon on Windows, but make sure you don't load more models than will fit into your GPUs' VRAM.
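The context-size arithmetic described in the FAQ addition above is simple linear scaling; a tiny illustrative sketch (values are only examples):

```go
package main

import "fmt"

func main() {
	numCtx := 2048    // context size requested per model load
	numParallel := 4  // OLLAMA_NUM_PARALLEL
	// each parallel slot gets its own share, so the allocation scales linearly
	fmt.Println("effective context allocation:", numCtx*numParallel) // 8192
}
```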
@@ -65,6 +65,7 @@ curl http://localhost:11434/v1/chat/completions \
         }
       ]
     }'
+
 ```
 
 ## Endpoints
@@ -70,14 +70,18 @@ curl -fsSL https://ollama.com/install.sh | OLLAMA_VERSION="0.1.29" sh
 
 If your system is configured with the "noexec" flag where Ollama stores its temporary executable files, you can specify an alternate location by setting OLLAMA_TMPDIR to a location writable by the user ollama runs as. For example OLLAMA_TMPDIR=/usr/share/ollama/
 
-## Container fails to run on NVIDIA GPU
+## NVIDIA GPU Discovery
 
-Make sure you've set up the container runtime first as described in [docker.md](./docker.md)
+When Ollama starts up, it takes inventory of the GPUs present in the system to determine compatibility and how much VRAM is available. Sometimes this discovery can fail to find your GPUs. In general, running the latest driver will yield the best results.
 
-Sometimes the container runtime can have difficulties initializing the GPU. When you check the server logs, this can show up as various error codes, such as "3" (not initialized), "46" (device unavailable), "100" (no device), "999" (unknown), or others. The following troubleshooting techniques may help resolve the problem
+### Linux NVIDIA Troubleshooting
 
-- Is the container runtime working? Try `docker run --gpus all ubuntu nvidia-smi` - if this doesn't work, Ollama won't be able to see your NVIDIA GPU.
-- Is the uvm driver not loaded? `sudo nvidia-modprobe -u`
+If you are using a container to run Ollama, make sure you've set up the container runtime first as described in [docker.md](./docker.md)
+
+Sometimes Ollama can have difficulties initializing the GPU. When you check the server logs, this can show up as various error codes, such as "3" (not initialized), "46" (device unavailable), "100" (no device), "999" (unknown), or others. The following troubleshooting techniques may help resolve the problem.
+
+- If you are using a container, is the container runtime working? Try `docker run --gpus all ubuntu nvidia-smi` - if this doesn't work, Ollama won't be able to see your NVIDIA GPU.
+- Is the uvm driver loaded? `sudo nvidia-modprobe -u`
 - Try reloading the nvidia_uvm driver - `sudo rmmod nvidia_uvm` then `sudo modprobe nvidia_uvm`
 - Try rebooting
 - Make sure you're running the latest nvidia drivers
@@ -85,3 +89,8 @@ Sometimes the container runtime can have difficulties initializing the GPU. When
 If none of those resolve the problem, gather additional information and file an issue:
 - Set `CUDA_ERROR_LEVEL=50` and try again to get more diagnostic logs
 - Check dmesg for any errors `sudo dmesg | grep -i nvrm` and `sudo dmesg | grep -i nvidia`
+
+
+## Windows Terminal Errors
+
+Older versions of Windows 10 (e.g., 21H1) are known to have a bug where the standard terminal program does not display control characters correctly. This can result in a long string of characters like `←[?25h←[?25l` being displayed, sometimes erroring with `The parameter is incorrect`. To resolve this problem, please update to Win 10 22H2 or newer.
@@ -19,7 +19,7 @@ Logs will often be helpful in diagnosing the problem (see
 
 ## System Requirements
 
-* Windows 10 or newer, Home or Pro
+* Windows 10 22H2 or newer, Home or Pro
 * NVIDIA 452.39 or newer Drivers if you have an NVIDIA card
 * AMD Radeon Driver https://www.amd.com/en/support if you have a Radeon card
@@ -4,12 +4,14 @@ import (
     "errors"
     "fmt"
     "log/slog"
+    "math"
     "net"
     "os"
     "path/filepath"
     "runtime"
     "strconv"
     "strings"
+    "time"
 )
 
 type OllamaHost struct {
@@ -34,17 +36,17 @@ var (
     // Set via OLLAMA_HOST in the environment
     Host *OllamaHost
     // Set via OLLAMA_KEEP_ALIVE in the environment
-    KeepAlive string
+    KeepAlive time.Duration
     // Set via OLLAMA_LLM_LIBRARY in the environment
     LLMLibrary string
     // Set via OLLAMA_MAX_LOADED_MODELS in the environment
     MaxRunners int
     // Set via OLLAMA_MAX_QUEUE in the environment
     MaxQueuedRequests int
-    // Set via OLLAMA_MODELS in the environment
-    ModelsDir string
     // Set via OLLAMA_MAX_VRAM in the environment
     MaxVRAM uint64
+    // Set via OLLAMA_MODELS in the environment
+    ModelsDir string
     // Set via OLLAMA_NOHISTORY in the environment
     NoHistory bool
     // Set via OLLAMA_NOPRUNE in the environment
|
||||||
"OLLAMA_HOST": {"OLLAMA_HOST", Host, "IP Address for the ollama server (default 127.0.0.1:11434)"},
|
"OLLAMA_HOST": {"OLLAMA_HOST", Host, "IP Address for the ollama server (default 127.0.0.1:11434)"},
|
||||||
"OLLAMA_KEEP_ALIVE": {"OLLAMA_KEEP_ALIVE", KeepAlive, "The duration that models stay loaded in memory (default \"5m\")"},
|
"OLLAMA_KEEP_ALIVE": {"OLLAMA_KEEP_ALIVE", KeepAlive, "The duration that models stay loaded in memory (default \"5m\")"},
|
||||||
"OLLAMA_LLM_LIBRARY": {"OLLAMA_LLM_LIBRARY", LLMLibrary, "Set LLM library to bypass autodetection"},
|
"OLLAMA_LLM_LIBRARY": {"OLLAMA_LLM_LIBRARY", LLMLibrary, "Set LLM library to bypass autodetection"},
|
||||||
"OLLAMA_MAX_LOADED_MODELS": {"OLLAMA_MAX_LOADED_MODELS", MaxRunners, "Maximum number of loaded models (default 1)"},
|
"OLLAMA_MAX_LOADED_MODELS": {"OLLAMA_MAX_LOADED_MODELS", MaxRunners, "Maximum number of loaded models per GPU"},
|
||||||
"OLLAMA_MAX_QUEUE": {"OLLAMA_MAX_QUEUE", MaxQueuedRequests, "Maximum number of queued requests"},
|
"OLLAMA_MAX_QUEUE": {"OLLAMA_MAX_QUEUE", MaxQueuedRequests, "Maximum number of queued requests"},
|
||||||
"OLLAMA_MAX_VRAM": {"OLLAMA_MAX_VRAM", MaxVRAM, "Maximum VRAM"},
|
"OLLAMA_MAX_VRAM": {"OLLAMA_MAX_VRAM", MaxVRAM, "Maximum VRAM"},
|
||||||
"OLLAMA_MODELS": {"OLLAMA_MODELS", ModelsDir, "The path to the models directory"},
|
"OLLAMA_MODELS": {"OLLAMA_MODELS", ModelsDir, "The path to the models directory"},
|
||||||
"OLLAMA_NOHISTORY": {"OLLAMA_NOHISTORY", NoHistory, "Do not preserve readline history"},
|
"OLLAMA_NOHISTORY": {"OLLAMA_NOHISTORY", NoHistory, "Do not preserve readline history"},
|
||||||
"OLLAMA_NOPRUNE": {"OLLAMA_NOPRUNE", NoPrune, "Do not prune model blobs on startup"},
|
"OLLAMA_NOPRUNE": {"OLLAMA_NOPRUNE", NoPrune, "Do not prune model blobs on startup"},
|
||||||
"OLLAMA_NUM_PARALLEL": {"OLLAMA_NUM_PARALLEL", NumParallel, "Maximum number of parallel requests (default 1)"},
|
"OLLAMA_NUM_PARALLEL": {"OLLAMA_NUM_PARALLEL", NumParallel, "Maximum number of parallel requests"},
|
||||||
"OLLAMA_ORIGINS": {"OLLAMA_ORIGINS", AllowOrigins, "A comma separated list of allowed origins"},
|
"OLLAMA_ORIGINS": {"OLLAMA_ORIGINS", AllowOrigins, "A comma separated list of allowed origins"},
|
||||||
"OLLAMA_RUNNERS_DIR": {"OLLAMA_RUNNERS_DIR", RunnersDir, "Location for runners"},
|
"OLLAMA_RUNNERS_DIR": {"OLLAMA_RUNNERS_DIR", RunnersDir, "Location for runners"},
|
||||||
"OLLAMA_SCHED_SPREAD": {"OLLAMA_SCHED_SPREAD", SchedSpread, "Always schedule model across all GPUs"},
|
"OLLAMA_SCHED_SPREAD": {"OLLAMA_SCHED_SPREAD", SchedSpread, "Always schedule model across all GPUs"},
|
||||||
|
@ -129,9 +131,10 @@ func clean(key string) string {
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
// default values
|
// default values
|
||||||
NumParallel = 1
|
NumParallel = 0 // Autoselect
|
||||||
MaxRunners = 1
|
MaxRunners = 0 // Autoselect
|
||||||
MaxQueuedRequests = 512
|
MaxQueuedRequests = 512
|
||||||
|
KeepAlive = 5 * time.Minute
|
||||||
|
|
||||||
LoadConfig()
|
LoadConfig()
|
||||||
}
|
}
|
||||||
|
@@ -205,8 +208,8 @@ func LoadConfig() {
 
     if onp := clean("OLLAMA_NUM_PARALLEL"); onp != "" {
         val, err := strconv.Atoi(onp)
-        if err != nil || val <= 0 {
-            slog.Error("invalid setting must be greater than zero", "OLLAMA_NUM_PARALLEL", onp, "error", err)
+        if err != nil {
+            slog.Error("invalid setting, ignoring", "OLLAMA_NUM_PARALLEL", onp, "error", err)
         } else {
             NumParallel = val
         }
@@ -251,7 +254,7 @@ func LoadConfig() {
     if maxRunners != "" {
         m, err := strconv.Atoi(maxRunners)
         if err != nil {
-            slog.Error("invalid setting", "OLLAMA_MAX_LOADED_MODELS", maxRunners, "error", err)
+            slog.Error("invalid setting, ignoring", "OLLAMA_MAX_LOADED_MODELS", maxRunners, "error", err)
         } else {
             MaxRunners = m
         }
@@ -260,13 +263,16 @@ func LoadConfig() {
     if onp := os.Getenv("OLLAMA_MAX_QUEUE"); onp != "" {
         p, err := strconv.Atoi(onp)
         if err != nil || p <= 0 {
-            slog.Error("invalid setting", "OLLAMA_MAX_QUEUE", onp, "error", err)
+            slog.Error("invalid setting, ignoring", "OLLAMA_MAX_QUEUE", onp, "error", err)
         } else {
             MaxQueuedRequests = p
         }
     }
 
-    KeepAlive = clean("OLLAMA_KEEP_ALIVE")
+    ka := clean("OLLAMA_KEEP_ALIVE")
+    if ka != "" {
+        loadKeepAlive(ka)
+    }
 
     var err error
     ModelsDir, err = getModelsDir()
@@ -344,3 +350,24 @@ func getOllamaHost() (*OllamaHost, error) {
         Port:   port,
     }, nil
 }
+
+func loadKeepAlive(ka string) {
+    v, err := strconv.Atoi(ka)
+    if err != nil {
+        d, err := time.ParseDuration(ka)
+        if err == nil {
+            if d < 0 {
+                KeepAlive = time.Duration(math.MaxInt64)
+            } else {
+                KeepAlive = d
+            }
+        }
+    } else {
+        d := time.Duration(v) * time.Second
+        if d < 0 {
+            KeepAlive = time.Duration(math.MaxInt64)
+        } else {
+            KeepAlive = d
+        }
+    }
+}
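The parsing rules introduced by `loadKeepAlive` above can be summarized in a small standalone sketch (an approximation for illustration, not the envconfig code itself): bare integers are read as seconds, anything else is parsed with Go duration syntax, and negative values mean the model is kept loaded indefinitely.

```go
package main

import (
	"fmt"
	"math"
	"strconv"
	"time"
)

// parseKeepAlive approximates how OLLAMA_KEEP_ALIVE strings map to durations.
func parseKeepAlive(s string) time.Duration {
	d := 5 * time.Minute // default when the value cannot be parsed
	if n, err := strconv.Atoi(s); err == nil {
		d = time.Duration(n) * time.Second
	} else if pd, err := time.ParseDuration(s); err == nil {
		d = pd
	}
	if d < 0 {
		// negative values mean "never unload"
		return time.Duration(math.MaxInt64)
	}
	return d
}

func main() {
	for _, s := range []string{"3", "1h", "-1s", "-1", "bogus"} {
		fmt.Printf("%-6s -> %v\n", s, parseKeepAlive(s))
	}
}
```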
@@ -2,8 +2,10 @@ package envconfig
 
 import (
     "fmt"
+    "math"
     "net"
     "testing"
+    "time"
 
     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
@@ -23,6 +25,21 @@ func TestConfig(t *testing.T) {
     t.Setenv("OLLAMA_FLASH_ATTENTION", "1")
     LoadConfig()
     require.True(t, FlashAttention)
+    t.Setenv("OLLAMA_KEEP_ALIVE", "")
+    LoadConfig()
+    require.Equal(t, 5*time.Minute, KeepAlive)
+    t.Setenv("OLLAMA_KEEP_ALIVE", "3")
+    LoadConfig()
+    require.Equal(t, 3*time.Second, KeepAlive)
+    t.Setenv("OLLAMA_KEEP_ALIVE", "1h")
+    LoadConfig()
+    require.Equal(t, 1*time.Hour, KeepAlive)
+    t.Setenv("OLLAMA_KEEP_ALIVE", "-1s")
+    LoadConfig()
+    require.Equal(t, time.Duration(math.MaxInt64), KeepAlive)
+    t.Setenv("OLLAMA_KEEP_ALIVE", "-1")
+    LoadConfig()
+    require.Equal(t, time.Duration(math.MaxInt64), KeepAlive)
 }
 
 func TestClientFromEnvironment(t *testing.T) {
@@ -115,8 +115,6 @@ func AMDGetGPUInfo() []RocmGPUInfo {
             continue
         }
 
-        // TODO revisit this once ROCm v6 is available on windows.
-        // v5.7 only reports VRAM used by this process, so it's completely wrong and unusable
         slog.Debug("amdgpu memory", "gpu", i, "total", format.HumanBytes2(totalMemory))
         slog.Debug("amdgpu memory", "gpu", i, "available", format.HumanBytes2(freeMemory))
         gpuInfo := RocmGPUInfo{
@@ -126,6 +124,9 @@ func AMDGetGPUInfo() []RocmGPUInfo {
                 TotalMemory: totalMemory,
                 FreeMemory:  freeMemory,
             },
+            // Free memory reporting on Windows is not reliable until we bump to ROCm v6.2
+            UnreliableFreeMemory: true,
+
             ID:             strconv.Itoa(i), // TODO this is probably wrong if we specify visible devices
             DependencyPath: libDir,
             MinimumMemory:  rocmMinimumMemory,
gpu/gpu.go: 23 changes
@@ -202,7 +202,7 @@ func GetGPUInfo() GpuInfoList {
     }()
 
     if !bootstrapped {
-        slog.Debug("Detecting GPUs")
+        slog.Info("looking for compatible GPUs")
         needRefresh = false
         cpuCapability = GetCPUCapability()
         var memInfo C.mem_info_t
@@ -320,6 +320,9 @@ func GetGPUInfo() GpuInfoList {
 
         rocmGPUs = AMDGetGPUInfo()
         bootstrapped = true
+        if len(cudaGPUs) == 0 && len(rocmGPUs) == 0 && len(oneapiGPUs) == 0 {
+            slog.Info("no compatible GPUs were discovered")
+        }
     }
 
     // For detected GPUs, load library if not loaded
@@ -514,7 +517,23 @@ func LoadNVCUDAMgmt(nvcudaLibPaths []string) (int, *C.nvcuda_handle_t, string) {
         defer C.free(unsafe.Pointer(lib))
         C.nvcuda_init(lib, &resp)
         if resp.err != nil {
-            slog.Debug("Unable to load nvcuda", "library", libPath, "error", C.GoString(resp.err))
+            // Decide what log level based on the type of error message to help users understand why
+            msg := C.GoString(resp.err)
+            switch resp.cudaErr {
+            case C.CUDA_ERROR_INSUFFICIENT_DRIVER, C.CUDA_ERROR_SYSTEM_DRIVER_MISMATCH:
+                slog.Warn("version mismatch between driver and cuda driver library - reboot or upgrade may be required", "library", libPath, "error", msg)
+            case C.CUDA_ERROR_NO_DEVICE:
+                slog.Info("no nvidia devices detected", "library", libPath)
+            case C.CUDA_ERROR_UNKNOWN:
+                slog.Warn("unknown error initializing cuda driver library", "library", libPath, "error", msg)
+                slog.Warn("see https://github.com/ollama/ollama/blob/main/docs/troubleshooting.md for more information")
+            default:
+                if strings.Contains(msg, "wrong ELF class") {
+                    slog.Debug("skipping 32bit library", "library", libPath)
+                } else {
+                    slog.Info("unable to load cuda driver library", "library", libPath, "error", msg)
+                }
+            }
             C.free(unsafe.Pointer(resp.err))
         } else {
             return int(resp.num_devices), &resp.ch, libPath
@@ -7,6 +7,7 @@ void nvcuda_init(char *nvcuda_lib_path, nvcuda_init_resp_t *resp) {
   CUresult ret;
   resp->err = NULL;
   resp->num_devices = 0;
+  resp->cudaErr = CUDA_SUCCESS;
   const int buflen = 256;
   char buf[buflen + 1];
   int i;
@@ -38,6 +39,7 @@ void nvcuda_init(char *nvcuda_lib_path, nvcuda_init_resp_t *resp) {
               nvcuda_lib_path, msg);
       free(msg);
       resp->err = strdup(buf);
+      resp->cudaErr = -1;
       return;
     }
 
@@ -52,6 +54,7 @@ void nvcuda_init(char *nvcuda_lib_path, nvcuda_init_resp_t *resp) {
             msg);
     free(msg);
     resp->err = strdup(buf);
+    resp->cudaErr = -1;
     return;
   }
 }
@@ -61,12 +64,9 @@ void nvcuda_init(char *nvcuda_lib_path, nvcuda_init_resp_t *resp) {
     LOG(resp->ch.verbose, "cuInit err: %d\n", ret);
     UNLOAD_LIBRARY(resp->ch.handle);
     resp->ch.handle = NULL;
-    if (ret == CUDA_ERROR_INSUFFICIENT_DRIVER) {
-      resp->err = strdup("your nvidia driver is too old or missing. If you have a CUDA GPU please upgrade to run ollama");
-      return;
-    }
-    snprintf(buf, buflen, "nvcuda init failure: %d", ret);
+    snprintf(buf, buflen, "cuda driver library init failure: %d", ret);
     resp->err = strdup(buf);
+    resp->cudaErr = ret;
     return;
   }
 
@@ -91,6 +91,7 @@ void nvcuda_init(char *nvcuda_lib_path, nvcuda_init_resp_t *resp) {
     resp->ch.handle = NULL;
     snprintf(buf, buflen, "unable to get device count: %d", ret);
     resp->err = strdup(buf);
+    resp->cudaErr = ret;
     return;
   }
 }
@@ -106,13 +107,13 @@ void nvcuda_bootstrap(nvcuda_handle_t h, int i, mem_info_t *resp) {
   CUuuid uuid = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
 
   if (h.handle == NULL) {
-    resp->err = strdup("nvcuda handle isn't initialized");
+    resp->err = strdup("cuda driver library handle isn't initialized");
     return;
   }
 
   ret = (*h.cuDeviceGet)(&device, i);
   if (ret != CUDA_SUCCESS) {
-    snprintf(buf, buflen, "nvcuda device failed to initialize");
+    snprintf(buf, buflen, "cuda driver library device failed to initialize");
     resp->err = strdup(buf);
     return;
   }
@@ -168,14 +169,14 @@ void nvcuda_bootstrap(nvcuda_handle_t h, int i, mem_info_t *resp) {
   // To get memory we have to set (and release) a context
   ret = (*h.cuCtxCreate_v3)(&ctx, NULL, 0, 0, device);
   if (ret != CUDA_SUCCESS) {
-    snprintf(buf, buflen, "nvcuda failed to get device context %d", ret);
+    snprintf(buf, buflen, "cuda driver library failed to get device context %d", ret);
     resp->err = strdup(buf);
     return;
   }
 
   ret = (*h.cuMemGetInfo_v2)(&memInfo.free, &memInfo.total);
   if (ret != CUDA_SUCCESS) {
-    snprintf(buf, buflen, "nvcuda device memory info lookup failure %d", ret);
+    snprintf(buf, buflen, "cuda driver library device memory info lookup failure %d", ret);
     resp->err = strdup(buf);
     // Best effort on failure...
     (*h.cuCtxDestroy)(ctx);
@@ -193,7 +194,7 @@ void nvcuda_bootstrap(nvcuda_handle_t h, int i, mem_info_t *resp) {
 
   ret = (*h.cuCtxDestroy)(ctx);
   if (ret != CUDA_SUCCESS) {
-    LOG(1, "nvcuda failed to release device context %d", ret);
+    LOG(1, "cuda driver library failed to release device context %d", ret);
   }
 }
 
@@ -206,7 +207,7 @@ void nvcuda_get_free(nvcuda_handle_t h, int i, uint64_t *free, uint64_t *total)
 
   ret = (*h.cuDeviceGet)(&device, i);
   if (ret != CUDA_SUCCESS) {
-    LOG(1, "nvcuda device failed to initialize");
+    LOG(1, "cuda driver library device failed to initialize");
     return;
   }
 
@@ -214,13 +215,13 @@ void nvcuda_get_free(nvcuda_handle_t h, int i, uint64_t *free, uint64_t *total)
   // To get memory we have to set (and release) a context
   ret = (*h.cuCtxCreate_v3)(&ctx, NULL, 0, 0, device);
   if (ret != CUDA_SUCCESS) {
-    LOG(1, "nvcuda failed to get device context %d", ret);
+    LOG(1, "cuda driver library failed to get device context %d", ret);
     return;
   }
 
   ret = (*h.cuMemGetInfo_v2)(free, total);
   if (ret != CUDA_SUCCESS) {
-    LOG(1, "nvcuda device memory info lookup failure %d", ret);
+    LOG(1, "cuda driver library device memory info lookup failure %d", ret);
     // Best effort on failure...
     (*h.cuCtxDestroy)(ctx);
     return;
@@ -228,12 +229,12 @@ void nvcuda_get_free(nvcuda_handle_t h, int i, uint64_t *free, uint64_t *total)
 
   ret = (*h.cuCtxDestroy)(ctx);
   if (ret != CUDA_SUCCESS) {
-    LOG(1, "nvcuda failed to release device context %d", ret);
+    LOG(1, "cuda driver library failed to release device context %d", ret);
   }
 }
 
 void nvcuda_release(nvcuda_handle_t h) {
-  LOG(h.verbose, "releasing nvcuda library\n");
+  LOG(h.verbose, "releasing cuda driver library\n");
   UNLOAD_LIBRARY(h.handle);
   // TODO and other context release logic?
   h.handle = NULL;
@@ -7,9 +7,12 @@
 typedef enum cudaError_enum {
   CUDA_SUCCESS = 0,
   CUDA_ERROR_INVALID_VALUE = 1,
-  CUDA_ERROR_MEMORY_ALLOCATION = 2,
+  CUDA_ERROR_OUT_OF_MEMORY = 2,
   CUDA_ERROR_NOT_INITIALIZED = 3,
   CUDA_ERROR_INSUFFICIENT_DRIVER = 35,
+  CUDA_ERROR_NO_DEVICE = 100,
+  CUDA_ERROR_SYSTEM_DRIVER_MISMATCH = 803,
+  CUDA_ERROR_UNKNOWN = 999,
   // Other values omitted for now...
 } CUresult;
 
@@ -64,6 +67,7 @@ typedef struct nvcuda_init_resp {
   char *err;  // If err is non-null handle is invalid
   nvcuda_handle_t ch;
   int num_devices;
+  CUresult cudaErr;
 } nvcuda_init_resp_t;
 
 void nvcuda_init(char *nvcuda_lib_path, nvcuda_init_resp_t *resp);
@@ -29,6 +29,11 @@ type GpuInfo struct {
     // Extra environment variables specific to the GPU as list of [key,value]
     EnvWorkarounds [][2]string `json:"envs,omitempty"`
 
+    // Set to true if we can NOT reliably discover FreeMemory. A value of true indicates
+    // the FreeMemory is best effort, and may over or under report actual memory usage
+    // False indicates FreeMemory can generally be trusted on this GPU
+    UnreliableFreeMemory bool
+
     // GPU information
     ID   string `json:"gpu_id"` // string to use for selection of this specific GPU
     Name string `json:"name"`   // user friendly name if available
llm/ext_server/CMakeLists.txt (vendored): 3 changes
@@ -1,4 +1,3 @@
-
 set(TARGET ollama_llama_server)
 option(LLAMA_SERVER_VERBOSE "Build verbose logging option for Server" ON)
 include_directories(${CMAKE_CURRENT_SOURCE_DIR})
@@ -7,7 +6,7 @@ install(TARGETS ${TARGET} RUNTIME)
 target_compile_definitions(${TARGET} PRIVATE
     SERVER_VERBOSE=$<BOOL:${LLAMA_SERVER_VERBOSE}>
 )
-target_link_libraries(${TARGET} PRIVATE common llava ${CMAKE_THREAD_LIBS_INIT})
+target_link_libraries(${TARGET} PRIVATE ggml llama common llava ${CMAKE_THREAD_LIBS_INIT})
 if (WIN32)
     TARGET_LINK_LIBRARIES(${TARGET} PRIVATE ws2_32)
 endif()
llm/ext_server/server.cpp (vendored): 42 changes
@@ -1382,12 +1382,50 @@ struct llama_server_context
         }
     }
 
+    std::string common_prefix(const std::string& str1, const std::string& str2) {
+        auto mismatch_pair = std::mismatch(str1.begin(), str1.end(), str2.begin());
+        return std::string(str1.begin(), mismatch_pair.first);
+    }
+
+    // Find the slot that has the greatest common prefix
+    server_slot *prefix_slot(const json &prompt) {
+        if (!prompt.is_string()) {
+            return nullptr;
+        }
+
+        std::string prompt_str = prompt.get<std::string>();
+        server_slot *slot = nullptr;
+        size_t longest = 0;
+
+        for (server_slot &s : slots) {
+            if (s.available() && s.prompt.is_string()) {
+                std::string s_prompt = s.prompt.get<std::string>();
+                std::string prefix = common_prefix(s_prompt, prompt_str);
+
+                if (prefix.size() > longest) {
+                    slot = &s;
+                    longest = prefix.size();
+                }
+            }
+        }
+
+        if (!slot) {
+            return get_slot(-1);
+        }
+
+        LOG_INFO("slot with common prefix found", {{
+            "slot_id", slot->id,
+            "characters", longest
+        }});
+        return slot;
+    }
+
     void process_single_task(task_server& task)
     {
         switch (task.type)
         {
             case TASK_TYPE_COMPLETION: {
-                server_slot *slot = get_slot(json_value(task.data, "slot_id", -1));
+                server_slot *slot = prefix_slot(task.data["prompt"]);
                 if (slot == nullptr)
                 {
                     // if no slot is available, we defer this task for processing later
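The slot selection added above can be read independently of the server internals. A rough Go rendering of the same idea (illustrative only; the commit's actual code is the C++ `prefix_slot` shown in the hunk): pick the slot whose cached prompt shares the longest common prefix with the incoming prompt, and fall back to the default choice when nothing matches.

```go
package main

import "fmt"

// commonPrefixLen returns the length of the shared prefix of a and b.
func commonPrefixLen(a, b string) int {
	n := 0
	for n < len(a) && n < len(b) && a[n] == b[n] {
		n++
	}
	return n
}

// pickSlot returns the index of the cached prompt with the longest common
// prefix, or -1 when no cached prompt shares any prefix (the caller then
// falls back to any free slot, mirroring get_slot(-1) above).
func pickSlot(cached []string, incoming string) int {
	best, bestLen := -1, 0
	for i, p := range cached {
		if l := commonPrefixLen(p, incoming); l > bestLen {
			best, bestLen = i, l
		}
	}
	return best
}

func main() {
	cached := []string{"You are a helpful assistant. Q: 1+1", "Translate to French:"}
	fmt.Println(pickSlot(cached, "You are a helpful assistant. Q: 2+2")) // 0
}
```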
@@ -1732,7 +1770,7 @@ struct llama_server_context
                     slot.n_past -= 1;
                 }
 
-                slot.n_prompt_tokens_processed = slot.n_prompt_tokens - slot.n_past;
+                slot.n_prompt_tokens_processed = slot.n_prompt_tokens;
 
                 if (slot.ga_n != 1)
                 {
@@ -18,16 +18,16 @@ sign() {
     fi
 }
 
-COMMON_DARWIN_DEFS="-DCMAKE_OSX_DEPLOYMENT_TARGET=11.3 -DLLAMA_METAL_MACOSX_VERSION_MIN=11.3 -DCMAKE_SYSTEM_NAME=Darwin -DLLAMA_METAL_EMBED_LIBRARY=on -DLLAMA_OPENMP=off"
+COMMON_DARWIN_DEFS="-DBUILD_SHARED_LIBS=off -DCMAKE_OSX_DEPLOYMENT_TARGET=11.3 -DLLAMA_METAL_MACOSX_VERSION_MIN=11.3 -DCMAKE_SYSTEM_NAME=Darwin -DGGML_METAL_EMBED_LIBRARY=on -DGGML_OPENMP=off"
 
 case "${GOARCH}" in
 "amd64")
-    COMMON_CPU_DEFS="${COMMON_DARWIN_DEFS} -DCMAKE_SYSTEM_PROCESSOR=${ARCH} -DCMAKE_OSX_ARCHITECTURES=${ARCH} -DLLAMA_METAL=off -DLLAMA_NATIVE=off"
+    COMMON_CPU_DEFS="${COMMON_DARWIN_DEFS} -DCMAKE_SYSTEM_PROCESSOR=${ARCH} -DCMAKE_OSX_ARCHITECTURES=${ARCH} -DGGML_METAL=off -DGGML_NATIVE=off"
 
     # Static build for linking into the Go binary
     init_vars
     CMAKE_TARGETS="--target llama --target ggml"
-    CMAKE_DEFS="${COMMON_CPU_DEFS} -DBUILD_SHARED_LIBS=off -DLLAMA_BLAS=off -DLLAMA_ACCELERATE=off -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off ${CMAKE_DEFS}"
+    CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_BLAS=off -DGGML_ACCELERATE=off -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off ${CMAKE_DEFS}"
     BUILD_DIR="../build/darwin/${ARCH}_static"
     echo "Building static library"
     build
|
||||||
# CPU first for the default library, set up as lowest common denominator for maximum compatibility (including Rosetta)
|
# CPU first for the default library, set up as lowest common denominator for maximum compatibility (including Rosetta)
|
||||||
#
|
#
|
||||||
init_vars
|
init_vars
|
||||||
CMAKE_DEFS="${COMMON_CPU_DEFS} -DLLAMA_ACCELERATE=off -DLLAMA_BLAS=off -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off ${CMAKE_DEFS}"
|
CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_ACCELERATE=off -DGGML_BLAS=off -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off ${CMAKE_DEFS}"
|
||||||
BUILD_DIR="../build/darwin/${ARCH}/cpu"
|
BUILD_DIR="../build/darwin/${ARCH}/cpu"
|
||||||
echo "Building LCD CPU"
|
echo "Building LCD CPU"
|
||||||
build
|
build
|
||||||
|
@@ -49,7 +49,7 @@ case "${GOARCH}" in
     # Approximately 400% faster than LCD on same CPU
     #
     init_vars
-    CMAKE_DEFS="${COMMON_CPU_DEFS} -DLLAMA_ACCELERATE=off -DLLAMA_BLAS=off -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off ${CMAKE_DEFS}"
+    CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_ACCELERATE=off -DGGML_BLAS=off -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off ${CMAKE_DEFS}"
     BUILD_DIR="../build/darwin/${ARCH}/cpu_avx"
     echo "Building AVX CPU"
     build
|
||||||
# Approximately 10% faster than AVX on same CPU
|
# Approximately 10% faster than AVX on same CPU
|
||||||
#
|
#
|
||||||
init_vars
|
init_vars
|
||||||
CMAKE_DEFS="${COMMON_CPU_DEFS} -DLLAMA_ACCELERATE=on -DLLAMA_BLAS=off -DLLAMA_AVX=on -DLLAMA_AVX2=on -DLLAMA_AVX512=off -DLLAMA_FMA=on -DLLAMA_F16C=on ${CMAKE_DEFS}"
|
CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_ACCELERATE=on -DGGML_BLAS=off -DGGML_AVX=on -DGGML_AVX2=on -DGGML_AVX512=off -DGGML_FMA=on -DGGML_F16C=on ${CMAKE_DEFS}"
|
||||||
BUILD_DIR="../build/darwin/${ARCH}/cpu_avx2"
|
BUILD_DIR="../build/darwin/${ARCH}/cpu_avx2"
|
||||||
echo "Building AVX2 CPU"
|
echo "Building AVX2 CPU"
|
||||||
EXTRA_LIBS="${EXTRA_LIBS} -framework Accelerate -framework Foundation"
|
EXTRA_LIBS="${EXTRA_LIBS} -framework Accelerate -framework Foundation"
|
||||||
|
@ -75,14 +75,14 @@ case "${GOARCH}" in
|
||||||
# Static build for linking into the Go binary
|
# Static build for linking into the Go binary
|
||||||
init_vars
|
init_vars
|
||||||
CMAKE_TARGETS="--target llama --target ggml"
|
CMAKE_TARGETS="--target llama --target ggml"
|
||||||
CMAKE_DEFS="-DCMAKE_OSX_DEPLOYMENT_TARGET=11.3 -DLLAMA_BLAS=off -DCMAKE_SYSTEM_NAME=Darwin -DBUILD_SHARED_LIBS=off -DCMAKE_SYSTEM_PROCESSOR=${ARCH} -DCMAKE_OSX_ARCHITECTURES=${ARCH} -DLLAMA_METAL=off -DLLAMA_ACCELERATE=off -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off ${CMAKE_DEFS}"
|
CMAKE_DEFS="${COMMON_DARWIN_DEFS} -DCMAKE_OSX_DEPLOYMENT_TARGET=11.3 -DCMAKE_SYSTEM_NAME=Darwin -DCMAKE_SYSTEM_PROCESSOR=${ARCH} -DCMAKE_OSX_ARCHITECTURES=${ARCH} ${CMAKE_DEFS}"
|
||||||
BUILD_DIR="../build/darwin/${ARCH}_static"
|
BUILD_DIR="../build/darwin/${ARCH}_static"
|
||||||
echo "Building static library"
|
echo "Building static library"
|
||||||
build
|
build
|
||||||
|
|
||||||
if [ -z "$OLLAMA_SKIP_METAL_GENERATE" ]; then
|
if [ -z "$OLLAMA_SKIP_METAL_GENERATE" ]; then
|
||||||
init_vars
|
init_vars
|
||||||
CMAKE_DEFS="${COMMON_DARWIN_DEFS} -DLLAMA_ACCELERATE=on -DCMAKE_SYSTEM_PROCESSOR=${ARCH} -DCMAKE_OSX_ARCHITECTURES=${ARCH} -DLLAMA_METAL=on ${CMAKE_DEFS}"
|
CMAKE_DEFS="${COMMON_DARWIN_DEFS} -DCMAKE_SYSTEM_PROCESSOR=${ARCH} -DCMAKE_OSX_ARCHITECTURES=${ARCH} ${CMAKE_DEFS}"
|
||||||
BUILD_DIR="../build/darwin/${ARCH}/metal"
|
BUILD_DIR="../build/darwin/${ARCH}/metal"
|
||||||
EXTRA_LIBS="${EXTRA_LIBS} -framework Accelerate -framework Foundation -framework Metal -framework MetalKit -framework MetalPerformanceShaders"
|
EXTRA_LIBS="${EXTRA_LIBS} -framework Accelerate -framework Foundation -framework Metal -framework MetalKit -framework MetalPerformanceShaders"
|
||||||
build
|
build
|
||||||
|
|
|
@@ -51,7 +51,7 @@ if [ -z "${CUDACXX}" ]; then
         export CUDACXX=$(command -v nvcc)
     fi
 fi
-COMMON_CMAKE_DEFS="-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_NATIVE=off -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off -DLLAMA_OPENMP=off"
+COMMON_CMAKE_DEFS="-DBUILD_SHARED_LIBS=off -DCMAKE_POSITION_INDEPENDENT_CODE=on -DGGML_NATIVE=off -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off -DGGML_OPENMP=off"
 source $(dirname $0)/gen_common.sh
 init_vars
 git_module_setup
@@ -64,7 +64,7 @@ if [ -z "${OLLAMA_SKIP_STATIC_GENERATE}" -o "${OLLAMA_CPU_TARGET}" = "static" ];
     # Static build for linking into the Go binary
     init_vars
     CMAKE_TARGETS="--target llama --target ggml"
-    CMAKE_DEFS="-DBUILD_SHARED_LIBS=off -DLLAMA_NATIVE=off -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off -DLLAMA_OPENMP=off ${CMAKE_DEFS}"
+    CMAKE_DEFS="-DBUILD_SHARED_LIBS=off -DGGML_NATIVE=off -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off -DGGML_OPENMP=off ${CMAKE_DEFS}"
     BUILD_DIR="../build/linux/${ARCH}_static"
     echo "Building static library"
     build
@@ -84,22 +84,22 @@ if [ -z "${OLLAMA_SKIP_CPU_GENERATE}" ]; then
         compress
     else
         # Darwin Rosetta x86 emulation does NOT support AVX, AVX2, AVX512
-        # -DLLAMA_AVX -- 2011 Intel Sandy Bridge & AMD Bulldozer
-        # -DLLAMA_F16C -- 2012 Intel Ivy Bridge & AMD 2011 Bulldozer (No significant improvement over just AVX)
-        # -DLLAMA_AVX2 -- 2013 Intel Haswell & 2015 AMD Excavator / 2017 AMD Zen
-        # -DLLAMA_FMA (FMA3) -- 2013 Intel Haswell & 2012 AMD Piledriver
+        # -DGGML_AVX -- 2011 Intel Sandy Bridge & AMD Bulldozer
+        # -DGGML_F16C -- 2012 Intel Ivy Bridge & AMD 2011 Bulldozer (No significant improvement over just AVX)
+        # -DGGML_AVX2 -- 2013 Intel Haswell & 2015 AMD Excavator / 2017 AMD Zen
+        # -DGGML_FMA (FMA3) -- 2013 Intel Haswell & 2012 AMD Piledriver
         # Note: the following seem to yield slower results than AVX2 - ymmv
-        # -DLLAMA_AVX512 -- 2017 Intel Skylake and High End DeskTop (HEDT)
-        # -DLLAMA_AVX512_VBMI -- 2018 Intel Cannon Lake
-        # -DLLAMA_AVX512_VNNI -- 2021 Intel Alder Lake
+        # -DGGML_AVX512 -- 2017 Intel Skylake and High End DeskTop (HEDT)
+        # -DGGML_AVX512_VBMI -- 2018 Intel Cannon Lake
+        # -DGGML_AVX512_VNNI -- 2021 Intel Alder Lake
 
-        COMMON_CPU_DEFS="-DCMAKE_POSITION_INDEPENDENT_CODE=on -DLLAMA_NATIVE=off -DLLAMA_OPENMP=off"
+        COMMON_CPU_DEFS="-DCMAKE_POSITION_INDEPENDENT_CODE=on -DGGML_NATIVE=off -DGGML_OPENMP=off"
         if [ -z "${OLLAMA_CPU_TARGET}" -o "${OLLAMA_CPU_TARGET}" = "cpu" ]; then
             #
             # CPU first for the default library, set up as lowest common denominator for maximum compatibility (including Rosetta)
             #
             init_vars
-            CMAKE_DEFS="${COMMON_CPU_DEFS} -DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off ${CMAKE_DEFS}"
+            CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off ${CMAKE_DEFS}"
             BUILD_DIR="../build/linux/${ARCH}/cpu"
             echo "Building LCD CPU"
             build
@@ -116,7 +116,7 @@ if [ -z "${OLLAMA_SKIP_CPU_GENERATE}" ]; then
             # Approximately 400% faster than LCD on same CPU
             #
             init_vars
-            CMAKE_DEFS="${COMMON_CPU_DEFS} -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off ${CMAKE_DEFS}"
+            CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_AVX=on -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_FMA=off -DGGML_F16C=off ${CMAKE_DEFS}"
             BUILD_DIR="../build/linux/${ARCH}/cpu_avx"
             echo "Building AVX CPU"
             build
@@ -129,7 +129,7 @@ if [ -z "${OLLAMA_SKIP_CPU_GENERATE}" ]; then
             # Approximately 10% faster than AVX on same CPU
             #
             init_vars
-            CMAKE_DEFS="${COMMON_CPU_DEFS} -DLLAMA_AVX=on -DLLAMA_AVX2=on -DLLAMA_AVX512=off -DLLAMA_FMA=on -DLLAMA_F16C=on ${CMAKE_DEFS}"
+            CMAKE_DEFS="${COMMON_CPU_DEFS} -DGGML_AVX=on -DGGML_AVX2=on -DGGML_AVX512=off -DGGML_FMA=on -DGGML_F16C=on ${CMAKE_DEFS}"
             BUILD_DIR="../build/linux/${ARCH}/cpu_avx2"
             echo "Building AVX2 CPU"
             build
@ -170,15 +170,15 @@ if [ -z "${OLLAMA_SKIP_CUDA_GENERATE}" -a -d "${CUDA_LIB_DIR}" ]; then
#
# CUDA compute < 6.0 lacks proper FP16 support on ARM.
# Disabling has minimal performance effect while maintaining compatibility.
-ARM64_DEFS="-DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_CUDA_F16=off"
+ARM64_DEFS="-DGGML_AVX=off -DGGML_AVX2=off -DGGML_AVX512=off -DGGML_CUDA_F16=off"
fi
# Users building from source can tune the exact flags we pass to cmake for configuring llama.cpp
if [ -n "${OLLAMA_CUSTOM_CUDA_DEFS}" ]; then
echo "OLLAMA_CUSTOM_CUDA_DEFS=\"${OLLAMA_CUSTOM_CUDA_DEFS}\""
-CMAKE_CUDA_DEFS="-DLLAMA_CUDA=on -DCMAKE_CUDA_ARCHITECTURES=${CMAKE_CUDA_ARCHITECTURES} ${OLLAMA_CUSTOM_CUDA_DEFS}"
+CMAKE_CUDA_DEFS="-DGGML_CUDA=on -DCMAKE_CUDA_ARCHITECTURES=${CMAKE_CUDA_ARCHITECTURES} ${OLLAMA_CUSTOM_CUDA_DEFS}"
echo "Building custom CUDA GPU"
else
-CMAKE_CUDA_DEFS="-DLLAMA_CUDA=on -DCMAKE_CUDA_FLAGS=-t8 -DLLAMA_CUDA_FORCE_MMQ=on -DCMAKE_CUDA_ARCHITECTURES=${CMAKE_CUDA_ARCHITECTURES}"
+CMAKE_CUDA_DEFS="-DGGML_CUDA=on -DCMAKE_CUDA_FLAGS=-t8 -DGGML_CUDA_FORCE_MMQ=on -DCMAKE_CUDA_ARCHITECTURES=${CMAKE_CUDA_ARCHITECTURES} -DCMAKE_LIBRARY_PATH=/usr/local/cuda/compat"
fi
CMAKE_DEFS="${COMMON_CMAKE_DEFS} ${CMAKE_DEFS} ${ARM64_DEFS} ${CMAKE_CUDA_DEFS}"
BUILD_DIR="../build/linux/${ARCH}/cuda${CUDA_VARIANT}"
@ -216,7 +216,7 @@ if [ -z "${OLLAMA_SKIP_ONEAPI_GENERATE}" -a -d "${ONEAPI_ROOT}" ]; then
init_vars
source ${ONEAPI_ROOT}/setvars.sh --force # set up environment variables for oneAPI
CC=icx
-CMAKE_DEFS="${COMMON_CMAKE_DEFS} ${CMAKE_DEFS} -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_SYCL=ON -DLLAMA_SYCL_F16=OFF"
+CMAKE_DEFS="${COMMON_CMAKE_DEFS} ${CMAKE_DEFS} -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DGGML_SYCL=ON -DGGML_SYCL_F16=OFF"
BUILD_DIR="../build/linux/${ARCH}/oneapi"
EXTRA_LIBS="-fsycl -Wl,-rpath,${ONEAPI_ROOT}/compiler/latest/lib,-rpath,${ONEAPI_ROOT}/mkl/latest/lib,-rpath,${ONEAPI_ROOT}/tbb/latest/lib,-rpath,${ONEAPI_ROOT}/compiler/latest/opt/oclfpga/linux64/lib -lOpenCL -lmkl_core -lmkl_sycl_blas -lmkl_intel_ilp64 -lmkl_tbb_thread -ltbb"
DEBUG_FLAGS="" # icx compiles with -O0 if we pass -g, so we must remove it
@ -254,7 +254,7 @@ if [ -z "${OLLAMA_SKIP_ROCM_GENERATE}" -a -d "${ROCM_PATH}" ]; then
ROCM_VARIANT=_v$(ls ${ROCM_PATH}/lib/librocblas.so.*.*.????? | cut -f5 -d. || true)
fi
init_vars
-CMAKE_DEFS="${COMMON_CMAKE_DEFS} ${CMAKE_DEFS} -DLLAMA_HIPBLAS=on -DCMAKE_C_COMPILER=$ROCM_PATH/llvm/bin/clang -DCMAKE_CXX_COMPILER=$ROCM_PATH/llvm/bin/clang++ -DAMDGPU_TARGETS=$(amdGPUs) -DGPU_TARGETS=$(amdGPUs)"
+CMAKE_DEFS="${COMMON_CMAKE_DEFS} ${CMAKE_DEFS} -DGGML_HIPBLAS=on -DCMAKE_C_COMPILER=$ROCM_PATH/llvm/bin/clang -DCMAKE_CXX_COMPILER=$ROCM_PATH/llvm/bin/clang++ -DAMDGPU_TARGETS=$(amdGPUs) -DGPU_TARGETS=$(amdGPUs)"
# Users building from source can tune the exact flags we pass to cmake for configuring llama.cpp
if [ -n "${OLLAMA_CUSTOM_ROCM_DEFS}" ]; then
echo "OLLAMA_CUSTOM_ROCM_DEFS=\"${OLLAMA_CUSTOM_ROCM_DEFS}\""
@ -39,8 +39,8 @@ function init_vars {
}
$script:cmakeDefs = @(
"-DBUILD_SHARED_LIBS=on",
-"-DLLAMA_NATIVE=off",
+"-DGGML_NATIVE=off",
-"-DLLAMA_OPENMP=off"
+"-DGGML_OPENMP=off"
)
$script:commonCpuDefs = @("-DCMAKE_POSITION_INDEPENDENT_CODE=on")
$script:ARCH = $Env:PROCESSOR_ARCHITECTURE.ToLower()
@ -182,9 +182,9 @@ function cleanup {
}


-# -DLLAMA_AVX -- 2011 Intel Sandy Bridge & AMD Bulldozer
+# -DGGML_AVX -- 2011 Intel Sandy Bridge & AMD Bulldozer
-# -DLLAMA_AVX2 -- 2013 Intel Haswell & 2015 AMD Excavator / 2017 AMD Zen
+# -DGGML_AVX2 -- 2013 Intel Haswell & 2015 AMD Excavator / 2017 AMD Zen
-# -DLLAMA_FMA (FMA3) -- 2013 Intel Haswell & 2012 AMD Piledriver
+# -DGGML_FMA (FMA3) -- 2013 Intel Haswell & 2012 AMD Piledriver


function build_static() {
@ -204,13 +204,13 @@ function build_static() {
"-DCMAKE_C_COMPILER=gcc.exe",
"-DCMAKE_CXX_COMPILER=g++.exe",
"-DBUILD_SHARED_LIBS=off",
-"-DLLAMA_NATIVE=off",
+"-DGGML_NATIVE=off",
-"-DLLAMA_AVX=off",
+"-DGGML_AVX=off",
-"-DLLAMA_AVX2=off",
+"-DGGML_AVX2=off",
-"-DLLAMA_AVX512=off",
+"-DGGML_AVX512=off",
-"-DLLAMA_F16C=off",
+"-DGGML_F16C=off",
-"-DLLAMA_FMA=off",
+"-DGGML_FMA=off",
-"-DLLAMA_OPENMP=off")
+"-DGGML_OPENMP=off")
$script:buildDir="../build/windows/${script:ARCH}_static"
write-host "Building static library"
build
@ -224,7 +224,7 @@ function build_cpu($gen_arch) {
if ((-not "${env:OLLAMA_SKIP_CPU_GENERATE}" ) -and ((-not "${env:OLLAMA_CPU_TARGET}") -or ("${env:OLLAMA_CPU_TARGET}" -eq "cpu"))) {
# remaining llama.cpp builds use MSVC
init_vars
-$script:cmakeDefs = $script:commonCpuDefs + @("-A", $gen_arch, "-DLLAMA_AVX=off", "-DLLAMA_AVX2=off", "-DLLAMA_AVX512=off", "-DLLAMA_FMA=off", "-DLLAMA_F16C=off") + $script:cmakeDefs
+$script:cmakeDefs = $script:commonCpuDefs + @("-A", $gen_arch, "-DGGML_AVX=off", "-DGGML_AVX2=off", "-DGGML_AVX512=off", "-DGGML_FMA=off", "-DGGML_F16C=off") + $script:cmakeDefs
$script:buildDir="../build/windows/${script:ARCH}/cpu"
$script:distDir="$script:DIST_BASE\cpu"
write-host "Building LCD CPU"
@ -239,7 +239,7 @@ function build_cpu($gen_arch) {
function build_cpu_avx() {
if ((-not "${env:OLLAMA_SKIP_CPU_GENERATE}" ) -and ((-not "${env:OLLAMA_CPU_TARGET}") -or ("${env:OLLAMA_CPU_TARGET}" -eq "cpu_avx"))) {
init_vars
-$script:cmakeDefs = $script:commonCpuDefs + @("-A", "x64", "-DLLAMA_AVX=on", "-DLLAMA_AVX2=off", "-DLLAMA_AVX512=off", "-DLLAMA_FMA=off", "-DLLAMA_F16C=off") + $script:cmakeDefs
+$script:cmakeDefs = $script:commonCpuDefs + @("-A", "x64", "-DGGML_AVX=on", "-DGGML_AVX2=off", "-DGGML_AVX512=off", "-DGGML_FMA=off", "-DGGML_F16C=off") + $script:cmakeDefs
$script:buildDir="../build/windows/${script:ARCH}/cpu_avx"
$script:distDir="$script:DIST_BASE\cpu_avx"
write-host "Building AVX CPU"
@ -254,7 +254,7 @@ function build_cpu_avx() {
function build_cpu_avx2() {
if ((-not "${env:OLLAMA_SKIP_CPU_GENERATE}" ) -and ((-not "${env:OLLAMA_CPU_TARGET}") -or ("${env:OLLAMA_CPU_TARGET}" -eq "cpu_avx2"))) {
init_vars
-$script:cmakeDefs = $script:commonCpuDefs + @("-A", "x64", "-DLLAMA_AVX=on", "-DLLAMA_AVX2=on", "-DLLAMA_AVX512=off", "-DLLAMA_FMA=on", "-DLLAMA_F16C=on") + $script:cmakeDefs
+$script:cmakeDefs = $script:commonCpuDefs + @("-A", "x64", "-DGGML_AVX=on", "-DGGML_AVX2=on", "-DGGML_AVX512=off", "-DGGML_FMA=on", "-DGGML_F16C=on") + $script:cmakeDefs
$script:buildDir="../build/windows/${script:ARCH}/cpu_avx2"
$script:distDir="$script:DIST_BASE\cpu_avx2"
write-host "Building AVX2 CPU"
@ -279,9 +279,9 @@ function build_cuda() {
$script:distDir="$script:DIST_BASE\cuda$script:CUDA_VARIANT"
$script:cmakeDefs += @(
"-A", "x64",
-"-DLLAMA_CUDA=ON",
+"-DGGML_CUDA=ON",
-"-DLLAMA_AVX=on",
+"-DGGML_AVX=on",
-"-DLLAMA_AVX2=off",
+"-DGGML_AVX2=off",
"-DCUDAToolkit_INCLUDE_DIR=$script:CUDA_INCLUDE_DIR",
"-DCMAKE_CUDA_FLAGS=-t8",
"-DCMAKE_CUDA_ARCHITECTURES=${script:CMAKE_CUDA_ARCHITECTURES}"
@ -319,7 +319,7 @@ function build_oneapi() {
$script:distDir ="$script:DIST_BASE\oneapi$script:ONEAPI_VARIANT"
$script:cmakeDefs += @(
"-G", "MinGW Makefiles",
-"-DLLAMA_SYCL=ON",
+"-DGGML_SYCL=ON",
"-DCMAKE_C_COMPILER=icx",
"-DCMAKE_CXX_COMPILER=icx",
"-DCMAKE_BUILD_TYPE=Release"
@ -365,10 +365,10 @@ function build_rocm() {
"-G", "Ninja",
"-DCMAKE_C_COMPILER=clang.exe",
"-DCMAKE_CXX_COMPILER=clang++.exe",
-"-DLLAMA_HIPBLAS=on",
+"-DGGML_HIPBLAS=on",
"-DHIP_PLATFORM=amd",
-"-DLLAMA_AVX=on",
+"-DGGML_AVX=on",
-"-DLLAMA_AVX2=off",
+"-DGGML_AVX2=off",
"-DCMAKE_POSITION_INDEPENDENT_CODE=on",
"-DAMDGPU_TARGETS=$(amdGPUs)",
"-DGPU_TARGETS=$(amdGPUs)"
@ -1 +1 @@
-Subproject commit 7c26775adb579e92b59c82e8084c07a1d0f75e9c
+Subproject commit d7fd29fff16456ce9c3a23fd2d09a66256b05aff
16
llm/llm.go
@ -1,12 +1,14 @@
package llm

-// #cgo CFLAGS: -Illama.cpp
-// #cgo darwin,arm64 LDFLAGS: ${SRCDIR}/build/darwin/arm64_static/libllama.a -lstdc++
-// #cgo darwin,amd64 LDFLAGS: ${SRCDIR}/build/darwin/x86_64_static/libllama.a -lstdc++
-// #cgo windows,amd64 LDFLAGS: ${SRCDIR}/build/windows/amd64_static/libllama.a -static -lstdc++
-// #cgo windows,arm64 LDFLAGS: ${SRCDIR}/build/windows/arm64_static/libllama.a -static -lstdc++
-// #cgo linux,amd64 LDFLAGS: ${SRCDIR}/build/linux/x86_64_static/libllama.a -lstdc++
-// #cgo linux,arm64 LDFLAGS: ${SRCDIR}/build/linux/arm64_static/libllama.a -lstdc++
+// #cgo CFLAGS: -Illama.cpp -Illama.cpp/include -Illama.cpp/ggml/include
+// #cgo windows LDFLAGS: -static-libstdc++
+// #cgo LDFLAGS: -lllama -lggml -lstdc++ -lpthread
+// #cgo darwin,arm64 LDFLAGS: -L${SRCDIR}/build/darwin/arm64_static -L${SRCDIR}/build/darwin/arm64_static/src -L${SRCDIR}/build/darwin/arm64_static/ggml/src -framework Accelerate -framework Metal
+// #cgo darwin,amd64 LDFLAGS: -L${SRCDIR}/build/darwin/x86_64_static -L${SRCDIR}/build/darwin/x86_64_static/src -L${SRCDIR}/build/darwin/x86_64_static/ggml/src
+// #cgo windows,amd64 LDFLAGS: -L${SRCDIR}/build/windows/amd64_static -L${SRCDIR}/build/windows/amd64_static/src -L${SRCDIR}/build/windows/amd64_static/ggml/src
+// #cgo windows,arm64 LDFLAGS: -L${SRCDIR}/build/windows/arm64_static -L${SRCDIR}/build/windows/arm64_static/src -L${SRCDIR}/build/windows/arm64_static/ggml/src
+// #cgo linux,amd64 LDFLAGS: -L${SRCDIR}/build/linux/x86_64_static -L${SRCDIR}/build/linux/x86_64_static/src -L${SRCDIR}/build/linux/x86_64_static/ggml/src
+// #cgo linux,arm64 LDFLAGS: -L${SRCDIR}/build/linux/arm64_static -L${SRCDIR}/build/linux/arm64_static/src -L${SRCDIR}/build/linux/arm64_static/ggml/src
// #include <stdlib.h>
// #include "llama.h"
import "C"

@ -1,8 +1,8 @@
diff --git a/common/common.cpp b/common/common.cpp
-index 73ff0e85..6adb1a92 100644
+index 2c05a4d4..927f0e3d 100644
--- a/common/common.cpp
+++ b/common/common.cpp
-@@ -2447,6 +2447,8 @@ struct llama_model_params llama_model_params_from_gpt_params(const gpt_params &
+@@ -2093,6 +2093,8 @@ struct llama_model_params llama_model_params_from_gpt_params(const gpt_params &
mparams.use_mmap = params.use_mmap;
mparams.use_mlock = params.use_mlock;
mparams.check_tensors = params.check_tensors;
@ -12,10 +12,10 @@ index 73ff0e85..6adb1a92 100644
mparams.kv_overrides = NULL;
} else {
diff --git a/common/common.h b/common/common.h
-index 58ed72f4..0bb2605e 100644
+index 65c0ef81..ebca2c77 100644
--- a/common/common.h
+++ b/common/common.h
-@@ -180,6 +180,13 @@ struct gpt_params {
+@@ -184,6 +184,13 @@ struct gpt_params {
std::string mmproj = ""; // path to multimodal projector
std::vector<std::string> image; // path to image file(s)

@ -26,6 +26,6 @@ index 58ed72f4..0bb2605e 100644
+ // context pointer passed to the progress callback
+ void * progress_callback_user_data;
+
-// server params
+// embedding
-int32_t port = 8080; // server listens on this network port
+bool embedding = false; // get only sentence embedding
-int32_t timeout_read = 600; // http read timeout in seconds
+int32_t embd_normalize = 2; // normalisation for embendings (-1=none, 0=max absolute int16, 1=taxicab, 2=euclidean, >2=p-norm)
@ -1,17 +1,8 @@
-From 544a2d2e646d39e878d87dfbb3398a356bc560ab Mon Sep 17 00:00:00 2001
-From: Michael Yang <mxyng@pm.me>
-Date: Thu, 23 May 2024 11:18:45 -0700
-Subject: [PATCH] throw exception on load errors
-
----
-llama.cpp | 25 ++++++++++++++++---------
-1 file changed, 16 insertions(+), 9 deletions(-)
-
-diff --git a/llama.cpp b/llama.cpp
-index 15c66077..8ba90b6a 100644
---- a/llama.cpp
-+++ b/llama.cpp
-@@ -6346,7 +6346,7 @@ static int llama_model_load(const std::string & fname, llama_model & model, llam
+diff --git a/src/llama.cpp b/src/llama.cpp
+index 73f52435..58a00fb1 100644
+--- a/src/llama.cpp
++++ b/src/llama.cpp
+@@ -7241,7 +7241,7 @@ static int llama_model_load(const std::string & fname, llama_model & model, llam
}
} catch (const std::exception & err) {
LLAMA_LOG_ERROR("%s: error loading model: %s\n", __func__, err.what());
@ -20,7 +11,7 @@ index 15c66077..8ba90b6a 100644
}

return 0;
-@@ -15600,16 +15600,23 @@ struct llama_model * llama_load_model_from_file(
+@@ -17564,16 +17564,23 @@ struct llama_model * llama_load_model_from_file(
}
model->rpc_servers.push_back(servers);
}
@ -52,6 +43,3 @@ index 15c66077..8ba90b6a 100644
}

return model;
---
-2.45.1
-
@ -1,7 +1,7 @@
-diff --git a/ggml-metal.m b/ggml-metal.m
+diff --git a/ggml/src/ggml-metal.m b/ggml/src/ggml-metal.m
index 0207b787..b5e9884b 100644
---- a/ggml-metal.m
+--- a/ggml/src/ggml-metal.m
-+++ b/ggml-metal.m
++++ b/ggml/src/ggml-metal.m
@@ -1396,27 +1396,23 @@ static enum ggml_status ggml_metal_graph_compute(
// to the matrix-vector kernel
int ne11_mm_min = 1;
@ -1,8 +1,8 @@
-diff --git a/llama.cpp b/llama.cpp
+diff --git a/src/llama.cpp b/src/llama.cpp
-index 61948751..4b72a293 100644
+index 73f52435..2b81b4bd 100644
---- a/llama.cpp
+--- a/src/llama.cpp
-+++ b/llama.cpp
++++ b/src/llama.cpp
-@@ -4824,16 +4824,7 @@ static void llm_load_vocab(
+@@ -5092,16 +5092,7 @@ static void llm_load_vocab(

// for now, only BPE models have pre-tokenizers
if (vocab.type == LLAMA_VOCAB_TYPE_BPE) {
@ -20,13 +20,13 @@ index 61948751..4b72a293 100644
vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
} else if (
tokenizer_pre == "llama3" ||
-@@ -4888,7 +4879,8 @@ static void llm_load_vocab(
+@@ -5164,7 +5155,8 @@ static void llm_load_vocab(
-tokenizer_pre == "poro-chat") {
+tokenizer_pre == "jais") {
-vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_PORO;
+vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_JAIS;
} else {
- throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str()));
+ LLAMA_LOG_WARN("%s: missing or unrecognized pre-tokenizer type, using: 'default'\n", __func__);
+ vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
}
-} else {
+} else if (vocab.type == LLAMA_VOCAB_TYPE_SPM) {
vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
@ -1,7 +1,7 @@
-diff --git a/llama.cpp b/llama.cpp
+diff --git a/src/llama.cpp b/src/llama.cpp
index 40d2ec2c..f34eb79a 100644
---- a/llama.cpp
+--- a/src/llama.cpp
-+++ b/llama.cpp
++++ b/src/llama.cpp
@@ -6943,7 +6943,7 @@ static struct ggml_tensor * llm_build_kqv(
struct ggml_tensor * kq = ggml_mul_mat(ctx, k, q);
cb(kq, "kq", il);
45
llm/patches/07-embeddings.diff
Normal file
@ -0,0 +1,45 @@
|
||||||
|
diff --git a/src/llama.cpp b/src/llama.cpp
|
||||||
|
index 1fe2b9f7..a43312a7 100644
|
||||||
|
--- a/src/llama.cpp
|
||||||
|
+++ b/src/llama.cpp
|
||||||
|
@@ -13689,7 +13689,7 @@ static size_t llama_output_reserve(llama_context & lctx, size_t n_outputs) {
|
||||||
|
const auto n_embd = hparams.n_embd;
|
||||||
|
|
||||||
|
// TODO: use a per-batch flag for logits presence instead
|
||||||
|
- const bool has_logits = !cparams.embeddings;
|
||||||
|
+ const bool has_logits = cparams.causal_attn;
|
||||||
|
const bool has_embd = lctx.is_encoding || (cparams.embeddings && (cparams.pooling_type == LLAMA_POOLING_TYPE_NONE));
|
||||||
|
|
||||||
|
const size_t logits_size = has_logits ? n_vocab*n_outputs_max : 0;
|
||||||
|
@@ -13959,17 +13959,25 @@ static int llama_decode_internal(
|
||||||
|
// no output
|
||||||
|
res = nullptr;
|
||||||
|
embd = nullptr;
|
||||||
|
- } else if (cparams.embeddings) {
|
||||||
|
- res = nullptr; // do not extract logits for embedding case
|
||||||
|
- embd = gf->nodes[gf->n_nodes - 1];
|
||||||
|
- if (strcmp(embd->name, "result_embd_pooled") != 0) {
|
||||||
|
- embd = gf->nodes[gf->n_nodes - 2];
|
||||||
|
+ }
|
||||||
|
+
|
||||||
|
+ if (cparams.embeddings) {
|
||||||
|
+ for (int i = gf->n_nodes - 1; i >= 0; --i) {
|
||||||
|
+ embd = gf->nodes[i];
|
||||||
|
+ if (strcmp(embd->name, "result_embd_pooled") == 0) {
|
||||||
|
+ break;
|
||||||
|
+ }
|
||||||
|
}
|
||||||
|
GGML_ASSERT(strcmp(embd->name, "result_embd_pooled") == 0 && "missing embeddings tensor");
|
||||||
|
- } else {
|
||||||
|
+ } else {
|
||||||
|
embd = nullptr; // do not extract embeddings when not needed
|
||||||
|
GGML_ASSERT(strcmp(res->name, "result_output") == 0 && "missing result_output tensor");
|
||||||
|
}
|
||||||
|
+
|
||||||
|
+ if (!cparams.causal_attn) {
|
||||||
|
+ res = nullptr; // do not extract logits when not needed
|
||||||
|
+ }
|
||||||
|
+
|
||||||
|
// LLAMA_LOG_INFO("graph build time: %.3f ms (%d nodes, %d leafs)\n", (ggml_time_us() - t_start_us)/1000.0, gf->n_nodes, gf->n_leafs);
|
||||||
|
|
||||||
|
ggml_backend_sched_alloc_graph(lctx.sched, gf);
|
|
@ -1,305 +0,0 @@
|
||||||
From 5cadb45f39d001ffbad95b690d6cf0abcb4a6d96 Mon Sep 17 00:00:00 2001
|
|
||||||
From: Ollama maintainers <hello@ollama.com>
|
|
||||||
Date: Wed, 26 Jun 2024 16:18:09 -0700
|
|
||||||
Subject: [PATCH] Architecture support
|
|
||||||
|
|
||||||
---
|
|
||||||
llama.cpp | 194 +++++++++++++++++++++++++++++++++++++++++++++++++++++-
|
|
||||||
1 file changed, 193 insertions(+), 1 deletion(-)
|
|
||||||
|
|
||||||
diff --git a/llama.cpp b/llama.cpp
|
|
||||||
index 61948751..3b4196f5 100644
|
|
||||||
--- a/llama.cpp
|
|
||||||
+++ b/llama.cpp
|
|
||||||
@@ -217,6 +217,7 @@ enum llm_arch {
|
|
||||||
LLM_ARCH_INTERNLM2,
|
|
||||||
LLM_ARCH_MINICPM,
|
|
||||||
LLM_ARCH_GEMMA,
|
|
||||||
+ LLM_ARCH_GEMMA2,
|
|
||||||
LLM_ARCH_STARCODER2,
|
|
||||||
LLM_ARCH_MAMBA,
|
|
||||||
LLM_ARCH_XVERSE,
|
|
||||||
@@ -255,6 +256,7 @@ static const std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
|
|
||||||
{ LLM_ARCH_INTERNLM2, "internlm2" },
|
|
||||||
{ LLM_ARCH_MINICPM, "minicpm" },
|
|
||||||
{ LLM_ARCH_GEMMA, "gemma" },
|
|
||||||
+ { LLM_ARCH_GEMMA2, "gemma2" },
|
|
||||||
{ LLM_ARCH_STARCODER2, "starcoder2" },
|
|
||||||
{ LLM_ARCH_MAMBA, "mamba" },
|
|
||||||
{ LLM_ARCH_XVERSE, "xverse" },
|
|
||||||
@@ -464,10 +466,12 @@ enum llm_tensor {
|
|
||||||
LLM_TENSOR_ATTN_NORM,
|
|
||||||
LLM_TENSOR_ATTN_NORM_2,
|
|
||||||
LLM_TENSOR_ATTN_OUT_NORM,
|
|
||||||
+ LLM_TENSOR_ATTN_POST_NORM,
|
|
||||||
LLM_TENSOR_ATTN_ROT_EMBD,
|
|
||||||
LLM_TENSOR_FFN_GATE_INP,
|
|
||||||
LLM_TENSOR_FFN_GATE_INP_SHEXP,
|
|
||||||
LLM_TENSOR_FFN_NORM,
|
|
||||||
+ LLM_TENSOR_FFN_POST_NORM,
|
|
||||||
LLM_TENSOR_FFN_GATE,
|
|
||||||
LLM_TENSOR_FFN_DOWN,
|
|
||||||
LLM_TENSOR_FFN_UP,
|
|
||||||
@@ -960,6 +964,24 @@ static const std::map<llm_arch, std::map<llm_tensor, std::string>> LLM_TENSOR_NA
|
|
||||||
{ LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
|
|
||||||
},
|
|
||||||
},
|
|
||||||
+ {
|
|
||||||
+ LLM_ARCH_GEMMA2,
|
|
||||||
+ {
|
|
||||||
+ { LLM_TENSOR_TOKEN_EMBD, "token_embd" },
|
|
||||||
+ { LLM_TENSOR_OUTPUT_NORM, "output_norm" },
|
|
||||||
+ { LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
|
|
||||||
+ { LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
|
|
||||||
+ { LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
|
|
||||||
+ { LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
|
|
||||||
+ { LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
|
|
||||||
+ { LLM_TENSOR_ATTN_POST_NORM, "blk.%d.post_attention_norm" },
|
|
||||||
+ { LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
|
|
||||||
+ { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
|
|
||||||
+ { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
|
|
||||||
+ { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
|
|
||||||
+ { LLM_TENSOR_FFN_POST_NORM, "blk.%d.post_ffw_norm" },
|
|
||||||
+ },
|
|
||||||
+ },
|
|
||||||
{
|
|
||||||
LLM_ARCH_STARCODER2,
|
|
||||||
{
|
|
||||||
@@ -1941,6 +1963,8 @@ enum e_model {
|
|
||||||
MODEL_8x22B,
|
|
||||||
MODEL_16x12B,
|
|
||||||
MODEL_10B_128x3_66B,
|
|
||||||
+ MODEL_9B,
|
|
||||||
+ MODEL_27B,
|
|
||||||
};
|
|
||||||
|
|
||||||
static const size_t kiB = 1024;
|
|
||||||
@@ -2114,6 +2138,7 @@ struct llama_layer {
|
|
||||||
struct ggml_tensor * attn_out_norm_b;
|
|
||||||
struct ggml_tensor * attn_q_a_norm;
|
|
||||||
struct ggml_tensor * attn_kv_a_norm;
|
|
||||||
+ struct ggml_tensor * attn_post_norm;
|
|
||||||
|
|
||||||
// attention
|
|
||||||
struct ggml_tensor * wq;
|
|
||||||
@@ -2136,6 +2161,7 @@ struct llama_layer {
|
|
||||||
// normalization
|
|
||||||
struct ggml_tensor * ffn_norm;
|
|
||||||
struct ggml_tensor * ffn_norm_b;
|
|
||||||
+ struct ggml_tensor * ffn_post_norm;
|
|
||||||
struct ggml_tensor * layer_out_norm;
|
|
||||||
struct ggml_tensor * layer_out_norm_b;
|
|
||||||
struct ggml_tensor * ffn_norm_exps;
|
|
||||||
@@ -4529,6 +4555,16 @@ static void llm_load_hparams(
|
|
||||||
}
|
|
||||||
} break;
|
|
||||||
case LLM_ARCH_GEMMA:
|
|
||||||
+ {
|
|
||||||
+ ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
|
|
||||||
+
|
|
||||||
+ switch (hparams.n_layer) {
|
|
||||||
+ case 18: model.type = e_model::MODEL_9B; break;
|
|
||||||
+ case 28: model.type = e_model::MODEL_27B; break;
|
|
||||||
+ default: model.type = e_model::MODEL_UNKNOWN;
|
|
||||||
+ }
|
|
||||||
+ } break;
|
|
||||||
+ case LLM_ARCH_GEMMA2:
|
|
||||||
{
|
|
||||||
ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
|
|
||||||
|
|
||||||
@@ -6305,6 +6341,40 @@ static bool llm_load_tensors(
|
|
||||||
layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
|
|
||||||
}
|
|
||||||
} break;
|
|
||||||
+ case LLM_ARCH_GEMMA2:
|
|
||||||
+ {
|
|
||||||
+ model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
|
|
||||||
+
|
|
||||||
+ // output
|
|
||||||
+ model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
|
|
||||||
+ model.output = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED); // same as tok_embd, duplicated to allow offloading
|
|
||||||
+
|
|
||||||
+ const int64_t n_ff = hparams.n_ff;
|
|
||||||
+ const int64_t n_embd_head_k = hparams.n_embd_head_k;
|
|
||||||
+ const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa();
|
|
||||||
+ const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa();
|
|
||||||
+
|
|
||||||
+ for (uint32_t i = 0; i < n_layer; ++i) {
|
|
||||||
+ ggml_context * ctx_layer = ctx_for_layer(i);
|
|
||||||
+ ggml_context * ctx_split = ctx_for_layer_split(i);
|
|
||||||
+
|
|
||||||
+ auto & layer = model.layers[i];
|
|
||||||
+
|
|
||||||
+ layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
|
|
||||||
+
|
|
||||||
+ layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd_head_k * hparams.n_head});
|
|
||||||
+ layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_k_gqa});
|
|
||||||
+ layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_v_gqa});
|
|
||||||
+ layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd_head_k * hparams.n_head, n_embd});
|
|
||||||
+ layer.attn_post_norm = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_POST_NORM, "weight", i), {n_embd});
|
|
||||||
+
|
|
||||||
+ layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
|
|
||||||
+ layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
|
|
||||||
+ layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
|
|
||||||
+ layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
|
|
||||||
+ layer.ffn_post_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_POST_NORM, "weight", i), {n_embd});
|
|
||||||
+ }
|
|
||||||
+ } break;
|
|
||||||
case LLM_ARCH_STARCODER2:
|
|
||||||
{
|
|
||||||
model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
|
|
||||||
@@ -10614,6 +10684,123 @@ struct llm_build_context {
|
|
||||||
return gf;
|
|
||||||
}
|
|
||||||
|
|
||||||
+ struct ggml_cgraph * build_gemma2() {
|
|
||||||
+ struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
|
|
||||||
+
|
|
||||||
+ const int64_t n_embd_head_k = hparams.n_embd_head_k;
|
|
||||||
+
|
|
||||||
+ struct ggml_tensor * cur;
|
|
||||||
+ struct ggml_tensor * inpL;
|
|
||||||
+
|
|
||||||
+ inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
|
|
||||||
+
|
|
||||||
+ inpL = ggml_scale(ctx0, inpL, sqrtf(n_embd));
|
|
||||||
+ cb(inpL, "inp_scaled", -1);
|
|
||||||
+
|
|
||||||
+ // inp_pos - contains the positions
|
|
||||||
+ struct ggml_tensor * inp_pos = build_inp_pos();
|
|
||||||
+
|
|
||||||
+ // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
|
|
||||||
+ struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
|
|
||||||
+
|
|
||||||
+ for (int il = 0; il < n_layer; ++il) {
|
|
||||||
+ // norm
|
|
||||||
+ cur = llm_build_norm(ctx0, inpL, hparams,
|
|
||||||
+ model.layers[il].attn_norm, NULL,
|
|
||||||
+ LLM_NORM_RMS, cb, il);
|
|
||||||
+ cb(cur, "attn_norm", il);
|
|
||||||
+
|
|
||||||
+ // self-attention
|
|
||||||
+ {
|
|
||||||
+ // compute Q and K and RoPE them
|
|
||||||
+ struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
|
|
||||||
+ cb(Qcur, "Qcur", il);
|
|
||||||
+
|
|
||||||
+ struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, model.layers[il].wk, cur);
|
|
||||||
+ cb(Kcur, "Kcur", il);
|
|
||||||
+
|
|
||||||
+ struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
|
|
||||||
+ cb(Vcur, "Vcur", il);
|
|
||||||
+
|
|
||||||
+ Qcur = ggml_rope_ext(
|
|
||||||
+ ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head_k, n_head, n_tokens), inp_pos, nullptr,
|
|
||||||
+ n_embd_head_k, rope_type, n_ctx_orig, freq_base, freq_scale,
|
|
||||||
+ ext_factor, attn_factor, beta_fast, beta_slow);
|
|
||||||
+ cb(Qcur, "Qcur", il);
|
|
||||||
+
|
|
||||||
+ Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd_head_k)));
|
|
||||||
+ cb(Qcur, "Qcur_scaled", il);
|
|
||||||
+
|
|
||||||
+ Kcur = ggml_rope_ext(
|
|
||||||
+ ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head_k, n_head_kv, n_tokens), inp_pos, nullptr,
|
|
||||||
+ n_embd_head_k, rope_type, n_ctx_orig, freq_base, freq_scale,
|
|
||||||
+ ext_factor, attn_factor, beta_fast, beta_slow);
|
|
||||||
+ cb(Kcur, "Kcur", il);
|
|
||||||
+
|
|
||||||
+ cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
|
|
||||||
+ model.layers[il].wo, NULL,
|
|
||||||
+ Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f, cb, il);
|
|
||||||
+ }
|
|
||||||
+
|
|
||||||
+ if (il == n_layer - 1) {
|
|
||||||
+ // skip computing output for unused tokens
|
|
||||||
+ struct ggml_tensor * inp_out_ids = build_inp_out_ids();
|
|
||||||
+ cur = ggml_get_rows(ctx0, cur, inp_out_ids);
|
|
||||||
+ inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
|
|
||||||
+ }
|
|
||||||
+
|
|
||||||
+ cur = llm_build_norm(ctx0, cur, hparams,
|
|
||||||
+ model.layers[il].attn_post_norm, NULL,
|
|
||||||
+ LLM_NORM_RMS, cb, il);
|
|
||||||
+ cb(cur, "attn_post_norm", il);
|
|
||||||
+
|
|
||||||
+ struct ggml_tensor * sa_out = ggml_add(ctx0, cur, inpL);
|
|
||||||
+ cb(sa_out, "sa_out", il);
|
|
||||||
+
|
|
||||||
+ cur = llm_build_norm(ctx0, sa_out, hparams,
|
|
||||||
+ model.layers[il].ffn_norm, NULL,
|
|
||||||
+ LLM_NORM_RMS, cb, il);
|
|
||||||
+ cb(cur, "ffn_norm", il);
|
|
||||||
+
|
|
||||||
+ // feed-forward network
|
|
||||||
+ {
|
|
||||||
+ cur = llm_build_ffn(ctx0, cur,
|
|
||||||
+ model.layers[il].ffn_up, NULL,
|
|
||||||
+ model.layers[il].ffn_gate, NULL,
|
|
||||||
+ model.layers[il].ffn_down, NULL,
|
|
||||||
+ NULL,
|
|
||||||
+ LLM_FFN_GELU, LLM_FFN_PAR, cb, il);
|
|
||||||
+ cb(cur, "ffn_out", il);
|
|
||||||
+ }
|
|
||||||
+
|
|
||||||
+ cur = llm_build_norm(ctx0, cur, hparams,
|
|
||||||
+ model.layers[il].ffn_post_norm, NULL,
|
|
||||||
+ LLM_NORM_RMS, cb, -1);
|
|
||||||
+ cb(cur, "ffn_post_norm", -1);
|
|
||||||
+
|
|
||||||
+ cur = ggml_add(ctx0, cur, sa_out);
|
|
||||||
+ cb(cur, "l_out", il);
|
|
||||||
+
|
|
||||||
+ // input for next layer
|
|
||||||
+ inpL = cur;
|
|
||||||
+ }
|
|
||||||
+
|
|
||||||
+ cur = inpL;
|
|
||||||
+
|
|
||||||
+ cur = llm_build_norm(ctx0, cur, hparams,
|
|
||||||
+ model.output_norm, NULL,
|
|
||||||
+ LLM_NORM_RMS, cb, -1);
|
|
||||||
+ cb(cur, "result_norm", -1);
|
|
||||||
+
|
|
||||||
+ // lm_head
|
|
||||||
+ cur = ggml_mul_mat(ctx0, model.output, cur);
|
|
||||||
+ cb(cur, "result_output", -1);
|
|
||||||
+
|
|
||||||
+ ggml_build_forward_expand(gf, cur);
|
|
||||||
+
|
|
||||||
+ return gf;
|
|
||||||
+ }
|
|
||||||
+
|
|
||||||
struct ggml_cgraph * build_starcoder2() {
|
|
||||||
struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
|
|
||||||
|
|
||||||
@@ -11847,6 +12034,10 @@ static struct ggml_cgraph * llama_build_graph(
|
|
||||||
{
|
|
||||||
result = llm.build_gemma();
|
|
||||||
} break;
|
|
||||||
+ case LLM_ARCH_GEMMA2:
|
|
||||||
+ {
|
|
||||||
+ result = llm.build_gemma2();
|
|
||||||
+ } break;
|
|
||||||
case LLM_ARCH_STARCODER2:
|
|
||||||
{
|
|
||||||
result = llm.build_starcoder2();
|
|
||||||
@@ -16671,6 +16862,7 @@ enum llama_rope_type llama_rope_type(const struct llama_model * model) {
|
|
||||||
case LLM_ARCH_PHI2:
|
|
||||||
case LLM_ARCH_PHI3:
|
|
||||||
case LLM_ARCH_GEMMA:
|
|
||||||
+ case LLM_ARCH_GEMMA2:
|
|
||||||
case LLM_ARCH_STARCODER2:
|
|
||||||
case LLM_ARCH_GPTNEOX:
|
|
||||||
return LLAMA_ROPE_TYPE_NEOX;
|
|
||||||
@@ -18551,7 +18743,7 @@ static int32_t llama_chat_apply_template_internal(
|
|
||||||
if (add_ass) {
|
|
||||||
ss << "<s>assistant\n";
|
|
||||||
}
|
|
||||||
- } else if (tmpl == "gemma" || tmpl.find("<start_of_turn>") != std::string::npos) {
|
|
||||||
+ } else if (tmpl == "gemma" || tmpl == "gemma2" || tmpl.find("<start_of_turn>") != std::string::npos) {
|
|
||||||
// google/gemma-7b-it
|
|
||||||
std::string system_prompt = "";
|
|
||||||
for (auto message : chat) {
|
|
||||||
--
|
|
||||||
2.45.2
|
|
||||||
|
|
42
llm/patches/08-clip-unicode.diff
Normal file
@ -0,0 +1,42 @@
|
||||||
|
diff --git a/examples/llava/clip.cpp b/examples/llava/clip.cpp
|
||||||
|
index 95fbe3d0..5a02a6ec 100644
|
||||||
|
--- a/examples/llava/clip.cpp
|
||||||
|
+++ b/examples/llava/clip.cpp
|
||||||
|
@@ -32,6 +33,14 @@
|
||||||
|
#include <cinttypes>
|
||||||
|
#include <limits>
|
||||||
|
|
||||||
|
+#if defined(_WIN32)
|
||||||
|
+#define WIN32_LEAN_AND_MEAN
|
||||||
|
+#ifndef NOMINMAX
|
||||||
|
+ #define NOMINMAX
|
||||||
|
+#endif
|
||||||
|
+#include <windows.h>
|
||||||
|
+#endif
|
||||||
|
+
|
||||||
|
//#define CLIP_DEBUG_FUNCTIONS
|
||||||
|
|
||||||
|
// RGB uint8 image
|
||||||
|
@@ -1055,7 +1064,22 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
|
||||||
|
return nullptr;
|
||||||
|
}
|
||||||
|
|
||||||
|
+#ifdef _WIN32
|
||||||
|
+ int wlen = MultiByteToWideChar(CP_UTF8, 0, fname, -1, NULL, 0);
|
||||||
|
+ if (!wlen) {
|
||||||
|
+ return NULL;
|
||||||
|
+ }
|
||||||
|
+ wchar_t * wbuf = (wchar_t *) malloc(wlen * sizeof(wchar_t));
|
||||||
|
+ wlen = MultiByteToWideChar(CP_UTF8, 0, fname, -1, wbuf, wlen);
|
||||||
|
+ if (!wlen) {
|
||||||
|
+ free(wbuf);
|
||||||
|
+ return NULL;
|
||||||
|
+ }
|
||||||
|
+ auto fin = std::ifstream(wbuf, std::ios::binary);
|
||||||
|
+ free(wbuf);
|
||||||
|
+#else
|
||||||
|
auto fin = std::ifstream(fname, std::ios::binary);
|
||||||
|
+#endif
|
||||||
|
if (!fin) {
|
||||||
|
LOG_TEE("cannot open model file for loading tensors\n");
|
||||||
|
clip_free(new_clip);
|
60
llm/patches/09-pooling.diff
Normal file
@ -0,0 +1,60 @@
|
||||||
|
diff --git a/src/llama.cpp b/src/llama.cpp
|
||||||
|
index 721b8f4e..cfe7ac40 100644
|
||||||
|
--- a/src/llama.cpp
|
||||||
|
+++ b/src/llama.cpp
|
||||||
|
@@ -8420,14 +8420,14 @@ struct llm_build_context {
|
||||||
|
}
|
||||||
|
|
||||||
|
struct ggml_tensor * build_inp_mean() {
|
||||||
|
- lctx.inp_mean = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, n_tokens);
|
||||||
|
+ lctx.inp_mean = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, cparams.n_seq_max);
|
||||||
|
cb(lctx.inp_mean, "inp_mean", -1);
|
||||||
|
ggml_set_input(lctx.inp_mean);
|
||||||
|
return lctx.inp_mean;
|
||||||
|
}
|
||||||
|
|
||||||
|
struct ggml_tensor * build_inp_cls() {
|
||||||
|
- lctx.inp_cls = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
|
||||||
|
+ lctx.inp_cls = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, cparams.n_seq_max);
|
||||||
|
cb(lctx.inp_cls, "inp_cls", -1);
|
||||||
|
ggml_set_input(lctx.inp_cls);
|
||||||
|
return lctx.inp_cls;
|
||||||
|
@@ -13847,19 +13847,16 @@ static void llama_set_inputs(llama_context & lctx, const llama_batch & batch) {
|
||||||
|
GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_mean->buffer));
|
||||||
|
|
||||||
|
float * data = (float *) lctx.inp_mean->data;
|
||||||
|
- memset(lctx.inp_mean->data, 0, n_tokens * n_tokens * ggml_element_size(lctx.inp_mean));
|
||||||
|
+ memset(lctx.inp_mean->data, 0, n_tokens * cparams.n_seq_max * ggml_element_size(lctx.inp_mean));
|
||||||
|
|
||||||
|
std::vector<uint64_t> sum(n_tokens, 0);
|
||||||
|
for (int i = 0; i < n_tokens; ++i) {
|
||||||
|
const llama_seq_id seq_id = batch.seq_id[i][0];
|
||||||
|
-
|
||||||
|
- GGML_ASSERT(seq_id < n_tokens && "seq_id cannot be larger than n_tokens with pooling_type == MEAN");
|
||||||
|
-
|
||||||
|
sum[seq_id] += 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
- std::vector<float> div(n_tokens, 0.0f);
|
||||||
|
- for (int i = 0; i < n_tokens; ++i) {
|
||||||
|
+ std::vector<float> div(cparams.n_seq_max, 0.0f);
|
||||||
|
+ for (uint32_t i = 0; i < cparams.n_seq_max; ++i) {
|
||||||
|
const uint64_t s = sum[i];
|
||||||
|
if (s > 0) {
|
||||||
|
div[i] = 1.0f/float(s);
|
||||||
|
@@ -13879,14 +13876,11 @@ static void llama_set_inputs(llama_context & lctx, const llama_batch & batch) {
|
||||||
|
GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_cls->buffer));
|
||||||
|
|
||||||
|
uint32_t * data = (uint32_t *) lctx.inp_cls->data;
|
||||||
|
- memset(lctx.inp_cls->data, 0, n_tokens * ggml_element_size(lctx.inp_cls));
|
||||||
|
+ memset(lctx.inp_cls->data, 0, cparams.n_seq_max * ggml_element_size(lctx.inp_cls));
|
||||||
|
|
||||||
|
for (int i = 0; i < n_tokens; ++i) {
|
||||||
|
const llama_seq_id seq_id = batch.seq_id[i][0];
|
||||||
|
const llama_pos pos = batch.pos[i];
|
||||||
|
-
|
||||||
|
- GGML_ASSERT(seq_id < n_tokens && "seq_id cannot be larger than n_tokens with pooling_type == CLS");
|
||||||
|
-
|
||||||
|
if (pos == 0) {
|
||||||
|
data[seq_id] = i;
|
||||||
|
}
|
|
@ -38,7 +38,7 @@ func Init() error {
}

var variants []string
-for v := range availableServers() {
+for v := range getAvailableServers() {
variants = append(variants, v)
}
slog.Info(fmt.Sprintf("Dynamic LLM libraries %v", variants))
@ -50,7 +50,7 @@ func Init() error {
// binary names may contain an optional variant separated by '_'
// For example, "ollama_rocm_v6" and "ollama_rocm_v5" or "ollama_cpu" and "ollama_cpu_avx2"
// Any library without a variant is the lowest common denominator
-func availableServers() map[string]string {
+func getAvailableServers() map[string]string {
payloadsDir, err := gpu.PayloadsDir()
if err != nil {
slog.Error("payload lookup error", "error", err)
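The variant-suffix naming convention described above lends itself to a simple lookup. A minimal sketch in Go of how such names could be split into library and variant; the helper name and inputs are illustrative only, not the actual ollama implementation:

package main

import (
	"fmt"
	"strings"
)

// splitServerName separates a payload name such as "rocm_v6" or "cpu_avx2"
// into its library ("rocm", "cpu") and optional variant ("v6", "avx2").
// A name without a variant is the lowest-common-denominator build.
func splitServerName(name string) (library, variant string) {
	parts := strings.SplitN(name, "_", 2)
	library = parts[0]
	if len(parts) == 2 {
		variant = parts[1]
	}
	return library, variant
}

func main() {
	for _, name := range []string{"cpu", "cpu_avx2", "rocm_v6"} {
		lib, variant := splitServerName(name)
		fmt.Printf("%s -> library=%s variant=%q\n", name, lib, variant)
	}
}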
@ -80,7 +80,7 @@ func availableServers() map[string]string {
// TODO - switch to metadata based mapping
func serversForGpu(info gpu.GpuInfo) []string {
// glob workDir for files that start with ollama_
-availableServers := availableServers()
+availableServers := getAvailableServers()
requested := info.Library
if info.Variant != gpu.CPUCapabilityNone {
requested += "_" + info.Variant.String()
@ -115,6 +115,7 @@ func serversForGpu(info gpu.GpuInfo) []string {
servers = append(servers, alt...)
}

+if !(runtime.GOOS == "darwin" && runtime.GOARCH == "arm64") {
// Load up the best CPU variant if not primary requested
if info.Library != "cpu" {
variant := gpu.GetCPUCapability()
|
if len(servers) == 0 {
servers = []string{"cpu"}
}
+}

return servers
}
@ -147,7 +149,7 @@ func serverForCpu() string {
return "metal"
}
variant := gpu.GetCPUCapability()
-availableServers := availableServers()
+availableServers := getAvailableServers()
if variant != gpu.CPUCapabilityNone {
for cmp := range availableServers {
if cmp == "cpu_"+variant.String() {
@ -82,7 +82,7 @@ func LoadModel(model string, maxArraySize int) (*GGML, error) {

// NewLlamaServer will run a server for the given GPUs
// The gpu list must be a single family.
-func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, projectors []string, opts api.Options) (LlamaServer, error) {
+func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, projectors []string, opts api.Options, numParallel int) (LlamaServer, error) {
var err error
var cpuRunner string
var estimate MemoryEstimate
@ -131,7 +131,20 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr
return nil, errors.New("ollama supports only one lora adapter, but multiple were provided")
}

-availableServers := availableServers()
+availableServers := getAvailableServers()
+if len(availableServers) == 0 {
+if runtime.GOOS != "windows" {
+slog.Warn("llama server binary disappeared, reinitializing payloads")
+err = Init()
+if err != nil {
+slog.Warn("failed to reinitialize payloads", "error", err)
+return nil, err
+}
+availableServers = getAvailableServers()
+} else {
+return nil, finalErr
+}
+}
var servers []string
if cpuRunner != "" {
servers = []string{cpuRunner}
@ -208,7 +221,8 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr
if g.Library == "metal" &&
uint64(opts.NumGPU) > 0 &&
uint64(opts.NumGPU) < ggml.KV().BlockCount()+1 {
-opts.UseMMap = api.TriStateFalse
+opts.UseMMap = new(bool)
+*opts.UseMMap = false
}
}

|

// Windows CUDA should not use mmap for best performance
// Linux with a model larger than free space, mmap leads to thrashing
-if (runtime.GOOS == "windows" && gpus[0].Library == "cuda" && opts.UseMMap == api.TriStateUndefined) ||
-(runtime.GOOS == "linux" && systemFreeMemory < estimate.TotalSize && opts.UseMMap == api.TriStateUndefined) ||
-opts.UseMMap == api.TriStateFalse {
+// For CPU loads we want the memory to be allocated, not FS cache
+if (runtime.GOOS == "windows" && gpus[0].Library == "cuda" && opts.UseMMap == nil) ||
+(runtime.GOOS == "linux" && systemFreeMemory < estimate.TotalSize && opts.UseMMap == nil) ||
+(gpus[0].Library == "cpu" && opts.UseMMap == nil) ||
+(opts.UseMMap != nil && !*opts.UseMMap) {
params = append(params, "--no-mmap")
}
}

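The mmap decision above hinges on a pointer-valued option with three states: nil (unset), pointing at false (explicitly disabled), and pointing at true (explicitly enabled). A minimal sketch of that pattern with hypothetical names, not the actual server code:

package main

import "fmt"

// Options mirrors the idea of an optional boolean: nil means "let the
// runtime decide", while a non-nil pointer records an explicit user choice.
type Options struct {
	UseMMap *bool
}

func shouldDisableMMap(opts Options, lowFreeMemory bool) bool {
	switch {
	case opts.UseMMap != nil && !*opts.UseMMap:
		return true // user explicitly turned mmap off
	case opts.UseMMap == nil && lowFreeMemory:
		return true // unset: fall back to a heuristic
	default:
		return false
	}
}

func main() {
	off := false
	fmt.Println(shouldDisableMMap(Options{UseMMap: &off}, false)) // true
	fmt.Println(shouldDisableMMap(Options{}, true))               // true
	fmt.Println(shouldDisableMMap(Options{}, false))              // false
}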
@ -232,15 +248,6 @@ func NewLlamaServer(gpus gpu.GpuInfoList, model string, ggml *GGML, adapters, pr
params = append(params, "--numa")
}

-numParallel := envconfig.NumParallel
-
-// TODO (jmorganca): multimodal models don't support parallel yet
-// see https://github.com/ollama/ollama/issues/4165
-if len(projectors) > 0 {
-numParallel = 1
-slog.Warn("multimodal models don't support parallel requests yet")
-}
-
params = append(params, "--parallel", fmt.Sprintf("%d", numParallel))

if estimate.TensorSplit != "" {
@ -567,6 +574,9 @@ func (s *llmServer) WaitUntilRunning(ctx context.Context) error {
if s.status != nil && s.status.LastErrMsg != "" {
msg = s.status.LastErrMsg
}
+if strings.Contains(msg, "unknown model") {
+return fmt.Errorf("this model is not supported by your version of Ollama. You may need to upgrade")
+}
return fmt.Errorf("llama runner process has terminated: %v %s", err, msg)
default:
}
@ -25,6 +25,7 @@ var errorPrefixes = []string{
"CUDA error",
"cudaMalloc failed",
"\"ERR\"",
+"error loading model",
}

func (w *StatusWriter) Write(b []byte) (int, error) {
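The added "error loading model" entry extends the set of strings the status writer watches for in runner output. A rough, simplified sketch of that kind of prefix scan, with invented names rather than the real StatusWriter:

package main

import (
	"fmt"
	"strings"
)

var errorPrefixes = []string{
	"CUDA error",
	"cudaMalloc failed",
	"\"ERR\"",
	"error loading model",
}

// lastError returns the portion of a log line starting at the first known
// error marker, if any marker is present.
func lastError(line string) (string, bool) {
	for _, prefix := range errorPrefixes {
		if idx := strings.Index(line, prefix); idx != -1 {
			return strings.TrimSpace(line[idx:]), true
		}
	}
	return "", false
}

func main() {
	if msg, ok := lastError("llm server: error loading model: missing tensor"); ok {
		fmt.Println("captured:", msg)
	}
}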
382
openai/openai.go
@ -12,6 +12,7 @@ import (

"github.com/gin-gonic/gin"
"github.com/ollama/ollama/api"
+"github.com/ollama/ollama/types/model"
)

type Error struct {
@ -42,6 +43,12 @@ type ChunkChoice struct {
FinishReason *string `json:"finish_reason"`
}

+type CompleteChunkChoice struct {
+Text string `json:"text"`
+Index int `json:"index"`
+FinishReason *string `json:"finish_reason"`
+}
+
type Usage struct {
PromptTokens int `json:"prompt_tokens"`
CompletionTokens int `json:"completion_tokens"`
@ -85,6 +92,51 @@ type ChatCompletionChunk struct {
|
||||||
Choices []ChunkChoice `json:"choices"`
|
Choices []ChunkChoice `json:"choices"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TODO (https://github.com/ollama/ollama/issues/5259): support []string, []int and [][]int
|
||||||
|
type CompletionRequest struct {
|
||||||
|
Model string `json:"model"`
|
||||||
|
Prompt string `json:"prompt"`
|
||||||
|
FrequencyPenalty float32 `json:"frequency_penalty"`
|
||||||
|
MaxTokens *int `json:"max_tokens"`
|
||||||
|
PresencePenalty float32 `json:"presence_penalty"`
|
||||||
|
Seed *int `json:"seed"`
|
||||||
|
Stop any `json:"stop"`
|
||||||
|
Stream bool `json:"stream"`
|
||||||
|
Temperature *float32 `json:"temperature"`
|
||||||
|
TopP float32 `json:"top_p"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type Completion struct {
|
||||||
|
Id string `json:"id"`
|
||||||
|
Object string `json:"object"`
|
||||||
|
Created int64 `json:"created"`
|
||||||
|
Model string `json:"model"`
|
||||||
|
SystemFingerprint string `json:"system_fingerprint"`
|
||||||
|
Choices []CompleteChunkChoice `json:"choices"`
|
||||||
|
Usage Usage `json:"usage,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type CompletionChunk struct {
|
||||||
|
Id string `json:"id"`
|
||||||
|
Object string `json:"object"`
|
||||||
|
Created int64 `json:"created"`
|
||||||
|
Choices []CompleteChunkChoice `json:"choices"`
|
||||||
|
Model string `json:"model"`
|
||||||
|
SystemFingerprint string `json:"system_fingerprint"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type Model struct {
|
||||||
|
Id string `json:"id"`
|
||||||
|
Object string `json:"object"`
|
||||||
|
Created int64 `json:"created"`
|
||||||
|
OwnedBy string `json:"owned_by"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ListCompletion struct {
|
||||||
|
Object string `json:"object"`
|
||||||
|
Data []Model `json:"data"`
|
||||||
|
}
|
||||||
|
|
||||||
func NewError(code int, message string) ErrorResponse {
|
func NewError(code int, message string) ErrorResponse {
|
||||||
var etype string
|
var etype string
|
||||||
switch code {
|
switch code {
|
||||||
|
@ -145,7 +197,79 @@ func toChunk(id string, r api.ChatResponse) ChatCompletionChunk {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func fromRequest(r ChatCompletionRequest) api.ChatRequest {
|
func toCompletion(id string, r api.GenerateResponse) Completion {
|
||||||
|
return Completion{
|
||||||
|
Id: id,
|
||||||
|
Object: "text_completion",
|
||||||
|
Created: r.CreatedAt.Unix(),
|
||||||
|
Model: r.Model,
|
||||||
|
SystemFingerprint: "fp_ollama",
|
||||||
|
Choices: []CompleteChunkChoice{{
|
||||||
|
Text: r.Response,
|
||||||
|
Index: 0,
|
||||||
|
FinishReason: func(reason string) *string {
|
||||||
|
if len(reason) > 0 {
|
||||||
|
return &reason
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}(r.DoneReason),
|
||||||
|
}},
|
||||||
|
Usage: Usage{
|
||||||
|
// TODO: ollama returns 0 for prompt eval if the prompt was cached, but openai returns the actual count
|
||||||
|
PromptTokens: r.PromptEvalCount,
|
||||||
|
CompletionTokens: r.EvalCount,
|
||||||
|
TotalTokens: r.PromptEvalCount + r.EvalCount,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func toCompleteChunk(id string, r api.GenerateResponse) CompletionChunk {
|
||||||
|
return CompletionChunk{
|
||||||
|
Id: id,
|
||||||
|
Object: "text_completion",
|
||||||
|
Created: time.Now().Unix(),
|
||||||
|
Model: r.Model,
|
||||||
|
SystemFingerprint: "fp_ollama",
|
||||||
|
Choices: []CompleteChunkChoice{{
|
||||||
|
Text: r.Response,
|
||||||
|
Index: 0,
|
||||||
|
FinishReason: func(reason string) *string {
|
||||||
|
if len(reason) > 0 {
|
||||||
|
return &reason
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}(r.DoneReason),
|
||||||
|
}},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func toListCompletion(r api.ListResponse) ListCompletion {
|
||||||
|
var data []Model
|
||||||
|
for _, m := range r.Models {
|
||||||
|
data = append(data, Model{
|
||||||
|
Id: m.Name,
|
||||||
|
Object: "model",
|
||||||
|
Created: m.ModifiedAt.Unix(),
|
||||||
|
OwnedBy: model.ParseName(m.Name).Namespace,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
return ListCompletion{
|
||||||
|
Object: "list",
|
||||||
|
Data: data,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func toModel(r api.ShowResponse, m string) Model {
|
||||||
|
return Model{
|
||||||
|
Id: m,
|
||||||
|
Object: "model",
|
||||||
|
Created: r.ModifiedAt.Unix(),
|
||||||
|
OwnedBy: model.ParseName(m).Namespace,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func fromChatRequest(r ChatCompletionRequest) api.ChatRequest {
|
||||||
var messages []api.Message
|
var messages []api.Message
|
||||||
for _, msg := range r.Messages {
|
for _, msg := range r.Messages {
|
||||||
messages = append(messages, api.Message{Role: msg.Role, Content: msg.Content})
|
messages = append(messages, api.Message{Role: msg.Role, Content: msg.Content})
|
||||||
|
@ -156,7 +280,7 @@ func fromRequest(r ChatCompletionRequest) api.ChatRequest {
switch stop := r.Stop.(type) {
case string:
options["stop"] = []string{stop}
-case []interface{}:
+case []any:
var stops []string
for _, s := range stop {
if str, ok := s.(string); ok {
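The type switch above accepts a stop value that may arrive either as a single string or as a JSON array. A standalone sketch of the same normalization; the function name here is illustrative, not part of the package:

package main

import "fmt"

// normalizeStop converts an OpenAI-style "stop" field, which may be a string
// or a list, into a []string, ignoring non-string elements.
func normalizeStop(stop any) []string {
	switch v := stop.(type) {
	case string:
		return []string{v}
	case []any:
		var stops []string
		for _, s := range v {
			if str, ok := s.(string); ok {
				stops = append(stops, str)
			}
		}
		return stops
	}
	return nil
}

func main() {
	fmt.Println(normalizeStop("###"))
	fmt.Println(normalizeStop([]any{"###", "\n\n", 42}))
}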
@ -208,13 +332,78 @@ func fromRequest(r ChatCompletionRequest) api.ChatRequest {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
type writer struct {
|
func fromCompleteRequest(r CompletionRequest) (api.GenerateRequest, error) {
|
||||||
stream bool
|
options := make(map[string]any)
|
||||||
id string
|
|
||||||
|
switch stop := r.Stop.(type) {
|
||||||
|
case string:
|
||||||
|
options["stop"] = []string{stop}
|
||||||
|
case []string:
|
||||||
|
options["stop"] = stop
|
||||||
|
default:
|
||||||
|
if r.Stop != nil {
|
||||||
|
return api.GenerateRequest{}, fmt.Errorf("invalid type for 'stop' field: %T", r.Stop)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if r.MaxTokens != nil {
|
||||||
|
options["num_predict"] = *r.MaxTokens
|
||||||
|
}
|
||||||
|
|
||||||
|
if r.Temperature != nil {
|
||||||
|
options["temperature"] = *r.Temperature * 2.0
|
||||||
|
} else {
|
||||||
|
options["temperature"] = 1.0
|
||||||
|
}
|
||||||
|
|
||||||
|
if r.Seed != nil {
|
||||||
|
options["seed"] = *r.Seed
|
||||||
|
}
|
||||||
|
|
||||||
|
options["frequency_penalty"] = r.FrequencyPenalty * 2.0
|
||||||
|
|
||||||
|
options["presence_penalty"] = r.PresencePenalty * 2.0
|
||||||
|
|
||||||
|
if r.TopP != 0.0 {
|
||||||
|
options["top_p"] = r.TopP
|
||||||
|
} else {
|
||||||
|
options["top_p"] = 1.0
|
||||||
|
}
|
||||||
|
|
||||||
|
return api.GenerateRequest{
|
||||||
|
Model: r.Model,
|
||||||
|
Prompt: r.Prompt,
|
||||||
|
Options: options,
|
||||||
|
Stream: &r.Stream,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type BaseWriter struct {
|
||||||
gin.ResponseWriter
|
gin.ResponseWriter
|
||||||
}
|
}
|
||||||
|
|
||||||
func (w *writer) writeError(code int, data []byte) (int, error) {
|
type ChatWriter struct {
|
||||||
|
stream bool
|
||||||
|
id string
|
||||||
|
BaseWriter
|
||||||
|
}
|
||||||
|
|
||||||
|
type CompleteWriter struct {
|
||||||
|
stream bool
|
||||||
|
id string
|
||||||
|
BaseWriter
|
||||||
|
}
|
||||||
|
|
||||||
|
type ListWriter struct {
|
||||||
|
BaseWriter
|
||||||
|
}
|
||||||
|
|
||||||
|
type RetrieveWriter struct {
|
||||||
|
BaseWriter
|
||||||
|
model string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *BaseWriter) writeError(code int, data []byte) (int, error) {
|
||||||
var serr api.StatusError
|
var serr api.StatusError
|
||||||
err := json.Unmarshal(data, &serr)
|
err := json.Unmarshal(data, &serr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -230,7 +419,7 @@ func (w *writer) writeError(code int, data []byte) (int, error) {
|
||||||
return len(data), nil
|
return len(data), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (w *writer) writeResponse(data []byte) (int, error) {
|
func (w *ChatWriter) writeResponse(data []byte) (int, error) {
|
||||||
var chatResponse api.ChatResponse
|
var chatResponse api.ChatResponse
|
||||||
err := json.Unmarshal(data, &chatResponse)
|
err := json.Unmarshal(data, &chatResponse)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -270,7 +459,7 @@ func (w *writer) writeResponse(data []byte) (int, error) {
|
||||||
return len(data), nil
|
return len(data), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (w *writer) Write(data []byte) (int, error) {
|
func (w *ChatWriter) Write(data []byte) (int, error) {
|
||||||
code := w.ResponseWriter.Status()
|
code := w.ResponseWriter.Status()
|
||||||
if code != http.StatusOK {
|
if code != http.StatusOK {
|
||||||
return w.writeError(code, data)
|
return w.writeError(code, data)
|
||||||
|
@ -279,7 +468,176 @@ func (w *writer) Write(data []byte) (int, error) {
|
||||||
return w.writeResponse(data)
|
return w.writeResponse(data)
|
||||||
}
|
}
|
||||||
|
|
||||||
func Middleware() gin.HandlerFunc {
|
func (w *CompleteWriter) writeResponse(data []byte) (int, error) {
|
||||||
|
var generateResponse api.GenerateResponse
|
||||||
|
err := json.Unmarshal(data, &generateResponse)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// completion chunk
|
||||||
|
if w.stream {
|
||||||
|
d, err := json.Marshal(toCompleteChunk(w.id, generateResponse))
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
w.ResponseWriter.Header().Set("Content-Type", "text/event-stream")
|
||||||
|
_, err = w.ResponseWriter.Write([]byte(fmt.Sprintf("data: %s\n\n", d)))
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if generateResponse.Done {
|
||||||
|
_, err = w.ResponseWriter.Write([]byte("data: [DONE]\n\n"))
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return len(data), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// completion
|
||||||
|
w.ResponseWriter.Header().Set("Content-Type", "application/json")
|
||||||
|
err = json.NewEncoder(w.ResponseWriter).Encode(toCompletion(w.id, generateResponse))
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return len(data), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *CompleteWriter) Write(data []byte) (int, error) {
|
||||||
|
code := w.ResponseWriter.Status()
|
||||||
|
if code != http.StatusOK {
|
||||||
|
return w.writeError(code, data)
|
||||||
|
}
|
||||||
|
|
||||||
|
return w.writeResponse(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *ListWriter) writeResponse(data []byte) (int, error) {
|
||||||
|
var listResponse api.ListResponse
|
||||||
|
err := json.Unmarshal(data, &listResponse)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
w.ResponseWriter.Header().Set("Content-Type", "application/json")
|
||||||
|
err = json.NewEncoder(w.ResponseWriter).Encode(toListCompletion(listResponse))
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return len(data), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *ListWriter) Write(data []byte) (int, error) {
|
||||||
|
code := w.ResponseWriter.Status()
|
||||||
|
if code != http.StatusOK {
|
||||||
|
return w.writeError(code, data)
|
||||||
|
}
|
||||||
|
|
||||||
|
return w.writeResponse(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *RetrieveWriter) writeResponse(data []byte) (int, error) {
|
||||||
|
var showResponse api.ShowResponse
|
||||||
|
err := json.Unmarshal(data, &showResponse)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// retrieve completion
|
||||||
|
w.ResponseWriter.Header().Set("Content-Type", "application/json")
|
||||||
|
err = json.NewEncoder(w.ResponseWriter).Encode(toModel(showResponse, w.model))
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return len(data), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *RetrieveWriter) Write(data []byte) (int, error) {
|
||||||
|
code := w.ResponseWriter.Status()
|
||||||
|
if code != http.StatusOK {
|
||||||
|
return w.writeError(code, data)
|
||||||
|
}
|
||||||
|
|
||||||
|
return w.writeResponse(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
func ListMiddleware() gin.HandlerFunc {
|
||||||
|
return func(c *gin.Context) {
|
||||||
|
w := &ListWriter{
|
||||||
|
BaseWriter: BaseWriter{ResponseWriter: c.Writer},
|
||||||
|
}
|
||||||
|
|
||||||
|
c.Writer = w
|
||||||
|
|
||||||
|
c.Next()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func RetrieveMiddleware() gin.HandlerFunc {
|
||||||
|
return func(c *gin.Context) {
|
||||||
|
var b bytes.Buffer
|
||||||
|
if err := json.NewEncoder(&b).Encode(api.ShowRequest{Name: c.Param("model")}); err != nil {
|
||||||
|
c.AbortWithStatusJSON(http.StatusInternalServerError, NewError(http.StatusInternalServerError, err.Error()))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
c.Request.Body = io.NopCloser(&b)
|
||||||
|
|
||||||
|
// response writer
|
||||||
|
w := &RetrieveWriter{
|
||||||
|
BaseWriter: BaseWriter{ResponseWriter: c.Writer},
|
||||||
|
model: c.Param("model"),
|
||||||
|
}
|
||||||
|
|
||||||
|
c.Writer = w
|
||||||
|
|
||||||
|
c.Next()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func CompletionsMiddleware() gin.HandlerFunc {
|
||||||
|
return func(c *gin.Context) {
|
||||||
|
var req CompletionRequest
|
||||||
|
err := c.ShouldBindJSON(&req)
|
||||||
|
if err != nil {
|
||||||
|
c.AbortWithStatusJSON(http.StatusBadRequest, NewError(http.StatusBadRequest, err.Error()))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var b bytes.Buffer
|
||||||
|
genReq, err := fromCompleteRequest(req)
|
||||||
|
if err != nil {
|
||||||
|
c.AbortWithStatusJSON(http.StatusBadRequest, NewError(http.StatusBadRequest, err.Error()))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := json.NewEncoder(&b).Encode(genReq); err != nil {
|
||||||
|
c.AbortWithStatusJSON(http.StatusInternalServerError, NewError(http.StatusInternalServerError, err.Error()))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
c.Request.Body = io.NopCloser(&b)
|
||||||
|
|
||||||
|
w := &CompleteWriter{
|
||||||
|
BaseWriter: BaseWriter{ResponseWriter: c.Writer},
|
||||||
|
stream: req.Stream,
|
||||||
|
id: fmt.Sprintf("cmpl-%d", rand.Intn(999)),
|
||||||
|
}
|
||||||
|
|
||||||
|
c.Writer = w
|
||||||
|
|
||||||
|
c.Next()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func ChatMiddleware() gin.HandlerFunc {
|
||||||
return func(c *gin.Context) {
|
return func(c *gin.Context) {
|
||||||
var req ChatCompletionRequest
|
var req ChatCompletionRequest
|
||||||
err := c.ShouldBindJSON(&req)
|
err := c.ShouldBindJSON(&req)
|
||||||
|
@ -294,15 +652,15 @@ func Middleware() gin.HandlerFunc {
|
||||||
}
|
}
|
||||||
|
|
||||||
var b bytes.Buffer
|
var b bytes.Buffer
|
||||||
if err := json.NewEncoder(&b).Encode(fromRequest(req)); err != nil {
|
if err := json.NewEncoder(&b).Encode(fromChatRequest(req)); err != nil {
|
||||||
c.AbortWithStatusJSON(http.StatusInternalServerError, NewError(http.StatusInternalServerError, err.Error()))
|
c.AbortWithStatusJSON(http.StatusInternalServerError, NewError(http.StatusInternalServerError, err.Error()))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
c.Request.Body = io.NopCloser(&b)
|
c.Request.Body = io.NopCloser(&b)
|
||||||
|
|
||||||
w := &writer{
|
w := &ChatWriter{
|
||||||
ResponseWriter: c.Writer,
|
BaseWriter: BaseWriter{ResponseWriter: c.Writer},
|
||||||
stream: req.Stream,
|
stream: req.Stream,
|
||||||
id: fmt.Sprintf("chatcmpl-%d", rand.Intn(999)),
|
id: fmt.Sprintf("chatcmpl-%d", rand.Intn(999)),
|
||||||
}
|
}
|
||||||
|
|
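Once these handlers are wired into the router (see the routes changes further down), the completions path can be exercised with a plain HTTP client. A minimal sketch, assuming a server listening on the default local address; the model name is a placeholder, not part of this change:

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Hypothetical model name and default local address; adjust for your setup.
	body := []byte(`{"model": "llama3", "prompt": "Why is the sky blue?", "stream": false}`)

	resp, err := http.Post("http://localhost:11434/v1/completions", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	out, _ := io.ReadAll(resp.Body)
	fmt.Println(string(out)) // expect an object with "object": "text_completion"
}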
298 openai/openai_test.go Normal file

@@ -0,0 +1,298 @@
+package openai
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"net/http/httptest"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/gin-gonic/gin"
+	"github.com/ollama/ollama/api"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestMiddleware(t *testing.T) {
+	type testCase struct {
+		Name     string
+		Method   string
+		Path     string
+		TestPath string
+		Handler  func() gin.HandlerFunc
+		Endpoint func(c *gin.Context)
+		Setup    func(t *testing.T, req *http.Request)
+		Expected func(t *testing.T, resp *httptest.ResponseRecorder)
+	}
+
+	testCases := []testCase{
+		{
+			Name:     "chat handler",
+			Method:   http.MethodPost,
+			Path:     "/api/chat",
+			TestPath: "/api/chat",
+			Handler:  ChatMiddleware,
+			Endpoint: func(c *gin.Context) {
+				var chatReq api.ChatRequest
+				if err := c.ShouldBindJSON(&chatReq); err != nil {
+					c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request"})
+					return
+				}
+
+				userMessage := chatReq.Messages[0].Content
+				var assistantMessage string
+
+				switch userMessage {
+				case "Hello":
+					assistantMessage = "Hello!"
+				default:
+					assistantMessage = "I'm not sure how to respond to that."
+				}
+
+				c.JSON(http.StatusOK, api.ChatResponse{
+					Message: api.Message{
+						Role:    "assistant",
+						Content: assistantMessage,
+					},
+				})
+			},
+			Setup: func(t *testing.T, req *http.Request) {
+				body := ChatCompletionRequest{
+					Model:    "test-model",
+					Messages: []Message{{Role: "user", Content: "Hello"}},
+				}
+
+				bodyBytes, _ := json.Marshal(body)
+
+				req.Body = io.NopCloser(bytes.NewReader(bodyBytes))
+				req.Header.Set("Content-Type", "application/json")
+			},
+			Expected: func(t *testing.T, resp *httptest.ResponseRecorder) {
+				assert.Equal(t, http.StatusOK, resp.Code)
+
+				var chatResp ChatCompletion
+				if err := json.NewDecoder(resp.Body).Decode(&chatResp); err != nil {
+					t.Fatal(err)
+				}
+
+				if chatResp.Object != "chat.completion" {
+					t.Fatalf("expected chat.completion, got %s", chatResp.Object)
+				}
+
+				if chatResp.Choices[0].Message.Content != "Hello!" {
+					t.Fatalf("expected Hello!, got %s", chatResp.Choices[0].Message.Content)
+				}
+			},
+		},
+		{
+			Name:     "completions handler",
+			Method:   http.MethodPost,
+			Path:     "/api/generate",
+			TestPath: "/api/generate",
+			Handler:  CompletionsMiddleware,
+			Endpoint: func(c *gin.Context) {
+				c.JSON(http.StatusOK, api.GenerateResponse{
+					Response: "Hello!",
+				})
+			},
+			Setup: func(t *testing.T, req *http.Request) {
+				body := CompletionRequest{
+					Model:  "test-model",
+					Prompt: "Hello",
+				}
+
+				bodyBytes, _ := json.Marshal(body)
+
+				req.Body = io.NopCloser(bytes.NewReader(bodyBytes))
+				req.Header.Set("Content-Type", "application/json")
+			},
+			Expected: func(t *testing.T, resp *httptest.ResponseRecorder) {
+				assert.Equal(t, http.StatusOK, resp.Code)
+				var completionResp Completion
+				if err := json.NewDecoder(resp.Body).Decode(&completionResp); err != nil {
+					t.Fatal(err)
+				}
+
+				if completionResp.Object != "text_completion" {
+					t.Fatalf("expected text_completion, got %s", completionResp.Object)
+				}
+
+				if completionResp.Choices[0].Text != "Hello!" {
+					t.Fatalf("expected Hello!, got %s", completionResp.Choices[0].Text)
+				}
+			},
+		},
+		{
+			Name:     "completions handler with params",
+			Method:   http.MethodPost,
+			Path:     "/api/generate",
+			TestPath: "/api/generate",
+			Handler:  CompletionsMiddleware,
+			Endpoint: func(c *gin.Context) {
+				var generateReq api.GenerateRequest
+				if err := c.ShouldBindJSON(&generateReq); err != nil {
+					c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request"})
+					return
+				}
+
+				temperature := generateReq.Options["temperature"].(float64)
+				var assistantMessage string
+
+				switch temperature {
+				case 1.6:
+					assistantMessage = "Received temperature of 1.6"
+				default:
+					assistantMessage = fmt.Sprintf("Received temperature of %f", temperature)
+				}
+
+				c.JSON(http.StatusOK, api.GenerateResponse{
+					Response: assistantMessage,
+				})
+			},
+			Setup: func(t *testing.T, req *http.Request) {
+				temp := float32(0.8)
+				body := CompletionRequest{
+					Model:       "test-model",
+					Prompt:      "Hello",
+					Temperature: &temp,
+				}
+
+				bodyBytes, _ := json.Marshal(body)
+
+				req.Body = io.NopCloser(bytes.NewReader(bodyBytes))
+				req.Header.Set("Content-Type", "application/json")
+			},
+			Expected: func(t *testing.T, resp *httptest.ResponseRecorder) {
+				assert.Equal(t, http.StatusOK, resp.Code)
+				var completionResp Completion
+				if err := json.NewDecoder(resp.Body).Decode(&completionResp); err != nil {
+					t.Fatal(err)
+				}
+
+				if completionResp.Object != "text_completion" {
+					t.Fatalf("expected text_completion, got %s", completionResp.Object)
+				}
+
+				if completionResp.Choices[0].Text != "Received temperature of 1.6" {
+					t.Fatalf("expected Received temperature of 1.6, got %s", completionResp.Choices[0].Text)
+				}
+			},
+		},
+		{
+			Name:     "completions handler with error",
+			Method:   http.MethodPost,
+			Path:     "/api/generate",
+			TestPath: "/api/generate",
+			Handler:  CompletionsMiddleware,
+			Endpoint: func(c *gin.Context) {
+				c.JSON(http.StatusBadRequest, gin.H{"error": "invalid request"})
+			},
+			Setup: func(t *testing.T, req *http.Request) {
+				body := CompletionRequest{
+					Model:  "test-model",
+					Prompt: "Hello",
+				}
+
+				bodyBytes, _ := json.Marshal(body)
+
+				req.Body = io.NopCloser(bytes.NewReader(bodyBytes))
+				req.Header.Set("Content-Type", "application/json")
+			},
+			Expected: func(t *testing.T, resp *httptest.ResponseRecorder) {
+				if resp.Code != http.StatusBadRequest {
+					t.Fatalf("expected 400, got %d", resp.Code)
+				}
+
+				if !strings.Contains(resp.Body.String(), `"invalid request"`) {
+					t.Fatalf("error was not forwarded")
+				}
+			},
+		},
+		{
+			Name:     "list handler",
+			Method:   http.MethodGet,
+			Path:     "/api/tags",
+			TestPath: "/api/tags",
+			Handler:  ListMiddleware,
+			Endpoint: func(c *gin.Context) {
+				c.JSON(http.StatusOK, api.ListResponse{
+					Models: []api.ListModelResponse{
+						{
+							Name: "Test Model",
+						},
+					},
+				})
+			},
+			Expected: func(t *testing.T, resp *httptest.ResponseRecorder) {
+				assert.Equal(t, http.StatusOK, resp.Code)
+
+				var listResp ListCompletion
+				if err := json.NewDecoder(resp.Body).Decode(&listResp); err != nil {
+					t.Fatal(err)
+				}
+
+				if listResp.Object != "list" {
+					t.Fatalf("expected list, got %s", listResp.Object)
+				}
+
+				if len(listResp.Data) != 1 {
+					t.Fatalf("expected 1, got %d", len(listResp.Data))
+				}
+
+				if listResp.Data[0].Id != "Test Model" {
+					t.Fatalf("expected Test Model, got %s", listResp.Data[0].Id)
+				}
+			},
+		},
+		{
+			Name:     "retrieve model",
+			Method:   http.MethodGet,
+			Path:     "/api/show/:model",
+			TestPath: "/api/show/test-model",
+			Handler:  RetrieveMiddleware,
+			Endpoint: func(c *gin.Context) {
+				c.JSON(http.StatusOK, api.ShowResponse{
+					ModifiedAt: time.Date(2024, 6, 17, 13, 45, 0, 0, time.UTC),
+				})
+			},
+			Expected: func(t *testing.T, resp *httptest.ResponseRecorder) {
+				var retrieveResp Model
+				if err := json.NewDecoder(resp.Body).Decode(&retrieveResp); err != nil {
+					t.Fatal(err)
+				}
+
+				if retrieveResp.Object != "model" {
+					t.Fatalf("Expected object to be model, got %s", retrieveResp.Object)
+				}
+
+				if retrieveResp.Id != "test-model" {
+					t.Fatalf("Expected id to be test-model, got %s", retrieveResp.Id)
+				}
+			},
+		},
+	}
+
+	gin.SetMode(gin.TestMode)
+	router := gin.New()
+
+	for _, tc := range testCases {
+		t.Run(tc.Name, func(t *testing.T) {
+			router = gin.New()
+			router.Use(tc.Handler())
+			router.Handle(tc.Method, tc.Path, tc.Endpoint)
+			req, _ := http.NewRequest(tc.Method, tc.TestPath, nil)
+
+			if tc.Setup != nil {
+				tc.Setup(t, req)
+			}
+
+			resp := httptest.NewRecorder()
+			router.ServeHTTP(resp, req)
+
+			tc.Expected(t, resp)
+		})
+	}
+}
@@ -124,7 +124,7 @@ func ParseFile(r io.Reader) (*File, error) {
		case stateComment, stateNil:
			// pass
		case stateValue:
-			s, ok := unquote(b.String())
+			s, ok := unquote(strings.TrimSpace(b.String()))
			if !ok || isSpace(r) {
				if _, err := b.WriteRune(r); err != nil {
					return nil, err

@@ -158,7 +158,7 @@ func ParseFile(r io.Reader) (*File, error) {
	case stateComment, stateNil:
		// pass; nothing to flush
	case stateValue:
-		s, ok := unquote(b.String())
+		s, ok := unquote(strings.TrimSpace(b.String()))
		if !ok {
			return nil, io.ErrUnexpectedEOF
		}
@@ -22,7 +22,13 @@ ADAPTER adapter1
LICENSE MIT
PARAMETER param1 value1
PARAMETER param2 value2
-TEMPLATE template1
+TEMPLATE """{{ if .System }}<|start_header_id|>system<|end_header_id|>
+
+{{ .System }}<|eot_id|>{{ end }}{{ if .Prompt }}<|start_header_id|>user<|end_header_id|>
+
+{{ .Prompt }}<|eot_id|>{{ end }}<|start_header_id|>assistant<|end_header_id|>
+
+{{ .Response }}<|eot_id|>"""
`

	reader := strings.NewReader(input)

@@ -36,7 +42,40 @@ TEMPLATE template1
		{Name: "license", Args: "MIT"},
		{Name: "param1", Args: "value1"},
		{Name: "param2", Args: "value2"},
-		{Name: "template", Args: "template1"},
+		{Name: "template", Args: "{{ if .System }}<|start_header_id|>system<|end_header_id|>\n\n{{ .System }}<|eot_id|>{{ end }}{{ if .Prompt }}<|start_header_id|>user<|end_header_id|>\n\n{{ .Prompt }}<|eot_id|>{{ end }}<|start_header_id|>assistant<|end_header_id|>\n\n{{ .Response }}<|eot_id|>"},
+	}
+
+	assert.Equal(t, expectedCommands, modelfile.Commands)
+}
+
+func TestParseFileTrimSpace(t *testing.T) {
+	input := `
+FROM " model 1"
+ADAPTER adapter3
+LICENSE "MIT "
+PARAMETER param1 value1
+PARAMETER param2 value2
+TEMPLATE """ {{ if .System }}<|start_header_id|>system<|end_header_id|>
+
+{{ .System }}<|eot_id|>{{ end }}{{ if .Prompt }}<|start_header_id|>user<|end_header_id|>
+
+{{ .Prompt }}<|eot_id|>{{ end }}<|start_header_id|>assistant<|end_header_id|>
+
+{{ .Response }}<|eot_id|> """
+`
+
+	reader := strings.NewReader(input)
+
+	modelfile, err := ParseFile(reader)
+	require.NoError(t, err)
+
+	expectedCommands := []Command{
+		{Name: "model", Args: " model 1"},
+		{Name: "adapter", Args: "adapter3"},
+		{Name: "license", Args: "MIT "},
+		{Name: "param1", Args: "value1"},
+		{Name: "param2", Args: "value2"},
+		{Name: "template", Args: " {{ if .System }}<|start_header_id|>system<|end_header_id|>\n\n{{ .System }}<|eot_id|>{{ end }}{{ if .Prompt }}<|start_header_id|>user<|end_header_id|>\n\n{{ .Prompt }}<|eot_id|>{{ end }}<|start_header_id|>assistant<|end_header_id|>\n\n{{ .Response }}<|eot_id|> "},
	}

	assert.Equal(t, expectedCommands, modelfile.Commands)

@@ -48,6 +87,26 @@ func TestParseFileFrom(t *testing.T) {
		expected []Command
		err      error
	}{
+		{
+			"FROM \"FOO BAR \"",
+			[]Command{{Name: "model", Args: "FOO BAR "}},
+			nil,
+		},
+		{
+			"FROM \"FOO BAR\"\nPARAMETER param1 value1",
+			[]Command{{Name: "model", Args: "FOO BAR"}, {Name: "param1", Args: "value1"}},
+			nil,
+		},
+		{
+			"FROM FOOO BAR ",
+			[]Command{{Name: "model", Args: "FOOO BAR"}},
+			nil,
+		},
+		{
+			"FROM /what/is/the path ",
+			[]Command{{Name: "model", Args: "/what/is/the path"}},
+			nil,
+		},
		{
			"FROM foo",
			[]Command{{Name: "model", Args: "foo"}},

@@ -86,6 +145,11 @@ func TestParseFileFrom(t *testing.T) {
			[]Command{{Name: "param1", Args: "value1"}, {Name: "model", Args: "foo"}},
			nil,
		},
+		{
+			"PARAMETER what the \nFROM lemons make lemonade ",
+			[]Command{{Name: "what", Args: "the"}, {Name: "model", Args: "lemons make lemonade"}},
+			nil,
+		},
	}

	for _, c := range cases {

@@ -399,7 +463,7 @@ func TestParseFileParameters(t *testing.T) {
		"mirostat_eta 1.0":           {"mirostat_eta", "1.0"},
		"penalize_newline true":      {"penalize_newline", "true"},
		"stop ### User:":             {"stop", "### User:"},
-		"stop ### User: ":            {"stop", "### User: "},
+		"stop ### User: ":            {"stop", "### User:"},
		"stop \"### User:\"":         {"stop", "### User:"},
		"stop \"### User: \"":        {"stop", "### User: "},
		"stop \"\"\"### User:\"\"\"": {"stop", "### User:"},
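The trimming behaviour tested above can be reproduced directly with the exported parser API. A minimal sketch, assuming parser.ParseFile, File.Commands, and Command behave as the tests show; unquoted values lose trailing whitespace, quoted values keep it:

package main

import (
	"fmt"
	"strings"

	"github.com/ollama/ollama/parser"
)

func main() {
	input := "FROM foo\nPARAMETER stop ### User: \nPARAMETER stop \"### User: \"\n"

	f, err := parser.ParseFile(strings.NewReader(input))
	if err != nil {
		panic(err)
	}

	// Expected: model => "foo", stop => "### User:" (trimmed), stop => "### User: " (quoted, preserved)
	for _, c := range f.Commands {
		fmt.Printf("%s => %q\n", c.Name, c.Args)
	}
}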
@@ -9,7 +9,8 @@ if [ -n "$INIT_MODELS" ]; then
		echo "FROM /models/$MODEL_NAME" > /tmp/Modelfile
		echo "PARAMETER temperature 1" >> /tmp/Modelfile
		echo "PARAMETER num_ctx 4096" >> /tmp/Modelfile
-		echo 'PARAMETER stop ["<|im_start|>","<|im_end|>"]' >> /tmp/Modelfile
+		echo 'PARAMETER stop "<|im_start|>"' >> /tmp/Modelfile
+		echo 'PARAMETER stop "<|im_end|>"' >> /tmp/Modelfile
		echo 'TEMPLATE """{{ if .System }}<|im_start|>system' >> /tmp/Modelfile
		echo "{{ .System }}<|im_end|>" >> /tmp/Modelfile
		echo "{{ end }}{{ if .Prompt }}<|im_start|>user" >> /tmp/Modelfile
@@ -6,10 +6,21 @@ set -ex
MACHINE=$(uname -m)

if grep -i "centos" /etc/system-release >/dev/null; then
+    # As of 7/1/2024 mirrorlist.centos.org has been taken offline, so adjust accordingly
+    sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo
+    sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo
+    sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo
+
    # Centos 7 derivatives have too old of a git version to run our generate script
    # uninstall and ignore failures
    yum remove -y git
    yum -y install epel-release centos-release-scl
+
+    # The release packages reinstate the mirrors, undo that again
+    sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo
+    sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo
+    sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo
+
    yum -y install dnf
    if [ "${MACHINE}" = "x86_64" ]; then
        yum -y install https://repo.ius.io/ius-release-el7.rpm
@@ -28,11 +28,16 @@ import (
	"github.com/ollama/ollama/format"
	"github.com/ollama/ollama/llm"
	"github.com/ollama/ollama/parser"
+	"github.com/ollama/ollama/template"
	"github.com/ollama/ollama/types/errtypes"
	"github.com/ollama/ollama/types/model"
	"github.com/ollama/ollama/version"
)

+type Capability string
+
+const CapabilityCompletion = Capability("completion")
+
type registryOptions struct {
	Insecure bool
	Username string

@@ -48,16 +53,43 @@ type Model struct {
	ParentModel    string
	AdapterPaths   []string
	ProjectorPaths []string
-	Template       string
	System         string
	License        []string
	Digest         string
	Options        map[string]interface{}
	Messages       []Message
+
+	Template *template.Template
}

-func (m *Model) IsEmbedding() bool {
-	return slices.Contains(m.Config.ModelFamilies, "bert") || slices.Contains(m.Config.ModelFamilies, "nomic-bert")
+func (m *Model) Has(caps ...Capability) bool {
+	for _, cap := range caps {
+		switch cap {
+		case CapabilityCompletion:
+			f, err := os.Open(m.ModelPath)
+			if err != nil {
+				slog.Error("couldn't open model file", "error", err)
+				continue
+			}
+			defer f.Close()
+
+			// TODO(mxyng): decode the GGML into model to avoid doing this multiple times
+			ggml, _, err := llm.DecodeGGML(f, 0)
+			if err != nil {
+				slog.Error("couldn't decode ggml", "error", err)
+				continue
+			}
+
+			if _, ok := ggml.KV()[fmt.Sprintf("%s.pooling_type", ggml.KV().Architecture())]; ok {
+				return false
+			}
+		default:
+			slog.Error("unknown capability", "capability", cap)
+			return false
+		}
+	}
+
+	return true
}

func (m *Model) String() string {

@@ -82,10 +114,10 @@ func (m *Model) String() string {
		})
	}

-	if m.Template != "" {
+	if m.Template != nil {
		modelfile.Commands = append(modelfile.Commands, parser.Command{
			Name: "template",
-			Args: m.Template,
+			Args: m.Template.String(),
		})
	}

@@ -135,13 +167,6 @@ type Message struct {
	Content string `json:"content"`
}

-type ManifestV2 struct {
-	SchemaVersion int      `json:"schemaVersion"`
-	MediaType     string   `json:"mediaType"`
-	Config        *Layer   `json:"config"`
-	Layers        []*Layer `json:"layers"`
-}
-
type ConfigV2 struct {
	ModelFormat string `json:"model_format"`
	ModelFamily string `json:"model_family"`

@@ -160,7 +185,7 @@ type RootFS struct {
	DiffIDs []string `json:"diff_ids"`
}

-func GetManifest(mp ModelPath) (*ManifestV2, string, error) {
+func GetManifest(mp ModelPath) (*Manifest, string, error) {
	fp, err := mp.GetManifestPath()
	if err != nil {
		return nil, "", err

@@ -170,7 +195,7 @@ func GetManifest(mp ModelPath) (*ManifestV2, string, error) {
		return nil, "", err
	}

-	var manifest *ManifestV2
+	var manifest *Manifest

	bts, err := os.ReadFile(fp)
	if err != nil {

@@ -198,8 +223,7 @@ func GetModel(name string) (*Model, error) {
		Name:      mp.GetFullTagname(),
		ShortName: mp.GetShortTagname(),
		Digest:    digest,
-		Template:  "{{ .Prompt }}",
-		License:   []string{},
+		Template:  template.DefaultTemplate,
	}

	filename, err := GetBlobsPath(manifest.Config.Digest)

@@ -235,13 +259,17 @@ func GetModel(name string) (*Model, error) {
			model.AdapterPaths = append(model.AdapterPaths, filename)
		case "application/vnd.ollama.image.projector":
			model.ProjectorPaths = append(model.ProjectorPaths, filename)
-		case "application/vnd.ollama.image.template":
+		case "application/vnd.ollama.image.prompt",
+			"application/vnd.ollama.image.template":
			bts, err := os.ReadFile(filename)
			if err != nil {
				return nil, err
			}

-			model.Template = string(bts)
+			model.Template, err = template.Parse(string(bts))
+			if err != nil {
+				return nil, err
+			}
		case "application/vnd.ollama.image.system":
			bts, err := os.ReadFile(filename)
			if err != nil {

@@ -249,13 +277,6 @@ func GetModel(name string) (*Model, error) {
			}

			model.System = string(bts)
-		case "application/vnd.ollama.image.prompt":
-			bts, err := os.ReadFile(filename)
-			if err != nil {
-				return nil, err
-			}
-
-			model.Template = string(bts)
		case "application/vnd.ollama.image.params":
			params, err := os.Open(filename)
			if err != nil {

@@ -822,7 +843,7 @@ func PushModel(ctx context.Context, name string, regOpts *registryOptions, fn fu
func PullModel(ctx context.Context, name string, regOpts *registryOptions, fn func(api.ProgressResponse)) error {
	mp := ParseModelPath(name)

-	var manifest *ManifestV2
+	var manifest *Manifest
	var err error
	var noprune string

@@ -929,7 +950,7 @@ func PullModel(ctx context.Context, name string, regOpts *registryOptions, fn fu
	return nil
}

-func pullModelManifest(ctx context.Context, mp ModelPath, regOpts *registryOptions) (*ManifestV2, error) {
+func pullModelManifest(ctx context.Context, mp ModelPath, regOpts *registryOptions) (*Manifest, error) {
	requestURL := mp.BaseURL().JoinPath("v2", mp.GetNamespaceRepository(), "manifests", mp.Tag)

	headers := make(http.Header)

@@ -940,7 +961,7 @@ func pullModelManifest(ctx context.Context, mp ModelPath, regOpts *registryOptio
	}
	defer resp.Body.Close()

-	var m *ManifestV2
+	var m *Manifest
	if err := json.NewDecoder(resp.Body).Decode(&m); err != nil {
		return nil, err
	}
@@ -14,7 +14,10 @@ import (
)

type Manifest struct {
-	ManifestV2
+	SchemaVersion int      `json:"schemaVersion"`
+	MediaType     string   `json:"mediaType"`
+	Config        *Layer   `json:"config"`
+	Layers        []*Layer `json:"layers"`

	filepath string
	fi       os.FileInfo

@@ -66,7 +69,7 @@ func ParseNamedManifest(n model.Name) (*Manifest, error) {

	p := filepath.Join(manifests, n.Filepath())

-	var m ManifestV2
+	var m Manifest
	f, err := os.Open(p)
	if err != nil {
		return nil, err

@@ -83,12 +86,11 @@ func ParseNamedManifest(n model.Name) (*Manifest, error) {
		return nil, err
	}

-	return &Manifest{
-		ManifestV2: m,
-		filepath:   p,
-		fi:         fi,
-		digest:     fmt.Sprintf("%x", sha256sum.Sum(nil)),
-	}, nil
+	m.filepath = p
+	m.fi = fi
+	m.digest = fmt.Sprintf("%x", sha256sum.Sum(nil))
+
+	return &m, nil
}

func WriteManifest(name model.Name, config *Layer, layers []*Layer) error {

@@ -108,7 +110,7 @@ func WriteManifest(name model.Name, config *Layer, layers []*Layer) error {
	}
	defer f.Close()

-	m := ManifestV2{
+	m := Manifest{
		SchemaVersion: 2,
		MediaType:     "application/vnd.docker.distribution.manifest.v2+json",
		Config:        config,
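Folding the former embedded struct's fields directly into Manifest does not change the JSON written to disk, since embedded struct fields already marshal flat. A small standalone sketch (the struct here is a simplified mirror for illustration, not the server type):

package main

import (
	"encoding/json"
	"fmt"
)

// manifest mirrors the flattened shape above with just two fields.
type manifest struct {
	SchemaVersion int    `json:"schemaVersion"`
	MediaType     string `json:"mediaType"`
}

func main() {
	data := []byte(`{"schemaVersion": 2, "mediaType": "application/vnd.docker.distribution.manifest.v2+json"}`)

	var m manifest
	if err := json.Unmarshal(data, &m); err != nil {
		panic(err)
	}

	fmt.Println(m.SchemaVersion, m.MediaType) // existing manifests decode unchanged
}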
@@ -25,7 +25,7 @@ func createManifest(t *testing.T, path, name string) {
	}
	defer f.Close()

-	if err := json.NewEncoder(f).Encode(ManifestV2{}); err != nil {
+	if err := json.NewEncoder(f).Encode(Manifest{}); err != nil {
		t.Fatal(err)
	}
}
@@ -11,12 +11,11 @@ import (
	"net/http"
	"os"
	"path/filepath"
-	"strings"

	"github.com/ollama/ollama/api"
	"github.com/ollama/ollama/convert"
	"github.com/ollama/ollama/llm"
-	"github.com/ollama/ollama/templates"
+	"github.com/ollama/ollama/template"
	"github.com/ollama/ollama/types/model"
)

@@ -91,12 +90,11 @@ func extractFromZipFile(p string, file *os.File, fn func(api.ProgressResponse))

	fn(api.ProgressResponse{Status: "unpacking model metadata"})
	for _, f := range r.File {
-		n := filepath.Join(p, f.Name)
-		if !strings.HasPrefix(n, p) {
-			slog.Warn("skipped extracting file outside of context", "name", f.Name)
-			continue
+		if !filepath.IsLocal(f.Name) {
+			return fmt.Errorf("%w: %s", zip.ErrInsecurePath, f.Name)
		}

+		n := filepath.Join(p, f.Name)
		if err := os.MkdirAll(filepath.Dir(n), 0o750); err != nil {
			return err
		}

@@ -258,7 +256,7 @@ func parseFromFile(ctx context.Context, file *os.File, digest string, fn func(ap
func detectChatTemplate(layers []*layerGGML) ([]*layerGGML, error) {
	for _, layer := range layers {
		if s := layer.GGML.KV().ChatTemplate(); s != "" {
-			if t, err := templates.NamedTemplate(s); err != nil {
+			if t, err := template.Named(s); err != nil {
				slog.Debug("template detection", "error", err)
			} else {
				tmpl, err := NewLayer(t.Reader(), "application/vnd.ollama.image.template")
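The new zip-extraction guard relies on the standard library's filepath.IsLocal (Go 1.20+), which lexically rejects paths that would escape the extraction directory. A small sketch of how it classifies the kinds of names exercised by the tests below:

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// filepath.IsLocal reports whether a path, after lexical cleaning, stays
	// inside the directory it is joined to; traversal attempts return false.
	for _, p := range []string{"good", "path/../to/good", "../bad", "path/../../to/bad"} {
		fmt.Printf("%-20s local=%v\n", p, filepath.IsLocal(p))
	}
}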
@@ -3,10 +3,12 @@ package server
import (
	"archive/zip"
	"bytes"
+	"errors"
	"io"
	"os"
	"path/filepath"
	"slices"
+	"strings"
	"testing"

	"github.com/ollama/ollama/api"

@@ -39,13 +41,31 @@ func TestExtractFromZipFile(t *testing.T) {
	cases := []struct {
		name   string
		expect []string
+		err    error
	}{
		{
			name:   "good",
			expect: []string{"good"},
		},
		{
-			name: filepath.Join("..", "..", "..", "..", "..", "..", "..", "..", "..", "..", "..", "..", "..", "..", "..", "..", "bad"),
+			name:   strings.Join([]string{"path", "..", "to", "good"}, string(os.PathSeparator)),
+			expect: []string{filepath.Join("to", "good")},
+		},
+		{
+			name:   strings.Join([]string{"path", "..", "to", "..", "good"}, string(os.PathSeparator)),
+			expect: []string{"good"},
+		},
+		{
+			name:   strings.Join([]string{"path", "to", "..", "..", "good"}, string(os.PathSeparator)),
+			expect: []string{"good"},
+		},
+		{
+			name: strings.Join([]string{"..", "..", "..", "..", "..", "..", "..", "..", "..", "..", "..", "..", "..", "..", "..", "..", "bad"}, string(os.PathSeparator)),
+			err:  zip.ErrInsecurePath,
+		},
+		{
+			name: strings.Join([]string{"path", "..", "..", "to", "bad"}, string(os.PathSeparator)),
+			err:  zip.ErrInsecurePath,
		},
	}

@@ -55,7 +75,7 @@ func TestExtractFromZipFile(t *testing.T) {
			defer f.Close()

			tempDir := t.TempDir()
-			if err := extractFromZipFile(tempDir, f, func(api.ProgressResponse) {}); err != nil {
+			if err := extractFromZipFile(tempDir, f, func(api.ProgressResponse) {}); !errors.Is(err, tt.err) {
				t.Fatal(err)
			}

@@ -103,18 +103,9 @@ func (mp ModelPath) GetShortTagname() string {
	return fmt.Sprintf("%s/%s/%s:%s", mp.Registry, mp.Namespace, mp.Repository, mp.Tag)
}

-// modelsDir returns the value of the OLLAMA_MODELS environment variable or the user's home directory if OLLAMA_MODELS is not set.
-// The models directory is where Ollama stores its model files and manifests.
-func modelsDir() (string, error) {
-	return envconfig.ModelsDir, nil
-}
-
// GetManifestPath returns the path to the manifest file for the given model path, it is up to the caller to create the directory if it does not exist.
func (mp ModelPath) GetManifestPath() (string, error) {
-	dir, err := modelsDir()
-	if err != nil {
-		return "", err
-	}
+	dir := envconfig.ModelsDir

	return filepath.Join(dir, "manifests", mp.Registry, mp.Namespace, mp.Repository, mp.Tag), nil
}

@@ -127,10 +118,7 @@ func (mp ModelPath) BaseURL() *url.URL {
}

func GetManifestPath() (string, error) {
-	dir, err := modelsDir()
-	if err != nil {
-		return "", err
-	}
+	dir := envconfig.ModelsDir

	path := filepath.Join(dir, "manifests")
	if err := os.MkdirAll(path, 0o755); err != nil {

@@ -141,10 +129,7 @@ func GetManifestPath() (string, error) {
}

func GetBlobsPath(digest string) (string, error) {
-	dir, err := modelsDir()
-	if err != nil {
-		return "", err
-	}
+	dir := envconfig.ModelsDir

	// only accept actual sha256 digests
	pattern := "^sha256[:-][0-9a-fA-F]{64}$"
@@ -4,10 +4,11 @@ import (
	"fmt"
	"log/slog"
	"strings"
-	"text/template"
	"text/template/parse"

	"github.com/ollama/ollama/api"
+	"github.com/ollama/ollama/template"
)

// isResponseNode checks if the node contains .Response

@@ -53,13 +54,8 @@ func formatTemplateForResponse(tmpl *template.Template, generate bool) {

// Prompt renders a prompt from a template. If generate is set to true,
// the response and parts of the template following it are not rendered
-func Prompt(tmpl, system, prompt, response string, generate bool) (string, error) {
-	parsed, err := template.New("").Option("missingkey=zero").Parse(tmpl)
-	if err != nil {
-		return "", err
-	}
-
-	formatTemplateForResponse(parsed, generate)
+func Prompt(tmpl *template.Template, system, prompt, response string, generate bool) (string, error) {
+	formatTemplateForResponse(tmpl, generate)

	vars := map[string]any{
		"System": system,

@@ -68,14 +64,14 @@ func Prompt(tmpl, system, prompt, response string, generate bool) (string, error
	}

	var sb strings.Builder
-	if err := parsed.Execute(&sb, vars); err != nil {
+	if err := tmpl.Execute(&sb, vars); err != nil {
		return "", err
	}

	return sb.String(), nil
}

-func countTokens(tmpl string, system string, prompt string, response string, encode func(string) ([]int, error)) (int, error) {
+func countTokens(tmpl *template.Template, system string, prompt string, response string, encode func(string) ([]int, error)) (int, error) {
	rendered, err := Prompt(tmpl, system, prompt, response, false)
	if err != nil {
		return 0, err

@@ -91,7 +87,7 @@ func countTokens(tmpl string, system string, prompt string, response string, enc
}

// ChatPrompt builds up a prompt from a series of messages, truncating based on context window size
-func ChatPrompt(tmpl string, messages []api.Message, window int, encode func(string) ([]int, error)) (string, error) {
+func ChatPrompt(tmpl *template.Template, messages []api.Message, window int, encode func(string) ([]int, error)) (string, error) {
	type prompt struct {
		System string
		Prompt string
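With this signature change, callers parse once with template.Parse and pass the resulting *template.Template through. A minimal test-style sketch mirroring the updated tests below; the literal template string is only an example:

package server

import (
	"testing"

	"github.com/ollama/ollama/template"
)

func TestPromptSketch(t *testing.T) {
	// Parse the template once up front, then hand it to Prompt.
	tmpl, err := template.Parse("{{ .System }} {{ .Prompt }}")
	if err != nil {
		t.Fatal(err)
	}

	got, err := Prompt(tmpl, "you are a helpful assistant", "hello", "", true)
	if err != nil {
		t.Fatal(err)
	}

	if got == "" {
		t.Fatal("expected a rendered prompt")
	}
}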
@@ -5,6 +5,7 @@ import (
	"testing"

	"github.com/ollama/ollama/api"
+	"github.com/ollama/ollama/template"
)

func TestPrompt(t *testing.T) {

@@ -61,7 +62,12 @@ func TestPrompt(t *testing.T) {

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
-			got, err := Prompt(tc.template, tc.system, tc.prompt, tc.response, tc.generate)
+			tmpl, err := template.Parse(tc.template)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			got, err := Prompt(tmpl, tc.system, tc.prompt, tc.response, tc.generate)
			if err != nil {
				t.Errorf("error = %v", err)
			}

@@ -192,7 +198,12 @@ func TestChatPrompt(t *testing.T) {

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
-			got, err := ChatPrompt(tc.template, tc.messages, tc.window, encode)
+			tmpl, err := template.Parse(tc.template)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			got, err := ChatPrompt(tmpl, tc.messages, tc.window, encode)
			if err != nil {
				t.Errorf("error = %v", err)
			}
@@ -9,7 +9,6 @@ import (
	"io"
	"io/fs"
	"log/slog"
-	"math"
	"net"
	"net/http"
	"net/netip"

@@ -17,7 +16,6 @@ import (
	"os/signal"
	"path/filepath"
	"slices"
-	"strconv"
	"strings"
	"syscall"
	"time"

@@ -31,6 +29,7 @@ import (
	"github.com/ollama/ollama/llm"
	"github.com/ollama/ollama/openai"
	"github.com/ollama/ollama/parser"
+	"github.com/ollama/ollama/template"
	"github.com/ollama/ollama/types/errtypes"
	"github.com/ollama/ollama/types/model"
	"github.com/ollama/ollama/version"

@@ -55,8 +54,6 @@ func init() {
	gin.SetMode(mode)
}

-var defaultSessionDuration = 5 * time.Minute
-
func modelOptions(model *Model, requestOpts map[string]interface{}) (api.Options, error) {
	opts := api.DefaultOptions()
	if err := opts.FromMap(model.Options); err != nil {

@@ -121,8 +118,8 @@ func (s *Server) GenerateHandler(c *gin.Context) {
		return
	}

-	if model.IsEmbedding() {
-		c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "embedding models do not support generate"})
+	if !model.Has(CapabilityCompletion) {
+		c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("%s does not support generate", req.Model)})
		return
	}

@@ -132,14 +129,7 @@ func (s *Server) GenerateHandler(c *gin.Context) {
		return
	}

-	var sessionDuration time.Duration
-	if req.KeepAlive == nil {
-		sessionDuration = getDefaultSessionDuration()
-	} else {
-		sessionDuration = req.KeepAlive.Duration
-	}
-
-	rCh, eCh := s.sched.GetRunner(c.Request.Context(), model, opts, sessionDuration)
+	rCh, eCh := s.sched.GetRunner(c.Request.Context(), model, opts, req.KeepAlive)
	var runner *runnerRef
	select {
	case runner = <-rCh:

@@ -161,6 +151,12 @@ func (s *Server) GenerateHandler(c *gin.Context) {
		return
	}

+	tmpl, err := template.Parse(req.Template)
+	if err != nil {
+		c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
+		return
+	}
+
	checkpointLoaded := time.Now()

	var prompt string

@@ -169,7 +165,7 @@ func (s *Server) GenerateHandler(c *gin.Context) {
		prompt = req.Prompt
	case req.Prompt != "":
		if req.Template == "" {
-			req.Template = model.Template
+			tmpl = model.Template
		}

		if req.System == "" {

@@ -187,7 +183,7 @@ func (s *Server) GenerateHandler(c *gin.Context) {

		sb.WriteString(req.Prompt)

-		p, err := Prompt(req.Template, req.System, sb.String(), "", true)
+		p, err := Prompt(tmpl, req.System, sb.String(), "", true)
		if err != nil {
			c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
			return

@@ -242,7 +238,7 @@ func (s *Server) GenerateHandler(c *gin.Context) {
		resp.LoadDuration = checkpointLoaded.Sub(checkpointStart)

		if !req.Raw {
-			p, err := Prompt(req.Template, req.System, req.Prompt, generated.String(), false)
+			p, err := Prompt(tmpl, req.System, req.Prompt, generated.String(), false)
			if err != nil {
				c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
				return

@@ -313,32 +309,6 @@ func (s *Server) GenerateHandler(c *gin.Context) {
	streamResponse(c, ch)
}

-func getDefaultSessionDuration() time.Duration {
-	if envconfig.KeepAlive != "" {
-		v, err := strconv.Atoi(envconfig.KeepAlive)
-		if err != nil {
-			d, err := time.ParseDuration(envconfig.KeepAlive)
-			if err != nil {
-				return defaultSessionDuration
-			}
-
-			if d < 0 {
-				return time.Duration(math.MaxInt64)
-			}
-
-			return d
-		}
-
-		d := time.Duration(v) * time.Second
-		if d < 0 {
-			return time.Duration(math.MaxInt64)
-		}
-		return d
-	}
-
-	return defaultSessionDuration
-}
-
func (s *Server) EmbeddingsHandler(c *gin.Context) {
	var req api.EmbeddingRequest
	err := c.ShouldBindJSON(&req)

@@ -373,14 +343,7 @@ func (s *Server) EmbeddingsHandler(c *gin.Context) {
		return
	}

-	var sessionDuration time.Duration
-	if req.KeepAlive == nil {
-		sessionDuration = getDefaultSessionDuration()
-	} else {
-		sessionDuration = req.KeepAlive.Duration
-	}
-
-	rCh, eCh := s.sched.GetRunner(c.Request.Context(), model, opts, sessionDuration)
+	rCh, eCh := s.sched.GetRunner(c.Request.Context(), model, opts, req.KeepAlive)
	var runner *runnerRef
	select {
	case runner = <-rCh:

@@ -680,7 +643,10 @@ func GetModelInfo(req api.ShowRequest) (*api.ShowResponse, error) {
	}

	if req.Template != "" {
-		m.Template = req.Template
+		m.Template, err = template.Parse(req.Template)
+		if err != nil {
+			return nil, err
+		}
	}

	msgs := make([]api.Message, 0)

@@ -701,7 +667,7 @@ func GetModelInfo(req api.ShowRequest) (*api.ShowResponse, error) {
	resp := &api.ShowResponse{
		License:    strings.Join(m.License, "\n"),
		System:     m.System,
-		Template:   m.Template,
+		Template:   m.Template.String(),
		Details:    modelDetails,
		Messages:   msgs,
		ModifiedAt: manifest.fi.ModTime(),

@@ -1039,7 +1005,10 @@ func (s *Server) GenerateRoutes() http.Handler {
	r.GET("/api/ps", s.ProcessHandler)

	// Compatibility endpoints
-	r.POST("/v1/chat/completions", openai.Middleware(), s.ChatHandler)
+	r.POST("/v1/chat/completions", openai.ChatMiddleware(), s.ChatHandler)
+	r.POST("/v1/completions", openai.CompletionsMiddleware(), s.GenerateHandler)
+	r.GET("/v1/models", openai.ListMiddleware(), s.ListModelsHandler)
+	r.GET("/v1/models/:model", openai.RetrieveMiddleware(), s.ShowModelHandler)

	for _, method := range []string{http.MethodGet, http.MethodHead} {
		r.Handle(method, "/", func(c *gin.Context) {

@@ -1237,11 +1206,16 @@ func (s *Server) ProcessHandler(c *gin.Context) {
		models = append(models, mr)
	}

+	slices.SortStableFunc(models, func(i, j api.ProcessModelResponse) int {
|
||||||
|
// longest duration remaining listed first
|
||||||
|
return cmp.Compare(j.ExpiresAt.Unix(), i.ExpiresAt.Unix())
|
||||||
|
})
|
||||||
|
|
||||||
c.JSON(http.StatusOK, api.ProcessResponse{Models: models})
|
c.JSON(http.StatusOK, api.ProcessResponse{Models: models})
|
||||||
}
|
}
|
||||||
|
|
||||||
// ChatPrompt builds up a prompt from a series of messages for the currently `loaded` model
|
// ChatPrompt builds up a prompt from a series of messages for the currently `loaded` model
|
||||||
func chatPrompt(ctx context.Context, runner *runnerRef, template string, messages []api.Message, numCtx int) (string, error) {
|
func chatPrompt(ctx context.Context, runner *runnerRef, template *template.Template, messages []api.Message, numCtx int) (string, error) {
|
||||||
encode := func(s string) ([]int, error) {
|
encode := func(s string) ([]int, error) {
|
||||||
return runner.llama.Tokenize(ctx, s)
|
return runner.llama.Tokenize(ctx, s)
|
||||||
}
|
}
|
||||||
|
@ -1289,8 +1263,8 @@ func (s *Server) ChatHandler(c *gin.Context) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if model.IsEmbedding() {
|
if !model.Has(CapabilityCompletion) {
|
||||||
c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{"error": "embedding models do not support chat"})
|
c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("%s does not support chat", req.Model)})
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1300,14 +1274,7 @@ func (s *Server) ChatHandler(c *gin.Context) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
var sessionDuration time.Duration
|
rCh, eCh := s.sched.GetRunner(c.Request.Context(), model, opts, req.KeepAlive)
|
||||||
if req.KeepAlive == nil {
|
|
||||||
sessionDuration = getDefaultSessionDuration()
|
|
||||||
} else {
|
|
||||||
sessionDuration = req.KeepAlive.Duration
|
|
||||||
}
|
|
||||||
|
|
||||||
rCh, eCh := s.sched.GetRunner(c.Request.Context(), model, opts, sessionDuration)
|
|
||||||
var runner *runnerRef
|
var runner *runnerRef
|
||||||
select {
|
select {
|
||||||
case runner = <-rCh:
|
case runner = <-rCh:
|
||||||
|
|
|
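The routes change above wires three more OpenAI-compatible endpoints into GenerateRoutes. Below is a minimal client sketch for the read-only /v1/models listing; the listen address 127.0.0.1:11434 is an assumption (the usual Ollama default) and the snippet is illustrative, not part of this diff.

    package main

    import (
        "fmt"
        "io"
        "net/http"
    )

    func main() {
        // Assumed default listen address; adjust to wherever the server runs.
        resp, err := http.Get("http://127.0.0.1:11434/v1/models")
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()

        body, err := io.ReadAll(resp.Body)
        if err != nil {
            panic(err)
        }
        // Expected to be an OpenAI-style list object, per the tests added below.
        fmt.Println(string(body))
    }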
@@ -20,6 +20,7 @@ import (
 	"github.com/ollama/ollama/api"
 	"github.com/ollama/ollama/envconfig"
 	"github.com/ollama/ollama/llm"
+	"github.com/ollama/ollama/openai"
 	"github.com/ollama/ollama/parser"
 	"github.com/ollama/ollama/types/model"
 	"github.com/ollama/ollama/version"

@@ -105,6 +106,24 @@ func Test_Routes(t *testing.T) {
 				assert.Empty(t, len(modelList.Models))
 			},
 		},
+		{
+			Name:   "openai empty list",
+			Method: http.MethodGet,
+			Path:   "/v1/models",
+			Expected: func(t *testing.T, resp *http.Response) {
+				contentType := resp.Header.Get("Content-Type")
+				assert.Equal(t, "application/json", contentType)
+				body, err := io.ReadAll(resp.Body)
+				require.NoError(t, err)
+
+				var modelList openai.ListCompletion
+				err = json.Unmarshal(body, &modelList)
+				require.NoError(t, err)
+
+				assert.Equal(t, "list", modelList.Object)
+				assert.Empty(t, modelList.Data)
+			},
+		},
 		{
 			Name:   "Tags Handler (yes tags)",
 			Method: http.MethodGet,

@@ -128,6 +147,25 @@ func Test_Routes(t *testing.T) {
 				assert.Equal(t, "test-model:latest", modelList.Models[0].Name)
 			},
 		},
+		{
+			Name:   "openai list models with tags",
+			Method: http.MethodGet,
+			Path:   "/v1/models",
+			Expected: func(t *testing.T, resp *http.Response) {
+				contentType := resp.Header.Get("Content-Type")
+				assert.Equal(t, "application/json", contentType)
+				body, err := io.ReadAll(resp.Body)
+				require.NoError(t, err)
+
+				var modelList openai.ListCompletion
+				err = json.Unmarshal(body, &modelList)
+				require.NoError(t, err)
+
+				assert.Len(t, modelList.Data, 1)
+				assert.Equal(t, "test-model:latest", modelList.Data[0].Id)
+				assert.Equal(t, "library", modelList.Data[0].OwnedBy)
+			},
+		},
 		{
 			Name:   "Create Model Handler",
 			Method: http.MethodPost,

@@ -216,6 +254,24 @@ func Test_Routes(t *testing.T) {
 				assert.InDelta(t, 0, showResp.ModelInfo["general.parameter_count"], 1e-9, "Parameter count should be 0")
 			},
 		},
+		{
+			Name:   "openai retrieve model handler",
+			Method: http.MethodGet,
+			Path:   "/v1/models/show-model",
+			Expected: func(t *testing.T, resp *http.Response) {
+				contentType := resp.Header.Get("Content-Type")
+				assert.Equal(t, "application/json", contentType)
+				body, err := io.ReadAll(resp.Body)
+				require.NoError(t, err)
+
+				var retrieveResp api.RetrieveModelResponse
+				err = json.Unmarshal(body, &retrieveResp)
+				require.NoError(t, err)
+
+				assert.Equal(t, "show-model", retrieveResp.Id)
+				assert.Equal(t, "library", retrieveResp.OwnedBy)
+			},
+		},
 	}

 	t.Setenv("OLLAMA_MODELS", t.TempDir())
148 server/sched.go
@@ -23,7 +23,8 @@ type LlmRequest struct {
 	ctx             context.Context //nolint:containedctx
 	model           *Model
 	opts            api.Options
-	sessionDuration time.Duration
+	origNumCtx      int // Track the initial ctx request
+	sessionDuration *api.Duration
 	successCh       chan *runnerRef
 	errCh           chan error
 	schedAttempts   uint

@@ -38,13 +39,23 @@ type Scheduler struct {
 	loaded   map[string]*runnerRef
 	loadedMu sync.Mutex

-	loadFn       func(req *LlmRequest, ggml *llm.GGML, gpus gpu.GpuInfoList)
-	newServerFn  func(gpus gpu.GpuInfoList, model string, ggml *llm.GGML, adapters []string, projectors []string, opts api.Options) (llm.LlamaServer, error)
+	loadFn       func(req *LlmRequest, ggml *llm.GGML, gpus gpu.GpuInfoList, numParallel int)
+	newServerFn  func(gpus gpu.GpuInfoList, model string, ggml *llm.GGML, adapters []string, projectors []string, opts api.Options, numParallel int) (llm.LlamaServer, error)
 	getGpuFn     func() gpu.GpuInfoList
 	getCpuFn     func() gpu.GpuInfoList
 	reschedDelay time.Duration
 }

+// Default automatic value for number of models we allow per GPU
+// Model will still need to fit in VRAM, but loading many small models
+// on a large GPU can cause stalling
+var defaultModelsPerGPU = 3
+
+// Default automatic value for parallel setting
+// Model will still need to fit in VRAM. If this setting wont fit
+// we'll back off down to 1 to try to get it to fit
+var defaultParallel = 4
+
 var ErrMaxQueue = fmt.Errorf("server busy, please try again. maximum pending requests exceeded")

 func InitScheduler(ctx context.Context) *Scheduler {

@@ -64,14 +75,11 @@ func InitScheduler(ctx context.Context) *Scheduler {
 }

 // context must be canceled to decrement ref count and release the runner
-func (s *Scheduler) GetRunner(c context.Context, model *Model, opts api.Options, sessionDuration time.Duration) (chan *runnerRef, chan error) {
-	// allocate a large enough kv cache for all parallel requests
+func (s *Scheduler) GetRunner(c context.Context, model *Model, opts api.Options, sessionDuration *api.Duration) (chan *runnerRef, chan error) {
 	if opts.NumCtx < 4 {
 		opts.NumCtx = 4
 	}

-	opts.NumCtx *= envconfig.NumParallel
-
 	req := &LlmRequest{
 		ctx:             c,
 		model:           model,

@@ -110,13 +118,32 @@ func (s *Scheduler) processPending(ctx context.Context) {
 		case pending := <-s.pendingReqCh:
 			// Block other requests until we get this pending request running
 			pending.schedAttempts++
+			if pending.origNumCtx == 0 {
+				pending.origNumCtx = pending.opts.NumCtx
+			}
+
 			if pending.ctx.Err() != nil {
 				slog.Debug("pending request cancelled or timed out, skipping scheduling")
 				continue
 			}
+			numParallel := envconfig.NumParallel
+			// TODO (jmorganca): multimodal models don't support parallel yet
+			// see https://github.com/ollama/ollama/issues/4165
+			if len(pending.model.ProjectorPaths) > 0 && numParallel != 1 {
+				numParallel = 1
+				slog.Warn("multimodal models don't support parallel requests yet")
+			}
+			// Keep NumCtx and numParallel in sync
+			if numParallel > 1 {
+				pending.opts.NumCtx = pending.origNumCtx * numParallel
+			}

 			for {
+				cpus := s.getCpuFn()
+				var systemMem gpu.GpuInfo
+				if len(cpus) > 0 {
+					systemMem = cpus[0]
+				}
 				var runnerToExpire *runnerRef
 				s.loadedMu.Lock()
 				runner := s.loaded[pending.model.ModelPath]

@@ -143,6 +170,26 @@ func (s *Scheduler) processPending(ctx context.Context) {
 					gpus = s.getGpuFn()
 				}

+				if envconfig.MaxRunners <= 0 {
+					// No user specified MaxRunners, so figure out what automatic setting to use
+					// If all GPUs have reliable free memory reporting, defaultModelsPerGPU * the number of GPUs
+					// if any GPU has unreliable free memory reporting, 1x the number of GPUs
+					allReliable := true
+					for _, gpu := range gpus {
+						if gpu.UnreliableFreeMemory {
+							allReliable = false
+							break
+						}
+					}
+					if allReliable {
+						envconfig.MaxRunners = defaultModelsPerGPU * len(gpus)
+						slog.Debug("updating default concurrency", "OLLAMA_MAX_LOADED_MODELS", envconfig.MaxRunners, "gpu_count", len(gpus))
+					} else {
+						slog.Info("one or more GPUs detected that are unable to accurately report free memory - disabling default concurrency")
+						envconfig.MaxRunners = len(gpus)
+					}
+				}
+
 				// Load model for fitting
 				ggml, err := llm.LoadModel(pending.model.ModelPath, 0)
 				if err != nil {

@@ -150,28 +197,55 @@ func (s *Scheduler) processPending(ctx context.Context) {
 					break
 				}

+				// Block attempting to load a model larger than system memory + GPU memory
+				estimate := llm.EstimateGPULayers(gpus, ggml, pending.model.ProjectorPaths, pending.opts)
+				maxSize := systemMem.FreeMemory
+				for _, gpu := range gpus {
+					if gpu.Library == "cpu" {
+						continue
+					}
+					if loadedCount == 0 {
+						// If no other models are loaded, set the limit based on what's available
+						maxSize += gpu.FreeMemory
+					} else {
+						// Other models could be unloaded, favor total memory for limit
+						maxSize += gpu.TotalMemory
+					}
+				}
+				if estimate.TotalSize > maxSize {
+					slog.Warn("model request too large for system", "requested", format.HumanBytes2(estimate.TotalSize), "system", format.HumanBytes2(maxSize))
+					pending.errCh <- fmt.Errorf("requested model (%s) is too large for this system (%s)", format.HumanBytes2(estimate.TotalSize), format.HumanBytes2(maxSize))
+					break
+				}
+
 				// Evaluate if the model will fit in the available system memory, or if we should unload a model first
 				if len(gpus) == 1 && gpus[0].Library == "cpu" {
+					// simplifying assumption of defaultParallel when in CPU mode
+					if numParallel <= 0 {
+						numParallel = defaultParallel
+						pending.opts.NumCtx = pending.origNumCtx * numParallel
+					}
+
 					if loadedCount == 0 {
 						slog.Debug("cpu mode with first model, loading")
-						s.loadFn(pending, ggml, gpus)
+						s.loadFn(pending, ggml, gpus, numParallel)
 						break
 					}
 					runnerToExpire = s.maybeFindCPURunnerToUnload(pending, ggml, gpus)
 					if runnerToExpire == nil {
 						slog.Debug("cpu mode with available system memory or first model, loading")
-						s.loadFn(pending, ggml, gpus)
+						s.loadFn(pending, ggml, gpus, numParallel)
 						break
 					}
 					// else we need to expire a runner
 				} else if loadedCount == 0 {
 					// No models loaded. Load the model but prefer the best fit.
 					slog.Debug("loading first model", "model", pending.model.ModelPath)
-					g := pickBestFitGPUs(pending, ggml, gpus)
+					g := pickBestFitGPUs(pending, ggml, gpus, &numParallel)
 					if g != nil {
 						gpus = g
 					}
-					s.loadFn(pending, ggml, gpus)
+					s.loadFn(pending, ggml, gpus, numParallel)
 					break
 				}

@@ -186,10 +260,10 @@ func (s *Scheduler) processPending(ctx context.Context) {

 					// Update free memory from currently loaded models
 					s.updateFreeSpace(availGpus)
-					fitGpus := pickBestFitGPUs(pending, ggml, availGpus)
+					fitGpus := pickBestFitGPUs(pending, ggml, availGpus, &numParallel)
 					if fitGpus != nil {
 						slog.Debug("new model fits with existing models, loading")
-						s.loadFn(pending, ggml, fitGpus)
+						s.loadFn(pending, ggml, fitGpus, numParallel)
 						break
 					}

@@ -341,7 +415,9 @@ func (pending *LlmRequest) useLoadedRunner(runner *runnerRef, finished chan *Llm
 		runner.expireTimer.Stop()
 		runner.expireTimer = nil
 	}
-	runner.sessionDuration = pending.sessionDuration
+	if pending.sessionDuration != nil {
+		runner.sessionDuration = pending.sessionDuration.Duration
+	}
 	pending.successCh <- runner
 	go func() {
 		<-pending.ctx.Done()

@@ -350,8 +426,15 @@ func (pending *LlmRequest) useLoadedRunner(runner *runnerRef, finished chan *Llm
 	}()
 }

-func (s *Scheduler) load(req *LlmRequest, ggml *llm.GGML, gpus gpu.GpuInfoList) {
-	llama, err := s.newServerFn(gpus, req.model.ModelPath, ggml, req.model.AdapterPaths, req.model.ProjectorPaths, req.opts)
+func (s *Scheduler) load(req *LlmRequest, ggml *llm.GGML, gpus gpu.GpuInfoList, numParallel int) {
+	if numParallel < 1 {
+		numParallel = 1
+	}
+	sessionDuration := envconfig.KeepAlive
+	if req.sessionDuration != nil {
+		sessionDuration = req.sessionDuration.Duration
+	}
+	llama, err := s.newServerFn(gpus, req.model.ModelPath, ggml, req.model.AdapterPaths, req.model.ProjectorPaths, req.opts, numParallel)
 	if err != nil {
 		// some older models are not compatible with newer versions of llama.cpp
 		// show a generalized compatibility error until there is a better way to

@@ -368,13 +451,14 @@ func (s *Scheduler) load(req *LlmRequest, ggml *llm.GGML, gpus gpu.GpuInfoList)
 		modelPath:       req.model.ModelPath,
 		llama:           llama,
 		Options:         &req.opts,
-		sessionDuration: req.sessionDuration,
+		sessionDuration: sessionDuration,
 		gpus:            gpus,
 		estimatedVRAM:   llama.EstimatedVRAM(),
 		estimatedTotal:  llama.EstimatedTotal(),
 		loading:         true,
 		refCount:        1,
 	}
+	runner.numParallel = numParallel
 	runner.refMu.Lock()

 	s.loadedMu.Lock()

@@ -485,6 +569,7 @@ type runnerRef struct {

 	model       *Model
 	modelPath   string
+	numParallel int
 	*api.Options
 }

@@ -525,6 +610,9 @@ func (runner *runnerRef) needsReload(ctx context.Context, req *LlmRequest) bool
 		optsNew.NumGPU = -1
 	}

+	// Normalize the NumCtx for parallelism
+	optsExisting.NumCtx = optsExisting.NumCtx / runner.numParallel
+
 	ctx, cancel := context.WithTimeout(ctx, timeout)
 	defer cancel()
 	if !reflect.DeepEqual(runner.model.AdapterPaths, req.model.AdapterPaths) || // have the adapters changed?

@@ -611,36 +699,56 @@ func (a ByDuration) Less(i, j int) bool {

 // pickBestFitGPUs will try to find the optimal placement of the model in the available GPUs where the model fully fits
 // If the model can not be fit fully within the available GPU(s) nil is returned
-func pickBestFitGPUs(req *LlmRequest, ggml *llm.GGML, gpus gpu.GpuInfoList) gpu.GpuInfoList {
+// If numParallel is <= 0, this will attempt try to optimize parallism based on available VRAM, and adjust
+// opts.NumCtx accordingly
+func pickBestFitGPUs(req *LlmRequest, ggml *llm.GGML, gpus gpu.GpuInfoList, numParallel *int) gpu.GpuInfoList {
 	var estimatedVRAM uint64
+
+	var numParallelToTry []int
+	if *numParallel <= 0 {
+		// If no specific parallel setting was provided, try larger then smaller, always end with 1
+		numParallelToTry = append(numParallelToTry, defaultParallel, 1)
+	} else {
+		numParallelToTry = []int{*numParallel}
+	}
+
 	for _, gl := range gpus.ByLibrary() {
 		var ok bool
 		sgl := append(make(gpu.GpuInfoList, 0, len(gl)), gl...)

 		// TODO - potentially sort by performance capability, existing models loaded, etc.
+		// TODO - Eliminate any GPUs that already have envconfig.MaxRunners loaded on them
 		// Note: at present, this will favor more VRAM over faster GPU speed in mixed setups
 		sort.Sort(sort.Reverse(gpu.ByFreeMemory(sgl)))

 		// First attempt to fit the model into a single GPU
+		for _, p := range numParallelToTry {
+			req.opts.NumCtx = req.origNumCtx * p
 			if !envconfig.SchedSpread {
 				for _, g := range sgl {
 					if ok, estimatedVRAM = llm.PredictServerFit([]gpu.GpuInfo{g}, ggml, req.model.AdapterPaths, req.model.ProjectorPaths, req.opts); ok {
-						slog.Debug("new model will fit in available VRAM in single GPU, loading", "model", req.model.ModelPath, "gpu", g.ID, "available", g.FreeMemory, "required", format.HumanBytes2(estimatedVRAM))
+						slog.Info("new model will fit in available VRAM in single GPU, loading", "model", req.model.ModelPath, "gpu", g.ID, "parallel", p, "available", g.FreeMemory, "required", format.HumanBytes2(estimatedVRAM))
+						*numParallel = p
 						return []gpu.GpuInfo{g}
 					}
 				}
 			}
+		}

 		// TODO future refinements
 		// - if multiple Libraries, see if any single GPU in any Library will fit
 		// - try subsets of GPUs instead of just falling back to 1 or all in a family

 		// Now try all the GPUs
+		for _, p := range numParallelToTry {
+			req.opts.NumCtx = req.origNumCtx * p
 			if ok, estimatedVRAM = llm.PredictServerFit(sgl, ggml, req.model.AdapterPaths, req.model.ProjectorPaths, req.opts); ok {
-				slog.Debug("new model will fit in available VRAM, loading", "model", req.model.ModelPath, "library", sgl[0].Library, "required", format.HumanBytes2(estimatedVRAM))
+				slog.Info("new model will fit in available VRAM, loading", "model", req.model.ModelPath, "library", sgl[0].Library, "parallel", p, "required", format.HumanBytes2(estimatedVRAM))
+				*numParallel = p
 				return sgl
 			}
+		}
 	}
 	return nil
 }
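The scheduler changes above move the parallel-request accounting out of GetRunner: the requested context size is remembered as origNumCtx and the KV cache is sized as origNumCtx * numParallel, falling back to defaultParallel (and ultimately 1) when nothing larger fits, with multimodal models pinned to 1. The standalone sketch below only illustrates that sizing rule; the helper name and the hard-coded 4 stand in for defaultParallel by assumption and are not part of this commit.

    package main

    import "fmt"

    // effectiveNumCtx mirrors the sizing rule from sched.go in this commit:
    // the KV cache is the original context multiplied by the parallel slot
    // count, with multimodal models limited to a single slot and a
    // non-positive setting treated as "auto" (defaultParallel, assumed 4 here).
    func effectiveNumCtx(origNumCtx, numParallel int, multimodal bool) (ctx, parallel int) {
        if multimodal {
            // multimodal models don't support parallel requests yet
            numParallel = 1
        }
        if numParallel <= 0 {
            numParallel = 4 // stand-in for defaultParallel
        }
        return origNumCtx * numParallel, numParallel
    }

    func main() {
        ctx, p := effectiveNumCtx(2048, 0, false)
        fmt.Println(ctx, p) // 8192 4
    }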
@@ -44,14 +44,14 @@ func TestLoad(t *testing.T) {
 		opts:            api.DefaultOptions(),
 		successCh:       make(chan *runnerRef, 1),
 		errCh:           make(chan error, 1),
-		sessionDuration: 2,
+		sessionDuration: &api.Duration{Duration: 2 * time.Second},
 	}
 	// Fail to load model first
-	s.newServerFn = func(gpus gpu.GpuInfoList, model string, ggml *llm.GGML, adapters []string, projectors []string, opts api.Options) (llm.LlamaServer, error) {
+	s.newServerFn = func(gpus gpu.GpuInfoList, model string, ggml *llm.GGML, adapters []string, projectors []string, opts api.Options, numParallel int) (llm.LlamaServer, error) {
 		return nil, fmt.Errorf("something failed to load model blah")
 	}
 	gpus := gpu.GpuInfoList{}
-	s.load(req, ggml, gpus)
+	s.load(req, ggml, gpus, 0)
 	require.Empty(t, req.successCh)
 	require.Len(t, req.errCh, 1)
 	s.loadedMu.Lock()

@@ -61,10 +61,10 @@ func TestLoad(t *testing.T) {
 	require.Contains(t, err.Error(), "this model may be incompatible")

 	server := &mockLlm{estimatedVRAM: 10, estimatedVRAMByGPU: map[string]uint64{}}
-	s.newServerFn = func(gpus gpu.GpuInfoList, model string, ggml *llm.GGML, adapters []string, projectors []string, opts api.Options) (llm.LlamaServer, error) {
+	s.newServerFn = func(gpus gpu.GpuInfoList, model string, ggml *llm.GGML, adapters []string, projectors []string, opts api.Options, numParallel int) (llm.LlamaServer, error) {
 		return server, nil
 	}
-	s.load(req, ggml, gpus)
+	s.load(req, ggml, gpus, 0)
 	select {
 	case err := <-req.errCh:
 		require.NoError(t, err)

@@ -78,12 +78,12 @@ func TestLoad(t *testing.T) {

 	req.model.ModelPath = "dummy_model_path"
 	server.waitResp = fmt.Errorf("wait failure")
-	s.load(req, ggml, gpus)
+	s.load(req, ggml, gpus, 0)
 	select {
 	case err := <-req.errCh:
 		require.Contains(t, err.Error(), "wait failure")
 	case resp := <-req.successCh:
-		t.Errorf("unexpected success %v", resp)
+		t.Fatalf("unexpected success %v", resp)
 	}
 	s.loadedMu.Lock()
 	runner := s.loaded["dummy_model_path"]

@@ -102,7 +102,7 @@ type bundle struct {
 	ggml *llm.GGML
 }

-func (scenario *bundle) newServer(gpus gpu.GpuInfoList, model string, ggml *llm.GGML, adapters []string, projectors []string, opts api.Options) (llm.LlamaServer, error) {
+func (scenario *bundle) newServer(gpus gpu.GpuInfoList, model string, ggml *llm.GGML, adapters []string, projectors []string, opts api.Options, numParallel int) (llm.LlamaServer, error) {
 	return scenario.srv, nil
 }

@@ -142,7 +142,7 @@ func newScenario(t *testing.T, ctx context.Context, modelName string, estimatedV
 		ctx:             scenario.ctx,
 		model:           model,
 		opts:            api.DefaultOptions(),
-		sessionDuration: 5 * time.Millisecond,
+		sessionDuration: &api.Duration{Duration: 5 * time.Millisecond},
 		successCh:       make(chan *runnerRef, 1),
 		errCh:           make(chan error, 1),
 	}

@@ -156,18 +156,18 @@ func TestRequests(t *testing.T) {

 	// Same model, same request
 	scenario1a := newScenario(t, ctx, "ollama-model-1", 10)
-	scenario1a.req.sessionDuration = 5 * time.Millisecond
+	scenario1a.req.sessionDuration = &api.Duration{Duration: 5 * time.Millisecond}
 	scenario1b := newScenario(t, ctx, "ollama-model-1", 11)
 	scenario1b.req.model = scenario1a.req.model
 	scenario1b.ggml = scenario1a.ggml
-	scenario1b.req.sessionDuration = 0
+	scenario1b.req.sessionDuration = &api.Duration{Duration: 0}

 	// simple reload of same model
 	scenario2a := newScenario(t, ctx, "ollama-model-1", 20)
 	tmpModel := *scenario1a.req.model
 	scenario2a.req.model = &tmpModel
 	scenario2a.ggml = scenario1a.ggml
-	scenario2a.req.sessionDuration = 5 * time.Millisecond
+	scenario2a.req.sessionDuration = &api.Duration{Duration: 5 * time.Millisecond}

 	// Multiple loaded models
 	scenario3a := newScenario(t, ctx, "ollama-model-3a", 1*format.GigaByte)

@@ -199,8 +199,10 @@ func TestRequests(t *testing.T) {
 		require.Equal(t, resp.llama, scenario1a.srv)
 		require.Empty(t, s.pendingReqCh)
 		require.Empty(t, scenario1a.req.errCh)
+	case err := <-scenario1a.req.errCh:
+		t.Fatal(err.Error())
 	case <-ctx.Done():
-		t.Errorf("timeout")
+		t.Fatal("timeout")
 	}

 	// Same runner as first request due to not needing a reload

@@ -212,8 +214,10 @@ func TestRequests(t *testing.T) {
 		require.Equal(t, resp.llama, scenario1a.srv)
 		require.Empty(t, s.pendingReqCh)
 		require.Empty(t, scenario1b.req.errCh)
+	case err := <-scenario1b.req.errCh:
+		t.Fatal(err.Error())
 	case <-ctx.Done():
-		t.Errorf("timeout")
+		t.Fatal("timeout")
 	}

 	// Trigger a reload

@@ -230,8 +234,10 @@ func TestRequests(t *testing.T) {
 		require.Equal(t, resp.llama, scenario2a.srv)
 		require.Empty(t, s.pendingReqCh)
 		require.Empty(t, scenario2a.req.errCh)
+	case err := <-scenario2a.req.errCh:
+		t.Fatal(err.Error())
 	case <-ctx.Done():
-		t.Errorf("timeout")
+		t.Fatal("timeout")
 	}

 	envconfig.MaxRunners = 1

@@ -246,8 +252,10 @@ func TestRequests(t *testing.T) {
 		require.Equal(t, resp.llama, scenario3a.srv)
 		require.Empty(t, s.pendingReqCh)
 		require.Empty(t, scenario3a.req.errCh)
+	case err := <-scenario3a.req.errCh:
+		t.Fatal(err.Error())
 	case <-ctx.Done():
-		t.Errorf("timeout")
+		t.Fatal("timeout")
 	}
 	s.loadedMu.Lock()
 	require.Len(t, s.loaded, 1)

@@ -262,8 +270,10 @@ func TestRequests(t *testing.T) {
 		require.Equal(t, resp.llama, scenario3b.srv)
 		require.Empty(t, s.pendingReqCh)
 		require.Empty(t, scenario3b.req.errCh)
+	case err := <-scenario3b.req.errCh:
+		t.Fatal(err.Error())
 	case <-ctx.Done():
-		t.Errorf("timeout")
+		t.Fatal("timeout")
 	}
 	s.loadedMu.Lock()
 	require.Len(t, s.loaded, 2)

@@ -278,8 +288,10 @@ func TestRequests(t *testing.T) {
 		require.Equal(t, resp.llama, scenario3c.srv)
 		require.Empty(t, s.pendingReqCh)
 		require.Empty(t, scenario3c.req.errCh)
+	case err := <-scenario3c.req.errCh:
+		t.Fatal(err.Error())
 	case <-ctx.Done():
-		t.Errorf("timeout")
+		t.Fatal("timeout")
 	}
 	s.loadedMu.Lock()
 	require.Len(t, s.loaded, 3)

@@ -306,7 +318,7 @@ func TestRequests(t *testing.T) {
 		require.Empty(t, s.pendingReqCh)
 		require.Empty(t, scenario3d.req.errCh)
 	case <-ctx.Done():
-		t.Errorf("timeout")
+		t.Fatal("timeout")
 	}
 	s.loadedMu.Lock()
 	require.Len(t, s.loaded, 2)

@@ -318,11 +330,11 @@ func TestGetRunner(t *testing.T) {
 	defer done()

 	scenario1a := newScenario(t, ctx, "ollama-model-1a", 10)
-	scenario1a.req.sessionDuration = 0
+	scenario1a.req.sessionDuration = &api.Duration{Duration: 0}
 	scenario1b := newScenario(t, ctx, "ollama-model-1b", 10)
-	scenario1b.req.sessionDuration = 0
+	scenario1b.req.sessionDuration = &api.Duration{Duration: 0}
 	scenario1c := newScenario(t, ctx, "ollama-model-1c", 10)
-	scenario1c.req.sessionDuration = 0
+	scenario1c.req.sessionDuration = &api.Duration{Duration: 0}
 	envconfig.MaxQueuedRequests = 1
 	s := InitScheduler(ctx)
 	s.getGpuFn = func() gpu.GpuInfoList {

@@ -349,7 +361,7 @@ func TestGetRunner(t *testing.T) {
 		require.Empty(t, s.pendingReqCh)
 		require.Empty(t, errCh1a)
 	case <-ctx.Done():
-		t.Errorf("timeout")
+		t.Fatal("timeout")
 	}
 	scenario1a.ctxDone()
 	s.loadedMu.Lock()

@@ -400,9 +412,9 @@ func TestPrematureExpired(t *testing.T) {
 		slog.Info("sending premature expired event now")
 		s.expiredCh <- resp // Shouldn't happen in real life, but make sure its safe
 	case <-ctx.Done():
-		t.Errorf("timeout")
+		t.Fatal("timeout")
 	}
-	time.Sleep(scenario1a.req.sessionDuration)
+	time.Sleep(scenario1a.req.sessionDuration.Duration)
 	scenario1a.ctxDone()
 	time.Sleep(20 * time.Millisecond)
 	require.LessOrEqual(t, len(s.finishedReqCh), 1)

@@ -423,11 +435,11 @@ func TestUseLoadedRunner(t *testing.T) {
 		ctx:             ctx,
 		opts:            api.DefaultOptions(),
 		successCh:       make(chan *runnerRef, 1),
-		sessionDuration: 2,
+		sessionDuration: &api.Duration{Duration: 2},
 	}
 	finished := make(chan *LlmRequest)
 	llm1 := &mockLlm{estimatedVRAMByGPU: map[string]uint64{}}
-	r1 := &runnerRef{llama: llm1, sessionDuration: 1}
+	r1 := &runnerRef{llama: llm1, sessionDuration: 1, numParallel: 1}
 	req.useLoadedRunner(r1, finished)
 	require.Equal(t, uint(1), r1.refCount)
 	require.Equal(t, time.Duration(2), r1.sessionDuration)

@@ -435,7 +447,7 @@ func TestUseLoadedRunner(t *testing.T) {
 	case success := <-req.successCh:
 		require.Equal(t, r1, success)
 	case <-ctx.Done():
-		t.Errorf("timeout")
+		t.Fatal("timeout")
 	}
 	done()
 	fin := <-finished

@@ -461,8 +473,8 @@ func TestUpdateFreeSpace(t *testing.T) {
 	gpus[1].FreeMemory = 1900
 	llm1 := &mockLlm{estimatedVRAMByGPU: map[string]uint64{"1": 50, "2": 50}}
 	llm2 := &mockLlm{estimatedVRAMByGPU: map[string]uint64{"1": 125, "2": 75}}
-	r1 := &runnerRef{llama: llm1, gpus: gpus}
-	r2 := &runnerRef{llama: llm2, gpus: gpus}
+	r1 := &runnerRef{llama: llm1, gpus: gpus, numParallel: 1}
+	r2 := &runnerRef{llama: llm2, gpus: gpus, numParallel: 1}

 	s := InitScheduler(ctx)
 	s.loadedMu.Lock()

@@ -513,8 +525,8 @@ func TestFindRunnerToUnload(t *testing.T) {
 	ctx, done := context.WithTimeout(context.Background(), 100*time.Millisecond)
 	defer done()

-	r1 := &runnerRef{refCount: 1, sessionDuration: 1}
-	r2 := &runnerRef{sessionDuration: 2}
+	r1 := &runnerRef{refCount: 1, sessionDuration: 1, numParallel: 1}
+	r2 := &runnerRef{sessionDuration: 2, numParallel: 1}

 	s := InitScheduler(ctx)
 	s.loadedMu.Lock()

@@ -536,9 +548,13 @@ func TestNeedsReload(t *testing.T) {
 	llm := &mockLlm{estimatedVRAMByGPU: map[string]uint64{}}
 	do := api.DefaultOptions()
 	runner := &runnerRef{
-		model:   &Model{AdapterPaths: []string{"adapter1"}, ProjectorPaths: []string{"projector1"}},
+		model: &Model{
+			AdapterPaths:   []string{"adapter1"},
+			ProjectorPaths: []string{"projector1"},
+		},
 		Options: &do,
 		llama:   llm,
+		numParallel: 1,
 	}
 	req := &LlmRequest{
 		model: &Model{

@@ -581,8 +597,8 @@ func TestUnloadAllRunners(t *testing.T) {
 	s := InitScheduler(ctx)
 	s.unloadAllRunners()

-	r1 := &runnerRef{llama: llm1}
-	r2 := &runnerRef{llama: llm2}
+	r1 := &runnerRef{llama: llm1, numParallel: 1}
+	r2 := &runnerRef{llama: llm2, numParallel: 1}

 	s.loadedMu.Lock()
 	s.loaded["a"] = r1

@@ -596,14 +612,32 @@ func TestUnloadAllRunners(t *testing.T) {

 func TestUnload(t *testing.T) {
 	llm1 := &mockLlm{estimatedVRAMByGPU: map[string]uint64{}}
-	r1 := &runnerRef{llama: llm1}
-	r2 := &runnerRef{model: &Model{AdapterPaths: []string{"A"}}}
+	r1 := &runnerRef{llama: llm1, numParallel: 1}
+	r2 := &runnerRef{model: &Model{AdapterPaths: []string{"A"}}, numParallel: 1}
 	r1.unload()
 	require.True(t, llm1.closeCalled)
 	r2.unload()
 	require.Nil(t, r2.model)
 }

+func TestAlreadyCanceled(t *testing.T) {
+	ctx, done := context.WithTimeout(context.Background(), 500*time.Millisecond)
+	defer done()
+	dctx, done2 := context.WithCancel(ctx)
+	done2()
+	scenario1a := newScenario(t, dctx, "ollama-model-1", 10)
+	scenario1a.req.sessionDuration = &api.Duration{Duration: 0}
+	s := InitScheduler(ctx)
+	slog.Info("scenario1a")
+	s.pendingReqCh <- scenario1a.req
+	require.Len(t, s.pendingReqCh, 1)
+	s.Run(ctx)
+	time.Sleep(5 * time.Millisecond)
+	require.Empty(t, s.pendingReqCh)
+	require.Empty(t, scenario1a.req.errCh)
+	require.Empty(t, scenario1a.req.successCh)
+}
+
 type mockLlm struct {
 	pingResp error
 	waitResp error
158 template/template.go (new file)
@@ -0,0 +1,158 @@
+package template
+
+import (
+	"bytes"
+	"embed"
+	"encoding/json"
+	"errors"
+	"io"
+	"math"
+	"slices"
+	"strings"
+	"sync"
+	"text/template"
+	"text/template/parse"
+
+	"github.com/agnivade/levenshtein"
+	"golang.org/x/exp/maps"
+)
+
+//go:embed index.json
+var indexBytes []byte
+
+//go:embed *.gotmpl
+var templatesFS embed.FS
+
+var templatesOnce = sync.OnceValues(func() ([]*named, error) {
+	var templates []*named
+	if err := json.Unmarshal(indexBytes, &templates); err != nil {
+		return nil, err
+	}
+
+	for _, t := range templates {
+		bts, err := templatesFS.ReadFile(t.Name + ".gotmpl")
+		if err != nil {
+			return nil, err
+		}
+
+		// normalize line endings
+		t.Bytes = bytes.ReplaceAll(bts, []byte("\r\n"), []byte("\n"))
+	}
+
+	return templates, nil
+})
+
+type named struct {
+	Name     string `json:"name"`
+	Template string `json:"template"`
+	Bytes    []byte
+}
+
+func (t named) Reader() io.Reader {
+	return bytes.NewReader(t.Bytes)
+}
+
+func Named(s string) (*named, error) {
+	templates, err := templatesOnce()
+	if err != nil {
+		return nil, err
+	}
+
+	var template *named
+	score := math.MaxInt
+	for _, t := range templates {
+		if s := levenshtein.ComputeDistance(s, t.Template); s < score {
+			score = s
+			template = t
+		}
+	}
+
+	if score < 100 {
+		return template, nil
+	}
+
+	return nil, errors.New("no matching template found")
+}
+
+type Template struct {
+	*template.Template
+	raw string
+}
+
+func (t *Template) String() string {
+	return t.raw
+}
+
+var DefaultTemplate, _ = Parse("{{ .Prompt }}")
+
+func Parse(s string) (*Template, error) {
+	t, err := template.New("").Option("missingkey=zero").Parse(s)
+	if err != nil {
+		return nil, err
+	}
+
+	return &Template{Template: t, raw: s}, nil
+}
+
+func (t *Template) Vars() []string {
+	var vars []string
+	for _, n := range t.Tree.Root.Nodes {
+		vars = append(vars, parseNode(n)...)
+	}
+
+	set := make(map[string]struct{})
+	for _, n := range vars {
+		set[strings.ToLower(n)] = struct{}{}
+	}
+
+	vars = maps.Keys(set)
+	slices.Sort(vars)
+	return vars
+}
+
+func parseNode(n parse.Node) []string {
+	switch n := n.(type) {
+	case *parse.ActionNode:
+		return parseNode(n.Pipe)
+	case *parse.IfNode:
+		names := parseNode(n.Pipe)
+		names = append(names, parseNode(n.List)...)
+		if n.ElseList != nil {
+			names = append(names, parseNode(n.ElseList)...)
+		}
+		return names
+	case *parse.RangeNode:
+		names := parseNode(n.Pipe)
+		names = append(names, parseNode(n.List)...)
+		if n.ElseList != nil {
+			names = append(names, parseNode(n.ElseList)...)
+		}
+		return names
+	case *parse.WithNode:
+		names := parseNode(n.Pipe)
+		names = append(names, parseNode(n.List)...)
+		if n.ElseList != nil {
+			names = append(names, parseNode(n.ElseList)...)
+		}
+		return names
+	case *parse.PipeNode:
+		var names []string
+		for _, c := range n.Cmds {
+			for _, a := range c.Args {
+				names = append(names, parseNode(a)...)
+			}
+		}
+		return names
+	case *parse.ListNode:
+		var names []string
+		for _, n := range n.Nodes {
+			names = append(names, parseNode(n)...)
+		}
+
+		return names
+	case *parse.FieldNode:
+		return n.Ident
+	}
+
+	return nil
+}
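The new template package above exposes Parse and Vars. A small usage sketch follows, assuming this repository's module path for the import; the printed result mirrors the {{ .System }} {{ .Prompt }} case covered by the tests in the next file.

    package main

    import (
        "fmt"

        "github.com/ollama/ollama/template"
    )

    func main() {
        tmpl, err := template.Parse("{{ .System }} {{ .Prompt }}")
        if err != nil {
            panic(err)
        }

        // Vars returns the lowercased, de-duplicated, sorted field names the template references.
        fmt.Println(tmpl.Vars()) // [prompt system]
    }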
89 template/template_test.go (new file)
@@ -0,0 +1,89 @@
+package template
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/json"
+	"io"
+	"os"
+	"path/filepath"
+	"slices"
+	"testing"
+	"text/template"
+
+	"github.com/ollama/ollama/llm"
+)
+
+func TestNamed(t *testing.T) {
+	f, err := os.Open(filepath.Join("testdata", "templates.jsonl"))
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer f.Close()
+
+	scanner := bufio.NewScanner(f)
+	for scanner.Scan() {
+		var ss map[string]string
+		if err := json.Unmarshal(scanner.Bytes(), &ss); err != nil {
+			t.Fatal(err)
+		}
+
+		for k, v := range ss {
+			t.Run(k, func(t *testing.T) {
+				kv := llm.KV{"tokenizer.chat_template": v}
+				s := kv.ChatTemplate()
+				r, err := Named(s)
+				if err != nil {
+					t.Fatal(err)
+				}
+
+				if r.Name != k {
+					t.Errorf("expected %q, got %q", k, r.Name)
+				}
+
+				var b bytes.Buffer
+				if _, err := io.Copy(&b, r.Reader()); err != nil {
+					t.Fatal(err)
+				}
+
+				tmpl, err := template.New(s).Parse(b.String())
+				if err != nil {
+					t.Fatal(err)
+				}
+
+				if tmpl.Tree.Root.String() == "" {
+					t.Errorf("empty %s template", k)
+				}
+			})
+		}
+	}
+}
+
+func TestParse(t *testing.T) {
+	cases := []struct {
+		template string
+		vars     []string
+	}{
+		{"{{ .Prompt }}", []string{"prompt"}},
+		{"{{ .System }} {{ .Prompt }}", []string{"prompt", "system"}},
+		{"{{ .System }} {{ .Prompt }} {{ .Response }}", []string{"prompt", "response", "system"}},
+		{"{{ with .Tools }}{{ . }}{{ end }} {{ .System }} {{ .Prompt }}", []string{"prompt", "system", "tools"}},
+		{"{{ range .Messages }}{{ .Role }} {{ .Content }}{{ end }}", []string{"content", "messages", "role"}},
+		{"{{ range .Messages }}{{ if eq .Role \"system\" }}SYSTEM: {{ .Content }}{{ else if eq .Role \"user\" }}USER: {{ .Content }}{{ else if eq .Role \"assistant\" }}ASSISTANT: {{ .Content }}{{ end }}{{ end }}", []string{"content", "messages", "role"}},
+		{"{{ .Prompt }} {{ .Suffix }}", []string{"prompt", "suffix"}},
+	}
+
+	for _, tt := range cases {
+		t.Run("", func(t *testing.T) {
+			tmpl, err := Parse(tt.template)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			vars := tmpl.Vars()
+			if !slices.Equal(tt.vars, vars) {
+				t.Errorf("expected %v, got %v", tt.vars, vars)
+			}
+		})
+	}
+}
@@ -1,70 +0,0 @@
-package templates
-
-import (
-	"bytes"
-	"embed"
-	"encoding/json"
-	"errors"
-	"io"
-	"math"
-	"sync"
-
-	"github.com/agnivade/levenshtein"
-)
-
-//go:embed index.json
-var indexBytes []byte
-
-//go:embed *.gotmpl
-var templatesFS embed.FS
-
-var templatesOnce = sync.OnceValues(func() ([]*Template, error) {
-	var templates []*Template
-	if err := json.Unmarshal(indexBytes, &templates); err != nil {
-		return nil, err
-	}
-
-	for _, t := range templates {
-		bts, err := templatesFS.ReadFile(t.Name + ".gotmpl")
-		if err != nil {
-			return nil, err
-		}
-
-		// normalize line endings
-		t.Bytes = bytes.ReplaceAll(bts, []byte("\r\n"), []byte("\n"))
-	}
-
-	return templates, nil
-})
-
-type Template struct {
-	Name     string `json:"name"`
-	Template string `json:"template"`
-	Bytes    []byte
-}
-
-func (t Template) Reader() io.Reader {
-	return bytes.NewReader(t.Bytes)
-}
-
-func NamedTemplate(s string) (*Template, error) {
-	templates, err := templatesOnce()
-	if err != nil {
-		return nil, err
-	}
-
-	var template *Template
-	score := math.MaxInt
-	for _, t := range templates {
-		if s := levenshtein.ComputeDistance(s, t.Template); s < score {
-			score = s
-			template = t
-		}
-	}
-
-	if score < 100 {
-		return template, nil
-	}
-
-	return nil, errors.New("no matching template found")
-}
@@ -1,59 +0,0 @@
-package templates
-
-import (
-	"bufio"
-	"bytes"
-	"encoding/json"
-	"io"
-	"os"
-	"path/filepath"
-	"testing"
-	"text/template"
-
-	"github.com/ollama/ollama/llm"
-)
-
-func TestKVChatTemplate(t *testing.T) {
-	f, err := os.Open(filepath.Join("testdata", "templates.jsonl"))
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer f.Close()
-
-	scanner := bufio.NewScanner(f)
-	for scanner.Scan() {
-		var ss map[string]string
-		if err := json.Unmarshal(scanner.Bytes(), &ss); err != nil {
-			t.Fatal(err)
-		}
-
-		for k, v := range ss {
-			t.Run(k, func(t *testing.T) {
-				kv := llm.KV{"tokenizer.chat_template": v}
-				s := kv.ChatTemplate()
-				r, err := NamedTemplate(s)
-				if err != nil {
-					t.Fatal(err)
-				}
-
-				if r.Name != k {
-					t.Errorf("expected %q, got %q", k, r.Name)
-				}
-
-				var b bytes.Buffer
-				if _, err := io.Copy(&b, r.Reader()); err != nil {
-					t.Fatal(err)
-				}
-
-				tmpl, err := template.New(s).Parse(b.String())
-				if err != nil {
-					t.Fatal(err)
-				}
-
-				if tmpl.Tree.Root.String() == "" {
-					t.Errorf("empty %s template", k)
-				}
-			})
-		}
-	}
-}
@@ -91,7 +91,6 @@ type Name struct {
 	Namespace string
 	Model     string
 	Tag       string
-	RawDigest string
 }

 // ParseName parses and assembles a Name from a name string. The
@@ -143,11 +142,6 @@ func ParseNameBare(s string) Name {
 	var n Name
 	var promised bool
-
-	s, n.RawDigest, promised = cutLast(s, "@")
-	if promised && n.RawDigest == "" {
-		n.RawDigest = MissingPart
-	}

 	// "/" is an illegal tag character, so we can use it to split the host
 	if strings.LastIndex(s, ":") > strings.LastIndex(s, "/") {
 		s, n.Tag, _ = cutPromised(s, ":")
@@ -222,10 +216,6 @@ func (n Name) String() string {
 		b.WriteByte(':')
 		b.WriteString(n.Tag)
 	}
-	if n.RawDigest != "" {
-		b.WriteByte('@')
-		b.WriteString(n.RawDigest)
-	}
 	return b.String()
 }

@@ -250,16 +240,18 @@ func (n Name) DisplayShortest() string {
 	return sb.String()
 }

-func IsValidNamespace(namespace string) bool {
-	return isValidPart(kindNamespace, namespace)
+// IsValidNamespace reports whether the provided string is a valid
+// namespace.
+func IsValidNamespace(s string) bool {
+	return isValidPart(kindNamespace, s)
 }

 // IsValid reports whether all parts of the name are present and valid. The
 // digest is a special case, and is checked for validity only if present.
+//
+// Note: The digest check has been removed as is planned to be added back in
+// at a later time.
 func (n Name) IsValid() bool {
-	if n.RawDigest != "" && !isValidPart(kindDigest, n.RawDigest) {
-		return false
-	}
 	return n.IsFullyQualified()
 }

@@ -122,21 +122,6 @@ func TestParseNameParts(t *testing.T) {
 			},
 			wantFilepath: filepath.Join(part350, part80, part80, part80),
 		},
-		{
-			in: "@digest",
-			want: Name{
-				RawDigest: "digest",
-			},
-			wantValidDigest: false,
-		},
-		{
-			in: "model@sha256:123",
-			want: Name{
-				Model:     "model",
-				RawDigest: "sha256:123",
-			},
-			wantValidDigest: true,
-		},
 	}

 	for _, tt := range cases {
@@ -160,22 +145,18 @@ var testCases = map[string]bool{ // name -> valid
 	"_why/_the/_lucky:_stiff": true,

 	// minimal
-	"h/n/m:t@d": true,
+	"h/n/m:t": true,

 	"host/namespace/model:tag": true,
 	"host/namespace/model":     false,
 	"namespace/model":          false,
 	"model":                    false,
-	"@sha256-1000000000000000000000000000000000000000000000000000000000000000":      false,
-	"model@sha256-1000000000000000000000000000000000000000000000000000000000000000": false,
-	"model@sha256:1000000000000000000000000000000000000000000000000000000000000000": false,

 	// long (but valid)
 	part80 + "/" + part80 + "/" + part80 + ":" + part80:  true,
 	part350 + "/" + part80 + "/" + part80 + ":" + part80: true,

-	"h/nn/mm:t@sha256-1000000000000000000000000000000000000000000000000000000000000000": true, // bare minimum part sizes
-	"h/nn/mm:t@sha256:1000000000000000000000000000000000000000000000000000000000000000": true, // bare minimum part sizes
+	"h/nn/mm:t": true, // bare minimum part sizes

 	// unqualified
 	"m": false,
@@ -196,11 +177,10 @@ var testCases = map[string]bool{ // name -> valid
 	"@": false,

 	// not starting with alphanum
-	"-hh/nn/mm:tt@dd": false,
-	"hh/-nn/mm:tt@dd": false,
-	"hh/nn/-mm:tt@dd": false,
-	"hh/nn/mm:-tt@dd": false,
-	"hh/nn/mm:tt@-dd": false,
+	"-hh/nn/mm:tt": false,
+	"hh/-nn/mm:tt": false,
+	"hh/nn/-mm:tt": false,
+	"hh/nn/mm:-tt": false,

 	// hosts
 	"host:https/namespace/model:tag": true,
@@ -334,7 +314,7 @@ func FuzzName(f *testing.F) {
 	f.Fuzz(func(t *testing.T, s string) {
 		n := ParseNameBare(s)
 		if n.IsValid() {
-			parts := [...]string{n.Host, n.Namespace, n.Model, n.Tag, n.RawDigest}
+			parts := [...]string{n.Host, n.Namespace, n.Model, n.Tag}
 			for _, part := range parts {
 				if part == ".." {
 					t.Errorf("unexpected .. as valid part")
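Taken together, the Name changes above drop the RawDigest field along with its parsing and validation, so a name is now judged valid purely on host, namespace, model, and tag. A minimal sketch of the resulting behavior, assuming the package is imported as github.com/ollama/ollama/types/model (the import path is not shown in this diff):

package main

import (
	"fmt"

	// assumed import path for the Name type edited above
	"github.com/ollama/ollama/types/model"
)

func main() {
	// Fully qualified: host, namespace, model, and tag are all present.
	n := model.ParseNameBare("registry.ollama.ai/library/llama3:latest")
	fmt.Println(n.IsValid()) // true

	// Names missing the host and namespace are rejected, in line with the
	// less-qualified false rows in the test table above.
	fmt.Println(model.ParseNameBare("llama3:latest").IsValid()) // false
}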