ollama/server/upload.go

403 lines
9 KiB
Go
Raw Normal View History

2023-08-22 15:50:21 +00:00
package server
import (
"context"
2023-10-27 17:11:28 +00:00
"crypto/md5"
2023-08-22 15:50:21 +00:00
"errors"
"fmt"
"hash"
2023-08-22 15:50:21 +00:00
"io"
"log"
2023-11-19 00:25:22 +00:00
"math"
2023-08-22 15:50:21 +00:00
"net/http"
"net/url"
"os"
2023-10-27 17:11:28 +00:00
"strings"
2023-09-27 23:22:30 +00:00
"sync"
2023-10-09 17:24:27 +00:00
"sync/atomic"
"time"
2023-08-22 15:50:21 +00:00
"github.com/jmorganca/ollama/api"
2023-10-09 17:24:27 +00:00
"github.com/jmorganca/ollama/format"
"golang.org/x/sync/errgroup"
2023-08-22 15:50:21 +00:00
)
2023-10-09 17:24:27 +00:00
// blobUploadManager deduplicates concurrent uploads of the same blob:
// it maps a layer digest to the single in-flight *blobUpload for it.
var blobUploadManager sync.Map

// blobUpload tracks the state of one multipart blob upload to a registry.
type blobUpload struct {
	*Layer

	// Total is the blob size in bytes; Completed counts bytes uploaded
	// so far (advanced atomically by progressWriter).
	Total     int64
	Completed atomic.Int64

	// Parts is the fixed partition of the blob computed by Prepare.
	Parts []blobUploadPart

	// nextURL hands the upload-session URL from one part request to the
	// next (capacity 1; see Prepare, Run and uploadPart).
	nextURL chan *url.URL

	context.CancelFunc

	file *os.File

	// done/err report completion; Run writes them, Wait reads them.
	// NOTE(review): accessed from multiple goroutines without
	// synchronization — potential data race; confirm with -race.
	done bool
	err  error

	// references counts active waiters; the upload is canceled when it
	// drops to zero (see acquire/release).
	references atomic.Int32
}
2023-09-19 21:22:54 +00:00
const (
	// numUploadParts is the target number of parts per blob and the
	// errgroup concurrency limit in Run.
	numUploadParts = 64

	// Part sizes are clamped to [minUploadPartSize, maxUploadPartSize].
	minUploadPartSize int64 = 100 * format.MegaByte
	maxUploadPartSize int64 = 1000 * format.MegaByte
)
2023-10-09 17:24:27 +00:00
func (b *blobUpload) Prepare(ctx context.Context, requestURL *url.URL, opts *RegistryOptions) error {
p, err := GetBlobsPath(b.Digest)
if err != nil {
return err
}
if b.From != "" {
2023-08-22 15:50:21 +00:00
values := requestURL.Query()
2023-10-09 17:24:27 +00:00
values.Add("mount", b.Digest)
values.Add("from", ParseModelPath(b.From).GetNamespaceRepository())
2023-08-22 15:50:21 +00:00
requestURL.RawQuery = values.Encode()
}
2023-11-02 20:10:58 +00:00
resp, err := makeRequestWithRetry(ctx, http.MethodPost, requestURL, nil, nil, opts)
2023-08-22 15:50:21 +00:00
if err != nil {
2023-10-09 17:24:27 +00:00
return err
2023-08-22 15:50:21 +00:00
}
defer resp.Body.Close()
2023-09-19 21:22:54 +00:00
location := resp.Header.Get("Docker-Upload-Location")
2023-08-22 15:50:21 +00:00
if location == "" {
2023-09-19 21:22:54 +00:00
location = resp.Header.Get("Location")
}
2023-10-09 17:24:27 +00:00
fi, err := os.Stat(p)
2023-09-19 21:22:54 +00:00
if err != nil {
2023-10-09 17:24:27 +00:00
return err
2023-08-22 15:50:21 +00:00
}
2023-10-09 17:24:27 +00:00
b.Total = fi.Size()
2023-11-16 20:18:03 +00:00
// http.StatusCreated indicates a blob has been mounted
// ref: https://distribution.github.io/distribution/spec/api/#cross-repository-blob-mount
if resp.StatusCode == http.StatusCreated {
b.Completed.Store(b.Total)
b.done = true
return nil
}
2023-10-09 17:24:27 +00:00
var size = b.Total / numUploadParts
switch {
case size < minUploadPartSize:
size = minUploadPartSize
case size > maxUploadPartSize:
size = maxUploadPartSize
}
var offset int64
for offset < fi.Size() {
if offset+size > fi.Size() {
size = fi.Size() - offset
}
2023-10-20 20:11:25 +00:00
// set part.N to the current number of parts
2023-11-29 23:04:23 +00:00
b.Parts = append(b.Parts, blobUploadPart{N: len(b.Parts), Offset: offset, Size: size})
2023-10-09 17:24:27 +00:00
offset += size
}
2023-08-22 15:50:21 +00:00
2023-11-02 00:18:03 +00:00
log.Printf("uploading %s in %d %s part(s)", b.Digest[7:19], len(b.Parts), format.HumanBytes(b.Parts[0].Size))
2023-08-22 15:50:21 +00:00
2023-10-09 17:24:27 +00:00
requestURL, err = url.Parse(location)
2023-08-22 15:50:21 +00:00
if err != nil {
return err
}
2023-10-09 17:24:27 +00:00
b.nextURL = make(chan *url.URL, 1)
b.nextURL <- requestURL
return nil
}
2023-10-20 20:11:25 +00:00
// Run uploads blob parts to the upstream. If the upstream supports redirection, parts will be uploaded
// in parallel as defined by Prepare. Otherwise, parts will be uploaded serially. Run sets b.err on error.
2023-10-09 17:24:27 +00:00
func (b *blobUpload) Run(ctx context.Context, opts *RegistryOptions) {
defer blobUploadManager.Delete(b.Digest)
ctx, b.CancelFunc = context.WithCancel(ctx)
p, err := GetBlobsPath(b.Digest)
if err != nil {
b.err = err
return
}
b.file, err = os.Open(p)
if err != nil {
b.err = err
return
}
defer b.file.Close()
2023-10-09 17:24:27 +00:00
g, inner := errgroup.WithContext(ctx)
g.SetLimit(numUploadParts)
for i := range b.Parts {
part := &b.Parts[i]
2023-10-20 20:11:25 +00:00
select {
case <-inner.Done():
case requestURL := <-b.nextURL:
g.Go(func() error {
2023-11-03 23:49:51 +00:00
var err error
2023-10-20 20:11:25 +00:00
for try := 0; try < maxRetries; try++ {
err = b.uploadPart(inner, http.MethodPatch, requestURL, part, opts)
2023-10-20 20:11:25 +00:00
switch {
case errors.Is(err, context.Canceled):
return err
case errors.Is(err, errMaxRetriesExceeded):
return err
case err != nil:
2023-11-19 00:25:22 +00:00
sleep := time.Second * time.Duration(math.Pow(2, float64(try)))
2023-11-17 21:17:55 +00:00
log.Printf("%s part %d attempt %d failed: %v, retrying in %s", b.Digest[7:19], part.N, try, err, sleep)
time.Sleep(sleep)
2023-10-20 20:11:25 +00:00
continue
}
return nil
2023-10-09 17:24:27 +00:00
}
2023-11-03 23:49:51 +00:00
return fmt.Errorf("%w: %w", errMaxRetriesExceeded, err)
2023-10-20 20:11:25 +00:00
})
}
2023-10-09 17:24:27 +00:00
}
2023-10-09 17:24:27 +00:00
if err := g.Wait(); err != nil {
2023-10-20 20:11:25 +00:00
b.err = err
return
2023-08-22 15:50:21 +00:00
}
2023-10-09 17:24:27 +00:00
requestURL := <-b.nextURL
// calculate md5 checksum and add it to the commit request
var sb strings.Builder
2023-10-27 17:11:28 +00:00
for _, part := range b.Parts {
sb.Write(part.Sum(nil))
2023-10-27 17:11:28 +00:00
}
md5sum := md5.Sum([]byte(sb.String()))
2023-08-22 15:50:21 +00:00
values := requestURL.Query()
2023-10-09 17:24:27 +00:00
values.Add("digest", b.Digest)
2023-10-27 17:11:28 +00:00
values.Add("etag", fmt.Sprintf("%x-%d", md5sum, len(b.Parts)))
2023-08-22 15:50:21 +00:00
requestURL.RawQuery = values.Encode()
headers := make(http.Header)
headers.Set("Content-Type", "application/octet-stream")
headers.Set("Content-Length", "0")
2023-11-18 07:52:11 +00:00
for try := 0; try < maxRetries; try++ {
2023-11-29 23:18:53 +00:00
var resp *http.Response
resp, err = makeRequestWithRetry(ctx, http.MethodPut, requestURL, headers, nil, opts)
if errors.Is(err, context.Canceled) {
break
} else if err != nil {
2023-11-19 01:59:55 +00:00
sleep := time.Second * time.Duration(math.Pow(2, float64(try)))
2023-11-18 07:52:11 +00:00
log.Printf("%s complete upload attempt %d failed: %v, retrying in %s", b.Digest[7:19], try, err, sleep)
time.Sleep(sleep)
continue
}
defer resp.Body.Close()
2023-11-29 23:18:53 +00:00
break
2023-08-22 15:50:21 +00:00
}
2023-11-29 23:18:53 +00:00
b.err = err
b.done = true
2023-08-22 15:50:21 +00:00
}
2023-09-14 16:54:05 +00:00
// uploadPart uploads one byte range of the blob. With MethodPatch it
// writes to the registry's upload session and forwards the next session
// URL over b.nextURL; on a 307 redirect it re-uploads the part with
// MethodPut directly to the redirect target. On success the part's MD5
// hash is recorded for the commit etag built in Run.
func (b *blobUpload) uploadPart(ctx context.Context, method string, requestURL *url.URL, part *blobUploadPart, opts *RegistryOptions) error {
	headers := make(http.Header)
	headers.Set("Content-Type", "application/octet-stream")
	headers.Set("Content-Length", fmt.Sprintf("%d", part.Size))

	if method == http.MethodPatch {
		headers.Set("X-Redirect-Uploads", "1")
		headers.Set("Content-Range", fmt.Sprintf("%d-%d", part.Offset, part.Offset+part.Size-1))
	}

	sr := io.NewSectionReader(b.file, part.Offset, part.Size)

	// Tee the request body through the progress writer and an MD5
	// hasher as it streams out.
	md5sum := md5.New()
	w := &progressWriter{blobUpload: b}
	resp, err := makeRequest(ctx, method, requestURL, headers, io.TeeReader(sr, io.MultiWriter(w, md5sum)), opts)
	if err != nil {
		// Undo progress recorded for bytes the upstream did not accept.
		w.Rollback()
		return err
	}
	defer resp.Body.Close()

	location := resp.Header.Get("Docker-Upload-Location")
	if location == "" {
		location = resp.Header.Get("Location")
	}

	nextURL, err := url.Parse(location)
	if err != nil {
		w.Rollback()
		return err
	}

	switch {
	case resp.StatusCode == http.StatusTemporaryRedirect:
		w.Rollback()
		// Hand the session URL back immediately so other parts can
		// proceed while this part re-uploads to the redirect target.
		b.nextURL <- nextURL

		redirectURL, err := resp.Location()
		if err != nil {
			return err
		}

		// retry uploading to the redirect URL
		for try := 0; try < maxRetries; try++ {
			// NOTE(review): opts is nil here — presumably the redirect
			// URL is pre-authorized; a 401 from it would nil-deref in
			// the StatusUnauthorized case below. Confirm.
			err = b.uploadPart(ctx, http.MethodPut, redirectURL, part, nil)
			switch {
			case errors.Is(err, context.Canceled):
				return err
			case errors.Is(err, errMaxRetriesExceeded):
				return err
			case err != nil:
				// Exponential backoff between attempts.
				sleep := time.Second * time.Duration(math.Pow(2, float64(try)))
				log.Printf("%s part %d attempt %d failed: %v, retrying in %s", b.Digest[7:19], part.N, try, err, sleep)
				time.Sleep(sleep)
				continue
			}

			return nil
		}

		return fmt.Errorf("%w: %w", errMaxRetriesExceeded, err)

	case resp.StatusCode == http.StatusUnauthorized:
		w.Rollback()
		// Refresh the auth token, then fall through so the 401 body is
		// reported as an error; the caller's retry loop re-attempts
		// with the new token stored in opts.
		auth := resp.Header.Get("www-authenticate")
		authRedir := ParseAuthRedirectString(auth)
		token, err := getAuthToken(ctx, authRedir)
		if err != nil {
			return err
		}

		opts.Token = token
		fallthrough
	case resp.StatusCode >= http.StatusBadRequest:
		w.Rollback()
		body, err := io.ReadAll(resp.Body)
		if err != nil {
			return err
		}

		return fmt.Errorf("http status %s: %s", resp.Status, body)
	}

	// On a successful PATCH, pass the next session URL to the next part.
	if method == http.MethodPatch {
		b.nextURL <- nextURL
	}

	part.Hash = md5sum
	return nil
}
2023-10-09 17:24:27 +00:00
// acquire registers an additional waiter on this upload.
func (b *blobUpload) acquire() {
	b.references.Add(1)
}
2023-10-09 17:24:27 +00:00
// release drops one waiter; when the last waiter is gone the upload is
// canceled via the stored CancelFunc.
// NOTE(review): CancelFunc is nil until Run assigns it — a release that
// hits zero before Run starts would panic; confirm call ordering.
func (b *blobUpload) release() {
	if b.references.Add(-1) == 0 {
		b.CancelFunc()
	}
}
func (b *blobUpload) Wait(ctx context.Context, fn func(api.ProgressResponse)) error {
b.acquire()
defer b.release()
ticker := time.NewTicker(60 * time.Millisecond)
for {
select {
case <-ticker.C:
case <-ctx.Done():
return ctx.Err()
}
fn(api.ProgressResponse{
2023-11-19 14:20:22 +00:00
Status: fmt.Sprintf("pushing %s", b.Digest[7:19]),
2023-10-09 17:24:27 +00:00
Digest: b.Digest,
Total: b.Total,
Completed: b.Completed.Load(),
2023-09-14 16:54:05 +00:00
})
2023-10-09 17:24:27 +00:00
if b.done || b.err != nil {
return b.err
}
2023-09-14 16:54:05 +00:00
}
2023-10-09 17:24:27 +00:00
}
2023-09-14 16:54:05 +00:00
2023-11-02 00:10:21 +00:00
// blobUploadPart tracks the upload of a single byte range of a blob.
type blobUploadPart struct {
	// N is the part number
	N      int
	Offset int64
	Size   int64

	// Hash is the MD5 of the uploaded part body, set by uploadPart and
	// folded into the commit etag by Run.
	hash.Hash
}
2023-11-29 23:04:23 +00:00
// progressWriter counts bytes streamed for one part so progress can be
// reported via the embedded blobUpload's Completed counter and rolled
// back if the part upload fails.
type progressWriter struct {
	written int64
	*blobUpload
}
2023-11-29 23:04:23 +00:00
func (p *progressWriter) Write(b []byte) (n int, err error) {
2023-11-02 00:10:21 +00:00
n = len(b)
p.written += int64(n)
p.Completed.Add(int64(n))
2023-09-14 16:54:05 +00:00
return n, nil
}
2023-10-09 17:24:27 +00:00
2023-11-29 23:04:23 +00:00
func (p *progressWriter) Rollback() {
p.Completed.Add(-p.written)
2023-11-02 00:10:21 +00:00
p.written = 0
}
2023-10-09 17:24:27 +00:00
// uploadBlob pushes layer to the registry identified by mp, reporting
// progress through fn. If the blob already exists upstream (HEAD
// succeeds) it reports immediate completion. Concurrent calls for the
// same digest share a single in-flight upload via blobUploadManager.
func uploadBlob(ctx context.Context, mp ModelPath, layer *Layer, opts *RegistryOptions, fn func(api.ProgressResponse)) error {
	requestURL := mp.BaseURL()
	requestURL = requestURL.JoinPath("v2", mp.GetNamespaceRepository(), "blobs", layer.Digest)

	resp, err := makeRequestWithRetry(ctx, http.MethodHead, requestURL, nil, nil, opts)
	switch {
	case errors.Is(err, os.ErrNotExist):
		// Blob is not in the registry yet; fall through to upload it.
	case err != nil:
		return err
	default:
		defer resp.Body.Close()

		// Already present upstream: report it as fully pushed.
		fn(api.ProgressResponse{
			Status:    fmt.Sprintf("pushing %s", layer.Digest[7:19]),
			Digest:    layer.Digest,
			Total:     layer.Size,
			Completed: layer.Size,
		})

		return nil
	}

	// LoadOrStore ensures only the first caller for a digest prepares
	// and runs the upload; later callers just wait on the shared state.
	data, ok := blobUploadManager.LoadOrStore(layer.Digest, &blobUpload{Layer: layer})
	upload := data.(*blobUpload)
	if !ok {
		requestURL := mp.BaseURL()
		requestURL = requestURL.JoinPath("v2", mp.GetNamespaceRepository(), "blobs/uploads/")
		if err := upload.Prepare(ctx, requestURL, opts); err != nil {
			blobUploadManager.Delete(layer.Digest)
			return err
		}

		// Run with a detached context: the upload is shared across
		// waiters and is only canceled when the last one releases it.
		go upload.Run(context.Background(), opts)
	}

	return upload.Wait(ctx, fn)
}