package server

import (
	"context"
	"crypto/md5"
	"errors"
	"fmt"
	"hash"
	"io"
	"log/slog"
	"math"
	"net/http"
	"net/url"
	"os"
	"strconv"
	"sync"
	"sync/atomic"
	"time"

	"golang.org/x/sync/errgroup"

	"github.com/ollama/ollama/api"
	"github.com/ollama/ollama/format"
)
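
// blobUploadManager tracks in-flight blob uploads keyed by layer digest so that
// concurrent pushes of the same blob share a single upload.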
var blobUploadManager sync.Map
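
// blobUpload tracks a single multi-part blob upload: the source layer, overall
// progress, the computed parts, and the registry-provided URL for the next request.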
type blobUpload struct {
	Layer

	Total     int64
	Completed atomic.Int64

	Parts []blobUploadPart

	nextURL chan *url.URL

	context.CancelFunc

	file *os.File

	done       bool
	err        error
	references atomic.Int32
}
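
// numUploadParts is both the target part count and the upload concurrency limit;
// the derived part size is clamped between minUploadPartSize and maxUploadPartSize.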
const (
	numUploadParts          = 16
	minUploadPartSize int64 = 100 * format.MegaByte
	maxUploadPartSize int64 = 1000 * format.MegaByte
)
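
// Prepare starts an upload session with the registry, requesting a cross-repository
// mount when b.From is set, and splits the local blob into parts for upload.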
func (b *blobUpload) Prepare(ctx context.Context, requestURL *url.URL, opts *registryOptions) error {
	p, err := GetBlobsPath(b.Digest)
	if err != nil {
		return err
	}

	if b.From != "" {
		values := requestURL.Query()
		values.Add("mount", b.Digest)
		values.Add("from", ParseModelPath(b.From).GetNamespaceRepository())
		requestURL.RawQuery = values.Encode()
	}

	resp, err := makeRequestWithRetry(ctx, http.MethodPost, requestURL, nil, nil, opts)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	location := resp.Header.Get("Docker-Upload-Location")
	if location == "" {
		location = resp.Header.Get("Location")
	}

	fi, err := os.Stat(p)
	if err != nil {
		return err
	}

	b.Total = fi.Size()

	// http.StatusCreated indicates a blob has been mounted
	// ref: https://distribution.github.io/distribution/spec/api/#cross-repository-blob-mount
	if resp.StatusCode == http.StatusCreated {
		b.Completed.Store(b.Total)
		b.done = true
		return nil
	}
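
	// Derive a part size from the blob size, clamped to the configured bounds;
	// the final part covers whatever remains.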
	size := b.Total / numUploadParts
	switch {
	case size < minUploadPartSize:
		size = minUploadPartSize
	case size > maxUploadPartSize:
		size = maxUploadPartSize
	}

	var offset int64
	for offset < fi.Size() {
		if offset+size > fi.Size() {
			size = fi.Size() - offset
		}

		// set part.N to the current number of parts
		b.Parts = append(b.Parts, blobUploadPart{N: len(b.Parts), Offset: offset, Size: size})
		offset += size
	}

	slog.Info(fmt.Sprintf("uploading %s in %d %s part(s)", b.Digest[7:19], len(b.Parts), format.HumanBytes(b.Parts[0].Size)))

	requestURL, err = url.Parse(location)
	if err != nil {
		return err
	}

	b.nextURL = make(chan *url.URL, 1)
	b.nextURL <- requestURL
	return nil
}

// Run uploads blob parts to the upstream. If the upstream supports redirection, parts will be uploaded
// in parallel as defined by Prepare. Otherwise, parts will be uploaded serially. Run sets b.err on error.
func (b *blobUpload) Run(ctx context.Context, opts *registryOptions) {
	defer blobUploadManager.Delete(b.Digest)
	ctx, b.CancelFunc = context.WithCancel(ctx)

	p, err := GetBlobsPath(b.Digest)
	if err != nil {
		b.err = err
		return
	}

	b.file, err = os.Open(p)
	if err != nil {
		b.err = err
		return
	}
	defer b.file.Close()
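
	// Upload the parts concurrently, bounded by numUploadParts workers. Each worker
	// takes the next upload URL from b.nextURL; the registry's response supplies the
	// URL used for the following part.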
	g, inner := errgroup.WithContext(ctx)
	g.SetLimit(numUploadParts)
	for i := range b.Parts {
		part := &b.Parts[i]
		select {
		case <-inner.Done():
		case requestURL := <-b.nextURL:
			g.Go(func() error {
				var err error
				for try := range maxRetries {
					err = b.uploadPart(inner, http.MethodPatch, requestURL, part, opts)
					switch {
					case errors.Is(err, context.Canceled):
						return err
					case errors.Is(err, errMaxRetriesExceeded):
						return err
					case err != nil:
						sleep := time.Second * time.Duration(math.Pow(2, float64(try)))
						slog.Info(fmt.Sprintf("%s part %d attempt %d failed: %v, retrying in %s", b.Digest[7:19], part.N, try, err, sleep))
						time.Sleep(sleep)
						continue
					}

					return nil
				}

				return fmt.Errorf("%w: %w", errMaxRetriesExceeded, err)
			})
		}
	}

	if err := g.Wait(); err != nil {
		b.err = err
		return
	}

	requestURL := <-b.nextURL

	// calculate md5 checksum and add it to the commit request
	md5sum := md5.New()
	for _, part := range b.Parts {
		md5sum.Write(part.Sum(nil))
	}

	values := requestURL.Query()
	values.Add("digest", b.Digest)
	values.Add("etag", fmt.Sprintf("%x-%d", md5sum.Sum(nil), len(b.Parts)))
	requestURL.RawQuery = values.Encode()

	headers := make(http.Header)
	headers.Set("Content-Type", "application/octet-stream")
	headers.Set("Content-Length", "0")
	for try := range maxRetries {
		var resp *http.Response
		resp, err = makeRequestWithRetry(ctx, http.MethodPut, requestURL, headers, nil, opts)
		if errors.Is(err, context.Canceled) {
			break
		} else if err != nil {
			sleep := time.Second * time.Duration(math.Pow(2, float64(try)))
			slog.Info(fmt.Sprintf("%s complete upload attempt %d failed: %v, retrying in %s", b.Digest[7:19], try, err, sleep))
			time.Sleep(sleep)
			continue
		}
		defer resp.Body.Close()
		break
	}

	b.err = err
	b.done = true
}
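
// uploadPart sends a single part to requestURL. A PATCH against the registry may be
// answered with a temporary redirect; in that case the part is re-uploaded with PUT
// to the redirect target. On success the part's MD5 hash is recorded for the commit.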
func (b *blobUpload) uploadPart(ctx context.Context, method string, requestURL *url.URL, part *blobUploadPart, opts *registryOptions) error {
	headers := make(http.Header)
	headers.Set("Content-Type", "application/octet-stream")
	headers.Set("Content-Length", strconv.FormatInt(part.Size, 10))

	if method == http.MethodPatch {
		headers.Set("X-Redirect-Uploads", "1")
		headers.Set("Content-Range", fmt.Sprintf("%d-%d", part.Offset, part.Offset+part.Size-1))
	}

	sr := io.NewSectionReader(b.file, part.Offset, part.Size)

	md5sum := md5.New()
	w := &progressWriter{blobUpload: b}
	resp, err := makeRequest(ctx, method, requestURL, headers, io.TeeReader(sr, io.MultiWriter(w, md5sum)), opts)
	if err != nil {
		w.Rollback()
		return err
	}
	defer resp.Body.Close()

	location := resp.Header.Get("Docker-Upload-Location")
	if location == "" {
		location = resp.Header.Get("Location")
	}

	nextURL, err := url.Parse(location)
	if err != nil {
		w.Rollback()
		return err
	}
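
	// Handle redirects, authentication challenges, and error statuses; each of these
	// paths first rolls back the progress recorded for this attempt.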
	switch {
	case resp.StatusCode == http.StatusTemporaryRedirect:
		w.Rollback()
		b.nextURL <- nextURL

		redirectURL, err := resp.Location()
		if err != nil {
			return err
		}

		// retry uploading to the redirect URL
		for try := range maxRetries {
			err = b.uploadPart(ctx, http.MethodPut, redirectURL, part, &registryOptions{})
			switch {
			case errors.Is(err, context.Canceled):
				return err
			case errors.Is(err, errMaxRetriesExceeded):
				return err
			case err != nil:
				sleep := time.Second * time.Duration(math.Pow(2, float64(try)))
				slog.Info(fmt.Sprintf("%s part %d attempt %d failed: %v, retrying in %s", b.Digest[7:19], part.N, try, err, sleep))
				time.Sleep(sleep)
				continue
			}

			return nil
		}

		return fmt.Errorf("%w: %w", errMaxRetriesExceeded, err)

	case resp.StatusCode == http.StatusUnauthorized:
		w.Rollback()
		challenge := parseRegistryChallenge(resp.Header.Get("www-authenticate"))
		token, err := getAuthorizationToken(ctx, challenge)
		if err != nil {
			return err
		}

		opts.Token = token
		fallthrough
	case resp.StatusCode >= http.StatusBadRequest:
		w.Rollback()
		body, err := io.ReadAll(resp.Body)
		if err != nil {
			return err
		}

		return fmt.Errorf("http status %s: %s", resp.Status, body)
	}

	if method == http.MethodPatch {
		b.nextURL <- nextURL
	}

	part.Hash = md5sum
	return nil
}
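
// acquire and release reference-count waiters on the upload; when the last waiter
// releases, the upload is cancelled via its CancelFunc.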
func (b *blobUpload) acquire() {
	b.references.Add(1)
}

func (b *blobUpload) release() {
	if b.references.Add(-1) == 0 {
		b.CancelFunc()
	}
}
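
// Wait reports upload progress to fn on a ticker until the upload completes, fails,
// or ctx is cancelled.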
func (b *blobUpload) Wait(ctx context.Context, fn func(api.ProgressResponse)) error {
	b.acquire()
	defer b.release()

	ticker := time.NewTicker(60 * time.Millisecond)
	for {
		select {
		case <-ticker.C:
		case <-ctx.Done():
			return ctx.Err()
		}

		fn(api.ProgressResponse{
			Status:    fmt.Sprintf("pushing %s", b.Digest[7:19]),
			Digest:    b.Digest,
			Total:     b.Total,
			Completed: b.Completed.Load(),
		})

		if b.done || b.err != nil {
			return b.err
		}
	}
}

type blobUploadPart struct {
	// N is the part number
	N      int
	Offset int64
	Size   int64
	hash.Hash
}
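
// progressWriter counts bytes as a part is streamed so overall progress can be
// reported, and can roll that count back if the attempt fails.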
type progressWriter struct {
	written int64
	*blobUpload
}

func (p *progressWriter) Write(b []byte) (n int, err error) {
	n = len(b)
	p.written += int64(n)
	p.Completed.Add(int64(n))
	return n, nil
}

func (p *progressWriter) Rollback() {
	p.Completed.Add(-p.written)
	p.written = 0
}
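
// uploadBlob pushes a single layer. If the registry already has the blob, the push is
// reported as complete immediately; otherwise an upload is started (or joined, if one
// is already in flight for the same digest) and waited on.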
func uploadBlob(ctx context.Context, mp ModelPath, layer Layer, opts *registryOptions, fn func(api.ProgressResponse)) error {
	requestURL := mp.BaseURL()
	requestURL = requestURL.JoinPath("v2", mp.GetNamespaceRepository(), "blobs", layer.Digest)

	resp, err := makeRequestWithRetry(ctx, http.MethodHead, requestURL, nil, nil, opts)
	switch {
	case errors.Is(err, os.ErrNotExist):
	case err != nil:
		return err
	default:
		defer resp.Body.Close()
		fn(api.ProgressResponse{
			Status:    fmt.Sprintf("pushing %s", layer.Digest[7:19]),
			Digest:    layer.Digest,
			Total:     layer.Size,
			Completed: layer.Size,
		})

		return nil
	}

	data, ok := blobUploadManager.LoadOrStore(layer.Digest, &blobUpload{Layer: layer})
	upload := data.(*blobUpload)
	if !ok {
		requestURL := mp.BaseURL()
		requestURL = requestURL.JoinPath("v2", mp.GetNamespaceRepository(), "blobs/uploads/")
		if err := upload.Prepare(ctx, requestURL, opts); err != nil {
			blobUploadManager.Delete(layer.Digest)
			return err
		}

		//nolint:contextcheck
		go upload.Run(context.Background(), opts)
	}

	return upload.Wait(ctx, fn)
}