2023-07-25 21:08:51 +00:00
|
|
|
package server
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
2023-09-27 23:22:30 +00:00
|
|
|
"encoding/json"
|
2023-07-25 21:08:51 +00:00
|
|
|
"errors"
|
|
|
|
"fmt"
|
|
|
|
"io"
|
2024-01-18 18:52:01 +00:00
|
|
|
"log/slog"
|
2023-11-19 00:25:22 +00:00
|
|
|
"math"
|
2024-07-25 22:58:30 +00:00
|
|
|
"math/rand/v2"
|
2023-07-25 21:08:51 +00:00
|
|
|
"net/http"
|
2023-09-27 23:22:30 +00:00
|
|
|
"net/url"
|
2023-07-25 21:08:51 +00:00
|
|
|
"os"
|
2023-09-19 16:36:30 +00:00
|
|
|
"path/filepath"
|
2023-07-25 21:08:51 +00:00
|
|
|
"strconv"
|
2023-10-02 22:26:27 +00:00
|
|
|
"strings"
|
2023-09-29 23:13:53 +00:00
|
|
|
"sync"
|
|
|
|
"sync/atomic"
|
2023-10-26 18:34:02 +00:00
|
|
|
"syscall"
|
2023-09-29 23:13:53 +00:00
|
|
|
"time"
|
2023-07-25 21:08:51 +00:00
|
|
|
|
2023-09-27 23:22:30 +00:00
|
|
|
"golang.org/x/sync/errgroup"
|
2023-09-29 23:13:53 +00:00
|
|
|
|
2024-03-26 20:04:17 +00:00
|
|
|
"github.com/ollama/ollama/api"
|
|
|
|
"github.com/ollama/ollama/format"
|
2023-07-25 21:08:51 +00:00
|
|
|
)
|
|
|
|
|
2024-01-08 19:44:59 +00:00
|
|
|
// maxRetries bounds how many times a single part download is retried
// after a hard failure before the whole download is aborted. Stalled
// parts (errPartStalled) do not consume a retry attempt.
const maxRetries = 6

var (
	// errMaxRetriesExceeded is returned (wrapping the last error) once a
	// part has failed maxRetries times.
	errMaxRetriesExceeded = errors.New("max retries exceeded")
	// errPartStalled signals that a part made no progress for several
	// seconds and should be restarted without counting as a failure.
	errPartStalled = errors.New("part stalled")
)

// blobDownloadManager deduplicates concurrent pulls of the same blob:
// it maps digest (string) -> *blobDownload so multiple callers share
// one in-flight download.
var blobDownloadManager sync.Map
|
2023-07-25 21:08:51 +00:00
|
|
|
|
2023-09-29 23:13:53 +00:00
|
|
|
// blobDownload tracks one blob being downloaded in parallel parts. A
// single instance is shared (via blobDownloadManager) by every caller
// waiting on the same digest.
type blobDownload struct {
	// Name is the destination path of the blob in the blobs directory.
	Name string
	// Digest is the blob's content digest (e.g. "sha256:...").
	Digest string

	// Total is the blob size in bytes.
	Total int64
	// Completed counts bytes downloaded so far across all parts.
	Completed atomic.Int64

	// Parts describes the byte ranges being fetched; state is persisted
	// per part so downloads can resume.
	Parts []*blobDownloadPart

	// CancelFunc cancels the underlying download context; invoked when
	// the last waiter releases this download.
	context.CancelFunc

	// done is closed by Run when the download finishes; err is only
	// valid after done is closed.
	done chan struct{}
	err  error
	// references counts active waiters (see acquire/release).
	references atomic.Int32
}
|
2023-07-25 21:08:51 +00:00
|
|
|
|
2023-09-29 23:13:53 +00:00
|
|
|
// blobDownloadPart is one contiguous byte range of a blob. Its progress
// is serialized to a JSON sidecar file (see Name) so an interrupted
// download can resume where it left off.
type blobDownloadPart struct {
	// N is the part index within the blob.
	N int
	// Offset is the byte offset of this part within the blob.
	Offset int64
	// Size is the length of this part in bytes.
	Size int64
	// Completed counts bytes of this part downloaded so far.
	Completed atomic.Int64

	// lastUpdated records when bytes last arrived for this part; the
	// zero value means "just reset" and disables stall detection until
	// the next write. Guarded by lastUpdatedMu.
	lastUpdatedMu sync.Mutex
	lastUpdated   time.Time

	// back-pointer to the owning download; excluded from JSON to avoid
	// cycles and redundant state.
	*blobDownload `json:"-"`
}
|
|
|
|
|
2024-07-31 23:01:24 +00:00
|
|
|
// jsonBlobDownloadPart mirrors blobDownloadPart with a plain int64
// Completed field, because atomic.Int64 does not round-trip through
// encoding/json.
type jsonBlobDownloadPart struct {
	N         int
	Offset    int64
	Size      int64
	Completed int64
}
|
|
|
|
|
|
|
|
func (p *blobDownloadPart) MarshalJSON() ([]byte, error) {
|
|
|
|
return json.Marshal(jsonBlobDownloadPart{
|
|
|
|
N: p.N,
|
|
|
|
Offset: p.Offset,
|
|
|
|
Size: p.Size,
|
|
|
|
Completed: p.Completed.Load(),
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
func (p *blobDownloadPart) UnmarshalJSON(b []byte) error {
|
|
|
|
var j jsonBlobDownloadPart
|
|
|
|
if err := json.Unmarshal(b, &j); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
*p = blobDownloadPart{
|
|
|
|
N: j.N,
|
|
|
|
Offset: j.Offset,
|
|
|
|
Size: j.Size,
|
|
|
|
}
|
|
|
|
p.Completed.Store(j.Completed)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2023-10-11 00:22:44 +00:00
|
|
|
const (
	// numDownloadParts is both the target number of parts a blob is
	// split into and the concurrency limit for part downloads.
	numDownloadParts = 16
	// min/maxDownloadPartSize clamp the computed part size so tiny blobs
	// are not over-split and huge blobs do not produce oversized parts.
	minDownloadPartSize int64 = 100 * format.MegaByte
	maxDownloadPartSize int64 = 1000 * format.MegaByte
)
|
|
|
|
|
2023-10-02 22:26:27 +00:00
|
|
|
func (p *blobDownloadPart) Name() string {
|
|
|
|
return strings.Join([]string{
|
|
|
|
p.blobDownload.Name, "partial", strconv.Itoa(p.N),
|
|
|
|
}, "-")
|
2023-09-29 23:13:53 +00:00
|
|
|
}
|
2023-07-25 21:08:51 +00:00
|
|
|
|
2023-10-03 23:52:49 +00:00
|
|
|
func (p *blobDownloadPart) StartsAt() int64 {
|
2024-07-26 21:24:24 +00:00
|
|
|
return p.Offset + p.Completed.Load()
|
2023-10-03 23:52:49 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func (p *blobDownloadPart) StopsAt() int64 {
|
|
|
|
return p.Offset + p.Size
|
|
|
|
}
|
|
|
|
|
2024-01-08 19:44:59 +00:00
|
|
|
func (p *blobDownloadPart) Write(b []byte) (n int, err error) {
|
|
|
|
n = len(b)
|
|
|
|
p.blobDownload.Completed.Add(int64(n))
|
2024-07-26 21:24:24 +00:00
|
|
|
p.lastUpdatedMu.Lock()
|
2024-01-08 19:44:59 +00:00
|
|
|
p.lastUpdated = time.Now()
|
2024-07-26 21:24:24 +00:00
|
|
|
p.lastUpdatedMu.Unlock()
|
2024-01-08 19:44:59 +00:00
|
|
|
return n, nil
|
|
|
|
}
|
|
|
|
|
2024-02-14 19:29:49 +00:00
|
|
|
func (b *blobDownload) Prepare(ctx context.Context, requestURL *url.URL, opts *registryOptions) error {
|
2023-09-29 23:13:53 +00:00
|
|
|
partFilePaths, err := filepath.Glob(b.Name + "-partial-*")
|
2023-09-27 23:22:30 +00:00
|
|
|
if err != nil {
|
2023-08-15 18:07:19 +00:00
|
|
|
return err
|
2023-07-25 21:08:51 +00:00
|
|
|
}
|
|
|
|
|
2024-07-26 21:24:24 +00:00
|
|
|
b.done = make(chan struct{})
|
|
|
|
|
2023-09-27 23:22:30 +00:00
|
|
|
for _, partFilePath := range partFilePaths {
|
2023-09-29 23:13:53 +00:00
|
|
|
part, err := b.readPart(partFilePath)
|
2023-08-01 19:34:52 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
2023-07-25 21:08:51 +00:00
|
|
|
}
|
|
|
|
|
2023-09-29 23:13:53 +00:00
|
|
|
b.Total += part.Size
|
2024-07-26 21:24:24 +00:00
|
|
|
b.Completed.Add(part.Completed.Load())
|
2023-09-29 23:13:53 +00:00
|
|
|
b.Parts = append(b.Parts, part)
|
2023-09-27 23:22:30 +00:00
|
|
|
}
|
2023-07-25 21:08:51 +00:00
|
|
|
|
2023-09-29 23:13:53 +00:00
|
|
|
if len(b.Parts) == 0 {
|
2023-11-02 20:13:32 +00:00
|
|
|
resp, err := makeRequestWithRetry(ctx, http.MethodHead, requestURL, nil, nil, opts)
|
2023-07-25 21:08:51 +00:00
|
|
|
if err != nil {
|
2023-09-27 23:22:30 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
defer resp.Body.Close()
|
|
|
|
|
2023-09-29 23:13:53 +00:00
|
|
|
b.Total, _ = strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
|
2023-09-27 23:22:30 +00:00
|
|
|
|
2023-12-15 22:25:12 +00:00
|
|
|
size := b.Total / numDownloadParts
|
2023-10-11 00:22:44 +00:00
|
|
|
switch {
|
|
|
|
case size < minDownloadPartSize:
|
|
|
|
size = minDownloadPartSize
|
|
|
|
case size > maxDownloadPartSize:
|
|
|
|
size = maxDownloadPartSize
|
|
|
|
}
|
2023-09-27 23:22:30 +00:00
|
|
|
|
2023-10-11 00:22:44 +00:00
|
|
|
var offset int64
|
2023-09-29 23:13:53 +00:00
|
|
|
for offset < b.Total {
|
|
|
|
if offset+size > b.Total {
|
|
|
|
size = b.Total - offset
|
|
|
|
}
|
|
|
|
|
2023-10-02 22:26:27 +00:00
|
|
|
if err := b.newPart(offset, size); err != nil {
|
2023-09-29 23:13:53 +00:00
|
|
|
return err
|
2023-09-27 23:22:30 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
offset += size
|
2023-07-25 21:08:51 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2024-01-18 18:52:01 +00:00
|
|
|
slog.Info(fmt.Sprintf("downloading %s in %d %s part(s)", b.Digest[7:19], len(b.Parts), format.HumanBytes(b.Parts[0].Size)))
|
2023-09-29 23:13:53 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2024-02-14 19:29:49 +00:00
|
|
|
// Run executes the download to completion, records the outcome in b.err,
// and closes b.done so that Wait-ers observe it. It is intended to run
// in its own goroutine (see downloadBlob).
func (b *blobDownload) Run(ctx context.Context, requestURL *url.URL, opts *registryOptions) {
	// close(done) is deferred so waiters are released even if run panics;
	// it must happen after b.err is assigned.
	defer close(b.done)
	b.err = b.run(ctx, requestURL, opts)
}
|
|
|
|
|
2024-07-25 22:58:30 +00:00
|
|
|
func newBackoff(maxBackoff time.Duration) func(ctx context.Context) error {
|
|
|
|
var n int
|
|
|
|
return func(ctx context.Context) error {
|
|
|
|
if ctx.Err() != nil {
|
|
|
|
return ctx.Err()
|
|
|
|
}
|
|
|
|
|
|
|
|
n++
|
|
|
|
|
|
|
|
// n^2 backoff timer is a little smoother than the
|
|
|
|
// common choice of 2^n.
|
|
|
|
d := min(time.Duration(n*n)*10*time.Millisecond, maxBackoff)
|
|
|
|
// Randomize the delay between 0.5-1.5 x msec, in order
|
|
|
|
// to prevent accidental "thundering herd" problems.
|
|
|
|
d = time.Duration(float64(d) * (rand.Float64() + 0.5))
|
|
|
|
t := time.NewTimer(d)
|
|
|
|
defer t.Stop()
|
|
|
|
select {
|
|
|
|
case <-ctx.Done():
|
|
|
|
return ctx.Err()
|
|
|
|
case <-t.C:
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2024-03-08 02:10:16 +00:00
|
|
|
// run performs the actual download: it resolves a direct (redirected)
// URL for the blob, fetches all incomplete parts concurrently into a
// sparse "-partial" file with per-part retries, then renames the file
// into place and removes the part state files.
func (b *blobDownload) run(ctx context.Context, requestURL *url.URL, opts *registryOptions) error {
	// remove ourselves from the dedup map regardless of outcome so a
	// later pull can start fresh.
	defer blobDownloadManager.Delete(b.Digest)
	ctx, b.CancelFunc = context.WithCancel(ctx)

	file, err := os.OpenFile(b.Name+"-partial", os.O_CREATE|os.O_RDWR, 0o644)
	if err != nil {
		return err
	}
	defer file.Close()
	setSparse(file)

	// best-effort preallocation to the final size; failure is tolerable
	// because parts write at explicit offsets anyway.
	_ = file.Truncate(b.Total)

	// Resolve the registry's temporary-redirect target once up front,
	// retrying with backoff for up to 30 seconds.
	directURL, err := func() (*url.URL, error) {
		ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
		defer cancel()

		backoff := newBackoff(10 * time.Second)
		for {
			// shallow clone opts to be used in the closure
			// without affecting the outer opts.
			newOpts := new(registryOptions)
			*newOpts = *opts

			newOpts.CheckRedirect = func(req *http.Request, via []*http.Request) error {
				if len(via) > 10 {
					return errors.New("maximum redirects exceeded (10) for directURL")
				}

				// if the hostname is the same, allow the redirect
				if req.URL.Hostname() == requestURL.Hostname() {
					return nil
				}

				// stop at the first redirect that is not
				// the same hostname as the original
				// request.
				return http.ErrUseLastResponse
			}

			resp, err := makeRequestWithRetry(ctx, http.MethodGet, requestURL, nil, nil, newOpts)
			if err != nil {
				slog.Warn("failed to get direct URL; backing off and retrying", "err", err)
				if err := backoff(ctx); err != nil {
					return nil, err
				}
				continue
			}
			defer resp.Body.Close()
			if resp.StatusCode != http.StatusTemporaryRedirect {
				return nil, fmt.Errorf("unexpected status code %d", resp.StatusCode)
			}
			// Location of the off-host redirect is the direct URL.
			return resp.Location()
		}
	}()
	if err != nil {
		return err
	}

	// Download incomplete parts concurrently, at most numDownloadParts
	// at a time; inner is canceled on the first hard failure.
	g, inner := errgroup.WithContext(ctx)
	g.SetLimit(numDownloadParts)
	for i := range b.Parts {
		part := b.Parts[i]
		if part.Completed.Load() == part.Size {
			continue
		}

		g.Go(func() error {
			var err error
			for try := 0; try < maxRetries; try++ {
				// write at the part's resume offset within the file.
				w := io.NewOffsetWriter(file, part.StartsAt())
				err = b.downloadChunk(inner, directURL, w, part)
				switch {
				case errors.Is(err, context.Canceled), errors.Is(err, syscall.ENOSPC):
					// return immediately if the context is canceled or the device is out of space
					return err
				case errors.Is(err, errPartStalled):
					// a stall does not consume a retry attempt.
					try--
					continue
				case err != nil:
					// exponential backoff (2^try seconds) before retrying.
					sleep := time.Second * time.Duration(math.Pow(2, float64(try)))
					slog.Info(fmt.Sprintf("%s part %d attempt %d failed: %v, retrying in %s", b.Digest[7:19], part.N, try, err, sleep))
					time.Sleep(sleep)
					continue
				default:
					return nil
				}
			}

			return fmt.Errorf("%w: %w", errMaxRetriesExceeded, err)
		})
	}

	if err := g.Wait(); err != nil {
		return err
	}

	// explicitly close the file so we can rename it
	if err := file.Close(); err != nil {
		return err
	}

	// clean up the per-part JSON state files before publishing the blob.
	for i := range b.Parts {
		if err := os.Remove(file.Name() + "-" + strconv.Itoa(i)); err != nil {
			return err
		}
	}

	if err := os.Rename(file.Name(), b.Name); err != nil {
		return err
	}

	return nil
}
|
|
|
|
|
2024-07-26 21:24:24 +00:00
|
|
|
func (b *blobDownload) downloadChunk(ctx context.Context, requestURL *url.URL, w io.Writer, part *blobDownloadPart) error {
|
2024-01-08 19:44:59 +00:00
|
|
|
g, ctx := errgroup.WithContext(ctx)
|
|
|
|
g.Go(func() error {
|
2024-07-26 21:24:24 +00:00
|
|
|
req, err := http.NewRequestWithContext(ctx, http.MethodGet, requestURL.String(), nil)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", part.StartsAt(), part.StopsAt()-1))
|
|
|
|
resp, err := http.DefaultClient.Do(req)
|
2024-01-08 19:44:59 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
defer resp.Body.Close()
|
2023-07-25 21:08:51 +00:00
|
|
|
|
2024-07-26 21:24:24 +00:00
|
|
|
n, err := io.CopyN(w, io.TeeReader(resp.Body, part), part.Size-part.Completed.Load())
|
2024-01-08 19:44:59 +00:00
|
|
|
if err != nil && !errors.Is(err, context.Canceled) && !errors.Is(err, io.ErrUnexpectedEOF) {
|
|
|
|
// rollback progress
|
|
|
|
b.Completed.Add(-n)
|
|
|
|
return err
|
|
|
|
}
|
2023-07-25 21:08:51 +00:00
|
|
|
|
2024-07-26 21:24:24 +00:00
|
|
|
part.Completed.Add(n)
|
2024-01-08 19:44:59 +00:00
|
|
|
if err := b.writePart(part.Name(), part); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// return nil or context.Canceled or UnexpectedEOF (resumable)
|
2023-10-02 20:34:07 +00:00
|
|
|
return err
|
2024-01-08 19:44:59 +00:00
|
|
|
})
|
|
|
|
|
|
|
|
g.Go(func() error {
|
|
|
|
ticker := time.NewTicker(time.Second)
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case <-ticker.C:
|
2024-07-26 21:24:24 +00:00
|
|
|
if part.Completed.Load() >= part.Size {
|
2024-01-08 19:44:59 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2024-07-26 21:24:24 +00:00
|
|
|
part.lastUpdatedMu.Lock()
|
|
|
|
lastUpdated := part.lastUpdated
|
|
|
|
part.lastUpdatedMu.Unlock()
|
|
|
|
|
|
|
|
if !lastUpdated.IsZero() && time.Since(lastUpdated) > 5*time.Second {
|
2024-04-10 23:24:37 +00:00
|
|
|
const msg = "%s part %d stalled; retrying. If this persists, press ctrl-c to exit, then 'ollama pull' to find a faster connection."
|
|
|
|
slog.Info(fmt.Sprintf(msg, b.Digest[7:19], part.N))
|
2024-01-08 19:44:59 +00:00
|
|
|
// reset last updated
|
2024-07-26 21:24:24 +00:00
|
|
|
part.lastUpdatedMu.Lock()
|
2024-01-08 19:44:59 +00:00
|
|
|
part.lastUpdated = time.Time{}
|
2024-07-26 21:24:24 +00:00
|
|
|
part.lastUpdatedMu.Unlock()
|
2024-01-08 19:44:59 +00:00
|
|
|
return errPartStalled
|
|
|
|
}
|
|
|
|
case <-ctx.Done():
|
|
|
|
return ctx.Err()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
})
|
2023-10-02 20:34:07 +00:00
|
|
|
|
2024-01-08 19:44:59 +00:00
|
|
|
return g.Wait()
|
2023-09-29 23:13:53 +00:00
|
|
|
}
|
|
|
|
|
2023-10-02 22:26:27 +00:00
|
|
|
func (b *blobDownload) newPart(offset, size int64) error {
|
|
|
|
part := blobDownloadPart{blobDownload: b, Offset: offset, Size: size, N: len(b.Parts)}
|
|
|
|
if err := b.writePart(part.Name(), &part); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
b.Parts = append(b.Parts, &part)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2023-09-29 23:13:53 +00:00
|
|
|
func (b *blobDownload) readPart(partName string) (*blobDownloadPart, error) {
|
|
|
|
var part blobDownloadPart
|
|
|
|
partFile, err := os.Open(partName)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
defer partFile.Close()
|
|
|
|
|
|
|
|
if err := json.NewDecoder(partFile).Decode(&part); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2023-07-25 21:08:51 +00:00
|
|
|
|
2023-10-02 22:26:27 +00:00
|
|
|
part.blobDownload = b
|
2023-09-29 23:13:53 +00:00
|
|
|
return &part, nil
|
2023-09-27 23:22:30 +00:00
|
|
|
}
|
|
|
|
|
2023-09-29 23:13:53 +00:00
|
|
|
func (b *blobDownload) writePart(partName string, part *blobDownloadPart) error {
|
2023-12-15 22:25:12 +00:00
|
|
|
partFile, err := os.OpenFile(partName, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o644)
|
2023-09-27 23:22:30 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
2023-07-25 21:08:51 +00:00
|
|
|
}
|
2023-09-27 23:22:30 +00:00
|
|
|
defer partFile.Close()
|
2023-07-25 21:08:51 +00:00
|
|
|
|
2023-09-27 23:22:30 +00:00
|
|
|
return json.NewEncoder(partFile).Encode(part)
|
2023-07-25 21:08:51 +00:00
|
|
|
}
|
2023-09-29 23:13:53 +00:00
|
|
|
|
2023-10-03 23:44:35 +00:00
|
|
|
// acquire registers a new waiter on this download (see release).
func (b *blobDownload) acquire() {
	b.references.Add(1)
}
|
|
|
|
|
|
|
|
func (b *blobDownload) release() {
|
|
|
|
if b.references.Add(-1) == 0 {
|
|
|
|
b.CancelFunc()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-09-29 23:13:53 +00:00
|
|
|
func (b *blobDownload) Wait(ctx context.Context, fn func(api.ProgressResponse)) error {
|
2023-10-03 23:44:35 +00:00
|
|
|
b.acquire()
|
|
|
|
defer b.release()
|
2023-09-29 23:13:53 +00:00
|
|
|
|
|
|
|
ticker := time.NewTicker(60 * time.Millisecond)
|
|
|
|
for {
|
|
|
|
select {
|
2024-07-26 21:24:24 +00:00
|
|
|
case <-b.done:
|
|
|
|
return b.err
|
2023-09-29 23:13:53 +00:00
|
|
|
case <-ticker.C:
|
2024-01-08 19:44:59 +00:00
|
|
|
fn(api.ProgressResponse{
|
|
|
|
Status: fmt.Sprintf("pulling %s", b.Digest[7:19]),
|
|
|
|
Digest: b.Digest,
|
|
|
|
Total: b.Total,
|
|
|
|
Completed: b.Completed.Load(),
|
|
|
|
})
|
2023-09-29 23:13:53 +00:00
|
|
|
case <-ctx.Done():
|
|
|
|
return ctx.Err()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// downloadOpts bundles the inputs to downloadBlob.
type downloadOpts struct {
	// mp identifies the model whose blob is being pulled.
	mp ModelPath
	// digest is the blob's content digest.
	digest string
	// regOpts carries registry auth/transport configuration.
	regOpts *registryOptions
	// fn receives periodic progress updates.
	fn func(api.ProgressResponse)
}
|
|
|
|
|
|
|
|
// downloadBlob downloads a blob from the registry and stores it in the
// blobs directory. It returns (true, nil) without downloading when the
// blob already exists locally. Concurrent calls for the same digest
// share a single in-flight download via blobDownloadManager.
func downloadBlob(ctx context.Context, opts downloadOpts) (cacheHit bool, _ error) {
	fp, err := GetBlobsPath(opts.digest)
	if err != nil {
		return false, err
	}

	fi, err := os.Stat(fp)
	switch {
	case errors.Is(err, os.ErrNotExist):
		// not cached; fall through to download.
	case err != nil:
		return false, err
	default:
		// cache hit: report full completion immediately.
		opts.fn(api.ProgressResponse{
			Status:    fmt.Sprintf("pulling %s", opts.digest[7:19]),
			Digest:    opts.digest,
			Total:     fi.Size(),
			Completed: fi.Size(),
		})

		return true, nil
	}

	// LoadOrStore dedupes: only the first caller (ok == false) prepares
	// and starts the download; everyone else just waits on it.
	data, ok := blobDownloadManager.LoadOrStore(opts.digest, &blobDownload{Name: fp, Digest: opts.digest})
	download := data.(*blobDownload)
	if !ok {
		requestURL := opts.mp.BaseURL()
		requestURL = requestURL.JoinPath("v2", opts.mp.GetNamespaceRepository(), "blobs", opts.digest)
		if err := download.Prepare(ctx, requestURL, opts.regOpts); err != nil {
			// undo the registration so a later pull can retry.
			blobDownloadManager.Delete(opts.digest)
			return false, err
		}

		// Run uses a background context so the download outlives this
		// caller; cancellation is driven by waiter references instead.
		//nolint:contextcheck
		go download.Run(context.Background(), requestURL, opts.regOpts)
	}

	return false, download.Wait(ctx, opts.fn)
}
|