Merge pull request #626 from jmorganca/mxyng/concurrent-downloads
parallel chunked downloads
commit 38dc2f79bc
5 changed files with 285 additions and 202 deletions
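
The change replaces the old single-stream, resumable download loop with fixed-size parts (64 MiB in the diff below) fetched concurrently over HTTP Range requests, bounded by an errgroup worker limit and written into a pre-sized file at each part's offset. As a rough orientation before the diffs, here is a minimal, self-contained sketch of that pattern; the URL, chunk size, worker count, and function names are placeholders, not the project's API.

// Sketch only: bounded-concurrency download over HTTP Range requests.
// The URL, chunkSize, and workers values are placeholders.
package main

import (
    "fmt"
    "io"
    "net/http"
    "os"
    "strconv"

    "golang.org/x/sync/errgroup"
)

func download(url, dest string, chunkSize int64, workers int) error {
    // Discover the total size with a HEAD request.
    head, err := http.Head(url)
    if err != nil {
        return err
    }
    head.Body.Close()
    total, err := strconv.ParseInt(head.Header.Get("Content-Length"), 10, 64)
    if err != nil {
        return err
    }

    // Pre-size the destination so each worker can write at its own offset.
    file, err := os.OpenFile(dest, os.O_CREATE|os.O_RDWR, 0o644)
    if err != nil {
        return err
    }
    defer file.Close()
    if err := file.Truncate(total); err != nil {
        return err
    }

    g := new(errgroup.Group)
    g.SetLimit(workers) // bound concurrency, as g.SetLimit(64) does in the diff

    for offset := int64(0); offset < total; offset += chunkSize {
        offset := offset
        end := offset + chunkSize
        if end > total {
            end = total
        }
        g.Go(func() error {
            req, err := http.NewRequest(http.MethodGet, url, nil)
            if err != nil {
                return err
            }
            // Range is a closed interval, hence end-1.
            req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, end-1))
            resp, err := http.DefaultClient.Do(req)
            if err != nil {
                return err
            }
            defer resp.Body.Close()

            // io.NewOffsetWriter (Go 1.20+) writes at a fixed offset in the file.
            _, err = io.Copy(io.NewOffsetWriter(file, offset), resp.Body)
            return err
        })
    }
    return g.Wait()
}

func main() {
    // Placeholder URL; the server must honor Range requests.
    if err := download("https://example.com/blob", "blob.bin", 64<<20, 8); err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
}

Pre-sizing the file and writing each range through io.NewOffsetWriter is what lets parts finish in any order with no reassembly step afterwards.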

go.mod (1 addition)

@@ -10,6 +10,7 @@ require (
 	github.com/olekukonko/tablewriter v0.0.5
 	github.com/pdevine/readline v1.5.2
 	github.com/spf13/cobra v1.7.0
+	golang.org/x/sync v0.3.0
 )
 
 require github.com/rivo/uniseg v0.2.0 // indirect

go.sum (2 additions)

@@ -125,6 +125,8 @@ golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63/go.mod h1:0v4NqG35kSWCMzLaMe
 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
 golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
 golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
+golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
 golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=

@@ -2,38 +2,288 @@ package server
 
 import (
 	"context"
+	"encoding/json"
 	"errors"
 	"fmt"
 	"io"
 	"log"
 	"net/http"
+	"net/url"
 	"os"
 	"path/filepath"
 	"strconv"
+	"strings"
 	"sync"
+	"sync/atomic"
 	"time"
 
+	"golang.org/x/sync/errgroup"
+
 	"github.com/jmorganca/ollama/api"
 )
 
-type FileDownload struct {
-	Digest    string
-	FilePath  string
-	Total     int64
-	Completed int64
-}
+var blobDownloadManager sync.Map
+
+type blobDownload struct {
+	Name   string
+	Digest string
+
+	Total     int64
+	Completed atomic.Int64
+
+	Parts []*blobDownloadPart
+
+	context.CancelFunc
+	references atomic.Int32
+}
 
-var inProgress sync.Map // map of digests currently being downloaded to their current download progress
+type blobDownloadPart struct {
+	N         int
+	Offset    int64
+	Size      int64
+	Completed int64
+
+	*blobDownload `json:"-"`
+}
+
+func (p *blobDownloadPart) Name() string {
+	return strings.Join([]string{
+		p.blobDownload.Name, "partial", strconv.Itoa(p.N),
+	}, "-")
+}
+
+func (p *blobDownloadPart) StartsAt() int64 {
+	return p.Offset + p.Completed
+}
+
+func (p *blobDownloadPart) StopsAt() int64 {
+	return p.Offset + p.Size
+}
+
+func (b *blobDownload) Prepare(ctx context.Context, requestURL *url.URL, opts *RegistryOptions) error {
+	partFilePaths, err := filepath.Glob(b.Name + "-partial-*")
+	if err != nil {
+		return err
+	}
+
+	for _, partFilePath := range partFilePaths {
+		part, err := b.readPart(partFilePath)
+		if err != nil {
+			return err
+		}
+
+		b.Total += part.Size
+		b.Completed.Add(part.Completed)
+		b.Parts = append(b.Parts, part)
+	}
+
+	if len(b.Parts) == 0 {
+		resp, err := makeRequest(ctx, "HEAD", requestURL, nil, nil, opts)
+		if err != nil {
+			return err
+		}
+		defer resp.Body.Close()
+
+		if resp.StatusCode >= http.StatusBadRequest {
+			body, _ := io.ReadAll(resp.Body)
+			return fmt.Errorf("registry responded with code %d: %v", resp.StatusCode, string(body))
+		}
+
+		b.Total, _ = strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
+
+		var offset int64
+		var size int64 = 64 * 1024 * 1024
+
+		for offset < b.Total {
+			if offset+size > b.Total {
+				size = b.Total - offset
+			}
+
+			if err := b.newPart(offset, size); err != nil {
+				return err
+			}
+
+			offset += size
+		}
+	}
+
+	log.Printf("downloading %s in %d part(s)", b.Digest[7:19], len(b.Parts))
+	return nil
+}
+
+func (b *blobDownload) Run(ctx context.Context, requestURL *url.URL, opts *RegistryOptions) (err error) {
+	defer blobDownloadManager.Delete(b.Digest)
+
+	ctx, b.CancelFunc = context.WithCancel(ctx)
+
+	file, err := os.OpenFile(b.Name+"-partial", os.O_CREATE|os.O_RDWR, 0644)
+	if err != nil {
+		return err
+	}
+	defer file.Close()
+
+	file.Truncate(b.Total)
+
+	g, ctx := errgroup.WithContext(ctx)
+	// TODO(mxyng): download concurrency should be configurable
+	g.SetLimit(64)
+	for i := range b.Parts {
+		part := b.Parts[i]
+		if part.Completed == part.Size {
+			continue
+		}
+
+		i := i
+		g.Go(func() error {
+			for try := 0; try < maxRetries; try++ {
+				w := io.NewOffsetWriter(file, part.StartsAt())
+				err := b.downloadChunk(ctx, requestURL, w, part, opts)
+				switch {
+				case errors.Is(err, context.Canceled):
+					return err
+				case err != nil:
+					log.Printf("%s part %d attempt %d failed: %v, retrying", b.Digest[7:19], i, try, err)
+					continue
+				default:
+					return nil
+				}
+			}
+
+			return errors.New("max retries exceeded")
+		})
+	}
+
+	if err := g.Wait(); err != nil {
+		return err
+	}
+
+	// explicitly close the file so we can rename it
+	if err := file.Close(); err != nil {
+		return err
+	}
+
+	for i := range b.Parts {
+		if err := os.Remove(file.Name() + "-" + strconv.Itoa(i)); err != nil {
+			return err
+		}
+	}
+
+	return os.Rename(file.Name(), b.Name)
+}
+
+func (b *blobDownload) downloadChunk(ctx context.Context, requestURL *url.URL, w io.Writer, part *blobDownloadPart, opts *RegistryOptions) error {
+	headers := make(http.Header)
+	headers.Set("Range", fmt.Sprintf("bytes=%d-%d", part.StartsAt(), part.StopsAt()-1))
+	resp, err := makeRequest(ctx, "GET", requestURL, headers, nil, opts)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	n, err := io.Copy(w, io.TeeReader(resp.Body, b))
+	if err != nil && !errors.Is(err, context.Canceled) {
+		// rollback progress
+		b.Completed.Add(-n)
+		return err
+	}
+
+	part.Completed += n
+	if err := b.writePart(part.Name(), part); err != nil {
+		return err
+	}
+
+	// return nil or context.Canceled
+	return err
+}
+
+func (b *blobDownload) newPart(offset, size int64) error {
+	part := blobDownloadPart{blobDownload: b, Offset: offset, Size: size, N: len(b.Parts)}
+	if err := b.writePart(part.Name(), &part); err != nil {
+		return err
+	}
+
+	b.Parts = append(b.Parts, &part)
+	return nil
+}
+
+func (b *blobDownload) readPart(partName string) (*blobDownloadPart, error) {
+	var part blobDownloadPart
+	partFile, err := os.Open(partName)
+	if err != nil {
+		return nil, err
+	}
+	defer partFile.Close()
+
+	if err := json.NewDecoder(partFile).Decode(&part); err != nil {
+		return nil, err
+	}
+
+	part.blobDownload = b
+	return &part, nil
+}
+
+func (b *blobDownload) writePart(partName string, part *blobDownloadPart) error {
+	partFile, err := os.OpenFile(partName, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0644)
+	if err != nil {
+		return err
+	}
+	defer partFile.Close()
+
+	return json.NewEncoder(partFile).Encode(part)
+}
+
+func (b *blobDownload) Write(p []byte) (n int, err error) {
+	n = len(p)
+	b.Completed.Add(int64(n))
+	return n, nil
+}
+
+func (b *blobDownload) acquire() {
+	b.references.Add(1)
+}
+
+func (b *blobDownload) release() {
+	if b.references.Add(-1) == 0 {
+		b.CancelFunc()
+	}
+}
+
+func (b *blobDownload) Wait(ctx context.Context, fn func(api.ProgressResponse)) error {
+	b.acquire()
+	defer b.release()
+
+	ticker := time.NewTicker(60 * time.Millisecond)
+	for {
+		select {
+		case <-ticker.C:
+		case <-ctx.Done():
+			return ctx.Err()
+		}
+
+		fn(api.ProgressResponse{
+			Status:    fmt.Sprintf("downloading %s", b.Digest),
+			Digest:    b.Digest,
+			Total:     b.Total,
+			Completed: b.Completed.Load(),
+		})
+
+		if b.Completed.Load() >= b.Total {
+			// wait for the file to get renamed
+			if _, err := os.Stat(b.Name); err == nil {
+				return nil
+			}
+		}
+	}
+}
 
 type downloadOpts struct {
 	mp      ModelPath
 	digest  string
 	regOpts *RegistryOptions
 	fn      func(api.ProgressResponse)
-	retry   int // track the number of retries on this download
 }
 
-const maxRetry = 3
+const maxRetries = 3
 
 // downloadBlob downloads a blob from the registry and stores it in the blobs directory
 func downloadBlob(ctx context.Context, opts downloadOpts) error {
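
One detail worth calling out from the hunk above: blobDownload itself implements io.Writer, and downloadChunk copies each response body through io.TeeReader(resp.Body, b), so the shared Completed counter advances as bytes stream to disk. A minimal sketch of that idiom, with an illustrative progressCounter standing in for blobDownload:

// Sketch only: progress accounting via io.TeeReader and an io.Writer that
// just counts bytes. progressCounter is an illustrative name, not the PR's.
package main

import (
    "fmt"
    "io"
    "strings"
    "sync/atomic"
)

type progressCounter struct {
    completed atomic.Int64
}

// Write never fails; it only records how many bytes flowed through.
func (p *progressCounter) Write(b []byte) (int, error) {
    p.completed.Add(int64(len(b)))
    return len(b), nil
}

func main() {
    var p progressCounter
    src := strings.NewReader("some blob data")

    // Everything read from src is also written to p, so the counter tracks
    // progress without extra bookkeeping inside the copy loop.
    n, err := io.Copy(io.Discard, io.TeeReader(src, &p))
    fmt.Println(n, p.completed.Load(), err) // 14 14 <nil>
}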

@@ -42,9 +292,14 @@ func downloadBlob(ctx context.Context, opts downloadOpts) error {
 		return err
 	}
 
-	if fi, _ := os.Stat(fp); fi != nil {
-		// we already have the file, so return
+	fi, err := os.Stat(fp)
+	switch {
+	case errors.Is(err, os.ErrNotExist):
+	case err != nil:
+		return err
+	default:
 		opts.fn(api.ProgressResponse{
+			Status:    fmt.Sprintf("downloading %s", opts.digest),
 			Digest:    opts.digest,
 			Total:     fi.Size(),
 			Completed: fi.Size(),
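
The hunk above also stops discarding the os.Stat error: a missing file, a real stat failure, and an already-downloaded blob become three distinct cases. A small sketch of that switch idiom (the path is a placeholder):

// Sketch only: distinguishing "not downloaded yet" from a real stat failure.
package main

import (
    "errors"
    "fmt"
    "os"
)

func main() {
    fi, err := os.Stat("some-blob") // placeholder path
    switch {
    case errors.Is(err, os.ErrNotExist):
        fmt.Println("not downloaded yet")
    case err != nil:
        fmt.Println("stat failed:", err) // a genuine error, surface it
    default:
        fmt.Println("already have", fi.Size(), "bytes")
    }
}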

@@ -53,185 +308,17 @@ func downloadBlob(ctx context.Context, opts downloadOpts) error {
 		return nil
 	}
 
-	fileDownload := &FileDownload{
-		Digest:    opts.digest,
-		FilePath:  fp,
-		Total:     1, // dummy value to indicate that we don't know the total size yet
-		Completed: 0,
-	}
-
-	_, downloading := inProgress.LoadOrStore(opts.digest, fileDownload)
-	if downloading {
-		// this is another client requesting the server to download the same blob concurrently
-		return monitorDownload(ctx, opts, fileDownload)
-	}
-	if err := doDownload(ctx, opts, fileDownload); err != nil {
-		if errors.Is(err, errDownload) && opts.retry < maxRetry {
-			opts.retry++
-			log.Print(err)
-			log.Printf("retrying download of %s", opts.digest)
-			return downloadBlob(ctx, opts)
-		}
-		return err
-	}
-	return nil
-}
-
-var downloadMu sync.Mutex // mutex to check to resume a download while monitoring
-
-// monitorDownload monitors the download progress of a blob and resumes it if it is interrupted
-func monitorDownload(ctx context.Context, opts downloadOpts, f *FileDownload) error {
-	tick := time.NewTicker(time.Second)
-	for range tick.C {
-		done, resume, err := func() (bool, bool, error) {
-			downloadMu.Lock()
-			defer downloadMu.Unlock()
-			val, downloading := inProgress.Load(f.Digest)
-			if !downloading {
-				// check once again if the download is complete
-				if fi, _ := os.Stat(f.FilePath); fi != nil {
-					// successful download while monitoring
-					opts.fn(api.ProgressResponse{
-						Digest:    f.Digest,
-						Total:     fi.Size(),
-						Completed: fi.Size(),
-					})
-					return true, false, nil
-				}
-				// resume the download
-				inProgress.Store(f.Digest, f) // store the file download again to claim the resume
-				return false, true, nil
-			}
-			f, ok := val.(*FileDownload)
-			if !ok {
-				return false, false, fmt.Errorf("invalid type for in progress download: %T", val)
-			}
-			opts.fn(api.ProgressResponse{
-				Status:    fmt.Sprintf("downloading %s", f.Digest),
-				Digest:    f.Digest,
-				Total:     f.Total,
-				Completed: f.Completed,
-			})
-			return false, false, nil
-		}()
-		if err != nil {
-			return err
-		}
-		if done {
-			// done downloading
-			return nil
-		}
-		if resume {
-			return doDownload(ctx, opts, f)
-		}
-	}
-	return nil
-}
-
-var (
-	chunkSize int64 = 1024 * 1024 // 1 MiB in bytes
-
-	errDownload = fmt.Errorf("download failed")
-)
-
-// doDownload downloads a blob from the registry and stores it in the blobs directory
-func doDownload(ctx context.Context, opts downloadOpts, f *FileDownload) error {
-	defer inProgress.Delete(f.Digest)
-	var size int64
-
-	fi, err := os.Stat(f.FilePath + "-partial")
-	switch {
-	case errors.Is(err, os.ErrNotExist):
-		// noop, file doesn't exist so create it
-	case err != nil:
-		return fmt.Errorf("stat: %w", err)
-	default:
-		size = fi.Size()
-		// Ensure the size is divisible by the chunk size by removing excess bytes
-		size -= size % chunkSize
-
-		err := os.Truncate(f.FilePath+"-partial", size)
-		if err != nil {
-			return fmt.Errorf("truncate: %w", err)
-		}
-	}
-
-	requestURL := opts.mp.BaseURL()
-	requestURL = requestURL.JoinPath("v2", opts.mp.GetNamespaceRepository(), "blobs", f.Digest)
-
-	headers := make(http.Header)
-	headers.Set("Range", fmt.Sprintf("bytes=%d-", size))
-
-	resp, err := makeRequest(ctx, "GET", requestURL, headers, nil, opts.regOpts)
-	if err != nil {
-		log.Printf("couldn't download blob: %v", err)
-		return fmt.Errorf("%w: %w", errDownload, err)
-	}
-	defer resp.Body.Close()
-
-	if resp.StatusCode >= http.StatusBadRequest {
-		body, _ := io.ReadAll(resp.Body)
-		return fmt.Errorf("%w: on download registry responded with code %d: %v", errDownload, resp.StatusCode, string(body))
-	}
-
-	err = os.MkdirAll(filepath.Dir(f.FilePath), 0o700)
-	if err != nil {
-		return fmt.Errorf("make blobs directory: %w", err)
-	}
-
-	remaining, _ := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
-	f.Completed = size
-	f.Total = remaining + f.Completed
-
-	inProgress.Store(f.Digest, f)
-
-	out, err := os.OpenFile(f.FilePath+"-partial", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0o644)
-	if err != nil {
-		return fmt.Errorf("open file: %w", err)
-	}
-	defer out.Close()
-
-outerLoop:
-	for {
-		select {
-		case <-ctx.Done():
-			// handle client request cancellation
-			inProgress.Delete(f.Digest)
-			return nil
-		default:
-			opts.fn(api.ProgressResponse{
-				Status:    fmt.Sprintf("downloading %s", f.Digest),
-				Digest:    f.Digest,
-				Total:     f.Total,
-				Completed: f.Completed,
-			})
-
-			if f.Completed >= f.Total {
-				if err := out.Close(); err != nil {
-					return err
-				}
-
-				if err := os.Rename(f.FilePath+"-partial", f.FilePath); err != nil {
-					opts.fn(api.ProgressResponse{
-						Status:    fmt.Sprintf("error renaming file: %v", err),
-						Digest:    f.Digest,
-						Total:     f.Total,
-						Completed: f.Completed,
-					})
-					return err
-				}
-
-				break outerLoop
-			}
-		}
-
-		n, err := io.CopyN(out, resp.Body, chunkSize)
-		if err != nil && !errors.Is(err, io.EOF) {
-			return fmt.Errorf("%w: %w", errDownload, err)
-		}
-		f.Completed += n
-
-		inProgress.Store(f.Digest, f)
-	}
-
-	log.Printf("success getting %s\n", f.Digest)
-	return nil
-}
+	data, ok := blobDownloadManager.LoadOrStore(opts.digest, &blobDownload{Name: fp, Digest: opts.digest})
+	download := data.(*blobDownload)
+	if !ok {
+		requestURL := opts.mp.BaseURL()
+		requestURL = requestURL.JoinPath("v2", opts.mp.GetNamespaceRepository(), "blobs", opts.digest)
+		if err := download.Prepare(ctx, requestURL, opts.regOpts); err != nil {
+			return err
+		}
+
+		go download.Run(context.Background(), requestURL, opts.regOpts)
+	}
+
+	return download.Wait(ctx, opts.fn)
+}
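
With the old monitorDownload/doDownload pair gone, downloadBlob now funnels every concurrent pull of the same digest through one shared blobDownload: LoadOrStore either registers a new download or returns the one already in flight, only the first caller runs Prepare and Run, and all callers block in Wait for progress until the blob file appears. A stripped-down sketch of that sharing pattern, using illustrative names (job, fetch) rather than the project's types:

// Sketch only: sharing one in-flight download per key with sync.Map.
package main

import (
    "fmt"
    "sync"
)

type job struct {
    done chan struct{}
}

var jobs sync.Map // key -> *job

func fetch(digest string) {
    v, loaded := jobs.LoadOrStore(digest, &job{done: make(chan struct{})})
    j := v.(*job)
    if !loaded {
        // First caller: do the work in the background, then drop the entry.
        go func() {
            defer jobs.Delete(digest)
            fmt.Println("downloading", digest) // stand-in for Prepare + Run
            close(j.done)
        }()
    }
    // Every caller, first or not, waits on the same shared job.
    <-j.done
}

func main() {
    var wg sync.WaitGroup
    for i := 0; i < 3; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            fetch("sha256:0000") // placeholder digest
        }()
    }
    wg.Wait()
}

Launching Run on context.Background while each waiter holds its own request context matches the acquire/release reference counting in the diff: the download outlives any single client, and only the last client leaving cancels it.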

@@ -30,8 +30,6 @@ import (
 	"github.com/jmorganca/ollama/version"
 )
 
-const MaxRetries = 3
-
 type RegistryOptions struct {
 	Insecure bool
 	Username string

@@ -1417,7 +1415,7 @@ func checkBlobExistence(ctx context.Context, mp ModelPath, digest string, regOpt
 
 func makeRequestWithRetry(ctx context.Context, method string, requestURL *url.URL, headers http.Header, body io.ReadSeeker, regOpts *RegistryOptions) (*http.Response, error) {
 	var status string
-	for try := 0; try < MaxRetries; try++ {
+	for try := 0; try < maxRetries; try++ {
 		resp, err := makeRequest(ctx, method, requestURL, headers, body, regOpts)
 		if err != nil {
 			log.Printf("couldn't start upload: %v", err)

@@ -1487,17 +1485,7 @@ func makeRequest(ctx context.Context, method string, requestURL *url.URL, header
 		req.ContentLength = contentLength
 	}
 
-	client := &http.Client{
-		CheckRedirect: func(req *http.Request, via []*http.Request) error {
-			if len(via) >= 10 {
-				return fmt.Errorf("too many redirects")
-			}
-			log.Printf("redirected to: %s\n", req.URL)
-			return nil
-		},
-	}
-
-	resp, err := client.Do(req)
+	resp, err := http.DefaultClient.Do(req)
 	if err != nil {
 		return nil, err
 	}

@@ -10,6 +10,7 @@ import (
 	"net/url"
 	"os"
 	"strconv"
+	"sync"
 
 	"github.com/jmorganca/ollama/api"
 )

@@ -138,7 +139,7 @@ func uploadBlobChunk(ctx context.Context, method string, requestURL *url.URL, r
 		headers.Set("Content-Range", fmt.Sprintf("%d-%d", offset, offset+sectionReader.Size()-1))
 	}
 
-	for try := 0; try < MaxRetries; try++ {
+	for try := 0; try < maxRetries; try++ {
 		resp, err := makeRequest(ctx, method, requestURL, headers, io.TeeReader(sectionReader, pw), opts)
 		if err != nil && !errors.Is(err, io.EOF) {
 			return nil, err

@@ -191,9 +192,13 @@ type ProgressWriter struct {
 	completed int64
 	total     int64
 	fn        func(api.ProgressResponse)
+	mu        sync.Mutex
 }
 
 func (pw *ProgressWriter) Write(b []byte) (int, error) {
+	pw.mu.Lock()
+	defer pw.mu.Unlock()
+
 	n := len(b)
 	pw.bucket += int64(n)
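
On the upload side, ProgressWriter gains a sync.Mutex around Write, presumably because the writer can now be shared by more than one goroutine; without it, the read-modify-write of pw.bucket would be a data race. A minimal sketch of the guarded writer (names illustrative):

// Sketch only: why Write takes a lock once several goroutines share the
// same progress writer. progressWriter is an illustrative stand-in.
package main

import (
    "io"
    "strings"
    "sync"
)

type progressWriter struct {
    mu     sync.Mutex
    bucket int64
}

func (pw *progressWriter) Write(b []byte) (int, error) {
    pw.mu.Lock()
    defer pw.mu.Unlock()
    pw.bucket += int64(len(b)) // unsynchronized, this would be a data race
    return len(b), nil
}

func main() {
    pw := &progressWriter{}
    var wg sync.WaitGroup
    for i := 0; i < 4; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            // Each goroutine streams its own chunk through the shared writer.
            io.Copy(io.Discard, io.TeeReader(strings.NewReader("chunk data"), pw))
        }()
    }
    wg.Wait()
}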