From 8544edca2196500854932e537025b0f8da390661 Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Wed, 27 Sep 2023 16:22:30 -0700 Subject: [PATCH 1/8] parallel chunked downloads --- go.mod | 1 + go.sum | 2 + server/download.go | 311 ++++++++++++++++++++------------------------- server/images.go | 16 +-- server/upload.go | 7 +- 5 files changed, 152 insertions(+), 185 deletions(-) diff --git a/go.mod b/go.mod index cca5858f..a36e3502 100644 --- a/go.mod +++ b/go.mod @@ -10,6 +10,7 @@ require ( github.com/olekukonko/tablewriter v0.0.5 github.com/pdevine/readline v1.5.2 github.com/spf13/cobra v1.7.0 + golang.org/x/sync v0.3.0 ) require github.com/rivo/uniseg v0.2.0 // indirect diff --git a/go.sum b/go.sum index db15151a..34ea7fb6 100644 --- a/go.sum +++ b/go.sum @@ -125,6 +125,8 @@ golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63/go.mod h1:0v4NqG35kSWCMzLaMe golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= diff --git a/server/download.go b/server/download.go index cde9214f..2eadbce4 100644 --- a/server/download.go +++ b/server/download.go @@ -2,38 +2,35 @@ package server import ( "context" + "encoding/json" "errors" "fmt" "io" "log" "net/http" + "net/url" "os" "path/filepath" "strconv" - "sync" - "time" "github.com/jmorganca/ollama/api" + "golang.org/x/sync/errgroup" ) -type FileDownload struct { - Digest string - FilePath string - Total int64 +type BlobDownloadPart struct { + Offset int64 + Size int64 Completed int64 } -var inProgress sync.Map // map of digests currently being downloaded to their current download progress - type downloadOpts struct { mp ModelPath digest string regOpts *RegistryOptions fn func(api.ProgressResponse) - retry int // track the number of retries on this download } -const maxRetry = 3 +const maxRetries = 3 // downloadBlob downloads a blob from the registry and stores it in the blobs directory func downloadBlob(ctx context.Context, opts downloadOpts) error { @@ -42,9 +39,14 @@ func downloadBlob(ctx context.Context, opts downloadOpts) error { return err } - if fi, _ := os.Stat(fp); fi != nil { - // we already have the file, so return + fi, err := os.Stat(fp) + switch { + case errors.Is(err, os.ErrNotExist): + case err != nil: + return err + default: opts.fn(api.ProgressResponse{ + Status: fmt.Sprintf("downloading %s", opts.digest), Digest: opts.digest, Total: fi.Size(), Completed: fi.Size(), @@ -53,185 +55,154 @@ func downloadBlob(ctx context.Context, opts downloadOpts) error { return nil } - fileDownload := &FileDownload{ - Digest: opts.digest, - FilePath: fp, - Total: 1, // dummy value to indicate that we don't know the total size yet - Completed: 0, - } - - _, downloading := inProgress.LoadOrStore(opts.digest, fileDownload) - if downloading { - // this is another client requesting the server to download the same blob concurrently - return monitorDownload(ctx, opts, fileDownload) - } - if 
err := doDownload(ctx, opts, fileDownload); err != nil { - if errors.Is(err, errDownload) && opts.retry < maxRetry { - opts.retry++ - log.Print(err) - log.Printf("retrying download of %s", opts.digest) - return downloadBlob(ctx, opts) - } + f, err := os.OpenFile(fp+"-partial", os.O_CREATE|os.O_RDWR, 0644) + if err != nil { return err } - return nil -} + defer f.Close() -var downloadMu sync.Mutex // mutex to check to resume a download while monitoring + partFilePaths, err := filepath.Glob(fp + "-partial-*") + if err != nil { + return err + } -// monitorDownload monitors the download progress of a blob and resumes it if it is interrupted -func monitorDownload(ctx context.Context, opts downloadOpts, f *FileDownload) error { - tick := time.NewTicker(time.Second) - for range tick.C { - done, resume, err := func() (bool, bool, error) { - downloadMu.Lock() - defer downloadMu.Unlock() - val, downloading := inProgress.Load(f.Digest) - if !downloading { - // check once again if the download is complete - if fi, _ := os.Stat(f.FilePath); fi != nil { - // successful download while monitoring - opts.fn(api.ProgressResponse{ - Digest: f.Digest, - Total: fi.Size(), - Completed: fi.Size(), - }) - return true, false, nil - } - // resume the download - inProgress.Store(f.Digest, f) // store the file download again to claim the resume - return false, true, nil - } - f, ok := val.(*FileDownload) - if !ok { - return false, false, fmt.Errorf("invalid type for in progress download: %T", val) - } - opts.fn(api.ProgressResponse{ - Status: fmt.Sprintf("downloading %s", f.Digest), - Digest: f.Digest, - Total: f.Total, - Completed: f.Completed, - }) - return false, false, nil - }() + var total, completed int64 + var parts []BlobDownloadPart + for _, partFilePath := range partFilePaths { + var part BlobDownloadPart + partFile, err := os.Open(partFilePath) if err != nil { return err } - if done { - // done downloading - return nil + defer partFile.Close() + + if err := json.NewDecoder(partFile).Decode(&part); err != nil { + return err } - if resume { - return doDownload(ctx, opts, f) - } - } - return nil -} -var ( - chunkSize int64 = 1024 * 1024 // 1 MiB in bytes - errDownload = fmt.Errorf("download failed") -) + total += part.Size + completed += part.Completed -// doDownload downloads a blob from the registry and stores it in the blobs directory -func doDownload(ctx context.Context, opts downloadOpts, f *FileDownload) error { - defer inProgress.Delete(f.Digest) - var size int64 - - fi, err := os.Stat(f.FilePath + "-partial") - switch { - case errors.Is(err, os.ErrNotExist): - // noop, file doesn't exist so create it - case err != nil: - return fmt.Errorf("stat: %w", err) - default: - size = fi.Size() - // Ensure the size is divisible by the chunk size by removing excess bytes - size -= size % chunkSize - - err := os.Truncate(f.FilePath+"-partial", size) - if err != nil { - return fmt.Errorf("truncate: %w", err) - } + parts = append(parts, part) } requestURL := opts.mp.BaseURL() - requestURL = requestURL.JoinPath("v2", opts.mp.GetNamespaceRepository(), "blobs", f.Digest) + requestURL = requestURL.JoinPath("v2", opts.mp.GetNamespaceRepository(), "blobs", opts.digest) + + if len(parts) == 0 { + resp, err := makeRequest(ctx, "HEAD", requestURL, nil, nil, opts.regOpts) + if err != nil { + return err + } + defer resp.Body.Close() + + total, _ = strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) + + // reserve the file + f.Truncate(total) + + var offset int64 + var size int64 = 64 * 1024 * 1024 + + for offset < 
total { + if offset+size > total { + size = total - offset + } + + parts = append(parts, BlobDownloadPart{ + Offset: offset, + Size: size, + }) + + offset += size + } + } + + pw := &ProgressWriter{ + status: fmt.Sprintf("downloading %s", opts.digest), + digest: opts.digest, + total: total, + completed: completed, + fn: opts.fn, + } + + g, ctx := errgroup.WithContext(ctx) + g.SetLimit(64) + for i := range parts { + part := parts[i] + if part.Completed == part.Size { + continue + } + + i := i + g.Go(func() error { + for try := 0; try < maxRetries; try++ { + if err := downloadBlobChunk(ctx, f, requestURL, parts, i, pw, opts); err != nil { + log.Printf("%s part %d attempt %d failed: %v, retrying", opts.digest[7:19], i, try, err) + continue + } + + return nil + } + + return errors.New("max retries exceeded") + }) + } + + if err := g.Wait(); err != nil { + return err + } + + if err := f.Close(); err != nil { + return err + } + + for i := range parts { + if err := os.Remove(f.Name() + "-" + strconv.Itoa(i)); err != nil { + return err + } + } + + return os.Rename(f.Name(), fp) +} + +func downloadBlobChunk(ctx context.Context, f *os.File, requestURL *url.URL, parts []BlobDownloadPart, i int, pw *ProgressWriter, opts downloadOpts) error { + part := &parts[i] + + partName := f.Name() + "-" + strconv.Itoa(i) + if err := flushPart(partName, part); err != nil { + return err + } + + offset := part.Offset + part.Completed + w := io.NewOffsetWriter(f, offset) headers := make(http.Header) - headers.Set("Range", fmt.Sprintf("bytes=%d-", size)) - + headers.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, part.Offset+part.Size-1)) resp, err := makeRequest(ctx, "GET", requestURL, headers, nil, opts.regOpts) if err != nil { - log.Printf("couldn't download blob: %v", err) - return fmt.Errorf("%w: %w", errDownload, err) + return err } defer resp.Body.Close() - if resp.StatusCode >= http.StatusBadRequest { - body, _ := io.ReadAll(resp.Body) - return fmt.Errorf("%w: on download registry responded with code %d: %v", errDownload, resp.StatusCode, string(body)) + n, err := io.Copy(w, io.TeeReader(resp.Body, pw)) + if err != nil && !errors.Is(err, io.EOF) { + // rollback progress bar + pw.completed -= n + return err } - err = os.MkdirAll(filepath.Dir(f.FilePath), 0o700) - if err != nil { - return fmt.Errorf("make blobs directory: %w", err) - } + part.Completed += n - remaining, _ := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) - f.Completed = size - f.Total = remaining + f.Completed - - inProgress.Store(f.Digest, f) - - out, err := os.OpenFile(f.FilePath+"-partial", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0o644) - if err != nil { - return fmt.Errorf("open file: %w", err) - } - defer out.Close() -outerLoop: - for { - select { - case <-ctx.Done(): - // handle client request cancellation - inProgress.Delete(f.Digest) - return nil - default: - opts.fn(api.ProgressResponse{ - Status: fmt.Sprintf("downloading %s", f.Digest), - Digest: f.Digest, - Total: f.Total, - Completed: f.Completed, - }) - - if f.Completed >= f.Total { - if err := out.Close(); err != nil { - return err - } - - if err := os.Rename(f.FilePath+"-partial", f.FilePath); err != nil { - opts.fn(api.ProgressResponse{ - Status: fmt.Sprintf("error renaming file: %v", err), - Digest: f.Digest, - Total: f.Total, - Completed: f.Completed, - }) - return err - } - - break outerLoop - } - } - - n, err := io.CopyN(out, resp.Body, chunkSize) - if err != nil && !errors.Is(err, io.EOF) { - return fmt.Errorf("%w: %w", errDownload, err) - } - f.Completed += n - - 
inProgress.Store(f.Digest, f) - } - - log.Printf("success getting %s\n", f.Digest) - return nil + return flushPart(partName, part) +} + +func flushPart(name string, part *BlobDownloadPart) error { + partFile, err := os.OpenFile(name, os.O_CREATE|os.O_RDWR, 0644) + if err != nil { + return err + } + defer partFile.Close() + + return json.NewEncoder(partFile).Encode(part) } diff --git a/server/images.go b/server/images.go index 3cc64eaf..f98c4412 100644 --- a/server/images.go +++ b/server/images.go @@ -30,8 +30,6 @@ import ( "github.com/jmorganca/ollama/version" ) -const MaxRetries = 3 - type RegistryOptions struct { Insecure bool Username string @@ -1417,7 +1415,7 @@ func checkBlobExistence(ctx context.Context, mp ModelPath, digest string, regOpt func makeRequestWithRetry(ctx context.Context, method string, requestURL *url.URL, headers http.Header, body io.ReadSeeker, regOpts *RegistryOptions) (*http.Response, error) { var status string - for try := 0; try < MaxRetries; try++ { + for try := 0; try < maxRetries; try++ { resp, err := makeRequest(ctx, method, requestURL, headers, body, regOpts) if err != nil { log.Printf("couldn't start upload: %v", err) @@ -1487,17 +1485,7 @@ func makeRequest(ctx context.Context, method string, requestURL *url.URL, header req.ContentLength = contentLength } - client := &http.Client{ - CheckRedirect: func(req *http.Request, via []*http.Request) error { - if len(via) >= 10 { - return fmt.Errorf("too many redirects") - } - log.Printf("redirected to: %s\n", req.URL) - return nil - }, - } - - resp, err := client.Do(req) + resp, err := http.DefaultClient.Do(req) if err != nil { return nil, err } diff --git a/server/upload.go b/server/upload.go index f5e100e0..8f655337 100644 --- a/server/upload.go +++ b/server/upload.go @@ -10,6 +10,7 @@ import ( "net/url" "os" "strconv" + "sync" "github.com/jmorganca/ollama/api" ) @@ -138,7 +139,7 @@ func uploadBlobChunk(ctx context.Context, method string, requestURL *url.URL, r headers.Set("Content-Range", fmt.Sprintf("%d-%d", offset, offset+sectionReader.Size()-1)) } - for try := 0; try < MaxRetries; try++ { + for try := 0; try < maxRetries; try++ { resp, err := makeRequest(ctx, method, requestURL, headers, io.TeeReader(sectionReader, pw), opts) if err != nil && !errors.Is(err, io.EOF) { return nil, err @@ -191,9 +192,13 @@ type ProgressWriter struct { completed int64 total int64 fn func(api.ProgressResponse) + mu sync.Mutex } func (pw *ProgressWriter) Write(b []byte) (int, error) { + pw.mu.Lock() + defer pw.mu.Unlock() + n := len(b) pw.bucket += int64(n) From 5b84404c64f0513e003b1a62463b437b672de5cd Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Fri, 29 Sep 2023 16:13:53 -0700 Subject: [PATCH 2/8] handle concurrent requests for the same blobs --- server/download.go | 378 +++++++++++++++++++++++++++------------------ 1 file changed, 231 insertions(+), 147 deletions(-) diff --git a/server/download.go b/server/download.go index 2eadbce4..6023de31 100644 --- a/server/download.go +++ b/server/download.go @@ -12,17 +12,238 @@ import ( "os" "path/filepath" "strconv" + "sync" + "sync/atomic" + "time" + + "golang.org/x/sync/errgroup" "github.com/jmorganca/ollama/api" - "golang.org/x/sync/errgroup" ) -type BlobDownloadPart struct { +var blobDownloadManager sync.Map + +type blobDownload struct { + Name string + Digest string + + Total int64 + Completed atomic.Int64 + + *os.File + Parts []*blobDownloadPart + + done chan struct{} + context.CancelFunc + refCount atomic.Int32 +} + +type blobDownloadPart struct { Offset int64 Size int64 
Completed int64 } +func (b *blobDownload) Prepare(ctx context.Context, requestURL *url.URL, opts *RegistryOptions) error { + b.done = make(chan struct{}, 1) + + partFilePaths, err := filepath.Glob(b.Name + "-partial-*") + if err != nil { + return err + } + + for _, partFilePath := range partFilePaths { + part, err := b.readPart(partFilePath) + if err != nil { + return err + } + + b.Total += part.Size + b.Completed.Add(part.Completed) + b.Parts = append(b.Parts, part) + } + + if len(b.Parts) == 0 { + resp, err := makeRequest(ctx, "HEAD", requestURL, nil, nil, opts) + if err != nil { + return err + } + defer resp.Body.Close() + + b.Total, _ = strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) + + var offset int64 + var size int64 = 64 * 1024 * 1024 + + for offset < b.Total { + if offset+size > b.Total { + size = b.Total - offset + } + + partName := b.Name + "-partial-" + strconv.Itoa(len(b.Parts)) + part := blobDownloadPart{Offset: offset, Size: size} + if err := b.writePart(partName, &part); err != nil { + return err + } + + b.Parts = append(b.Parts, &part) + + offset += size + } + } + + log.Printf("downloading %s in %d part(s)", b.Digest[7:19], len(b.Parts)) + return nil +} + +func (b *blobDownload) Run(ctx context.Context, requestURL *url.URL, opts *RegistryOptions) (err error) { + defer blobDownloadManager.Delete(b.Digest) + + ctx, b.CancelFunc = context.WithCancel(ctx) + + b.File, err = os.OpenFile(b.Name+"-partial", os.O_CREATE|os.O_RDWR, 0644) + if err != nil { + return err + } + defer b.Close() + + b.Truncate(b.Total) + + g, ctx := errgroup.WithContext(ctx) + g.SetLimit(64) + for i := range b.Parts { + part := b.Parts[i] + if part.Completed == part.Size { + continue + } + + i := i + g.Go(func() error { + for try := 0; try < maxRetries; try++ { + err := b.downloadChunk(ctx, requestURL, i, opts) + switch { + case errors.Is(err, context.Canceled): + return err + case err != nil: + log.Printf("%s part %d attempt %d failed: %v, retrying", b.Digest[7:19], i, try, err) + continue + default: + return nil + } + } + + return errors.New("max retries exceeded") + }) + } + + if err := g.Wait(); err != nil { + return err + } + + if err := b.Close(); err != nil { + return err + } + + for i := range b.Parts { + if err := os.Remove(b.File.Name() + "-" + strconv.Itoa(i)); err != nil { + return err + } + } + + if err := os.Rename(b.File.Name(), b.Name); err != nil { + return err + } + + close(b.done) + return nil +} + +func (b *blobDownload) downloadChunk(ctx context.Context, requestURL *url.URL, i int, opts *RegistryOptions) error { + part := b.Parts[i] + + partName := b.File.Name() + "-" + strconv.Itoa(i) + offset := part.Offset + part.Completed + w := io.NewOffsetWriter(b.File, offset) + + headers := make(http.Header) + headers.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, part.Offset+part.Size-1)) + resp, err := makeRequest(ctx, "GET", requestURL, headers, nil, opts) + if err != nil { + return err + } + defer resp.Body.Close() + + n, err := io.Copy(w, io.TeeReader(resp.Body, b)) + if err != nil && !errors.Is(err, io.EOF) { + // rollback progress + b.Completed.Add(-n) + return err + } + + part.Completed += n + return b.writePart(partName, part) +} + +func (b *blobDownload) readPart(partName string) (*blobDownloadPart, error) { + var part blobDownloadPart + partFile, err := os.Open(partName) + if err != nil { + return nil, err + } + defer partFile.Close() + + if err := json.NewDecoder(partFile).Decode(&part); err != nil { + return nil, err + } + + return &part, nil +} + +func (b 
*blobDownload) writePart(partName string, part *blobDownloadPart) error { + partFile, err := os.OpenFile(partName, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0644) + if err != nil { + return err + } + defer partFile.Close() + + return json.NewEncoder(partFile).Encode(part) +} + +func (b *blobDownload) Write(p []byte) (n int, err error) { + n = len(p) + b.Completed.Add(int64(n)) + return n, nil +} + +func (b *blobDownload) Wait(ctx context.Context, fn func(api.ProgressResponse)) error { + b.refCount.Add(1) + + ticker := time.NewTicker(60 * time.Millisecond) + for { + select { + case <-ticker.C: + case <-ctx.Done(): + if b.refCount.Add(-1) == 0 { + b.CancelFunc() + } + + return ctx.Err() + } + + fn(api.ProgressResponse{ + Status: fmt.Sprintf("downloading %s", b.Digest), + Digest: b.Digest, + Total: b.Total, + Completed: b.Completed.Load(), + }) + + if b.Completed.Load() >= b.Total { + <-b.done + return nil + } + } +} + type downloadOpts struct { mp ModelPath digest string @@ -55,154 +276,17 @@ func downloadBlob(ctx context.Context, opts downloadOpts) error { return nil } - f, err := os.OpenFile(fp+"-partial", os.O_CREATE|os.O_RDWR, 0644) - if err != nil { - return err - } - defer f.Close() - - partFilePaths, err := filepath.Glob(fp + "-partial-*") - if err != nil { - return err - } - - var total, completed int64 - var parts []BlobDownloadPart - for _, partFilePath := range partFilePaths { - var part BlobDownloadPart - partFile, err := os.Open(partFilePath) - if err != nil { - return err - } - defer partFile.Close() - - if err := json.NewDecoder(partFile).Decode(&part); err != nil { + value, ok := blobDownloadManager.LoadOrStore(opts.digest, &blobDownload{Name: fp, Digest: opts.digest}) + blobDownload := value.(*blobDownload) + if !ok { + requestURL := opts.mp.BaseURL() + requestURL = requestURL.JoinPath("v2", opts.mp.GetNamespaceRepository(), "blobs", opts.digest) + if err := blobDownload.Prepare(ctx, requestURL, opts.regOpts); err != nil { return err } - total += part.Size - completed += part.Completed - - parts = append(parts, part) + go blobDownload.Run(context.Background(), requestURL, opts.regOpts) } - requestURL := opts.mp.BaseURL() - requestURL = requestURL.JoinPath("v2", opts.mp.GetNamespaceRepository(), "blobs", opts.digest) - - if len(parts) == 0 { - resp, err := makeRequest(ctx, "HEAD", requestURL, nil, nil, opts.regOpts) - if err != nil { - return err - } - defer resp.Body.Close() - - total, _ = strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) - - // reserve the file - f.Truncate(total) - - var offset int64 - var size int64 = 64 * 1024 * 1024 - - for offset < total { - if offset+size > total { - size = total - offset - } - - parts = append(parts, BlobDownloadPart{ - Offset: offset, - Size: size, - }) - - offset += size - } - } - - pw := &ProgressWriter{ - status: fmt.Sprintf("downloading %s", opts.digest), - digest: opts.digest, - total: total, - completed: completed, - fn: opts.fn, - } - - g, ctx := errgroup.WithContext(ctx) - g.SetLimit(64) - for i := range parts { - part := parts[i] - if part.Completed == part.Size { - continue - } - - i := i - g.Go(func() error { - for try := 0; try < maxRetries; try++ { - if err := downloadBlobChunk(ctx, f, requestURL, parts, i, pw, opts); err != nil { - log.Printf("%s part %d attempt %d failed: %v, retrying", opts.digest[7:19], i, try, err) - continue - } - - return nil - } - - return errors.New("max retries exceeded") - }) - } - - if err := g.Wait(); err != nil { - return err - } - - if err := f.Close(); err != nil { - return err - } - - 
for i := range parts { - if err := os.Remove(f.Name() + "-" + strconv.Itoa(i)); err != nil { - return err - } - } - - return os.Rename(f.Name(), fp) -} - -func downloadBlobChunk(ctx context.Context, f *os.File, requestURL *url.URL, parts []BlobDownloadPart, i int, pw *ProgressWriter, opts downloadOpts) error { - part := &parts[i] - - partName := f.Name() + "-" + strconv.Itoa(i) - if err := flushPart(partName, part); err != nil { - return err - } - - offset := part.Offset + part.Completed - w := io.NewOffsetWriter(f, offset) - - headers := make(http.Header) - headers.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, part.Offset+part.Size-1)) - resp, err := makeRequest(ctx, "GET", requestURL, headers, nil, opts.regOpts) - if err != nil { - return err - } - defer resp.Body.Close() - - n, err := io.Copy(w, io.TeeReader(resp.Body, pw)) - if err != nil && !errors.Is(err, io.EOF) { - // rollback progress bar - pw.completed -= n - return err - } - - part.Completed += n - - return flushPart(partName, part) -} - -func flushPart(name string, part *BlobDownloadPart) error { - partFile, err := os.OpenFile(name, os.O_CREATE|os.O_RDWR, 0644) - if err != nil { - return err - } - defer partFile.Close() - - return json.NewEncoder(partFile).Encode(part) + return blobDownload.Wait(ctx, opts.fn) } From 090d08422b361bcbef82a04d0e6e160caaad8f89 Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Mon, 2 Oct 2023 13:34:07 -0700 Subject: [PATCH 3/8] handle unexpected eofs --- server/download.go | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/server/download.go b/server/download.go index 6023de31..f3a5b378 100644 --- a/server/download.go +++ b/server/download.go @@ -45,8 +45,6 @@ type blobDownloadPart struct { } func (b *blobDownload) Prepare(ctx context.Context, requestURL *url.URL, opts *RegistryOptions) error { - b.done = make(chan struct{}, 1) - partFilePaths, err := filepath.Glob(b.Name + "-partial-*") if err != nil { return err @@ -109,6 +107,9 @@ func (b *blobDownload) Run(ctx context.Context, requestURL *url.URL, opts *Regis b.Truncate(b.Total) + b.done = make(chan struct{}, 1) + defer close(b.done) + g, ctx := errgroup.WithContext(ctx) g.SetLimit(64) for i := range b.Parts { @@ -154,7 +155,6 @@ func (b *blobDownload) Run(ctx context.Context, requestURL *url.URL, opts *Regis return err } - close(b.done) return nil } @@ -174,14 +174,19 @@ func (b *blobDownload) downloadChunk(ctx context.Context, requestURL *url.URL, i defer resp.Body.Close() n, err := io.Copy(w, io.TeeReader(resp.Body, b)) - if err != nil && !errors.Is(err, io.EOF) { + if err != nil && !errors.Is(err, context.Canceled) { // rollback progress b.Completed.Add(-n) return err } part.Completed += n - return b.writePart(partName, part) + if err := b.writePart(partName, part); err != nil { + return err + } + + // return nil or context.Canceled + return err } func (b *blobDownload) readPart(partName string) (*blobDownloadPart, error) { @@ -221,6 +226,10 @@ func (b *blobDownload) Wait(ctx context.Context, fn func(api.ProgressResponse)) ticker := time.NewTicker(60 * time.Millisecond) for { select { + case <-b.done: + if b.Completed.Load() != b.Total { + return io.ErrUnexpectedEOF + } case <-ticker.C: case <-ctx.Done(): if b.refCount.Add(-1) == 0 { From 711e891f0f6c2f36b5178c1a65177859eb0855b8 Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Mon, 2 Oct 2023 15:26:27 -0700 Subject: [PATCH 4/8] fix resumable downloads glob returns files in lexical order which is not appropriate when rebuilding the parts list --- 
server/download.go | 30 +++++++++++++++++++++++------- 1 file changed, 23 insertions(+), 7 deletions(-) diff --git a/server/download.go b/server/download.go index f3a5b378..7acc09d5 100644 --- a/server/download.go +++ b/server/download.go @@ -12,6 +12,7 @@ import ( "os" "path/filepath" "strconv" + "strings" "sync" "sync/atomic" "time" @@ -39,9 +40,18 @@ type blobDownload struct { } type blobDownloadPart struct { + N int Offset int64 Size int64 Completed int64 + + *blobDownload `json:"-"` +} + +func (p *blobDownloadPart) Name() string { + return strings.Join([]string{ + p.blobDownload.Name, "partial", strconv.Itoa(p.N), + }, "-") } func (b *blobDownload) Prepare(ctx context.Context, requestURL *url.URL, opts *RegistryOptions) error { @@ -78,14 +88,10 @@ func (b *blobDownload) Prepare(ctx context.Context, requestURL *url.URL, opts *R size = b.Total - offset } - partName := b.Name + "-partial-" + strconv.Itoa(len(b.Parts)) - part := blobDownloadPart{Offset: offset, Size: size} - if err := b.writePart(partName, &part); err != nil { + if err := b.newPart(offset, size); err != nil { return err } - b.Parts = append(b.Parts, &part) - offset += size } } @@ -161,7 +167,6 @@ func (b *blobDownload) Run(ctx context.Context, requestURL *url.URL, opts *Regis func (b *blobDownload) downloadChunk(ctx context.Context, requestURL *url.URL, i int, opts *RegistryOptions) error { part := b.Parts[i] - partName := b.File.Name() + "-" + strconv.Itoa(i) offset := part.Offset + part.Completed w := io.NewOffsetWriter(b.File, offset) @@ -181,7 +186,7 @@ func (b *blobDownload) downloadChunk(ctx context.Context, requestURL *url.URL, i } part.Completed += n - if err := b.writePart(partName, part); err != nil { + if err := b.writePart(part.Name(), part); err != nil { return err } @@ -189,6 +194,16 @@ func (b *blobDownload) downloadChunk(ctx context.Context, requestURL *url.URL, i return err } +func (b *blobDownload) newPart(offset, size int64) error { + part := blobDownloadPart{blobDownload: b, Offset: offset, Size: size, N: len(b.Parts)} + if err := b.writePart(part.Name(), &part); err != nil { + return err + } + + b.Parts = append(b.Parts, &part) + return nil +} + func (b *blobDownload) readPart(partName string) (*blobDownloadPart, error) { var part blobDownloadPart partFile, err := os.Open(partName) @@ -201,6 +216,7 @@ func (b *blobDownload) readPart(partName string) (*blobDownloadPart, error) { return nil, err } + part.blobDownload = b return &part, nil } From 04733438daa9ce274b3dd1d1cad66b51b43ef6e1 Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Tue, 3 Oct 2023 16:12:53 -0700 Subject: [PATCH 5/8] check head request response --- server/download.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/server/download.go b/server/download.go index 7acc09d5..f5ebd8f8 100644 --- a/server/download.go +++ b/server/download.go @@ -78,6 +78,11 @@ func (b *blobDownload) Prepare(ctx context.Context, requestURL *url.URL, opts *R } defer resp.Body.Close() + if resp.StatusCode >= http.StatusBadRequest { + body, _ := io.ReadAll(resp.Body) + return fmt.Errorf("registry responded with code %d: %v", resp.StatusCode, string(body)) + } + b.Total, _ = strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) var offset int64 From 288814d3e4b60a73cdfff80df9df2f572952aca3 Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Tue, 3 Oct 2023 16:44:35 -0700 Subject: [PATCH 6/8] fix ref counts --- server/download.go | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/server/download.go b/server/download.go 
index f5ebd8f8..e550a89a 100644 --- a/server/download.go +++ b/server/download.go @@ -36,7 +36,7 @@ type blobDownload struct { done chan struct{} context.CancelFunc - refCount atomic.Int32 + references atomic.Int32 } type blobDownloadPart struct { @@ -241,8 +241,19 @@ func (b *blobDownload) Write(p []byte) (n int, err error) { return n, nil } +func (b *blobDownload) acquire() { + b.references.Add(1) +} + +func (b *blobDownload) release() { + if b.references.Add(-1) == 0 { + b.CancelFunc() + } +} + func (b *blobDownload) Wait(ctx context.Context, fn func(api.ProgressResponse)) error { - b.refCount.Add(1) + b.acquire() + defer b.release() ticker := time.NewTicker(60 * time.Millisecond) for { @@ -253,10 +264,6 @@ func (b *blobDownload) Wait(ctx context.Context, fn func(api.ProgressResponse)) } case <-ticker.C: case <-ctx.Done(): - if b.refCount.Add(-1) == 0 { - b.CancelFunc() - } - return ctx.Err() } From 10199c59879062d10056c285e1e1994286c929f8 Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Tue, 3 Oct 2023 16:52:49 -0700 Subject: [PATCH 7/8] replace done channel with file check --- server/download.go | 53 +++++++++++++++++++++------------------------- 1 file changed, 24 insertions(+), 29 deletions(-) diff --git a/server/download.go b/server/download.go index e550a89a..973e9eef 100644 --- a/server/download.go +++ b/server/download.go @@ -31,10 +31,8 @@ type blobDownload struct { Total int64 Completed atomic.Int64 - *os.File Parts []*blobDownloadPart - done chan struct{} context.CancelFunc references atomic.Int32 } @@ -54,6 +52,14 @@ func (p *blobDownloadPart) Name() string { }, "-") } +func (p *blobDownloadPart) StartsAt() int64 { + return p.Offset + p.Completed +} + +func (p *blobDownloadPart) StopsAt() int64 { + return p.Offset + p.Size +} + func (b *blobDownload) Prepare(ctx context.Context, requestURL *url.URL, opts *RegistryOptions) error { partFilePaths, err := filepath.Glob(b.Name + "-partial-*") if err != nil { @@ -110,18 +116,16 @@ func (b *blobDownload) Run(ctx context.Context, requestURL *url.URL, opts *Regis ctx, b.CancelFunc = context.WithCancel(ctx) - b.File, err = os.OpenFile(b.Name+"-partial", os.O_CREATE|os.O_RDWR, 0644) + file, err := os.OpenFile(b.Name+"-partial", os.O_CREATE|os.O_RDWR, 0644) if err != nil { return err } - defer b.Close() + defer file.Close() - b.Truncate(b.Total) - - b.done = make(chan struct{}, 1) - defer close(b.done) + file.Truncate(b.Total) g, ctx := errgroup.WithContext(ctx) + // TODO(mxyng): download concurrency should be configurable g.SetLimit(64) for i := range b.Parts { part := b.Parts[i] @@ -132,7 +136,8 @@ func (b *blobDownload) Run(ctx context.Context, requestURL *url.URL, opts *Regis i := i g.Go(func() error { for try := 0; try < maxRetries; try++ { - err := b.downloadChunk(ctx, requestURL, i, opts) + w := io.NewOffsetWriter(file, part.StartsAt()) + err := b.downloadChunk(ctx, requestURL, w, part, opts) switch { case errors.Is(err, context.Canceled): return err @@ -152,31 +157,23 @@ func (b *blobDownload) Run(ctx context.Context, requestURL *url.URL, opts *Regis return err } - if err := b.Close(); err != nil { + // explicitly close the file so we can rename it + if err := file.Close(); err != nil { return err } for i := range b.Parts { - if err := os.Remove(b.File.Name() + "-" + strconv.Itoa(i)); err != nil { + if err := os.Remove(file.Name() + "-" + strconv.Itoa(i)); err != nil { return err } } - if err := os.Rename(b.File.Name(), b.Name); err != nil { - return err - } - - return nil + return os.Rename(file.Name(), b.Name) } -func (b 
*blobDownload) downloadChunk(ctx context.Context, requestURL *url.URL, i int, opts *RegistryOptions) error { - part := b.Parts[i] - - offset := part.Offset + part.Completed - w := io.NewOffsetWriter(b.File, offset) - +func (b *blobDownload) downloadChunk(ctx context.Context, requestURL *url.URL, w io.Writer, part *blobDownloadPart, opts *RegistryOptions) error { headers := make(http.Header) - headers.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, part.Offset+part.Size-1)) + headers.Set("Range", fmt.Sprintf("bytes=%d-%d", part.StartsAt(), part.StopsAt()-1)) resp, err := makeRequest(ctx, "GET", requestURL, headers, nil, opts) if err != nil { return err @@ -258,10 +255,6 @@ func (b *blobDownload) Wait(ctx context.Context, fn func(api.ProgressResponse)) ticker := time.NewTicker(60 * time.Millisecond) for { select { - case <-b.done: - if b.Completed.Load() != b.Total { - return io.ErrUnexpectedEOF - } case <-ticker.C: case <-ctx.Done(): return ctx.Err() @@ -275,8 +268,10 @@ func (b *blobDownload) Wait(ctx context.Context, fn func(api.ProgressResponse)) }) if b.Completed.Load() >= b.Total { - <-b.done - return nil + // wait for the file to get renamed + if _, err := os.Stat(b.Name); err == nil { + return nil + } } } } From 0560b28a8d3a366f94e3996f59cc0774cb7d0e2a Mon Sep 17 00:00:00 2001 From: Michael Yang Date: Tue, 3 Oct 2023 17:06:13 -0700 Subject: [PATCH 8/8] names --- server/download.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/server/download.go b/server/download.go index 973e9eef..bb791130 100644 --- a/server/download.go +++ b/server/download.go @@ -308,17 +308,17 @@ func downloadBlob(ctx context.Context, opts downloadOpts) error { return nil } - value, ok := blobDownloadManager.LoadOrStore(opts.digest, &blobDownload{Name: fp, Digest: opts.digest}) - blobDownload := value.(*blobDownload) + data, ok := blobDownloadManager.LoadOrStore(opts.digest, &blobDownload{Name: fp, Digest: opts.digest}) + download := data.(*blobDownload) if !ok { requestURL := opts.mp.BaseURL() requestURL = requestURL.JoinPath("v2", opts.mp.GetNamespaceRepository(), "blobs", opts.digest) - if err := blobDownload.Prepare(ctx, requestURL, opts.regOpts); err != nil { + if err := download.Prepare(ctx, requestURL, opts.regOpts); err != nil { return err } - go blobDownload.Run(context.Background(), requestURL, opts.regOpts) + go download.Run(context.Background(), requestURL, opts.regOpts) } - return blobDownload.Wait(ctx, opts.fn) + return download.Wait(ctx, opts.fn) }
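A note on the ordering pitfall behind patch 4 ("fix resumable downloads"): filepath.Glob returns its matches in lexical order, so numeric part suffixes do not come back in numeric order. The standalone sketch below (not part of the patches; the file names are hypothetical) shows why rebuilding the parts slice from glob order and then addressing parts by slice index could resume a chunk at the wrong offset, and why each part file records its own index N after the fix.

package main

import (
	"fmt"
	"sort"
)

func main() {
	// Hypothetical part-file names for a single blob.
	names := []string{
		"sha256-abc-partial-0",
		"sha256-abc-partial-2",
		"sha256-abc-partial-10",
	}

	// filepath.Glob returns matches in lexical order; sort.Strings
	// reproduces that ordering here without touching the filesystem.
	sort.Strings(names)

	fmt.Println(names)
	// Prints: [sha256-abc-partial-0 sha256-abc-partial-10 sha256-abc-partial-2]
	//
	// "partial-10" now sits at slice index 1, so an index-based rebuild would
	// resume part 10's data at part 2's offset. Recording N inside each part
	// file, as patch 4 does, keeps every offset/size pair tied to the right
	// part regardless of the order in which the files are globbed.
}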