parallel chunked downloads

This commit is contained in:
Michael Yang 2023-09-27 16:22:30 -07:00
parent 5d22319a2c
commit 8544edca21
5 changed files with 152 additions and 185 deletions

1
go.mod
View file

@@ -10,6 +10,7 @@ require (
github.com/olekukonko/tablewriter v0.0.5
github.com/pdevine/readline v1.5.2
github.com/spf13/cobra v1.7.0
golang.org/x/sync v0.3.0
)
require github.com/rivo/uniseg v0.2.0 // indirect

2
go.sum
View file

@@ -125,6 +125,8 @@ golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63/go.mod h1:0v4NqG35kSWCMzLaMe
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=

View file

@@ -2,38 +2,35 @@ package server
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"log"
"net/http"
"net/url"
"os"
"path/filepath"
"strconv"
"sync"
"time"
"github.com/jmorganca/ollama/api"
"golang.org/x/sync/errgroup"
)
type FileDownload struct {
Digest string
FilePath string
Total int64
// BlobDownloadPart records the resumable state of one chunk of a blob
// download. Each part is serialized to its own "<blob>-partial-<i>" JSON
// state file by flushPart and read back on resume via the glob in
// downloadBlob.
type BlobDownloadPart struct {
	Offset    int64 // byte offset of this part within the whole blob
	Size      int64 // total length of this part in bytes
	Completed int64 // bytes of this part already downloaded
}
var inProgress sync.Map // map of digests currently being downloaded to their current download progress
type downloadOpts struct {
mp ModelPath
digest string
regOpts *RegistryOptions
fn func(api.ProgressResponse)
retry int // track the number of retries on this download
}
const maxRetry = 3
const maxRetries = 3
// downloadBlob downloads a blob from the registry and stores it in the blobs directory
func downloadBlob(ctx context.Context, opts downloadOpts) error {
@@ -42,9 +39,14 @@ func downloadBlob(ctx context.Context, opts downloadOpts) error {
return err
}
if fi, _ := os.Stat(fp); fi != nil {
// we already have the file, so return
fi, err := os.Stat(fp)
switch {
case errors.Is(err, os.ErrNotExist):
case err != nil:
return err
default:
opts.fn(api.ProgressResponse{
Status: fmt.Sprintf("downloading %s", opts.digest),
Digest: opts.digest,
Total: fi.Size(),
Completed: fi.Size(),
@@ -53,185 +55,154 @@ func downloadBlob(ctx context.Context, opts downloadOpts) error {
return nil
}
fileDownload := &FileDownload{
Digest: opts.digest,
FilePath: fp,
Total: 1, // dummy value to indicate that we don't know the total size yet
Completed: 0,
}
_, downloading := inProgress.LoadOrStore(opts.digest, fileDownload)
if downloading {
// this is another client requesting the server to download the same blob concurrently
return monitorDownload(ctx, opts, fileDownload)
}
if err := doDownload(ctx, opts, fileDownload); err != nil {
if errors.Is(err, errDownload) && opts.retry < maxRetry {
opts.retry++
log.Print(err)
log.Printf("retrying download of %s", opts.digest)
return downloadBlob(ctx, opts)
}
f, err := os.OpenFile(fp+"-partial", os.O_CREATE|os.O_RDWR, 0644)
if err != nil {
return err
}
return nil
}
defer f.Close()
var downloadMu sync.Mutex // mutex to check to resume a download while monitoring
partFilePaths, err := filepath.Glob(fp + "-partial-*")
if err != nil {
return err
}
// monitorDownload monitors the download progress of a blob and resumes it if it is interrupted
func monitorDownload(ctx context.Context, opts downloadOpts, f *FileDownload) error {
tick := time.NewTicker(time.Second)
for range tick.C {
done, resume, err := func() (bool, bool, error) {
downloadMu.Lock()
defer downloadMu.Unlock()
val, downloading := inProgress.Load(f.Digest)
if !downloading {
// check once again if the download is complete
if fi, _ := os.Stat(f.FilePath); fi != nil {
// successful download while monitoring
opts.fn(api.ProgressResponse{
Digest: f.Digest,
Total: fi.Size(),
Completed: fi.Size(),
})
return true, false, nil
}
// resume the download
inProgress.Store(f.Digest, f) // store the file download again to claim the resume
return false, true, nil
}
f, ok := val.(*FileDownload)
if !ok {
return false, false, fmt.Errorf("invalid type for in progress download: %T", val)
}
opts.fn(api.ProgressResponse{
Status: fmt.Sprintf("downloading %s", f.Digest),
Digest: f.Digest,
Total: f.Total,
Completed: f.Completed,
})
return false, false, nil
}()
var total, completed int64
var parts []BlobDownloadPart
for _, partFilePath := range partFilePaths {
var part BlobDownloadPart
partFile, err := os.Open(partFilePath)
if err != nil {
return err
}
if done {
// done downloading
return nil
defer partFile.Close()
if err := json.NewDecoder(partFile).Decode(&part); err != nil {
return err
}
if resume {
return doDownload(ctx, opts, f)
}
}
return nil
}
var (
chunkSize int64 = 1024 * 1024 // 1 MiB in bytes
errDownload = fmt.Errorf("download failed")
)
total += part.Size
completed += part.Completed
// doDownload downloads a blob from the registry and stores it in the blobs directory
func doDownload(ctx context.Context, opts downloadOpts, f *FileDownload) error {
defer inProgress.Delete(f.Digest)
var size int64
fi, err := os.Stat(f.FilePath + "-partial")
switch {
case errors.Is(err, os.ErrNotExist):
// noop, file doesn't exist so create it
case err != nil:
return fmt.Errorf("stat: %w", err)
default:
size = fi.Size()
// Ensure the size is divisible by the chunk size by removing excess bytes
size -= size % chunkSize
err := os.Truncate(f.FilePath+"-partial", size)
if err != nil {
return fmt.Errorf("truncate: %w", err)
}
parts = append(parts, part)
}
requestURL := opts.mp.BaseURL()
requestURL = requestURL.JoinPath("v2", opts.mp.GetNamespaceRepository(), "blobs", f.Digest)
requestURL = requestURL.JoinPath("v2", opts.mp.GetNamespaceRepository(), "blobs", opts.digest)
if len(parts) == 0 {
resp, err := makeRequest(ctx, "HEAD", requestURL, nil, nil, opts.regOpts)
if err != nil {
return err
}
defer resp.Body.Close()
total, _ = strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
// reserve the file
f.Truncate(total)
var offset int64
var size int64 = 64 * 1024 * 1024
for offset < total {
if offset+size > total {
size = total - offset
}
parts = append(parts, BlobDownloadPart{
Offset: offset,
Size: size,
})
offset += size
}
}
pw := &ProgressWriter{
status: fmt.Sprintf("downloading %s", opts.digest),
digest: opts.digest,
total: total,
completed: completed,
fn: opts.fn,
}
g, ctx := errgroup.WithContext(ctx)
g.SetLimit(64)
for i := range parts {
part := parts[i]
if part.Completed == part.Size {
continue
}
i := i
g.Go(func() error {
for try := 0; try < maxRetries; try++ {
if err := downloadBlobChunk(ctx, f, requestURL, parts, i, pw, opts); err != nil {
log.Printf("%s part %d attempt %d failed: %v, retrying", opts.digest[7:19], i, try, err)
continue
}
return nil
}
return errors.New("max retries exceeded")
})
}
if err := g.Wait(); err != nil {
return err
}
if err := f.Close(); err != nil {
return err
}
for i := range parts {
if err := os.Remove(f.Name() + "-" + strconv.Itoa(i)); err != nil {
return err
}
}
return os.Rename(f.Name(), fp)
}
func downloadBlobChunk(ctx context.Context, f *os.File, requestURL *url.URL, parts []BlobDownloadPart, i int, pw *ProgressWriter, opts downloadOpts) error {
part := &parts[i]
partName := f.Name() + "-" + strconv.Itoa(i)
if err := flushPart(partName, part); err != nil {
return err
}
offset := part.Offset + part.Completed
w := io.NewOffsetWriter(f, offset)
headers := make(http.Header)
headers.Set("Range", fmt.Sprintf("bytes=%d-", size))
headers.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, part.Offset+part.Size-1))
resp, err := makeRequest(ctx, "GET", requestURL, headers, nil, opts.regOpts)
if err != nil {
log.Printf("couldn't download blob: %v", err)
return fmt.Errorf("%w: %w", errDownload, err)
return err
}
defer resp.Body.Close()
if resp.StatusCode >= http.StatusBadRequest {
body, _ := io.ReadAll(resp.Body)
return fmt.Errorf("%w: on download registry responded with code %d: %v", errDownload, resp.StatusCode, string(body))
n, err := io.Copy(w, io.TeeReader(resp.Body, pw))
if err != nil && !errors.Is(err, io.EOF) {
// rollback progress bar
pw.completed -= n
return err
}
err = os.MkdirAll(filepath.Dir(f.FilePath), 0o700)
if err != nil {
return fmt.Errorf("make blobs directory: %w", err)
}
part.Completed += n
remaining, _ := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
f.Completed = size
f.Total = remaining + f.Completed
inProgress.Store(f.Digest, f)
out, err := os.OpenFile(f.FilePath+"-partial", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0o644)
if err != nil {
return fmt.Errorf("open file: %w", err)
}
defer out.Close()
outerLoop:
for {
select {
case <-ctx.Done():
// handle client request cancellation
inProgress.Delete(f.Digest)
return nil
default:
opts.fn(api.ProgressResponse{
Status: fmt.Sprintf("downloading %s", f.Digest),
Digest: f.Digest,
Total: f.Total,
Completed: f.Completed,
})
if f.Completed >= f.Total {
if err := out.Close(); err != nil {
return err
}
if err := os.Rename(f.FilePath+"-partial", f.FilePath); err != nil {
opts.fn(api.ProgressResponse{
Status: fmt.Sprintf("error renaming file: %v", err),
Digest: f.Digest,
Total: f.Total,
Completed: f.Completed,
})
return err
}
break outerLoop
}
}
n, err := io.CopyN(out, resp.Body, chunkSize)
if err != nil && !errors.Is(err, io.EOF) {
return fmt.Errorf("%w: %w", errDownload, err)
}
f.Completed += n
inProgress.Store(f.Digest, f)
}
log.Printf("success getting %s\n", f.Digest)
return nil
return flushPart(partName, part)
}
// flushPart persists the state of a single download part to its JSON state
// file at name, so an interrupted download can be resumed from the recorded
// Completed offset.
//
// The file is opened with O_TRUNC so the file contains exactly one JSON
// document after each flush; the original O_CREATE|O_RDWR open rewrote in
// place and could leave stale trailing bytes (corrupting the JSON read back
// in downloadBlob) if a new encoding were ever shorter than the previous one.
func flushPart(name string, part *BlobDownloadPart) error {
	partFile, err := os.OpenFile(name, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0o644)
	if err != nil {
		return err
	}
	defer partFile.Close()

	return json.NewEncoder(partFile).Encode(part)
}

View file

@@ -30,8 +30,6 @@ import (
"github.com/jmorganca/ollama/version"
)
const MaxRetries = 3
type RegistryOptions struct {
Insecure bool
Username string
@@ -1417,7 +1415,7 @@ func checkBlobExistence(ctx context.Context, mp ModelPath, digest string, regOpt
func makeRequestWithRetry(ctx context.Context, method string, requestURL *url.URL, headers http.Header, body io.ReadSeeker, regOpts *RegistryOptions) (*http.Response, error) {
var status string
for try := 0; try < MaxRetries; try++ {
for try := 0; try < maxRetries; try++ {
resp, err := makeRequest(ctx, method, requestURL, headers, body, regOpts)
if err != nil {
log.Printf("couldn't start upload: %v", err)
@@ -1487,17 +1485,7 @@ func makeRequest(ctx context.Context, method string, requestURL *url.URL, header
req.ContentLength = contentLength
}
client := &http.Client{
CheckRedirect: func(req *http.Request, via []*http.Request) error {
if len(via) >= 10 {
return fmt.Errorf("too many redirects")
}
log.Printf("redirected to: %s\n", req.URL)
return nil
},
}
resp, err := client.Do(req)
resp, err := http.DefaultClient.Do(req)
if err != nil {
return nil, err
}

View file

@@ -10,6 +10,7 @@ import (
"net/url"
"os"
"strconv"
"sync"
"github.com/jmorganca/ollama/api"
)
@@ -138,7 +139,7 @@ func uploadBlobChunk(ctx context.Context, method string, requestURL *url.URL, r
headers.Set("Content-Range", fmt.Sprintf("%d-%d", offset, offset+sectionReader.Size()-1))
}
for try := 0; try < MaxRetries; try++ {
for try := 0; try < maxRetries; try++ {
resp, err := makeRequest(ctx, method, requestURL, headers, io.TeeReader(sectionReader, pw), opts)
if err != nil && !errors.Is(err, io.EOF) {
return nil, err
@@ -191,9 +192,13 @@ type ProgressWriter struct {
completed int64
total int64
fn func(api.ProgressResponse)
mu sync.Mutex
}
func (pw *ProgressWriter) Write(b []byte) (int, error) {
pw.mu.Lock()
defer pw.mu.Unlock()
n := len(b)
pw.bucket += int64(n)