Use build tags to generate accelerated binaries for CUDA and ROCm on Linux.

The build tags rocm or cuda must be specified to both go generate and go build. ROCm builds need ROCM_PATH set (and the ROCm SDK present), CLBlast installed (for GGML), and CLBlast_DIR set in the environment to the CLBlast CMake directory (likely /usr/lib/cmake/CLBlast). Build tags are also used to switch VRAM detection between the cuda and rocm implementations, using added "accelerator_foo.go" files which contain architecture-specific functions and variables. accelerator_none is used when no tags are set, and a helper function addRunner ignores it if it is the chosen accelerator (see the sketch after the commit metadata below). Fix go generate commands; thanks @deadmeu for testing.
Authored by 65a on 2023-10-16 17:41:40 -07:00; committed by Daniel Hiltgen
parent d4cd695759
commit f8ef4439e9
8 changed files with 261 additions and 6 deletions
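
The addRunner helper mentioned in the commit message does not appear in the files below. A minimal sketch of how such a helper could ignore the no-accelerator stub, assuming an invented signature and a ModelRunner reduced to the two fields this diff shows:

```
// Hypothetical sketch, not code from this commit. addRunner's real
// signature is not shown in the diff; ModelRunner is reduced to the two
// fields that appear in the accelerator files below.
package main

import "fmt"

type ModelRunner struct {
	Path        string
	Accelerated bool
}

// addRunner appends whatever the tag-selected acceleratedRunner returned.
// accelerator_none.go returns an empty slice, so a build with no
// accelerator tags appends nothing and the stub is effectively ignored.
func addRunner(runners []ModelRunner, accelerated []ModelRunner) []ModelRunner {
	return append(runners, accelerated...)
}

func main() {
	cpuOnly := addRunner(nil, nil) // what a !cuda && !rocm build would add
	fmt.Println(len(cpuOnly))      // 0: no accelerated runner
}
```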


@@ -11,8 +11,8 @@ RUN mkdir -p /usr/local && tar xz -C /usr/local </tmp/go1.21.3.tar.gz
 COPY . .
 ENV GOARCH=$TARGETARCH
 ENV GOFLAGS=$GOFLAGS
-RUN /usr/local/go/bin/go generate ./... \
-    && /usr/local/go/bin/go build .
+RUN /usr/local/go/bin/go generate -tags cuda ./... \
+    && /usr/local/go/bin/go build -tags cuda .

 FROM ubuntu:22.04
 RUN apt-get update && apt-get install -y ca-certificates
@@ -27,3 +27,5 @@ ENV NVIDIA_DRIVER_CAPABILITIES=compute,utility
 ENTRYPOINT ["/bin/ollama"]
 CMD ["serve"]


@@ -27,5 +27,5 @@ ENV GOOS=linux
 ENV GOARCH=$TARGETARCH
 ENV GOFLAGS=$GOFLAGS
-RUN /usr/local/go/bin/go generate ./... && \
-    /usr/local/go/bin/go build .
+RUN /usr/local/go/bin/go generate -tags cuda ./... && \
+    /usr/local/go/bin/go build -tags cuda .


@@ -185,19 +185,50 @@ ollama list
 ## Building

+### Generic (CPU)
+
 Install `cmake` and `go`:

 ```
 brew install cmake go
 ```

-Then generate dependencies and build:
+Then generate dependencies:

 ```
 go generate ./...
+```
+
+Then build the binary:
+
+```
 go build .
 ```

+### CUDA (NVIDIA)
+
+*Your operating system distribution may already have packages for NVIDIA CUDA. Distro packages are often preferable, but instructions are distro-specific. Please consult distro-specific docs for dependencies if available!*
+
+Install `cmake` and `golang` as well as [NVIDIA CUDA](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html) development and runtime packages.
+
+Then generate dependencies:
+
+```
+go generate -tags cuda ./...
+```
+
+Then build the binary:
+
+```
+go build -tags cuda .
+```
+
+### ROCm (AMD)
+
+*Your operating system distribution may already have packages for AMD ROCm and CLBlast. Distro packages are often preferable, but instructions are distro-specific. Please consult distro-specific docs for dependencies if available!*
+
+Install [CLBlast](https://github.com/CNugteren/CLBlast/blob/master/doc/installation.md) and [ROCm](https://rocm.docs.amd.com/en/latest/deploy/linux/quick_start.html) development packages first, as well as `cmake` and `golang`.
+
+Adjust the paths below (correct for Arch) as appropriate for your distribution's install locations and generate dependencies:
+
+```
+CLBlast_DIR=/usr/lib/cmake/CLBlast ROCM_PATH=/opt/rocm go generate -tags rocm ./...
+```
+
+Then build the binary:
+
+```
+go build -tags rocm
+```
+
+### Running local builds
+
 Next, start the server:

 ```

llm/accelerator_cuda.go (new file, 67 lines)

@@ -0,0 +1,67 @@
//go:build cuda

package llm

import (
	"bufio"
	"bytes"
	"errors"
	"fmt"
	"log"
	"os/exec"
	"path"
	"strconv"
	"strings"

	"github.com/jmorganca/ollama/format"
)

var (
	errNvidiaSMI     = errors.New("warning: gpu support may not be enabled, check that you have installed GPU drivers: nvidia-smi command failed")
	errAvailableVRAM = errors.New("not enough VRAM available, falling back to CPU only")
)

// acceleratedRunner returns the runner for this accelerator given the provided buildPath string.
func acceleratedRunner(buildPath string) []ModelRunner {
	return []ModelRunner{
		ModelRunner{
			Path:        path.Join(buildPath, "cuda", "bin", "ollama-runner"),
			Accelerated: true,
		},
	}
}

// CheckVRAM returns the free VRAM in bytes on Linux machines with NVIDIA GPUs.
func CheckVRAM() (int64, error) {
	cmd := exec.Command("nvidia-smi", "--query-gpu=memory.free", "--format=csv,noheader,nounits")
	var stdout bytes.Buffer
	cmd.Stdout = &stdout
	err := cmd.Run()
	if err != nil {
		return 0, errNvidiaSMI
	}

	var freeMiB int64
	scanner := bufio.NewScanner(&stdout)
	for scanner.Scan() {
		line := scanner.Text()
		if strings.Contains(line, "[Insufficient Permissions]") {
			return 0, fmt.Errorf("GPU support may not be enabled, check that you have installed GPU drivers and have the necessary permissions to run nvidia-smi")
		}
		vram, err := strconv.ParseInt(strings.TrimSpace(line), 10, 64)
		if err != nil {
			return 0, fmt.Errorf("failed to parse available VRAM: %v", err)
		}
		freeMiB += vram
	}

	// nvidia-smi reports free memory in MiB; convert to bytes.
	freeBytes := freeMiB * 1024 * 1024
	if freeBytes < 2*format.GigaByte {
		log.Printf("less than 2 GB VRAM available")
		return 0, errAvailableVRAM
	}

	return freeBytes, nil
}
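
None of CheckVRAM's callers appear in this diff. As a hedged sketch only, here is how same-package code might combine it with acceleratedRunner; pickRunners and its fallback behavior are assumptions, not part of this commit:

```
//go:build cuda

// Hypothetical usage sketch, not part of this commit; pickRunners is an
// invented name, while CheckVRAM and acceleratedRunner are defined above.
package llm

import "log"

func pickRunners(buildPath string) []ModelRunner {
	free, err := CheckVRAM()
	if err != nil {
		// nvidia-smi failed or under 2 GB was free: use the CPU runner.
		log.Printf("not using accelerated runner: %v", err)
		return nil
	}
	log.Printf("%d bytes of free VRAM, using the CUDA runner", free)
	return acceleratedRunner(buildPath)
}
```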

llm/accelerator_none.go (new file, 21 lines)

@@ -0,0 +1,21 @@
//go:build !rocm && !cuda

package llm

import (
	"errors"
)

var (
	errNoAccel = errors.New("no accelerator support in this binary")
)

// acceleratedRunner returns the runner for this accelerator given the provided buildPath string.
func acceleratedRunner(buildPath string) []ModelRunner {
	return make([]ModelRunner, 0, 1)
}

// CheckVRAM is a stub with no accelerator.
func CheckVRAM() (int64, error) {
	return 0, errNoAccel
}

llm/accelerator_rocm.go (new file, 85 lines)

@@ -0,0 +1,85 @@
//go:build rocm

package llm

import (
	"bytes"
	"encoding/csv"
	"errors"
	"fmt"
	"io"
	"log"
	"os"
	"os/exec"
	"path"
	"path/filepath"
	"strconv"
	"strings"
)

var errNoAccel = errors.New("rocm-smi command failed")

// acceleratedRunner returns the runner for this accelerator given the provided buildPath string.
func acceleratedRunner(buildPath string) []ModelRunner {
	return []ModelRunner{
		ModelRunner{
			Path:        path.Join(buildPath, "rocm", "bin", "ollama-runner"),
			Accelerated: true,
		},
	}
}

// CheckVRAM returns the available VRAM in bytes on Linux machines with AMD GPUs.
func CheckVRAM() (int64, error) {
	rocmHome := os.Getenv("ROCM_PATH")
	if rocmHome == "" {
		rocmHome = os.Getenv("ROCM_HOME")
	}
	if rocmHome == "" {
		log.Println("warning: ROCM_PATH is not set. Trying a likely fallback path, but it is recommended to set this variable in the environment.")
		rocmHome = "/opt/rocm"
	}

	cmd := exec.Command(filepath.Join(rocmHome, "bin/rocm-smi"), "--showmeminfo", "VRAM", "--csv")
	var stdout bytes.Buffer
	cmd.Stdout = &stdout
	err := cmd.Run()
	if err != nil {
		return 0, errNoAccel
	}

	csvData := csv.NewReader(&stdout)
	// llama.cpp and ROCm don't seem to handle splitting VRAM allocations
	// across cards properly, so try to find the biggest card instead :(. FIXME.
	totalBiggestCard := int64(0)
	bigCardName := ""
	for {
		record, err := csvData.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			return 0, fmt.Errorf("failed to parse available VRAM: %v", err)
		}
		// Device rows start with "card"; skip the header and any other rows.
		if !strings.HasPrefix(record[0], "card") {
			continue
		}
		cardTotal, err := strconv.ParseInt(record[1], 10, 64)
		if err != nil {
			return 0, err
		}
		cardUsed, err := strconv.ParseInt(record[2], 10, 64)
		if err != nil {
			return 0, err
		}
		// rocm-smi reports sizes in bytes; the log below converts to MiB.
		possible := (cardTotal - cardUsed)
		log.Printf("ROCm found %d MiB of available VRAM on device %q", possible/1024/1024, record[0])
		if possible > totalBiggestCard {
			totalBiggestCard = possible
			bigCardName = record[0]
		}
	}

	if totalBiggestCard == 0 {
		log.Printf("found ROCm GPU but failed to parse free VRAM!")
		return 0, errNoAccel
	}

	log.Printf("ROCm selecting device %q", bigCardName)
	return totalBiggestCard, nil
}
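
The exact rocm-smi CSV layout is not shown here; the parser above implies a header row plus one row per card, with the device name, total bytes, and used bytes in the first three columns. A standalone sketch using invented sample data in that assumed shape:

```
// Hypothetical sketch with invented sample data; the column layout is
// inferred from the indices CheckVRAM reads (record[0] device name,
// record[1] total VRAM in bytes, record[2] used VRAM in bytes) and was
// not captured from a real rocm-smi run.
package main

import (
	"encoding/csv"
	"fmt"
	"strconv"
	"strings"
)

func main() {
	sample := "device,VRAM Total Memory (B),VRAM Total Used Memory (B)\n" +
		"card0,17163091968,305135616\n"
	records, err := csv.NewReader(strings.NewReader(sample)).ReadAll()
	if err != nil {
		panic(err)
	}
	for _, rec := range records {
		if !strings.HasPrefix(rec[0], "card") {
			continue // skip the header row, as CheckVRAM does
		}
		total, _ := strconv.ParseInt(rec[1], 10, 64)
		used, _ := strconv.ParseInt(rec[2], 10, 64)
		fmt.Printf("%s: %d MiB free\n", rec[0], (total-used)/1024/1024)
	}
}
```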


@@ -0,0 +1,24 @@
//go:build cuda

package llm

//go:generate git submodule init
//go:generate git submodule update --force ggml
//go:generate git -C ggml apply ../patches/0001-add-detokenize-endpoint.patch
//go:generate git -C ggml apply ../patches/0002-34B-model-support.patch
//go:generate git -C ggml apply ../patches/0005-ggml-support-CUDA-s-half-type-for-aarch64-1455-2670.patch
//go:generate git -C ggml apply ../patches/0001-copy-cuda-runtime-libraries.patch
//go:generate rm -rf ggml/build/cuda
//go:generate cmake -S ggml -B ggml/build/cuda -DLLAMA_CUBLAS=on -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on
//go:generate cmake --build ggml/build/cuda --target server --config Release
//go:generate mv ggml/build/cuda/bin/server ggml/build/cuda/bin/ollama-runner

//go:generate git -C gguf apply ../patches/0001-copy-cuda-runtime-libraries.patch
//go:generate git -C gguf apply ../patches/0001-update-default-log-target.patch
//go:generate rm -rf gguf/build/cuda
//go:generate cmake -S gguf -B gguf/build/cuda -DLLAMA_CUBLAS=on -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on -DLLAMA_NATIVE=off -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off
//go:generate cmake --build gguf/build/cuda --target server --config Release
//go:generate mv gguf/build/cuda/bin/server gguf/build/cuda/bin/ollama-runner
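
Note that these directives are themselves gated: because of the //go:build cuda constraint, a plain `go generate ./...` never scans this file, which is why the Dockerfiles above pass -tags cuda to both go generate and go build. A toy, self-contained illustration of the same gating, with an invented file body and echo command:

```
//go:build cuda

// Toy illustration, not part of this commit: go generate only visits files
// whose build constraints are satisfied, so this directive runs under
// `go generate -tags cuda ./...` and is skipped without the tag.
package llm

//go:generate echo "would build the CUDA runner here"
```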


@@ -0,0 +1,25 @@
//go:build rocm

package llm

//go:generate git submodule init
//go:generate git submodule update --force ggml
//go:generate git -C ggml apply ../patches/0001-add-detokenize-endpoint.patch
//go:generate git -C ggml apply ../patches/0002-34B-model-support.patch
//go:generate git -C ggml apply ../patches/0005-ggml-support-CUDA-s-half-type-for-aarch64-1455-2670.patch
//go:generate git -C ggml apply ../patches/0001-copy-cuda-runtime-libraries.patch

//go:generate git submodule update --force gguf
//go:generate git -C gguf apply ../patches/0001-copy-cuda-runtime-libraries.patch
//go:generate git -C gguf apply ../patches/0001-update-default-log-target.patch

//go:generate rm -rf ggml/build/rocm
//go:generate cmake -S ggml -B ggml/build/rocm -DLLAMA_CLBLAST=on -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on
//go:generate cmake --build ggml/build/rocm --target server --config Release
//go:generate mv ggml/build/rocm/bin/server ggml/build/rocm/bin/ollama-runner

//go:generate rm -rf gguf/build/rocm
//go:generate cmake -S gguf -B gguf/build/rocm -DLLAMA_HIPBLAS=on -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on -DLLAMA_NATIVE=off -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off -DCMAKE_C_COMPILER=$ROCM_PATH/llvm/bin/clang -DCMAKE_CXX_COMPILER=$ROCM_PATH/llvm/bin/clang++ -DAMDGPU_TARGETS='gfx803;gfx900;gfx906:xnack-;gfx908:xnack-;gfx90a:xnack+;gfx90a:xnack-;gfx1010;gfx1012;gfx1030;gfx1100;gfx1101;gfx1102' -DGPU_TARGETS='gfx803;gfx900;gfx906:xnack-;gfx908:xnack-;gfx90a:xnack+;gfx90a:xnack-;gfx1010;gfx1012;gfx1030;gfx1100;gfx1101;gfx1102'
//go:generate cmake --build gguf/build/rocm --target server --config Release
//go:generate mv gguf/build/rocm/bin/server gguf/build/rocm/bin/ollama-runner