diff --git a/docker/README.md b/docker/README.md index 100bcbd..130d180 100644 --- a/docker/README.md +++ b/docker/README.md @@ -1,10 +1,21 @@ -# Dockerfiles for building the llama-cpp-python server -- `Dockerfile.openblas_simple` - a simple Dockerfile for non-GPU OpenBLAS -- `Dockerfile.cuda_simple` - a simple Dockerfile for CUDA accelerated CuBLAS -- `hug_model.py` - a Python utility for interactively choosing and downloading the latest `5_1` quantized models from [huggingface.co/TheBloke]( https://huggingface.co/TheBloke) +# Simple Dockerfiles for building the llama-cpp-python server with external model bin files +- `./openblas_simple/Dockerfile` - a simple Dockerfile for non-GPU OpenBLAS, where the model is located outside the Docker image + - `cd ./openblas_simple` + - `docker build -t openblas_simple .` + - `docker run -e USE_MLOCK=0 -e MODEL=/var/model/<model-path> -v <model-root-path>:/var/model -t openblas_simple` + where `<model-root-path>/<model-path>` is the full path to the model file on the Docker host system. +- `./cuda_simple/Dockerfile` - a simple Dockerfile for CUDA accelerated CuBLAS, where the model is located outside the Docker image + - `cd ./cuda_simple` + - `docker build -t cuda_simple .` + - `docker run -e USE_MLOCK=0 -e MODEL=/var/model/<model-path> -v <model-root-path>:/var/model -t cuda_simple` + where `<model-root-path>/<model-path>` is the full path to the model file on the Docker host system. 
+ +# "Bot-in-a-box" - a method to build a Docker image by choosing a model to be downloaded and loaded into a Docker image + - `cd ./auto_docker`: + - `hug_model.py` - a Python utility for interactively choosing and downloading the latest `5_1` quantized models from [huggingface.co/TheBloke]( https://huggingface.co/TheBloke) - `Dockerfile` - a single OpenBLAS and CuBLAS combined Dockerfile that automatically installs a previously downloaded model `model.bin` -# Get model from Hugging Face +## Get model from Hugging Face `python3 ./hug_model.py` You should now have a model in the current directory and `model.bin` symlinked to it for the subsequent Docker build and copy step. e.g. diff --git a/docker/Dockerfile b/docker/auto_docker/Dockerfile similarity index 100% rename from docker/Dockerfile rename to docker/auto_docker/Dockerfile diff --git a/docker/hug_model.py b/docker/auto_docker/hug_model.py similarity index 100% rename from docker/hug_model.py rename to docker/auto_docker/hug_model.py diff --git a/docker/start_server.sh b/docker/auto_docker/start_server.sh similarity index 100% rename from docker/start_server.sh rename to docker/auto_docker/start_server.sh diff --git a/docker/Dockerfile.cuda_simple b/docker/cuda_simple/Dockerfile similarity index 82% rename from docker/Dockerfile.cuda_simple rename to docker/cuda_simple/Dockerfile index dda7a9f..24906d5 100644 --- a/docker/Dockerfile.cuda_simple +++ b/docker/cuda_simple/Dockerfile @@ -1,5 +1,5 @@ ARG CUDA_IMAGE="12.1.1-devel-ubuntu22.04" -FROM ${CUDA_IMAGE} +FROM nvidia/cuda:${CUDA_IMAGE} # We need to set the host to 0.0.0.0 to allow outside access ENV HOST 0.0.0.0 @@ -10,7 +10,7 @@ COPY . . 
RUN apt update && apt install -y python3 python3-pip RUN python3 -m pip install --upgrade pip pytest cmake scikit-build setuptools fastapi uvicorn sse-starlette -RUN LLAMA_CUBLAS=1 python3 setup.py develop +RUN LLAMA_CUBLAS=1 pip install llama-cpp-python # Run the server CMD python3 -m llama_cpp.server diff --git a/docker/Dockerfile.openblas_simple b/docker/openblas_simple/Dockerfile similarity index 86% rename from docker/Dockerfile.openblas_simple rename to docker/openblas_simple/Dockerfile index f58506f..1a95cae 100644 --- a/docker/Dockerfile.openblas_simple +++ b/docker/openblas_simple/Dockerfile @@ -9,7 +9,7 @@ COPY . . RUN apt update && apt install -y libopenblas-dev ninja-build build-essential RUN python -m pip install --upgrade pip pytest cmake scikit-build setuptools fastapi uvicorn sse-starlette -RUN LLAMA_OPENBLAS=1 python3 setup.py develop +RUN LLAMA_OPENBLAS=1 pip install llama_cpp_python --verbose # Run the server CMD python3 -m llama_cpp.server