Merge branch 'main' of github.com:abetlen/llama-cpp-python into better-server-params-and-fields

Lucas Doyle 2023-05-03 13:10:03 -07:00
commit 3008a954c1
11 changed files with 268 additions and 16 deletions

166
.dockerignore Normal file

@@ -0,0 +1,166 @@
_skbuild/
.envrc
models/
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
.idea/

39
.github/workflows/build-docker.yaml vendored Normal file

@@ -0,0 +1,39 @@
name: Build Docker

on: workflow_dispatch

permissions:
  contents: write
  packages: write

jobs:
  docker:
    name: Build and push Docker image
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          submodules: "true"

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v2

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2

      - name: Login to GitHub Container Registry
        uses: docker/login-action@v2
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Build and push
        uses: docker/build-push-action@v4
        with:
          context: .
          push: true # push to registry
          pull: true # always fetch the latest base images
          platforms: linux/amd64,linux/arm64 # build for both amd64 and arm64
          tags: ghcr.io/abetlen/llama-cpp-python:latest
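Because the workflow is gated on `workflow_dispatch`, it only runs when triggered manually from the Actions tab or via the GitHub CLI; a sketch of the latter (assumes `gh` is installed and authenticated):

```bash
# Manually trigger the "Build Docker" workflow defined above (gh CLI assumed available)
gh workflow run "Build Docker" --repo abetlen/llama-cpp-python
```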

15
Dockerfile Normal file

@@ -0,0 +1,15 @@
FROM python:3-bullseye
# We need to set the host to 0.0.0.0 to allow outside access
ENV HOST 0.0.0.0
COPY . .
# Install the package
RUN apt update && apt install -y libopenblas-dev
RUN python -m pip install --upgrade pip pytest cmake scikit-build setuptools fastapi uvicorn sse-starlette
RUN LLAMA_OPENBLAS=1 python3 setup.py develop
# Run the server
CMD python3 -m llama_cpp.server
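For reference, a local build-and-run of this image might look like the following sketch; the image tag and model path are illustrative, and `MODEL` is the environment variable read by `llama_cpp.server`:

```bash
# Illustrative tag and model path; MODEL must point at a ggml model file inside the container
docker build -t llama-cpp-python .
docker run --rm -it -p 8000:8000 \
  -v /path/to/models:/models \
  -e MODEL=/models/ggml-model-name.bin \
  llama-cpp-python
```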

15
Dockerfile.cuda Normal file

@@ -0,0 +1,15 @@
FROM nvidia/cuda:12.1.1-devel-ubuntu20.04
# We need to set the host to 0.0.0.0 to allow outside access
ENV HOST 0.0.0.0
COPY . .
# Install the package
RUN apt update && apt install -y python3 python3-pip
RUN python3 -m pip install --upgrade pip pytest cmake scikit-build setuptools fastapi uvicorn sse-starlette
RUN LLAMA_CUBLAS=1 python3 setup.py develop
# Run the server
CMD python3 -m llama_cpp.server
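The CUDA image is built the same way but needs GPU access at run time; a sketch assuming the NVIDIA Container Toolkit is installed on the host (tag and paths illustrative):

```bash
# Requires the NVIDIA Container Toolkit so that --gpus works; tag and model path are illustrative
docker build -f Dockerfile.cuda -t llama-cpp-python-cuda .
docker run --rm -it --gpus all -p 8000:8000 \
  -v /path/to/models:/models \
  -e MODEL=/models/ggml-model-name.bin \
  llama-cpp-python-cuda
```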

README.md

@@ -72,6 +72,14 @@ python3 -m llama_cpp.server
 Navigate to [http://localhost:8000/docs](http://localhost:8000/docs) to see the OpenAPI documentation.
 
+## Docker image
+
+A Docker image is available on [GHCR](https://ghcr.io/abetlen/llama-cpp-python). To run the server:
+
+```bash
+docker run --rm -it -p8000:8000 -v /path/to/models:/models -eMODEL=/models/ggml-model-name.bin ghcr.io/abetlen/llama-cpp-python:latest
+```
+
 ## Low-level API
 
 The low-level API is a direct `ctypes` binding to the C API provided by `llama.cpp`.
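Once a container from the snippet above is running, the server can be smoke-tested with an OpenAI-style completion request; the `/v1/completions` route and JSON fields are assumed from the FastAPI app in `llama_cpp.server`:

```bash
# Minimal smoke test against the containerized server (route and fields assumed from the OpenAI-compatible API)
curl http://localhost:8000/v1/completions \
  -H "Content-Type: application/json" \
  -d '{"prompt": "Hello, my name is", "max_tokens": 16}'
```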

llama_cpp/llama.py

@@ -53,12 +53,14 @@ class LlamaState:
     def __init__(
         self,
         eval_tokens: Deque[llama_cpp.llama_token],
-        eval_logits: Deque[List[float]],
+        eval_logits: Deque[List[llama_cpp.c_float]],
         llama_state,
+        llama_state_size: llama_cpp.c_size_t,
     ):
         self.eval_tokens = eval_tokens
         self.eval_logits = eval_logits
         self.llama_state = llama_state
+        self.llama_state_size = llama_state_size
 
 
 class Llama:
@@ -394,7 +396,7 @@ class Llama:
             and tuple(self.eval_tokens) == tuple(tokens[: len(self.eval_tokens)])
         ):
             if self.verbose:
-                print("generate cache hit", file=sys.stderr)
+                print("Llama.generate: cache hit", file=sys.stderr)
             reset = False
             tokens = tokens[len(self.eval_tokens) :]
@@ -516,7 +518,7 @@ class Llama:
         if self.cache and prompt_tokens in self.cache:
             if self.verbose:
-                print("cache hit", file=sys.stderr)
+                print("Llama._create_completion: cache hit", file=sys.stderr)
             self.load_state(self.cache[prompt_tokens])
 
         finish_reason = "length"
@@ -536,7 +538,7 @@ class Llama:
             if self.cache and len(completion_tokens) == 0:
                 if prompt_tokens not in self.cache:
                     if self.verbose:
-                        print("cache miss", file=sys.stderr)
+                        print("Llama._create_completion: cache miss", file=sys.stderr)
                     self.cache[prompt_tokens] = self.save_state()
 
             completion_tokens.append(token)
@@ -950,19 +952,25 @@ class Llama:
         assert self.ctx is not None
         state_size = llama_cpp.llama_get_state_size(self.ctx)
         llama_state = (llama_cpp.c_uint8 * int(state_size))()
-        if llama_cpp.llama_copy_state_data(self.ctx, llama_state) != state_size:
+        n_bytes = llama_cpp.llama_copy_state_data(self.ctx, llama_state)
+        if int(n_bytes) > int(state_size):
             raise RuntimeError("Failed to copy llama state data")
+        llama_state_compact = (llama_cpp.c_uint8 * int(n_bytes))()
+        llama_cpp.ctypes.memmove(llama_state_compact, llama_state, int(n_bytes))
+        if self.verbose:
+            print(f"Llama.save_state: saving {n_bytes} bytes of llama state", file=sys.stderr)
         return LlamaState(
             eval_tokens=self.eval_tokens.copy(),
             eval_logits=self.eval_logits.copy(),
-            llama_state=llama_state,
+            llama_state=llama_state_compact,
+            llama_state_size=n_bytes,
         )
 
     def load_state(self, state: LlamaState) -> None:
         assert self.ctx is not None
         self.eval_tokens = state.eval_tokens.copy()
         self.eval_logits = state.eval_logits.copy()
-        state_size = llama_cpp.llama_get_state_size(self.ctx)
+        state_size = state.llama_state_size
         if llama_cpp.llama_set_state_data(self.ctx, state.llama_state) != state_size:
             raise RuntimeError("Failed to set llama state data")

llama_cpp/llama_cpp.py

@@ -71,7 +71,7 @@ LLAMA_FILE_VERSION = ctypes.c_int(1)
 LLAMA_FILE_MAGIC = b"ggjt"
 LLAMA_FILE_MAGIC_UNVERSIONED = b"ggml"
 LLAMA_SESSION_MAGIC = b"ggsn"
-LLAMA_SESSION_VERSION = ctypes.c_int(0)
+LLAMA_SESSION_VERSION = ctypes.c_int(1)
 
 llama_context_p = c_void_p
@@ -136,9 +136,9 @@ LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = ctypes.c_int(
 )  # tok_embeddings.weight and output.weight are F16
 LLAMA_FTYPE_MOSTLY_Q4_2 = ctypes.c_int(5)  # except 1d tensors
 # LLAMA_FTYPE_MOSTYL_Q4_3 = ctypes.c_int(6)  # except 1d tensors
-LLAMA_FTYPE_MOSTYL_Q8_0 = ctypes.c_int(7)  # except 1d tensors
-LLAMA_FTYPE_MOSTYL_Q5_0 = ctypes.c_int(8)  # except 1d tensors
-LLAMA_FTYPE_MOSTYL_Q5_1 = ctypes.c_int(9)  # except 1d tensors
+LLAMA_FTYPE_MOSTLY_Q8_0 = ctypes.c_int(7)  # except 1d tensors
+LLAMA_FTYPE_MOSTLY_Q5_0 = ctypes.c_int(8)  # except 1d tensors
+LLAMA_FTYPE_MOSTLY_Q5_1 = ctypes.c_int(9)  # except 1d tensors
 
 # Functions
@@ -239,7 +239,8 @@ _lib.llama_set_rng_seed.argtypes = [llama_context_p, c_int]
 _lib.llama_set_rng_seed.restype = None
 
-# Returns the size in bytes of the state (rng, logits, embedding and kv_cache)
+# Returns the maximum size in bytes of the state (rng, logits, embedding
+# and kv_cache) - will often be smaller after compacting tokens
 def llama_get_state_size(ctx: llama_context_p) -> c_size_t:
     return _lib.llama_get_state_size(ctx)

pyproject.toml

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "llama_cpp_python"
-version = "0.1.40"
+version = "0.1.41"
 description = "Python bindings for the llama.cpp library"
 authors = ["Andrei Betlen <abetlen@gmail.com>"]
 license = "MIT"

setup.py

@@ -10,7 +10,7 @@ setup(
     description="A Python wrapper for llama.cpp",
     long_description=long_description,
     long_description_content_type="text/markdown",
-    version="0.1.40",
+    version="0.1.41",
     author="Andrei Betlen",
     author_email="abetlen@gmail.com",
     license="MIT",

2
vendor/llama.cpp vendored

@@ -1 +1 @@
-Subproject commit 58b367c2d757c0ea12aec672382462b42204c724
+Subproject commit e216aa04633892b972d013719e38b59fd4917341
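Since both Dockerfiles `COPY . .` into the image, a local build needs the vendored `llama.cpp` submodule checked out, mirroring the `submodules: "true"` setting in the CI checkout step:

```bash
# Fetch the repository together with the vendored llama.cpp sources
git clone --recurse-submodules https://github.com/abetlen/llama-cpp-python.git
# or, inside an existing clone:
git submodule update --init vendor/llama.cpp
```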