Merge branch 'main' into v0.2-wip
Commit ac47d55577

9 changed files with 208 additions and 124 deletions
@@ -7,6 +7,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 ## [Unreleased]
 
+## [0.1.79]
+
+### Added
+
+- GGUF Support (breaking change requiring new model format)
+
 ## [0.1.78]
 
 ### Added
@@ -17,6 +17,9 @@ This package provides:
 
 Documentation is available at [https://llama-cpp-python.readthedocs.io/en/latest](https://llama-cpp-python.readthedocs.io/en/latest).
 
+> [!WARNING]
+> Starting with version 0.1.79 the model format has changed from `ggmlv3` to `gguf`. Old model files can be converted using the `convert-llama-ggmlv3-to-gguf.py` script in [`llama.cpp`](https://github.com/ggerganov/llama.cpp)
+
 ## Installation from PyPI (recommended)
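For orientation on the format change flagged above, here is a minimal sketch of loading an already-converted GGUF model with the high-level API. The model path and prompt are placeholders, not part of this diff.

```python
from llama_cpp import Llama

# Hypothetical path to a model already converted to the new GGUF format.
llm = Llama(model_path="./models/example-model.Q4_0.gguf")

# Basic completion call; prompt and parameters are illustrative.
output = llm(
    "Q: Name the planets in the solar system. A: ",
    max_tokens=32,
    stop=["Q:", "\n"],
)
print(output["choices"][0]["text"])
```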
@@ -201,7 +204,7 @@ This package is under active development and I welcome any contributions.
 To get started, clone the repository and install the package in editable / development mode:
 
 ```bash
-git clone --recurse-submodules git@github.com:abetlen/llama-cpp-python.git
+git clone --recurse-submodules https://github.com/abetlen/llama-cpp-python.git
 cd llama-cpp-python
 
 # Upgrade pip (required for editable mode)
@@ -1,37 +1,44 @@
-# Install Docker Server
+### Install Docker Server
+
+> [!IMPORTANT]
+> This was tested with Docker running on Linux. <br>If you can get it working on Windows or MacOS, please update this `README.md` with a PR!<br>
 
-**Note #1:** This was tested with Docker running on Linux. If you can get it working on Windows or MacOS, please update this `README.md` with a PR!
-
 [Install Docker Engine](https://docs.docker.com/engine/install)
 
-**Note #2:** NVidia GPU CuBLAS support requires a NVidia GPU with sufficient VRAM (approximately as much as the size in the table below) and Docker NVidia support (see [container-toolkit/install-guide](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html))
-
-# Simple Dockerfiles for building the llama-cpp-python server with external model bin files
+## Simple Dockerfiles for building the llama-cpp-python server with external model bin files
 
-## openblas_simple - a simple Dockerfile for non-GPU OpenBLAS, where the model is located outside the Docker image
+### openblas_simple
+
+A simple Dockerfile for non-GPU OpenBLAS, where the model is located outside the Docker image:
+
 ```
 cd ./openblas_simple
 docker build -t openblas_simple .
-docker run -e USE_MLOCK=0 -e MODEL=/var/model/<model-path> -v <model-root-path>:/var/model -t openblas_simple
+docker run --cap-add SYS_RESOURCE -e USE_MLOCK=0 -e MODEL=/var/model/<model-path> -v <model-root-path>:/var/model -t openblas_simple
 ```
 where `<model-root-path>/<model-path>` is the full path to the model file on the Docker host system.
 
-## cuda_simple - a simple Dockerfile for CUDA accelerated CuBLAS, where the model is located outside the Docker image
+### cuda_simple
+
+> [!WARNING]
+> Nvidia GPU CuBLAS support requires an Nvidia GPU with sufficient VRAM (approximately as much as the size in the table below) and Docker Nvidia support (see [container-toolkit/install-guide](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html)) <br>
+
+A simple Dockerfile for CUDA-accelerated CuBLAS, where the model is located outside the Docker image:
+
 ```
 cd ./cuda_simple
 docker build -t cuda_simple .
-docker run -e USE_MLOCK=0 -e MODEL=/var/model/<model-path> -v <model-root-path>:/var/model -t cuda_simple
+docker run --gpus=all --cap-add SYS_RESOURCE -e USE_MLOCK=0 -e MODEL=/var/model/<model-path> -v <model-root-path>:/var/model -t cuda_simple
 ```
 where `<model-root-path>/<model-path>` is the full path to the model file on the Docker host system.
 
-# "Open-Llama-in-a-box"
-## Download an Apache V2.0 licensed 3B paramter Open Llama model and install into a Docker image that runs an OpenBLAS-enabled llama-cpp-python server
+--------------------------------------------------------------------------
+
+### "Open-Llama-in-a-box"
+
+Download an Apache V2.0 licensed 3B params Open LLaMA model and install into a Docker image that runs an OpenBLAS-enabled llama-cpp-python server:
 ```
 $ cd ./open_llama
 ./build.sh
 ./start.sh
 ```
 
-# Manually choose your own Llama model from Hugging Face
+### Manually choose your own Llama model from Hugging Face
 `python3 ./hug_model.py -a TheBloke -t llama`
 You should now have a model in the current directory and `model.bin` symlinked to it for the subsequent Docker build and copy step. e.g.
 ```
@@ -39,8 +46,10 @@ docker $ ls -lh *.bin
 -rw-rw-r-- 1 user user 4.8G May 23 18:30 <downloaded-model-file>q5_1.bin
 lrwxrwxrwx 1 user user   24 May 23 18:30 model.bin -> <downloaded-model-file>q5_1.bin
 ```
-**Note #1:** Make sure you have enough disk space to download the model. As the model is then copied into the image you will need at least
-**TWICE** as much disk space as the size of the model:
+
+> [!NOTE]
+> Make sure you have enough disk space to download the model. As the model is then copied into the image you will need at least
+**TWICE** as much disk space as the size of the model:<br>
 
 | Model | Quantized size |
 |------:|----------------:|
@@ -50,17 +59,6 @@ lrwxrwxrwx 1 user user   24 May 23 18:30 model.bin -> <downloaded-model-file>q5_
 | 33B   |          25 GB |
 | 65B   |          50 GB |
 
-**Note #2:** If you want to pass or tune additional parameters, customise `./start_server.sh` before running `docker build ...`
-
-## Use OpenBLAS
-Use if you don't have a NVidia GPU. Defaults to `python:3-slim-bullseye` Docker base image and OpenBLAS:
-### Build:
-`docker build -t openblas .`
-### Run:
-`docker run --cap-add SYS_RESOURCE -t openblas`
-
-## Use CuBLAS
-### Build:
-`docker build --build-arg IMAGE=nvidia/cuda:12.1.1-devel-ubuntu22.04 -t cublas .`
-### Run:
-`docker run --cap-add SYS_RESOURCE -t cublas`
+> [!NOTE]
+> If you want to pass or tune additional parameters, customise `./start_server.sh` before running `docker build ...`
@@ -4,13 +4,24 @@ FROM nvidia/cuda:${CUDA_IMAGE}
 # We need to set the host to 0.0.0.0 to allow outside access
 ENV HOST 0.0.0.0
 
+RUN apt-get update && apt-get upgrade -y \
+    && apt-get install -y git build-essential \
+    python3 python3-pip gcc wget \
+    ocl-icd-opencl-dev opencl-headers clinfo \
+    libclblast-dev libopenblas-dev \
+    && mkdir -p /etc/OpenCL/vendors && echo "libnvidia-opencl.so.1" > /etc/OpenCL/vendors/nvidia.icd
+
 COPY . .
 
-# Install the package
-RUN apt update && apt install -y python3 python3-pip
+# setting build related env vars
+ENV CUDA_DOCKER_ARCH=all
+ENV LLAMA_CUBLAS=1
+
+# Install depencencies
 RUN python3 -m pip install --upgrade pip pytest cmake scikit-build setuptools fastapi uvicorn sse-starlette pydantic-settings
 
-RUN LLAMA_CUBLAS=1 pip install llama-cpp-python
+# Install llama-cpp-python (build with cuda)
+RUN CMAKE_ARGS="-DLLAMA_CUBLAS=on" FORCE_CMAKE=1 pip install llama-cpp-python
 
 # Run the server
 CMD python3 -m llama_cpp.server
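The image above ends by launching `python3 -m llama_cpp.server`, which exposes an OpenAI-compatible HTTP API. A minimal client sketch follows; the host, port (8000 is assumed to be the default), prompt, and parameters are illustrative assumptions, not part of this diff.

```python
import json
import urllib.request

# Assumes the llama_cpp.server container is reachable on localhost:8000.
payload = {"prompt": "Hello, my name is", "max_tokens": 16}
req = urllib.request.Request(
    "http://localhost:8000/v1/completions",
    data=json.dumps(payload).encode("utf-8"),
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(req) as resp:
    result = json.load(resp)
print(result["choices"][0]["text"])
```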
@@ -234,7 +234,7 @@ class Llama:
         rope_freq_scale: float = 1.0,
         n_gqa: Optional[int] = None,  # (TEMPORARY) must be 8 for llama2 70b
         rms_norm_eps: Optional[float] = None,  # (TEMPORARY)
-        mul_mat_q: Optional[bool] = None,  # (TEMPORARY)
+        mul_mat_q: Optional[bool] = None,
         verbose: bool = True,
     ):
         """Load a llama.cpp model from `model_path`.
@@ -297,11 +297,6 @@ class Llama:
         self.params.rope_freq_base = rope_freq_base
         self.params.rope_freq_scale = rope_freq_scale
 
-        if n_gqa is not None:
-            self.params.n_gqa = n_gqa
-
-        if rms_norm_eps is not None:
-            self.params.rms_norm_eps = rms_norm_eps
 
         if mul_mat_q is not None:
             self.params.mul_mat_q = mul_mat_q
@@ -420,11 +415,11 @@ class Llama:
         Returns:
             A list of tokens.
         """
-        assert self.ctx is not None
+        assert self.model is not None
         n_ctx = self._n_ctx
         tokens = (llama_cpp.llama_token * n_ctx)()
-        n_tokens = llama_cpp.llama_tokenize(
-            self.ctx,
+        n_tokens = llama_cpp.llama_tokenize_with_model(
+            self.model,
             text,
             tokens,
             llama_cpp.c_int(n_ctx),
@@ -433,8 +428,8 @@ class Llama:
         if n_tokens < 0:
             n_tokens = abs(n_tokens)
             tokens = (llama_cpp.llama_token * n_tokens)()
-            n_tokens = llama_cpp.llama_tokenize(
-                self.ctx,
+            n_tokens = llama_cpp.llama_tokenize_with_model(
+                self.model,
                 text,
                 tokens,
                 llama_cpp.c_int(n_tokens),
@@ -455,17 +450,19 @@ class Llama:
         Returns:
             The detokenized string.
         """
-        assert self.ctx is not None
+        assert self.model is not None
         output = b""
-        buffer_size = 32
-        buffer = (ctypes.c_char * buffer_size)()
+        size = 8
+        buffer = (ctypes.c_char * size)()
         for token in tokens:
-            n = llama_cpp.llama_token_to_str(
-                self.ctx, llama_cpp.llama_token(token), buffer, buffer_size
+            n = llama_cpp.llama_token_to_str_with_model(
+                self.model, llama_cpp.llama_token(token), buffer, size
             )
-            assert n <= buffer_size
+            assert n <= size
             output += bytes(buffer[:n])
-        return output
+        # NOTE: Llama1 models automatically added a space at the start of the prompt
+        # this line removes a leading space if the first token is a beginning of sentence token
+        return output[1:] if len(tokens) > 0 and tokens[0] == self.token_bos() else output
 
     def set_cache(self, cache: Optional[BaseLlamaCache]):
         """Set the cache.
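The detokenize change above strips the space the tokenizer inserts after a BOS token, so tokenize/detokenize round-trips again. A small sketch of that behaviour; the vocab GGUF path is a placeholder, and exact token IDs depend on the model.

```python
import llama_cpp

# Tokenizer-only load; the vocab GGUF path here is illustrative.
llama = llama_cpp.Llama(model_path="./models/ggml-vocab-llama.gguf", vocab_only=True)

text = b"Hello World"
tokens = llama.tokenize(text)            # BOS token is prepended by default
assert tokens[0] == llama.token_bos()

# detokenize drops the space inserted after BOS, so the bytes round-trip.
assert llama.detokenize(tokens) == text
```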
@@ -892,7 +889,7 @@ class Llama:
         created: int = int(time.time())
         completion_tokens: List[int] = []
         # Add blank space to start of prompt to match OG llama tokenizer
-        prompt_tokens: List[int] = self.tokenize(b" " + prompt.encode("utf-8"))
+        prompt_tokens: List[int] = self.tokenize(prompt.encode("utf-8")) if prompt != "" else [self.token_bos()]
         text: bytes = b""
         returned_tokens: int = 0
         stop = (
@@ -1590,13 +1587,7 @@ class Llama:
             lora_base=self.lora_base,
             lora_path=self.lora_path,
             tensor_split=self.tensor_split,
-            ### TEMPORARY ###
-            n_gqa=self.params.n_gqa,
-            rms_norm_eps=self.params.rms_norm_eps,
-            ### TEMPORARY ###
-            ### DEPRECATED ###
-            n_parts=self.n_parts,
-            ### DEPRECATED ###
+            mul_mat_q=self.params.mul_mat_q,
         )
 
     def __setstate__(self, state):
@@ -1618,14 +1609,8 @@ class Llama:
             lora_base=state["lora_base"],
             lora_path=state["lora_path"],
             tensor_split=state["tensor_split"],
+            mul_mat_q=state["mul_mat_q"],
             verbose=state["verbose"],
-            ### TEMPORARY ###
-            n_gqa=state["n_gqa"],
-            rms_norm_eps=state["rms_norm_eps"],
-            ### TEMPORARY ###
-            ### DEPRECATED ###
-            n_parts=state["n_parts"],
-            ### DEPRECATED ###
         )
 
     def save_state(self) -> LlamaState:
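The two hunks above keep `mul_mat_q` in the state dict that `__getstate__` emits and `__setstate__` consumes, so the setting survives serialization. A minimal sketch of exercising that path; the model path is hypothetical, and unpickling re-creates the model, which requires the model file to be present.

```python
import pickle
import llama_cpp

# Hypothetical GGUF model path; vocab_only keeps the load light.
llm = llama_cpp.Llama(model_path="./models/example-model.Q4_0.gguf", vocab_only=True)

# __getstate__ packages the constructor arguments (now including mul_mat_q);
# __setstate__ rebuilds the model from them on load.
restored = pickle.loads(pickle.dumps(llm))
assert restored.model_path == llm.model_path
```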
@@ -531,6 +531,15 @@ _lib.llama_n_embd.argtypes = [llama_context_p]
 _lib.llama_n_embd.restype = c_int
 
 
+# LLAMA_API enum llama_vocab_type llama_vocab_type(const struct llama_context * ctx);
+def llama_vocab_type(ctx: llama_context_p) -> int:
+    return _lib.llama_vocab_type(ctx)
+
+
+_lib.llama_vocab_type.argtypes = [llama_context_p]
+_lib.llama_vocab_type.restype = c_int
+
+
 # LLAMA_API int llama_model_n_vocab(const struct llama_model * model);
 def llama_model_n_vocab(model: llama_model_p) -> int:
     return _lib.llama_model_n_vocab(model)
|
||||||
|
|
||||||
|
|
||||||
# // Get a string describing the model type
|
# // Get a string describing the model type
|
||||||
# LLAMA_API int llama_model_type(const struct llama_model * model, char * buf, size_t buf_size);
|
# LLAMA_API int llama_model_desc(const struct llama_model * model, char * buf, size_t buf_size);
|
||||||
def llama_model_type(model: llama_model_p, buf: bytes, buf_size: c_size_t) -> int:
|
def llama_model_desc(model: llama_model_p, buf: bytes, buf_size: c_size_t) -> int:
|
||||||
return _lib.llama_model_type(model, buf, buf_size)
|
return _lib.llama_model_desc(model, buf, buf_size)
|
||||||
|
|
||||||
|
|
||||||
_lib.llama_model_type.argtypes = [llama_model_p, c_char_p, c_size_t]
|
_lib.llama_model_desc.argtypes = [llama_model_p, c_char_p, c_size_t]
|
||||||
_lib.llama_model_type.restype = c_int
|
_lib.llama_model_desc.restype = c_int
|
||||||
|
|
||||||
|
|
||||||
|
# // Returns the total size of all the tensors in the model in bytes
|
||||||
|
# LLAMA_API uint64_t llama_model_size(const struct llama_model * model);
|
||||||
|
def llama_model_size(model: llama_model_p) -> int:
|
||||||
|
return _lib.llama_model_size(model)
|
||||||
|
|
||||||
|
|
||||||
|
_lib.llama_model_size.argtypes = [llama_model_p]
|
||||||
|
_lib.llama_model_size.restype = ctypes.c_uint64
|
||||||
|
|
||||||
|
|
||||||
|
# // Returns the total number of parameters in the model
|
||||||
|
# LLAMA_API uint64_t llama_model_n_params(const struct llama_model * model);
|
||||||
|
def llama_model_n_params(model: llama_model_p) -> int:
|
||||||
|
return _lib.llama_model_n_params(model)
|
||||||
|
|
||||||
|
|
||||||
|
_lib.llama_model_n_params.argtypes = [llama_model_p]
|
||||||
|
_lib.llama_model_n_params.restype = ctypes.c_uint64
|
||||||
|
|
||||||
|
|
||||||
# // Returns 0 on success
|
# // Returns 0 on success
|
||||||
|
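The renamed `llama_model_desc` and the new `llama_model_size` / `llama_model_n_params` bindings can be exercised from the low-level API once a model handle is available. A sketch, assuming a `Llama` instance exposes its handle as `.model` (as elsewhere in this diff) and using a hypothetical model path.

```python
import ctypes
import llama_cpp

# Hypothetical GGUF model path; vocab_only keeps the load light.
llm = llama_cpp.Llama(model_path="./models/example-model.Q4_0.gguf", vocab_only=True)

# llama_model_desc writes a human-readable description into a caller-provided buffer.
buf = ctypes.create_string_buffer(1024)
llama_cpp.llama_model_desc(llm.model, buf, len(buf))
print("model:", buf.value.decode("utf-8"))

# New introspection helpers: total tensor bytes and total parameter count.
print("size (bytes):", llama_cpp.llama_model_size(llm.model))
print("parameters:  ", llama_cpp.llama_model_n_params(llm.model))
```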
@@ -849,7 +878,7 @@ _lib.llama_token_get_score.argtypes = [llama_context_p, llama_token]
 _lib.llama_token_get_score.restype = c_float
 
 
-# LLAMA_API llama_token_type llama_token_get_type(const struct llama_context * ctx, llama_token token);
+# LLAMA_API enum llama_token_type llama_token_get_type(const struct llama_context * ctx, llama_token token);
 def llama_token_get_type(ctx: llama_context_p, token: llama_token) -> int:
     return _lib.llama_token_get_type(ctx, token)
@@ -918,32 +947,6 @@ _lib.llama_tokenize.argtypes = [llama_context_p, c_char_p, llama_token_p, c_int,
 _lib.llama_tokenize.restype = c_int
 
 
-# LLAMA_API int llama_tokenize_bpe(
-#     struct llama_context * ctx,
-#     const char * text,
-#     llama_token * tokens,
-#     int n_max_tokens,
-#     bool add_bos);
-def llama_tokenize_bpe(
-    ctx: llama_context_p,
-    text: bytes,
-    tokens,  # type: Array[llama_token]
-    n_max_tokens: c_int,
-    add_bos: c_bool,
-) -> int:
-    return _lib.llama_tokenize_bpe(ctx, text, tokens, n_max_tokens, add_bos)
-
-
-_lib.llama_tokenize_bpe.argtypes = [
-    llama_context_p,
-    c_char_p,
-    llama_token_p,
-    c_int,
-    c_bool,
-]
-_lib.llama_tokenize_bpe.restype = c_int
-
-
 # LLAMA_API int llama_tokenize_with_model(
 #     const struct llama_model * model,
 #     const char * text,
@@ -993,30 +996,24 @@ _lib.llama_tokenize_with_model.argtypes = [
 _lib.llama_tokenize_with_model.restype = c_int
 
 
-# LLAMA_API int llama_token_to_str_bpe(
-#     const struct llama_context * ctx,
+# LLAMA_API int llama_token_to_str_with_model(
+#     const struct llama_model * model,
 #     llama_token token,
 #     char * buf,
 #     int length);
-def llama_token_to_str_bpe(
-    ctx: llama_context_p, token: llama_token, buf: bytes, length: c_int
+def llama_token_to_str_with_model(
+    model: llama_model_p, token: llama_token, buf: bytes, length: c_int
 ) -> int:
-    return _lib.llama_token_to_str_bpe(ctx, token, buf, length)
+    return _lib.llama_token_to_str_with_model(model, token, buf, length)
 
 
-_lib.llama_token_to_str_bpe.argtypes = [llama_context_p, llama_token, c_char_p, c_int]
-_lib.llama_token_to_str_bpe.restype = c_int
-
-
-# LLAMA_API const char * llama_token_to_str_with_model(
-#     const struct llama_model * model,
-#     llama_token token);
-def llama_token_to_str_with_model(model: llama_model_p, token: llama_token) -> bytes:
-    return _lib.llama_token_to_str_with_model(model, token)
-
-
-_lib.llama_token_to_str_with_model.argtypes = [llama_model_p, llama_token]
-_lib.llama_token_to_str_with_model.restype = c_char_p
+_lib.llama_token_to_str_with_model.argtypes = [
+    llama_model_p,
+    llama_token,
+    c_char_p,
+    c_int,
+]
+_lib.llama_token_to_str_with_model.restype = c_int
 
 
 # //
@@ -1052,6 +1049,74 @@ def llama_grammar_free(grammar: llama_grammar_p):
 _lib.llama_grammar_free.argtypes = [llama_grammar_p]
 _lib.llama_grammar_free.restype = None
 
+
+# //
+# // Beam search
+# //
+
+
+# struct llama_beam_view {
+#     const llama_token * tokens;
+#     size_t n_tokens;
+#     float  p;   // Cumulative beam probability (renormalized relative to all beams)
+#     bool   eob; // Callback should set this to true when a beam is at end-of-beam.
+# };
+class llama_beam_view(ctypes.Structure):
+    _fields_ = [
+        ("tokens", llama_token_p),
+        ("n_tokens", c_size_t),
+        ("p", c_float),
+        ("eob", c_bool),
+    ]
+
+
+# // Passed to beam_search_callback function.
+# // Whenever 0 < common_prefix_length, this number of tokens should be copied from any of the beams
+# // (e.g. beams[0]) as they will be removed (shifted) from all beams in all subsequent callbacks.
+# // These pointers are valid only during the synchronous callback, so should not be saved.
+# struct llama_beams_state {
+#     struct llama_beam_view * beam_views;
+#     size_t n_beams;              // Number of elements in beam_views[].
+#     size_t common_prefix_length; // Current max length of prefix tokens shared by all beams.
+#     bool   last_call;            // True iff this is the last callback invocation.
+# };
+class llama_beams_state(ctypes.Structure):
+    _fields_ = [
+        ("beam_views", POINTER(llama_beam_view)),
+        ("n_beams", c_size_t),
+        ("common_prefix_length", c_size_t),
+        ("last_call", c_bool),
+    ]
+
+
+# // Type of pointer to the beam_search_callback function.
+# // void* callback_data is any custom data passed to llama_beam_search, that is subsequently
+# // passed back to beam_search_callback. This avoids having to use global variables in the callback.
+# typedef void (*llama_beam_search_callback_fn_t)(void * callback_data, llama_beams_state);
+llama_beam_search_callback_fn_t = ctypes.CFUNCTYPE(None, c_void_p, llama_beams_state)
+
+
+# /// @details Deterministically returns entire sentence constructed by a beam search.
+# /// @param ctx Pointer to the llama_context.
+# /// @param callback Invoked for each iteration of the beam_search loop, passing in beams_state.
+# /// @param callback_data A pointer that is simply passed back to callback.
+# /// @param n_beams Number of beams to use.
+# /// @param n_past Number of tokens already evaluated.
+# /// @param n_predict Maximum number of tokens to predict. EOS may occur earlier.
+# /// @param n_threads Number of threads as passed to llama_eval().
+# LLAMA_API void llama_beam_search(struct llama_context * ctx, llama_beam_search_callback_fn_t callback, void * callback_data, size_t n_beams, int n_past, int n_predict, int n_threads);
+def llama_beam_search(
+    ctx: llama_context_p,
+    callback: "ctypes._CFuncPtr[None, c_void_p, llama_beams_state]",  # type: ignore
+    callback_data: c_void_p,
+    n_beams: c_size_t,
+    n_past: c_int,
+    n_predict: c_int,
+    n_threads: c_int,
+):
+    return _lib.llama_beam_search(
+        ctx, callback, callback_data, n_beams, n_past, n_predict, n_threads
+    )
+
+
 # //
 # // Sampling functions
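The new beam-search bindings mirror the C API: the caller supplies a `ctypes` callback that inspects `llama_beams_state` on each iteration. A minimal sketch of wiring up such a callback, assuming these names are re-exported from the `llama_cpp` package; the context setup and the actual call are only indicated in comments.

```python
import ctypes
import llama_cpp

collected_beams = []

def on_beam_state(callback_data, beams_state):
    # beams_state is received by value; its pointers are only valid for the
    # duration of this synchronous callback, so copy anything you need.
    for i in range(beams_state.n_beams):
        view = beams_state.beam_views[i]
        collected_beams.append([view.tokens[j] for j in range(view.n_tokens)])

# Wrap the Python function in the ctypes callback type defined by the binding.
c_callback = llama_cpp.llama_beam_search_callback_fn_t(on_beam_state)

# With an evaluated llama context `ctx`, one would then call (not run here):
# llama_cpp.llama_beam_search(ctx, c_callback, None, 4, n_past, 32, 4)
```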
@@ -4,7 +4,7 @@ build-backend = "scikit_build_core.build"
 
 [project]
 name = "llama_cpp_python"
-version = "0.1.78"
+version = "0.1.79"
 description = "Python bindings for the llama.cpp library"
 readme = "README.md"
 license = { text = "MIT" }
@@ -51,3 +51,7 @@ cmake.verbose = true
 [project.urls]
 Homepage = "https://github.com/abetlen/llama-cpp-python"
 Issues = "https://github.com/abetlen/llama-cpp-python/issues"
+
+[tool.pytest.ini_options]
+addopts = "--ignore=vendor"
@@ -1,20 +1,32 @@
+import pytest
 import llama_cpp
 
-MODEL = "./vendor/llama.cpp/models/ggml-vocab.bin"
+MODEL = "./vendor/llama.cpp/models/ggml-vocab-llama.gguf"
 
 
-def test_llama():
-    llama = llama_cpp.Llama(model_path=MODEL, vocab_only=True)
+def test_llama_cpp_tokenization():
+    llama = llama_cpp.Llama(model_path=MODEL, vocab_only=True, verbose=False)
 
     assert llama
     assert llama.ctx is not None
 
     text = b"Hello World"
 
-    assert llama.detokenize(llama.tokenize(text)) == text
+    tokens = llama.tokenize(text)
+    assert tokens[0] == llama.token_bos()
+    assert tokens == [1, 15043, 2787]
+    detokenized = llama.detokenize(tokens)
+    assert detokenized == text
+
+    tokens = llama.tokenize(text, add_bos=False)
+    assert tokens[0] != llama.token_bos()
+    assert tokens == [15043, 2787]
+
+    detokenized = llama.detokenize(tokens)
+    assert detokenized != text
 
 
-# @pytest.mark.skip(reason="need to update sample mocking")
+@pytest.mark.skip(reason="bug in tokenization where leading space is always inserted even if not after eos")
 def test_llama_patch(monkeypatch):
     llama = llama_cpp.Llama(model_path=MODEL, vocab_only=True)
     n_vocab = llama_cpp.llama_n_vocab(llama.ctx)
vendor/llama.cpp (vendored submodule)

@@ -1 +1 @@
-Subproject commit f5fe98d11bdf9e7797bcfb05c0c3601ffc4b9d26
+Subproject commit 232caf3c1581a6cb023571780ff41dc2d66d1ca0