Merge pull request #633 from abetlen/gguf
GGUF (Breaking Change to Model Files)
This commit is contained in:
commit
915bbeacc5
6 changed files with 510 additions and 362 deletions
|
@ -17,6 +17,9 @@ This package provides:
|
||||||
|
|
||||||
Documentation is available at [https://llama-cpp-python.readthedocs.io/en/latest](https://llama-cpp-python.readthedocs.io/en/latest).
|
Documentation is available at [https://llama-cpp-python.readthedocs.io/en/latest](https://llama-cpp-python.readthedocs.io/en/latest).
|
||||||
|
|
||||||
|
> [!WARNING]
|
||||||
|
> Starting with version 0.1.79 the model format has changed from `ggmlv3` to `gguf`. Old model files can be converted using the `convert-llama-ggmlv3-to-gguf.py` script in [`llama.cpp`](https://github.com/ggerganov/llama.cpp)
|
||||||
|
|
||||||
|
|
||||||
## Installation from PyPI (recommended)
|
## Installation from PyPI (recommended)
|
||||||
|
|
||||||
|
|
|
@ -228,7 +228,7 @@ class Llama:
|
||||||
rope_freq_scale: float = 1.0,
|
rope_freq_scale: float = 1.0,
|
||||||
n_gqa: Optional[int] = None, # (TEMPORARY) must be 8 for llama2 70b
|
n_gqa: Optional[int] = None, # (TEMPORARY) must be 8 for llama2 70b
|
||||||
rms_norm_eps: Optional[float] = None, # (TEMPORARY)
|
rms_norm_eps: Optional[float] = None, # (TEMPORARY)
|
||||||
mul_mat_q: Optional[bool] = None, # (TEMPORARY)
|
mul_mat_q: Optional[bool] = None,
|
||||||
verbose: bool = True,
|
verbose: bool = True,
|
||||||
):
|
):
|
||||||
"""Load a llama.cpp model from `model_path`.
|
"""Load a llama.cpp model from `model_path`.
|
||||||
|
@ -290,11 +290,6 @@ class Llama:
|
||||||
self.params.rope_freq_base = rope_freq_base
|
self.params.rope_freq_base = rope_freq_base
|
||||||
self.params.rope_freq_scale = rope_freq_scale
|
self.params.rope_freq_scale = rope_freq_scale
|
||||||
|
|
||||||
if n_gqa is not None:
|
|
||||||
self.params.n_gqa = n_gqa
|
|
||||||
|
|
||||||
if rms_norm_eps is not None:
|
|
||||||
self.params.rms_norm_eps = rms_norm_eps
|
|
||||||
|
|
||||||
if mul_mat_q is not None:
|
if mul_mat_q is not None:
|
||||||
self.params.mul_mat_q = mul_mat_q
|
self.params.mul_mat_q = mul_mat_q
|
||||||
|
@ -371,8 +366,8 @@ class Llama:
|
||||||
sorted=sorted,
|
sorted=sorted,
|
||||||
)
|
)
|
||||||
self._candidates = candidates
|
self._candidates = candidates
|
||||||
self._token_nl = Llama.token_nl()
|
self._token_nl = self.token_nl()
|
||||||
self._token_eos = Llama.token_eos()
|
self._token_eos = self.token_eos()
|
||||||
self._candidates_data_id = np.arange(self._n_vocab, dtype=np.intc) # type: ignore
|
self._candidates_data_id = np.arange(self._n_vocab, dtype=np.intc) # type: ignore
|
||||||
self._candidates_data_p = np.zeros(self._n_vocab, dtype=np.single)
|
self._candidates_data_p = np.zeros(self._n_vocab, dtype=np.single)
|
||||||
|
|
||||||
|
@ -413,11 +408,11 @@ class Llama:
|
||||||
Returns:
|
Returns:
|
||||||
A list of tokens.
|
A list of tokens.
|
||||||
"""
|
"""
|
||||||
assert self.ctx is not None
|
assert self.model is not None
|
||||||
n_ctx = self._n_ctx
|
n_ctx = self._n_ctx
|
||||||
tokens = (llama_cpp.llama_token * n_ctx)()
|
tokens = (llama_cpp.llama_token * n_ctx)()
|
||||||
n_tokens = llama_cpp.llama_tokenize(
|
n_tokens = llama_cpp.llama_tokenize_with_model(
|
||||||
self.ctx,
|
self.model,
|
||||||
text,
|
text,
|
||||||
tokens,
|
tokens,
|
||||||
llama_cpp.c_int(n_ctx),
|
llama_cpp.c_int(n_ctx),
|
||||||
|
@ -426,8 +421,8 @@ class Llama:
|
||||||
if n_tokens < 0:
|
if n_tokens < 0:
|
||||||
n_tokens = abs(n_tokens)
|
n_tokens = abs(n_tokens)
|
||||||
tokens = (llama_cpp.llama_token * n_tokens)()
|
tokens = (llama_cpp.llama_token * n_tokens)()
|
||||||
n_tokens = llama_cpp.llama_tokenize(
|
n_tokens = llama_cpp.llama_tokenize_with_model(
|
||||||
self.ctx,
|
self.model,
|
||||||
text,
|
text,
|
||||||
tokens,
|
tokens,
|
||||||
llama_cpp.c_int(n_tokens),
|
llama_cpp.c_int(n_tokens),
|
||||||
|
@ -448,13 +443,19 @@ class Llama:
|
||||||
Returns:
|
Returns:
|
||||||
The detokenized string.
|
The detokenized string.
|
||||||
"""
|
"""
|
||||||
assert self.ctx is not None
|
assert self.model is not None
|
||||||
output = b""
|
output = b""
|
||||||
|
size = 8
|
||||||
|
buffer = (ctypes.c_char * size)()
|
||||||
for token in tokens:
|
for token in tokens:
|
||||||
output += llama_cpp.llama_token_to_str(
|
n = llama_cpp.llama_token_to_str_with_model(
|
||||||
self.ctx, llama_cpp.llama_token(token)
|
self.model, llama_cpp.llama_token(token), buffer, size
|
||||||
)
|
)
|
||||||
return output
|
assert n <= size
|
||||||
|
output += bytes(buffer[:n])
|
||||||
|
# NOTE: Llama1 models automatically added a space at the start of the prompt
|
||||||
|
# this line removes a leading space if the first token is a beginning of sentence token
|
||||||
|
return output[1:] if len(tokens) > 0 and tokens[0] == self.token_bos() else output
|
||||||
|
|
||||||
def set_cache(self, cache: Optional[BaseLlamaCache]):
|
def set_cache(self, cache: Optional[BaseLlamaCache]):
|
||||||
"""Set the cache.
|
"""Set the cache.
|
||||||
|
@ -885,7 +886,7 @@ class Llama:
|
||||||
created: int = int(time.time())
|
created: int = int(time.time())
|
||||||
completion_tokens: List[int] = []
|
completion_tokens: List[int] = []
|
||||||
# Add blank space to start of prompt to match OG llama tokenizer
|
# Add blank space to start of prompt to match OG llama tokenizer
|
||||||
prompt_tokens: List[int] = self.tokenize(b" " + prompt.encode("utf-8"))
|
prompt_tokens: List[int] = self.tokenize(prompt.encode("utf-8")) if prompt != "" else [self.token_bos()]
|
||||||
text: bytes = b""
|
text: bytes = b""
|
||||||
returned_tokens: int = 0
|
returned_tokens: int = 0
|
||||||
stop = (
|
stop = (
|
||||||
|
@ -1581,13 +1582,7 @@ class Llama:
|
||||||
lora_base=self.lora_base,
|
lora_base=self.lora_base,
|
||||||
lora_path=self.lora_path,
|
lora_path=self.lora_path,
|
||||||
tensor_split=self.tensor_split,
|
tensor_split=self.tensor_split,
|
||||||
### TEMPORARY ###
|
mul_mat_q=self.params.mul_mat_q,
|
||||||
n_gqa=self.params.n_gqa,
|
|
||||||
rms_norm_eps=self.params.rms_norm_eps,
|
|
||||||
### TEMPORARY ###
|
|
||||||
### DEPRECATED ###
|
|
||||||
n_parts=self.n_parts,
|
|
||||||
### DEPRECATED ###
|
|
||||||
)
|
)
|
||||||
|
|
||||||
def __setstate__(self, state):
|
def __setstate__(self, state):
|
||||||
|
@ -1609,14 +1604,8 @@ class Llama:
|
||||||
lora_base=state["lora_base"],
|
lora_base=state["lora_base"],
|
||||||
lora_path=state["lora_path"],
|
lora_path=state["lora_path"],
|
||||||
tensor_split=state["tensor_split"],
|
tensor_split=state["tensor_split"],
|
||||||
|
mul_mat_q=state["mul_mat_q"],
|
||||||
verbose=state["verbose"],
|
verbose=state["verbose"],
|
||||||
### TEMPORARY ###
|
|
||||||
n_gqa=state["n_gqa"],
|
|
||||||
rms_norm_eps=state["rms_norm_eps"],
|
|
||||||
### TEMPORARY ###
|
|
||||||
### DEPRECATED ###
|
|
||||||
n_parts=state["n_parts"],
|
|
||||||
### DEPRECATED ###
|
|
||||||
)
|
)
|
||||||
|
|
||||||
def save_state(self) -> LlamaState:
|
def save_state(self) -> LlamaState:
|
||||||
|
@ -1681,20 +1670,20 @@ class Llama:
|
||||||
assert self.ctx is not None
|
assert self.ctx is not None
|
||||||
return LlamaTokenizer(self)
|
return LlamaTokenizer(self)
|
||||||
|
|
||||||
@staticmethod
|
def token_eos(self) -> int:
|
||||||
def token_eos() -> int:
|
|
||||||
"""Return the end-of-sequence token."""
|
"""Return the end-of-sequence token."""
|
||||||
return llama_cpp.llama_token_eos()
|
assert self.ctx is not None
|
||||||
|
return llama_cpp.llama_token_eos(self.ctx)
|
||||||
|
|
||||||
@staticmethod
|
def token_bos(self) -> int:
|
||||||
def token_bos() -> int:
|
|
||||||
"""Return the beginning-of-sequence token."""
|
"""Return the beginning-of-sequence token."""
|
||||||
return llama_cpp.llama_token_bos()
|
assert self.ctx is not None
|
||||||
|
return llama_cpp.llama_token_bos(self.ctx)
|
||||||
|
|
||||||
@staticmethod
|
def token_nl(self) -> int:
|
||||||
def token_nl() -> int:
|
|
||||||
"""Return the newline token."""
|
"""Return the newline token."""
|
||||||
return llama_cpp.llama_token_nl()
|
assert self.ctx is not None
|
||||||
|
return llama_cpp.llama_token_nl(self.ctx)
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def logits_to_logprobs(logits: List[float]) -> List[float]:
|
def logits_to_logprobs(logits: List[float]) -> List[float]:
|
||||||
|
|
|
@ -90,26 +90,17 @@ GGML_USE_CUBLAS = hasattr(_lib, "ggml_init_cublas")
|
||||||
GGML_CUDA_MAX_DEVICES = ctypes.c_int(16)
|
GGML_CUDA_MAX_DEVICES = ctypes.c_int(16)
|
||||||
LLAMA_MAX_DEVICES = GGML_CUDA_MAX_DEVICES if GGML_USE_CUBLAS else ctypes.c_int(1)
|
LLAMA_MAX_DEVICES = GGML_CUDA_MAX_DEVICES if GGML_USE_CUBLAS else ctypes.c_int(1)
|
||||||
|
|
||||||
# #define LLAMA_FILE_MAGIC_GGJT 0x67676a74u // 'ggjt'
|
# define LLAMA_DEFAULT_SEED 0xFFFFFFFF
|
||||||
LLAMA_FILE_MAGIC_GGJT = ctypes.c_uint(0x67676A74)
|
LLAMA_DEFAULT_SEED = ctypes.c_int(0xFFFFFFFF)
|
||||||
# #define LLAMA_FILE_MAGIC_GGLA 0x67676c61u // 'ggla'
|
|
||||||
LLAMA_FILE_MAGIC_GGLA = ctypes.c_uint(0x67676C61)
|
# define LLAMA_FILE_MAGIC_GGSN 0x6767736eu // 'ggsn'
|
||||||
# #define LLAMA_FILE_MAGIC_GGMF 0x67676d66u // 'ggmf'
|
|
||||||
LLAMA_FILE_MAGIC_GGMF = ctypes.c_uint(0x67676D66)
|
|
||||||
# #define LLAMA_FILE_MAGIC_GGML 0x67676d6cu // 'ggml'
|
|
||||||
LLAMA_FILE_MAGIC_GGML = ctypes.c_uint(0x67676D6C)
|
|
||||||
# #define LLAMA_FILE_MAGIC_GGSN 0x6767736eu // 'ggsn'
|
|
||||||
LLAMA_FILE_MAGIC_GGSN = ctypes.c_uint(0x6767736E)
|
LLAMA_FILE_MAGIC_GGSN = ctypes.c_uint(0x6767736E)
|
||||||
|
|
||||||
# #define LLAMA_FILE_VERSION 3
|
# define LLAMA_SESSION_MAGIC LLAMA_FILE_MAGIC_GGSN
|
||||||
LLAMA_FILE_VERSION = c_int(3)
|
|
||||||
LLAMA_FILE_MAGIC = LLAMA_FILE_MAGIC_GGJT
|
|
||||||
LLAMA_FILE_MAGIC_UNVERSIONED = LLAMA_FILE_MAGIC_GGML
|
|
||||||
LLAMA_SESSION_MAGIC = LLAMA_FILE_MAGIC_GGSN
|
LLAMA_SESSION_MAGIC = LLAMA_FILE_MAGIC_GGSN
|
||||||
LLAMA_SESSION_VERSION = c_int(1)
|
# define LLAMA_SESSION_VERSION 1
|
||||||
|
LLAMA_SESSION_VERSION = ctypes.c_int(1)
|
||||||
|
|
||||||
# #define LLAMA_DEFAULT_SEED 0xFFFFFFFF
|
|
||||||
LLAMA_DEFAULT_SEED = c_int(0xFFFFFFFF)
|
|
||||||
|
|
||||||
# struct llama_model;
|
# struct llama_model;
|
||||||
llama_model_p = c_void_p
|
llama_model_p = c_void_p
|
||||||
|
@ -122,6 +113,82 @@ llama_context_p = c_void_p
|
||||||
llama_token = c_int
|
llama_token = c_int
|
||||||
llama_token_p = POINTER(llama_token)
|
llama_token_p = POINTER(llama_token)
|
||||||
|
|
||||||
|
# enum llama_log_level {
|
||||||
|
# LLAMA_LOG_LEVEL_ERROR = 2,
|
||||||
|
# LLAMA_LOG_LEVEL_WARN = 3,
|
||||||
|
# LLAMA_LOG_LEVEL_INFO = 4
|
||||||
|
# };
|
||||||
|
LLAMA_LOG_LEVEL_ERROR = c_int(2)
|
||||||
|
LLAMA_LOG_LEVEL_WARN = c_int(3)
|
||||||
|
LLAMA_LOG_LEVEL_INFO = c_int(4)
|
||||||
|
|
||||||
|
# enum llama_vocab_type {
|
||||||
|
# LLAMA_VOCAB_TYPE_SPM = 0, // SentencePiece
|
||||||
|
# LLAMA_VOCAB_TYPE_BPE = 1, // Byte Pair Encoding
|
||||||
|
# };
|
||||||
|
LLAMA_VOCAB_TYPE_SPM = c_int(0)
|
||||||
|
LLAMA_VOCAB_TYPE_BPE = c_int(1)
|
||||||
|
|
||||||
|
|
||||||
|
# enum llama_token_type {
|
||||||
|
# LLAMA_TOKEN_TYPE_UNDEFINED = 0,
|
||||||
|
# LLAMA_TOKEN_TYPE_NORMAL = 1,
|
||||||
|
# LLAMA_TOKEN_TYPE_UNKNOWN = 2,
|
||||||
|
# LLAMA_TOKEN_TYPE_CONTROL = 3,
|
||||||
|
# LLAMA_TOKEN_TYPE_USER_DEFINED = 4,
|
||||||
|
# LLAMA_TOKEN_TYPE_UNUSED = 5,
|
||||||
|
# LLAMA_TOKEN_TYPE_BYTE = 6,
|
||||||
|
# };
|
||||||
|
LLAMA_TOKEN_TYPE_UNDEFINED = c_int(0)
|
||||||
|
LLAMA_TOKEN_TYPE_NORMAL = c_int(1)
|
||||||
|
LLAMA_TOKEN_TYPE_UNKNOWN = c_int(2)
|
||||||
|
LLAMA_TOKEN_TYPE_CONTROL = c_int(3)
|
||||||
|
LLAMA_TOKEN_TYPE_USER_DEFINED = c_int(4)
|
||||||
|
LLAMA_TOKEN_TYPE_UNUSED = c_int(5)
|
||||||
|
LLAMA_TOKEN_TYPE_BYTE = c_int(6)
|
||||||
|
|
||||||
|
# enum llama_ftype {
|
||||||
|
# LLAMA_FTYPE_ALL_F32 = 0,
|
||||||
|
# LLAMA_FTYPE_MOSTLY_F16 = 1, // except 1d tensors
|
||||||
|
# LLAMA_FTYPE_MOSTLY_Q4_0 = 2, // except 1d tensors
|
||||||
|
# LLAMA_FTYPE_MOSTLY_Q4_1 = 3, // except 1d tensors
|
||||||
|
# LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16
|
||||||
|
# // LLAMA_FTYPE_MOSTLY_Q4_2 = 5, // support has been removed
|
||||||
|
# // LLAMA_FTYPE_MOSTLY_Q4_3 = 6, // support has been removed
|
||||||
|
# LLAMA_FTYPE_MOSTLY_Q8_0 = 7, // except 1d tensors
|
||||||
|
# LLAMA_FTYPE_MOSTLY_Q5_0 = 8, // except 1d tensors
|
||||||
|
# LLAMA_FTYPE_MOSTLY_Q5_1 = 9, // except 1d tensors
|
||||||
|
# LLAMA_FTYPE_MOSTLY_Q2_K = 10,// except 1d tensors
|
||||||
|
# LLAMA_FTYPE_MOSTLY_Q3_K_S = 11,// except 1d tensors
|
||||||
|
# LLAMA_FTYPE_MOSTLY_Q3_K_M = 12,// except 1d tensors
|
||||||
|
# LLAMA_FTYPE_MOSTLY_Q3_K_L = 13,// except 1d tensors
|
||||||
|
# LLAMA_FTYPE_MOSTLY_Q4_K_S = 14,// except 1d tensors
|
||||||
|
# LLAMA_FTYPE_MOSTLY_Q4_K_M = 15,// except 1d tensors
|
||||||
|
# LLAMA_FTYPE_MOSTLY_Q5_K_S = 16,// except 1d tensors
|
||||||
|
# LLAMA_FTYPE_MOSTLY_Q5_K_M = 17,// except 1d tensors
|
||||||
|
# LLAMA_FTYPE_MOSTLY_Q6_K = 18,// except 1d tensors
|
||||||
|
#
|
||||||
|
# LLAMA_FTYPE_GUESSED = 1024, // not specified in the model file
|
||||||
|
# };
|
||||||
|
LLAMA_FTYPE_ALL_F32 = c_int(0)
|
||||||
|
LLAMA_FTYPE_MOSTLY_F16 = c_int(1)
|
||||||
|
LLAMA_FTYPE_MOSTLY_Q4_0 = c_int(2)
|
||||||
|
LLAMA_FTYPE_MOSTLY_Q4_1 = c_int(3)
|
||||||
|
LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = c_int(4)
|
||||||
|
LLAMA_FTYPE_MOSTLY_Q8_0 = c_int(7)
|
||||||
|
LLAMA_FTYPE_MOSTLY_Q5_0 = c_int(8)
|
||||||
|
LLAMA_FTYPE_MOSTLY_Q5_1 = c_int(9)
|
||||||
|
LLAMA_FTYPE_MOSTLY_Q2_K = c_int(10)
|
||||||
|
LLAMA_FTYPE_MOSTLY_Q3_K_S = c_int(11)
|
||||||
|
LLAMA_FTYPE_MOSTLY_Q3_K_M = c_int(12)
|
||||||
|
LLAMA_FTYPE_MOSTLY_Q3_K_L = c_int(13)
|
||||||
|
LLAMA_FTYPE_MOSTLY_Q4_K_S = c_int(14)
|
||||||
|
LLAMA_FTYPE_MOSTLY_Q4_K_M = c_int(15)
|
||||||
|
LLAMA_FTYPE_MOSTLY_Q5_K_S = c_int(16)
|
||||||
|
LLAMA_FTYPE_MOSTLY_Q5_K_M = c_int(17)
|
||||||
|
LLAMA_FTYPE_MOSTLY_Q6_K = c_int(18)
|
||||||
|
LLAMA_FTYPE_GUESSED = c_int(1024)
|
||||||
|
|
||||||
|
|
||||||
# typedef struct llama_token_data {
|
# typedef struct llama_token_data {
|
||||||
# llama_token id; // token id
|
# llama_token id; // token id
|
||||||
|
@ -157,35 +224,13 @@ llama_token_data_array_p = POINTER(llama_token_data_array)
|
||||||
# typedef void (*llama_progress_callback)(float progress, void *ctx);
|
# typedef void (*llama_progress_callback)(float progress, void *ctx);
|
||||||
llama_progress_callback = ctypes.CFUNCTYPE(None, c_float, c_void_p)
|
llama_progress_callback = ctypes.CFUNCTYPE(None, c_float, c_void_p)
|
||||||
|
|
||||||
|
|
||||||
# enum llama_log_level {
|
|
||||||
# LLAMA_LOG_LEVEL_ERROR = 2,
|
|
||||||
# LLAMA_LOG_LEVEL_WARN = 3,
|
|
||||||
# LLAMA_LOG_LEVEL_INFO = 4
|
|
||||||
# };
|
|
||||||
LLAMA_LOG_LEVEL_ERROR = c_int(2)
|
|
||||||
LLAMA_LOG_LEVEL_WARN = c_int(3)
|
|
||||||
LLAMA_LOG_LEVEL_INFO = c_int(4)
|
|
||||||
|
|
||||||
|
|
||||||
# // Signature for logging events
|
|
||||||
# // Note that text includes the new line character at the end for most events.
|
|
||||||
# // If your logging mechanism cannot handle that, check if the last character is '\n' and strip it
|
|
||||||
# // if it exists.
|
|
||||||
# // It might not exist for progress report where '.' is output repeatedly.
|
|
||||||
# typedef void (*llama_log_callback)(enum llama_log_level level, const char * text, void * user_data);
|
|
||||||
llama_log_callback = ctypes.CFUNCTYPE(None, c_int, c_char_p, c_void_p)
|
|
||||||
|
|
||||||
|
|
||||||
# struct llama_context_params {
|
# struct llama_context_params {
|
||||||
# uint32_t seed; // RNG seed, -1 for random
|
# uint32_t seed; // RNG seed, -1 for random
|
||||||
# int32_t n_ctx; // text context
|
# int32_t n_ctx; // text context
|
||||||
# int32_t n_batch; // prompt processing batch size
|
# int32_t n_batch; // prompt processing batch size
|
||||||
# int32_t n_gqa; // grouped-query attention (TEMP - will be moved to model hparams)
|
|
||||||
# float rms_norm_eps; // rms norm epsilon (TEMP - will be moved to model hparams)
|
|
||||||
# int32_t n_gpu_layers; // number of layers to store in VRAM
|
# int32_t n_gpu_layers; // number of layers to store in VRAM
|
||||||
# int32_t main_gpu; // the GPU that is used for scratch and small tensors
|
# int32_t main_gpu; // the GPU that is used for scratch and small tensors
|
||||||
#
|
|
||||||
# const float * tensor_split; // how to split layers across multiple GPUs (size: LLAMA_MAX_DEVICES)
|
# const float * tensor_split; // how to split layers across multiple GPUs (size: LLAMA_MAX_DEVICES)
|
||||||
|
|
||||||
# // ref: https://github.com/ggerganov/llama.cpp/pull/2054
|
# // ref: https://github.com/ggerganov/llama.cpp/pull/2054
|
||||||
|
@ -213,11 +258,9 @@ class llama_context_params(Structure):
|
||||||
("seed", c_uint32),
|
("seed", c_uint32),
|
||||||
("n_ctx", c_int32),
|
("n_ctx", c_int32),
|
||||||
("n_batch", c_int32),
|
("n_batch", c_int32),
|
||||||
("n_gqa", c_int32),
|
|
||||||
("rms_norm_eps", c_float),
|
|
||||||
("n_gpu_layers", c_int32),
|
("n_gpu_layers", c_int32),
|
||||||
("main_gpu", c_int32),
|
("main_gpu", c_int32),
|
||||||
("tensor_split", POINTER(c_float)),
|
("tensor_split", c_float_p),
|
||||||
("rope_freq_base", c_float),
|
("rope_freq_base", c_float),
|
||||||
("rope_freq_scale", c_float),
|
("rope_freq_scale", c_float),
|
||||||
("progress_callback", llama_progress_callback),
|
("progress_callback", llama_progress_callback),
|
||||||
|
@ -235,44 +278,14 @@ class llama_context_params(Structure):
|
||||||
|
|
||||||
llama_context_params_p = POINTER(llama_context_params)
|
llama_context_params_p = POINTER(llama_context_params)
|
||||||
|
|
||||||
# enum llama_ftype {
|
|
||||||
# LLAMA_FTYPE_ALL_F32 = 0,
|
# // Signature for logging events
|
||||||
# LLAMA_FTYPE_MOSTLY_F16 = 1, // except 1d tensors
|
# // Note that text includes the new line character at the end for most events.
|
||||||
# LLAMA_FTYPE_MOSTLY_Q4_0 = 2, // except 1d tensors
|
# // If your logging mechanism cannot handle that, check if the last character is '\n' and strip it
|
||||||
# LLAMA_FTYPE_MOSTLY_Q4_1 = 3, // except 1d tensors
|
# // if it exists.
|
||||||
# LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16
|
# // It might not exist for progress report where '.' is output repeatedly.
|
||||||
# // LLAMA_FTYPE_MOSTLY_Q4_2 = 5, // support has been removed
|
# typedef void (*llama_log_callback)(enum llama_log_level level, const char * text, void * user_data);
|
||||||
# // LLAMA_FTYPE_MOSTLY_Q4_3 = 6, // support has been removed
|
llama_log_callback = ctypes.CFUNCTYPE(None, c_int, c_char_p, c_void_p)
|
||||||
# LLAMA_FTYPE_MOSTLY_Q8_0 = 7, // except 1d tensors
|
|
||||||
# LLAMA_FTYPE_MOSTLY_Q5_0 = 8, // except 1d tensors
|
|
||||||
# LLAMA_FTYPE_MOSTLY_Q5_1 = 9, // except 1d tensors
|
|
||||||
# LLAMA_FTYPE_MOSTLY_Q2_K = 10,// except 1d tensors
|
|
||||||
# LLAMA_FTYPE_MOSTLY_Q3_K_S = 11,// except 1d tensors
|
|
||||||
# LLAMA_FTYPE_MOSTLY_Q3_K_M = 12,// except 1d tensors
|
|
||||||
# LLAMA_FTYPE_MOSTLY_Q3_K_L = 13,// except 1d tensors
|
|
||||||
# LLAMA_FTYPE_MOSTLY_Q4_K_S = 14,// except 1d tensors
|
|
||||||
# LLAMA_FTYPE_MOSTLY_Q4_K_M = 15,// except 1d tensors
|
|
||||||
# LLAMA_FTYPE_MOSTLY_Q5_K_S = 16,// except 1d tensors
|
|
||||||
# LLAMA_FTYPE_MOSTLY_Q5_K_M = 17,// except 1d tensors
|
|
||||||
# LLAMA_FTYPE_MOSTLY_Q6_K = 18,// except 1d tensors
|
|
||||||
# };
|
|
||||||
LLAMA_FTYPE_ALL_F32 = c_int(0)
|
|
||||||
LLAMA_FTYPE_MOSTLY_F16 = c_int(1)
|
|
||||||
LLAMA_FTYPE_MOSTLY_Q4_0 = c_int(2)
|
|
||||||
LLAMA_FTYPE_MOSTLY_Q4_1 = c_int(3)
|
|
||||||
LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = c_int(4)
|
|
||||||
LLAMA_FTYPE_MOSTLY_Q8_0 = c_int(7)
|
|
||||||
LLAMA_FTYPE_MOSTLY_Q5_0 = c_int(8)
|
|
||||||
LLAMA_FTYPE_MOSTLY_Q5_1 = c_int(9)
|
|
||||||
LLAMA_FTYPE_MOSTLY_Q2_K = c_int(10)
|
|
||||||
LLAMA_FTYPE_MOSTLY_Q3_K_S = c_int(11)
|
|
||||||
LLAMA_FTYPE_MOSTLY_Q3_K_M = c_int(12)
|
|
||||||
LLAMA_FTYPE_MOSTLY_Q3_K_L = c_int(13)
|
|
||||||
LLAMA_FTYPE_MOSTLY_Q4_K_S = c_int(14)
|
|
||||||
LLAMA_FTYPE_MOSTLY_Q4_K_M = c_int(15)
|
|
||||||
LLAMA_FTYPE_MOSTLY_Q5_K_S = c_int(16)
|
|
||||||
LLAMA_FTYPE_MOSTLY_Q5_K_M = c_int(17)
|
|
||||||
LLAMA_FTYPE_MOSTLY_Q6_K = c_int(18)
|
|
||||||
|
|
||||||
|
|
||||||
# // model quantization parameters
|
# // model quantization parameters
|
||||||
|
@ -370,29 +383,7 @@ class llama_timings(Structure):
|
||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
# // Set callback for all future logging events.
|
# LLAMA_API struct llama_context_params llama_context_default_params(void);
|
||||||
# // If this is not called, or NULL is supplied, everything is output on stderr.
|
|
||||||
# LLAMA_API void llama_log_set(llama_log_callback log_callback, void * user_data);
|
|
||||||
def llama_log_set(
|
|
||||||
log_callback: "ctypes._FuncPointer", user_data: c_void_p # type: ignore
|
|
||||||
):
|
|
||||||
return _lib.llama_log_set(log_callback, user_data)
|
|
||||||
|
|
||||||
|
|
||||||
_lib.llama_log_set.argtypes = [llama_log_callback, c_void_p]
|
|
||||||
_lib.llama_log_set.restype = None
|
|
||||||
|
|
||||||
|
|
||||||
# LLAMA_API int llama_max_devices();
|
|
||||||
def llama_max_devices() -> int:
|
|
||||||
return _lib.llama_max_devices()
|
|
||||||
|
|
||||||
|
|
||||||
_lib.llama_max_devices.argtypes = []
|
|
||||||
_lib.llama_max_devices.restype = c_int
|
|
||||||
|
|
||||||
|
|
||||||
# LLAMA_API struct llama_context_params llama_context_default_params();
|
|
||||||
def llama_context_default_params() -> llama_context_params:
|
def llama_context_default_params() -> llama_context_params:
|
||||||
return _lib.llama_context_default_params()
|
return _lib.llama_context_default_params()
|
||||||
|
|
||||||
|
@ -401,7 +392,7 @@ _lib.llama_context_default_params.argtypes = []
|
||||||
_lib.llama_context_default_params.restype = llama_context_params
|
_lib.llama_context_default_params.restype = llama_context_params
|
||||||
|
|
||||||
|
|
||||||
# LLAMA_API struct llama_model_quantize_params llama_model_quantize_default_params();
|
# LLAMA_API struct llama_model_quantize_params llama_model_quantize_default_params(void);
|
||||||
def llama_model_quantize_default_params() -> llama_model_quantize_params:
|
def llama_model_quantize_default_params() -> llama_model_quantize_params:
|
||||||
return _lib.llama_model_quantize_default_params()
|
return _lib.llama_model_quantize_default_params()
|
||||||
|
|
||||||
|
@ -410,25 +401,6 @@ _lib.llama_model_quantize_default_params.argtypes = []
|
||||||
_lib.llama_model_quantize_default_params.restype = llama_model_quantize_params
|
_lib.llama_model_quantize_default_params.restype = llama_model_quantize_params
|
||||||
|
|
||||||
|
|
||||||
# LLAMA_API bool llama_mmap_supported();
|
|
||||||
def llama_mmap_supported() -> bool:
|
|
||||||
return _lib.llama_mmap_supported()
|
|
||||||
|
|
||||||
|
|
||||||
_lib.llama_mmap_supported.argtypes = []
|
|
||||||
_lib.llama_mmap_supported.restype = c_bool
|
|
||||||
|
|
||||||
|
|
||||||
# LLAMA_API bool llama_mlock_supported();
|
|
||||||
def llama_mlock_supported() -> bool:
|
|
||||||
return _lib.llama_mlock_supported()
|
|
||||||
|
|
||||||
|
|
||||||
_lib.llama_mlock_supported.argtypes = []
|
|
||||||
_lib.llama_mlock_supported.restype = c_bool
|
|
||||||
|
|
||||||
|
|
||||||
# // TODO: not great API - very likely to change
|
|
||||||
# // Initialize the llama + ggml backend
|
# // Initialize the llama + ggml backend
|
||||||
# // If numa is true, use NUMA optimizations
|
# // If numa is true, use NUMA optimizations
|
||||||
# // Call once at the start of the program
|
# // Call once at the start of the program
|
||||||
|
@ -442,7 +414,7 @@ _lib.llama_backend_init.restype = None
|
||||||
|
|
||||||
|
|
||||||
# // Call once at the end of the program - currently only used for MPI
|
# // Call once at the end of the program - currently only used for MPI
|
||||||
# LLAMA_API void llama_backend_free();
|
# LLAMA_API void llama_backend_free(void);
|
||||||
def llama_backend_free():
|
def llama_backend_free():
|
||||||
return _lib.llama_backend_free()
|
return _lib.llama_backend_free()
|
||||||
|
|
||||||
|
@ -486,7 +458,17 @@ _lib.llama_new_context_with_model.argtypes = [llama_model_p, llama_context_param
|
||||||
_lib.llama_new_context_with_model.restype = llama_context_p
|
_lib.llama_new_context_with_model.restype = llama_context_p
|
||||||
|
|
||||||
|
|
||||||
# LLAMA_API int64_t llama_time_us();
|
# // Frees all allocated memory
|
||||||
|
# LLAMA_API void llama_free(struct llama_context * ctx);
|
||||||
|
def llama_free(ctx: llama_context_p):
|
||||||
|
return _lib.llama_free(ctx)
|
||||||
|
|
||||||
|
|
||||||
|
_lib.llama_free.argtypes = [llama_context_p]
|
||||||
|
_lib.llama_free.restype = None
|
||||||
|
|
||||||
|
|
||||||
|
# LLAMA_API int64_t llama_time_us(void);
|
||||||
def llama_time_us() -> int:
|
def llama_time_us() -> int:
|
||||||
return _lib.llama_time_us()
|
return _lib.llama_time_us()
|
||||||
|
|
||||||
|
@ -495,30 +477,124 @@ _lib.llama_time_us.argtypes = []
|
||||||
_lib.llama_time_us.restype = ctypes.c_int64
|
_lib.llama_time_us.restype = ctypes.c_int64
|
||||||
|
|
||||||
|
|
||||||
# // Various functions for loading a ggml llama model.
|
# LLAMA_API int llama_max_devices (void);
|
||||||
# // Allocate (almost) all memory needed for the model.
|
def llama_max_devices() -> int:
|
||||||
# // Return NULL on failure
|
return _lib.llama_max_devices()
|
||||||
# LLAMA_API struct llama_context * llama_init_from_file(
|
|
||||||
# const char * path_model,
|
|
||||||
# struct llama_context_params params);
|
|
||||||
def llama_init_from_file(
|
|
||||||
path_model: bytes, params: llama_context_params
|
|
||||||
) -> llama_context_p:
|
|
||||||
return _lib.llama_init_from_file(path_model, params)
|
|
||||||
|
|
||||||
|
|
||||||
_lib.llama_init_from_file.argtypes = [c_char_p, llama_context_params]
|
_lib.llama_max_devices.argtypes = []
|
||||||
_lib.llama_init_from_file.restype = llama_context_p
|
_lib.llama_max_devices.restype = c_int
|
||||||
|
|
||||||
|
|
||||||
# Frees all allocated memory
|
# LLAMA_API bool llama_mmap_supported (void);
|
||||||
# LLAMA_API void llama_free(struct llama_context * ctx);
|
def llama_mmap_supported() -> bool:
|
||||||
def llama_free(ctx: llama_context_p):
|
return _lib.llama_mmap_supported()
|
||||||
return _lib.llama_free(ctx)
|
|
||||||
|
|
||||||
|
|
||||||
_lib.llama_free.argtypes = [llama_context_p]
|
_lib.llama_mmap_supported.argtypes = []
|
||||||
_lib.llama_free.restype = None
|
_lib.llama_mmap_supported.restype = c_bool
|
||||||
|
|
||||||
|
|
||||||
|
# LLAMA_API bool llama_mlock_supported(void);
|
||||||
|
def llama_mlock_supported() -> bool:
|
||||||
|
return _lib.llama_mlock_supported()
|
||||||
|
|
||||||
|
|
||||||
|
_lib.llama_mlock_supported.argtypes = []
|
||||||
|
_lib.llama_mlock_supported.restype = c_bool
|
||||||
|
|
||||||
|
|
||||||
|
# LLAMA_API int llama_n_vocab(const struct llama_context * ctx);
|
||||||
|
def llama_n_vocab(ctx: llama_context_p) -> int:
|
||||||
|
return _lib.llama_n_vocab(ctx)
|
||||||
|
|
||||||
|
|
||||||
|
_lib.llama_n_vocab.argtypes = [llama_context_p]
|
||||||
|
_lib.llama_n_vocab.restype = c_int
|
||||||
|
|
||||||
|
|
||||||
|
# LLAMA_API int llama_n_ctx (const struct llama_context * ctx);
|
||||||
|
def llama_n_ctx(ctx: llama_context_p) -> int:
|
||||||
|
return _lib.llama_n_ctx(ctx)
|
||||||
|
|
||||||
|
|
||||||
|
_lib.llama_n_ctx.argtypes = [llama_context_p]
|
||||||
|
_lib.llama_n_ctx.restype = c_int
|
||||||
|
|
||||||
|
|
||||||
|
# LLAMA_API int llama_n_embd (const struct llama_context * ctx);
|
||||||
|
def llama_n_embd(ctx: llama_context_p) -> int:
|
||||||
|
return _lib.llama_n_embd(ctx)
|
||||||
|
|
||||||
|
|
||||||
|
_lib.llama_n_embd.argtypes = [llama_context_p]
|
||||||
|
_lib.llama_n_embd.restype = c_int
|
||||||
|
|
||||||
|
|
||||||
|
# LLAMA_API enum llama_vocab_type llama_vocab_type(const struct llama_context * ctx);
|
||||||
|
def llama_vocab_type(ctx: llama_context_p) -> int:
|
||||||
|
return _lib.llama_vocab_type(ctx)
|
||||||
|
|
||||||
|
|
||||||
|
_lib.llama_vocab_type.argtypes = [llama_context_p]
|
||||||
|
_lib.llama_vocab_type.restype = c_int
|
||||||
|
|
||||||
|
|
||||||
|
# LLAMA_API int llama_model_n_vocab(const struct llama_model * model);
|
||||||
|
def llama_model_n_vocab(model: llama_model_p) -> int:
|
||||||
|
return _lib.llama_model_n_vocab(model)
|
||||||
|
|
||||||
|
|
||||||
|
_lib.llama_model_n_vocab.argtypes = [llama_model_p]
|
||||||
|
_lib.llama_model_n_vocab.restype = c_int
|
||||||
|
|
||||||
|
|
||||||
|
# LLAMA_API int llama_model_n_ctx (const struct llama_model * model);
|
||||||
|
def llama_model_n_ctx(model: llama_model_p) -> int:
|
||||||
|
return _lib.llama_model_n_ctx(model)
|
||||||
|
|
||||||
|
|
||||||
|
_lib.llama_model_n_ctx.argtypes = [llama_model_p]
|
||||||
|
_lib.llama_model_n_ctx.restype = c_int
|
||||||
|
|
||||||
|
|
||||||
|
# LLAMA_API int llama_model_n_embd (const struct llama_model * model);
|
||||||
|
def llama_model_n_embd(model: llama_model_p) -> int:
|
||||||
|
return _lib.llama_model_n_embd(model)
|
||||||
|
|
||||||
|
|
||||||
|
_lib.llama_model_n_embd.argtypes = [llama_model_p]
|
||||||
|
_lib.llama_model_n_embd.restype = c_int
|
||||||
|
|
||||||
|
|
||||||
|
# // Get a string describing the model type
|
||||||
|
# LLAMA_API int llama_model_desc(const struct llama_model * model, char * buf, size_t buf_size);
|
||||||
|
def llama_model_desc(model: llama_model_p, buf: bytes, buf_size: c_size_t) -> int:
|
||||||
|
return _lib.llama_model_desc(model, buf, buf_size)
|
||||||
|
|
||||||
|
|
||||||
|
_lib.llama_model_desc.argtypes = [llama_model_p, c_char_p, c_size_t]
|
||||||
|
_lib.llama_model_desc.restype = c_int
|
||||||
|
|
||||||
|
|
||||||
|
# // Returns the total size of all the tensors in the model in bytes
|
||||||
|
# LLAMA_API uint64_t llama_model_size(const struct llama_model * model);
|
||||||
|
def llama_model_size(model: llama_model_p) -> int:
|
||||||
|
return _lib.llama_model_size(model)
|
||||||
|
|
||||||
|
|
||||||
|
_lib.llama_model_size.argtypes = [llama_model_p]
|
||||||
|
_lib.llama_model_size.restype = ctypes.c_uint64
|
||||||
|
|
||||||
|
|
||||||
|
# // Returns the total number of parameters in the model
|
||||||
|
# LLAMA_API uint64_t llama_model_n_params(const struct llama_model * model);
|
||||||
|
def llama_model_n_params(model: llama_model_p) -> int:
|
||||||
|
return _lib.llama_model_n_params(model)
|
||||||
|
|
||||||
|
|
||||||
|
_lib.llama_model_n_params.argtypes = [llama_model_p]
|
||||||
|
_lib.llama_model_n_params.restype = ctypes.c_uint64
|
||||||
|
|
||||||
|
|
||||||
# // Returns 0 on success
|
# // Returns 0 on success
|
||||||
|
@ -737,6 +813,115 @@ _lib.llama_eval_embd.argtypes = [llama_context_p, c_float_p, c_int, c_int, c_int
|
||||||
_lib.llama_eval_embd.restype = c_int
|
_lib.llama_eval_embd.restype = c_int
|
||||||
|
|
||||||
|
|
||||||
|
# // Export a static computation graph for context of 511 and batch size of 1
|
||||||
|
# // NOTE: since this functionality is mostly for debugging and demonstration purposes, we hardcode these
|
||||||
|
# // parameters here to keep things simple
|
||||||
|
# // IMPORTANT: do not use for anything else other than debugging and testing!
|
||||||
|
# LLAMA_API int llama_eval_export(struct llama_context * ctx, const char * fname);
|
||||||
|
def llama_eval_export(ctx: llama_context_p, fname: bytes) -> int:
|
||||||
|
return _lib.llama_eval_export(ctx, fname)
|
||||||
|
|
||||||
|
|
||||||
|
_lib.llama_eval_export.argtypes = [llama_context_p, c_char_p]
|
||||||
|
_lib.llama_eval_export.restype = c_int
|
||||||
|
|
||||||
|
|
||||||
|
# Token logits obtained from the last call to llama_eval()
|
||||||
|
# The logits for the last token are stored in the last row
|
||||||
|
# Can be mutated in order to change the probabilities of the next token
|
||||||
|
# Rows: n_tokens
|
||||||
|
# Cols: n_vocab
|
||||||
|
# LLAMA_API float * llama_get_logits(struct llama_context * ctx);
|
||||||
|
def llama_get_logits(
|
||||||
|
ctx: llama_context_p,
|
||||||
|
): # type: (...) -> Array[float] # type: ignore
|
||||||
|
return _lib.llama_get_logits(ctx)
|
||||||
|
|
||||||
|
|
||||||
|
_lib.llama_get_logits.argtypes = [llama_context_p]
|
||||||
|
_lib.llama_get_logits.restype = c_float_p
|
||||||
|
|
||||||
|
|
||||||
|
# Get the embeddings for the input
|
||||||
|
# shape: [n_embd] (1-dimensional)
|
||||||
|
# LLAMA_API float * llama_get_embeddings(struct llama_context * ctx);
|
||||||
|
def llama_get_embeddings(
|
||||||
|
ctx: llama_context_p,
|
||||||
|
): # type: (...) -> Array[float] # type: ignore
|
||||||
|
return _lib.llama_get_embeddings(ctx)
|
||||||
|
|
||||||
|
|
||||||
|
_lib.llama_get_embeddings.argtypes = [llama_context_p]
|
||||||
|
_lib.llama_get_embeddings.restype = c_float_p
|
||||||
|
|
||||||
|
|
||||||
|
# //
|
||||||
|
# // Vocab
|
||||||
|
# //
|
||||||
|
|
||||||
|
|
||||||
|
# LLAMA_API const char * llama_token_get_text(const struct llama_context * ctx, llama_token token);
|
||||||
|
def llama_token_get_text(ctx: llama_context_p, token: llama_token) -> bytes:
|
||||||
|
return _lib.llama_token_get_text(ctx, token)
|
||||||
|
|
||||||
|
|
||||||
|
_lib.llama_token_get_text.argtypes = [llama_context_p, llama_token]
|
||||||
|
_lib.llama_token_get_text.restype = c_char_p
|
||||||
|
|
||||||
|
|
||||||
|
# LLAMA_API float llama_token_get_score(const struct llama_context * ctx, llama_token token);
|
||||||
|
def llama_token_get_score(ctx: llama_context_p, token: llama_token) -> float:
|
||||||
|
return _lib.llama_token_get_score(ctx, token)
|
||||||
|
|
||||||
|
|
||||||
|
_lib.llama_token_get_score.argtypes = [llama_context_p, llama_token]
|
||||||
|
_lib.llama_token_get_score.restype = c_float
|
||||||
|
|
||||||
|
|
||||||
|
# LLAMA_API enum llama_token_type llama_token_get_type(const struct llama_context * ctx, llama_token token);
|
||||||
|
def llama_token_get_type(ctx: llama_context_p, token: llama_token) -> int:
|
||||||
|
return _lib.llama_token_get_type(ctx, token)
|
||||||
|
|
||||||
|
|
||||||
|
_lib.llama_token_get_type.argtypes = [llama_context_p, llama_token]
|
||||||
|
_lib.llama_token_get_type.restype = ctypes.c_int
|
||||||
|
|
||||||
|
|
||||||
|
# // Special tokens
|
||||||
|
|
||||||
|
|
||||||
|
# LLAMA_API llama_token llama_token_bos(const struct llama_context * ctx); // beginning-of-sentence
|
||||||
|
def llama_token_bos(ctx: llama_context_p) -> llama_token:
|
||||||
|
return _lib.llama_token_bos(ctx)
|
||||||
|
|
||||||
|
|
||||||
|
_lib.llama_token_bos.argtypes = [llama_context_p]
|
||||||
|
_lib.llama_token_bos.restype = llama_token
|
||||||
|
|
||||||
|
|
||||||
|
# LLAMA_API llama_token llama_token_eos(const struct llama_context * ctx); // end-of-sentence
|
||||||
|
def llama_token_eos(ctx: llama_context_p) -> llama_token:
|
||||||
|
return _lib.llama_token_eos(ctx)
|
||||||
|
|
||||||
|
|
||||||
|
_lib.llama_token_eos.argtypes = [llama_context_p]
|
||||||
|
_lib.llama_token_eos.restype = llama_token
|
||||||
|
|
||||||
|
|
||||||
|
# LLAMA_API llama_token llama_token_nl (const struct llama_context * ctx); // next-line
|
||||||
|
def llama_token_nl(ctx: llama_context_p) -> llama_token:
|
||||||
|
return _lib.llama_token_nl(ctx)
|
||||||
|
|
||||||
|
|
||||||
|
_lib.llama_token_nl.argtypes = [llama_context_p]
|
||||||
|
_lib.llama_token_nl.restype = llama_token
|
||||||
|
|
||||||
|
|
||||||
|
# //
|
||||||
|
# // Tokenization
|
||||||
|
# //
|
||||||
|
|
||||||
|
|
||||||
# Convert the provided text into tokens.
|
# Convert the provided text into tokens.
|
||||||
# The tokens pointer must be large enough to hold the resulting tokens.
|
# The tokens pointer must be large enough to hold the resulting tokens.
|
||||||
# Returns the number of tokens on success, no more than n_max_tokens
|
# Returns the number of tokens on success, no more than n_max_tokens
|
||||||
|
@ -778,191 +963,64 @@ def llama_tokenize_with_model(
|
||||||
return _lib.llama_tokenize_with_model(model, text, tokens, n_max_tokens, add_bos)
|
return _lib.llama_tokenize_with_model(model, text, tokens, n_max_tokens, add_bos)
|
||||||
|
|
||||||
|
|
||||||
# LLAMA_API int llama_n_vocab(const struct llama_context * ctx);
|
_lib.llama_tokenize_with_model.argtypes = [
|
||||||
def llama_n_vocab(ctx: llama_context_p) -> int:
|
|
||||||
return _lib.llama_n_vocab(ctx)
|
|
||||||
|
|
||||||
|
|
||||||
_lib.llama_n_vocab.argtypes = [llama_context_p]
|
|
||||||
_lib.llama_n_vocab.restype = c_int
|
|
||||||
|
|
||||||
|
|
||||||
# LLAMA_API int llama_n_ctx (const struct llama_context * ctx);
|
|
||||||
def llama_n_ctx(ctx: llama_context_p) -> int:
|
|
||||||
return _lib.llama_n_ctx(ctx)
|
|
||||||
|
|
||||||
|
|
||||||
_lib.llama_n_ctx.argtypes = [llama_context_p]
|
|
||||||
_lib.llama_n_ctx.restype = c_int
|
|
||||||
|
|
||||||
|
|
||||||
# LLAMA_API int llama_n_embd (const struct llama_context * ctx);
|
|
||||||
def llama_n_embd(ctx: llama_context_p) -> int:
|
|
||||||
return _lib.llama_n_embd(ctx)
|
|
||||||
|
|
||||||
|
|
||||||
_lib.llama_n_embd.argtypes = [llama_context_p]
|
|
||||||
_lib.llama_n_embd.restype = c_int
|
|
||||||
|
|
||||||
|
|
||||||
# LLAMA_API int llama_n_vocab_from_model(const struct llama_model * model);
|
|
||||||
def llama_n_vocab_from_model(model: llama_model_p) -> int:
|
|
||||||
return _lib.llama_n_vocab_from_model(model)
|
|
||||||
|
|
||||||
|
|
||||||
_lib.llama_n_vocab_from_model.argtypes = [llama_model_p]
|
|
||||||
_lib.llama_n_vocab_from_model.restype = c_int
|
|
||||||
|
|
||||||
|
|
||||||
# LLAMA_API int llama_n_ctx_from_model (const struct llama_model * model);
|
|
||||||
def llama_n_ctx_from_model(model: llama_model_p) -> int:
|
|
||||||
return _lib.llama_n_ctx_from_model(model)
|
|
||||||
|
|
||||||
|
|
||||||
_lib.llama_n_ctx_from_model.argtypes = [llama_model_p]
|
|
||||||
_lib.llama_n_ctx_from_model.restype = c_int
|
|
||||||
|
|
||||||
|
|
||||||
# LLAMA_API int llama_n_embd_from_model (const struct llama_model * model);
|
|
||||||
def llama_n_embd_from_model(model: llama_model_p) -> int:
|
|
||||||
return _lib.llama_n_embd_from_model(model)
|
|
||||||
|
|
||||||
|
|
||||||
_lib.llama_n_embd_from_model.argtypes = [llama_model_p]
|
|
||||||
_lib.llama_n_embd_from_model.restype = c_int
|
|
||||||
|
|
||||||
|
|
||||||
# // Get the vocabulary as output parameters.
|
|
||||||
# // Returns number of results.
|
|
||||||
# LLAMA_API int llama_get_vocab(
|
|
||||||
# const struct llama_context * ctx,
|
|
||||||
# const char * * strings,
|
|
||||||
# float * scores,
|
|
||||||
# int capacity);
|
|
||||||
def llama_get_vocab(
|
|
||||||
ctx: llama_context_p,
|
|
||||||
strings, # type: Array[c_char_p] # type: ignore
|
|
||||||
scores, # type: Array[c_float] # type: ignore
|
|
||||||
capacity: c_int,
|
|
||||||
) -> int:
|
|
||||||
return _lib.llama_get_vocab(ctx, strings, scores, capacity)
|
|
||||||
|
|
||||||
|
|
||||||
_lib.llama_get_vocab.argtypes = [
|
|
||||||
llama_context_p,
|
|
||||||
POINTER(c_char_p),
|
|
||||||
POINTER(c_float),
|
|
||||||
c_int,
|
|
||||||
]
|
|
||||||
_lib.llama_get_vocab.restype = c_int
|
|
||||||
|
|
||||||
|
|
||||||
# LLAMA_API int llama_get_vocab_from_model(
|
|
||||||
# const struct llama_model * model,
|
|
||||||
# const char * * strings,
|
|
||||||
# float * scores,
|
|
||||||
# int capacity);
|
|
||||||
def llama_get_vocab_from_model(
|
|
||||||
model: llama_model_p,
|
|
||||||
strings, # type: Array[c_char_p] # type: ignore
|
|
||||||
scores, # type: Array[c_float] # type: ignore
|
|
||||||
capacity: c_int,
|
|
||||||
) -> int:
|
|
||||||
return _lib.llama_get_vocab_from_model(model, strings, scores, capacity)
|
|
||||||
|
|
||||||
|
|
||||||
_lib.llama_get_vocab_from_model.argtypes = [
|
|
||||||
llama_model_p,
|
llama_model_p,
|
||||||
POINTER(c_char_p),
|
c_char_p,
|
||||||
POINTER(c_float),
|
llama_token_p,
|
||||||
c_int,
|
c_int,
|
||||||
|
c_bool,
|
||||||
]
|
]
|
||||||
_lib.llama_get_vocab_from_model.restype = c_int
|
_lib.llama_tokenize_with_model.restype = c_int
|
||||||
|
|
||||||
|
|
||||||
# Token logits obtained from the last call to llama_eval()
|
|
||||||
# The logits for the last token are stored in the last row
|
|
||||||
# Can be mutated in order to change the probabilities of the next token
|
|
||||||
# Rows: n_tokens
|
|
||||||
# Cols: n_vocab
|
|
||||||
# LLAMA_API float * llama_get_logits(struct llama_context * ctx);
|
|
||||||
def llama_get_logits(
|
|
||||||
ctx: llama_context_p,
|
|
||||||
): # type: (...) -> Array[float] # type: ignore
|
|
||||||
return _lib.llama_get_logits(ctx)
|
|
||||||
|
|
||||||
|
|
||||||
_lib.llama_get_logits.argtypes = [llama_context_p]
|
|
||||||
_lib.llama_get_logits.restype = c_float_p
|
|
||||||
|
|
||||||
|
|
||||||
# Get the embeddings for the input
|
|
||||||
# shape: [n_embd] (1-dimensional)
|
|
||||||
# LLAMA_API float * llama_get_embeddings(struct llama_context * ctx);
|
|
||||||
def llama_get_embeddings(
|
|
||||||
ctx: llama_context_p,
|
|
||||||
): # type: (...) -> Array[float] # type: ignore
|
|
||||||
return _lib.llama_get_embeddings(ctx)
|
|
||||||
|
|
||||||
|
|
||||||
_lib.llama_get_embeddings.argtypes = [llama_context_p]
|
|
||||||
_lib.llama_get_embeddings.restype = c_float_p
|
|
||||||
|
|
||||||
|
|
||||||
# // Token Id -> String. Uses the vocabulary in the provided context
|
# // Token Id -> String. Uses the vocabulary in the provided context
|
||||||
# LLAMA_API const char * llama_token_to_str(
|
# // Does not write null terminator to the buffer
|
||||||
|
# LLAMA_API int llama_token_to_str(
|
||||||
# const struct llama_context * ctx,
|
# const struct llama_context * ctx,
|
||||||
# llama_token token);
|
# llama_token token,
|
||||||
def llama_token_to_str(ctx: llama_context_p, token: llama_token) -> bytes:
|
# char * buf,
|
||||||
return _lib.llama_token_to_str(ctx, token)
|
# int length);
|
||||||
|
def llama_token_to_str(
|
||||||
|
ctx: llama_context_p, token: llama_token, buf: bytes, length: c_int
|
||||||
|
) -> int:
|
||||||
|
return _lib.llama_token_to_str(ctx, token, buf, length)
|
||||||
|
|
||||||
|
|
||||||
_lib.llama_token_to_str.argtypes = [llama_context_p, llama_token]
|
_lib.llama_tokenize_with_model.argtypes = [
|
||||||
_lib.llama_token_to_str.restype = c_char_p
|
llama_model_p,
|
||||||
|
c_char_p,
|
||||||
|
llama_token_p,
|
||||||
|
c_int,
|
||||||
|
c_bool,
|
||||||
|
]
|
||||||
|
_lib.llama_tokenize_with_model.restype = c_int
|
||||||
|
|
||||||
|
|
||||||
# LLAMA_API const char * llama_token_to_str_with_model(
|
# LLAMA_API int llama_token_to_str_with_model(
|
||||||
# const struct llama_model * model,
|
# const struct llama_model * model,
|
||||||
# llama_token token);
|
# llama_token token,
|
||||||
def llama_token_to_str_with_model(model: llama_model_p, token: llama_token) -> bytes:
|
# char * buf,
|
||||||
return _lib.llama_token_to_str_with_model(model, token)
|
# int length);
|
||||||
|
def llama_token_to_str_with_model(
|
||||||
|
model: llama_model_p, token: llama_token, buf: bytes, length: c_int
|
||||||
|
) -> int:
|
||||||
|
return _lib.llama_token_to_str_with_model(model, token, buf, length)
|
||||||
|
|
||||||
|
|
||||||
_lib.llama_token_to_str_with_model.argtypes = [llama_model_p, llama_token]
|
_lib.llama_token_to_str_with_model.argtypes = [
|
||||||
_lib.llama_token_to_str_with_model.restype = c_char_p
|
llama_model_p,
|
||||||
|
llama_token,
|
||||||
# Special tokens
|
c_char_p,
|
||||||
|
c_int,
|
||||||
|
]
|
||||||
# LLAMA_API llama_token llama_token_bos(); // beginning-of-sentence
|
_lib.llama_token_to_str_with_model.restype = c_int
|
||||||
def llama_token_bos() -> int:
|
|
||||||
return _lib.llama_token_bos()
|
|
||||||
|
|
||||||
|
|
||||||
_lib.llama_token_bos.argtypes = []
|
|
||||||
_lib.llama_token_bos.restype = llama_token
|
|
||||||
|
|
||||||
|
|
||||||
# LLAMA_API llama_token llama_token_eos(); // end-of-sentence
|
|
||||||
def llama_token_eos() -> int:
|
|
||||||
return _lib.llama_token_eos()
|
|
||||||
|
|
||||||
|
|
||||||
_lib.llama_token_eos.argtypes = []
|
|
||||||
_lib.llama_token_eos.restype = llama_token
|
|
||||||
|
|
||||||
|
|
||||||
# LLAMA_API llama_token llama_token_nl(); // next-line
|
|
||||||
def llama_token_nl() -> int:
|
|
||||||
return _lib.llama_token_nl()
|
|
||||||
|
|
||||||
|
|
||||||
_lib.llama_token_nl.argtypes = []
|
|
||||||
_lib.llama_token_nl.restype = llama_token
|
|
||||||
|
|
||||||
|
|
||||||
|
# //
|
||||||
# // Grammar
|
# // Grammar
|
||||||
# //
|
# //
|
||||||
|
|
||||||
|
|
||||||
# LLAMA_API struct llama_grammar * llama_grammar_init(
|
# LLAMA_API struct llama_grammar * llama_grammar_init(
|
||||||
# const llama_grammar_element ** rules,
|
# const llama_grammar_element ** rules,
|
||||||
# size_t n_rules,
|
# size_t n_rules,
|
||||||
|
@ -991,8 +1049,78 @@ def llama_grammar_free(grammar: llama_grammar_p):
|
||||||
_lib.llama_grammar_free.argtypes = [llama_grammar_p]
|
_lib.llama_grammar_free.argtypes = [llama_grammar_p]
|
||||||
_lib.llama_grammar_free.restype = None
|
_lib.llama_grammar_free.restype = None
|
||||||
|
|
||||||
|
# //
|
||||||
|
# // Beam search
|
||||||
|
# //
|
||||||
|
|
||||||
# Sampling functions
|
|
||||||
|
# struct llama_beam_view {
|
||||||
|
# const llama_token * tokens;
|
||||||
|
# size_t n_tokens;
|
||||||
|
# float p; // Cumulative beam probability (renormalized relative to all beams)
|
||||||
|
# bool eob; // Callback should set this to true when a beam is at end-of-beam.
|
||||||
|
# };
|
||||||
|
class llama_beam_view(ctypes.Structure):
|
||||||
|
_fields_ = [
|
||||||
|
("tokens", llama_token_p),
|
||||||
|
("n_tokens", c_size_t),
|
||||||
|
("p", c_float),
|
||||||
|
("eob", c_bool),
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
# // Passed to beam_search_callback function.
|
||||||
|
# // Whenever 0 < common_prefix_length, this number of tokens should be copied from any of the beams
|
||||||
|
# // (e.g. beams[0]) as they will be removed (shifted) from all beams in all subsequent callbacks.
|
||||||
|
# // These pointers are valid only during the synchronous callback, so should not be saved.
|
||||||
|
# struct llama_beams_state {
|
||||||
|
# struct llama_beam_view * beam_views;
|
||||||
|
# size_t n_beams; // Number of elements in beam_views[].
|
||||||
|
# size_t common_prefix_length; // Current max length of prefix tokens shared by all beams.
|
||||||
|
# bool last_call; // True iff this is the last callback invocation.
|
||||||
|
# };
|
||||||
|
class llama_beams_state(ctypes.Structure):
|
||||||
|
_fields_ = [
|
||||||
|
("beam_views", POINTER(llama_beam_view)),
|
||||||
|
("n_beams", c_size_t),
|
||||||
|
("common_prefix_length", c_size_t),
|
||||||
|
("last_call", c_bool),
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
# // Type of pointer to the beam_search_callback function.
|
||||||
|
# // void* callback_data is any custom data passed to llama_beam_search, that is subsequently
|
||||||
|
# // passed back to beam_search_callback. This avoids having to use global variables in the callback.
|
||||||
|
# typedef void (*llama_beam_search_callback_fn_t)(void * callback_data, llama_beams_state);
|
||||||
|
llama_beam_search_callback_fn_t = ctypes.CFUNCTYPE(None, c_void_p, llama_beams_state)
|
||||||
|
|
||||||
|
|
||||||
|
# /// @details Deterministically returns entire sentence constructed by a beam search.
|
||||||
|
# /// @param ctx Pointer to the llama_context.
|
||||||
|
# /// @param callback Invoked for each iteration of the beam_search loop, passing in beams_state.
|
||||||
|
# /// @param callback_data A pointer that is simply passed back to callback.
|
||||||
|
# /// @param n_beams Number of beams to use.
|
||||||
|
# /// @param n_past Number of tokens already evaluated.
|
||||||
|
# /// @param n_predict Maximum number of tokens to predict. EOS may occur earlier.
|
||||||
|
# /// @param n_threads Number of threads as passed to llama_eval().
|
||||||
|
# LLAMA_API void llama_beam_search(struct llama_context * ctx, llama_beam_search_callback_fn_t callback, void * callback_data, size_t n_beams, int n_past, int n_predict, int n_threads);
|
||||||
|
def llama_beam_search(
|
||||||
|
ctx: llama_context_p,
|
||||||
|
callback: "ctypes._CFuncPtr[None, c_void_p, llama_beams_state]", # type: ignore
|
||||||
|
callback_data: c_void_p,
|
||||||
|
n_beams: c_size_t,
|
||||||
|
n_past: c_int,
|
||||||
|
n_predict: c_int,
|
||||||
|
n_threads: c_int,
|
||||||
|
):
|
||||||
|
return _lib.llama_beam_search(
|
||||||
|
ctx, callback, callback_data, n_beams, n_past, n_predict, n_threads
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
# //
|
||||||
|
# // Sampling functions
|
||||||
|
# //
|
||||||
|
|
||||||
|
|
||||||
# @details Repetition penalty described in CTRL academic paper https://arxiv.org/abs/1909.05858, with negative logit fix.
|
# @details Repetition penalty described in CTRL academic paper https://arxiv.org/abs/1909.05858, with negative logit fix.
|
||||||
|
@ -1351,6 +1479,19 @@ def llama_print_system_info() -> bytes:
|
||||||
_lib.llama_print_system_info.argtypes = []
|
_lib.llama_print_system_info.argtypes = []
|
||||||
_lib.llama_print_system_info.restype = c_char_p
|
_lib.llama_print_system_info.restype = c_char_p
|
||||||
|
|
||||||
|
|
||||||
|
# // Set callback for all future logging events.
|
||||||
|
# // If this is not called, or NULL is supplied, everything is output on stderr.
|
||||||
|
# LLAMA_API void llama_log_set(llama_log_callback log_callback, void * user_data);
|
||||||
|
def llama_log_set(
|
||||||
|
log_callback: "ctypes._FuncPointer", user_data: c_void_p # type: ignore
|
||||||
|
):
|
||||||
|
return _lib.llama_log_set(log_callback, user_data)
|
||||||
|
|
||||||
|
|
||||||
|
_lib.llama_log_set.argtypes = [llama_log_callback, c_void_p]
|
||||||
|
_lib.llama_log_set.restype = None
|
||||||
|
|
||||||
###################################################################################################
|
###################################################################################################
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -35,6 +35,9 @@ scikit-build = "0.17.6"
|
||||||
[tool.poetry.extras]
|
[tool.poetry.extras]
|
||||||
server = ["uvicorn", "fastapi", "pydantic-settings", "sse-starlette"]
|
server = ["uvicorn", "fastapi", "pydantic-settings", "sse-starlette"]
|
||||||
|
|
||||||
|
[tool.pytest.ini_options]
|
||||||
|
addopts = "--ignore=vendor"
|
||||||
|
|
||||||
[build-system]
|
[build-system]
|
||||||
requires = [
|
requires = [
|
||||||
"setuptools>=42",
|
"setuptools>=42",
|
||||||
|
|
|
@ -1,20 +1,32 @@
|
||||||
|
import pytest
|
||||||
import llama_cpp
|
import llama_cpp
|
||||||
|
|
||||||
MODEL = "./vendor/llama.cpp/models/ggml-vocab.bin"
|
MODEL = "./vendor/llama.cpp/models/ggml-vocab-llama.gguf"
|
||||||
|
|
||||||
|
|
||||||
def test_llama():
|
def test_llama_cpp_tokenization():
|
||||||
llama = llama_cpp.Llama(model_path=MODEL, vocab_only=True)
|
llama = llama_cpp.Llama(model_path=MODEL, vocab_only=True, verbose=False)
|
||||||
|
|
||||||
assert llama
|
assert llama
|
||||||
assert llama.ctx is not None
|
assert llama.ctx is not None
|
||||||
|
|
||||||
text = b"Hello World"
|
text = b"Hello World"
|
||||||
|
|
||||||
assert llama.detokenize(llama.tokenize(text)) == text
|
tokens = llama.tokenize(text)
|
||||||
|
assert tokens[0] == llama.token_bos()
|
||||||
|
assert tokens == [1, 15043, 2787]
|
||||||
|
detokenized = llama.detokenize(tokens)
|
||||||
|
assert detokenized == text
|
||||||
|
|
||||||
|
tokens = llama.tokenize(text, add_bos=False)
|
||||||
|
assert tokens[0] != llama.token_bos()
|
||||||
|
assert tokens == [15043, 2787]
|
||||||
|
|
||||||
|
detokenized = llama.detokenize(tokens)
|
||||||
|
assert detokenized != text
|
||||||
|
|
||||||
|
|
||||||
# @pytest.mark.skip(reason="need to update sample mocking")
|
@pytest.mark.skip(reason="bug in tokenization where leading space is always inserted even if not after eos")
|
||||||
def test_llama_patch(monkeypatch):
|
def test_llama_patch(monkeypatch):
|
||||||
llama = llama_cpp.Llama(model_path=MODEL, vocab_only=True)
|
llama = llama_cpp.Llama(model_path=MODEL, vocab_only=True)
|
||||||
n_vocab = llama_cpp.llama_n_vocab(llama.ctx)
|
n_vocab = llama_cpp.llama_n_vocab(llama.ctx)
|
||||||
|
|
2
vendor/llama.cpp
vendored
2
vendor/llama.cpp
vendored
|
@ -1 +1 @@
|
||||||
Subproject commit 604b8bdfa6320bbcb018eebcc1252dfede603c6b
|
Subproject commit 232caf3c1581a6cb023571780ff41dc2d66d1ca0
|
Loading…
Reference in a new issue