From bbbf0f4fc47bd5f9880b799c82ad7c06f5003cb7 Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Thu, 24 Aug 2023 00:17:00 -0400 Subject: [PATCH 01/11] Update llama.cpp --- llama_cpp/llama.py | 30 +- llama_cpp/llama_cpp.py | 718 +++++++++++++++++++++++------------------ vendor/llama.cpp | 2 +- 3 files changed, 415 insertions(+), 335 deletions(-) diff --git a/llama_cpp/llama.py b/llama_cpp/llama.py index 21c0875..bfcae18 100644 --- a/llama_cpp/llama.py +++ b/llama_cpp/llama.py @@ -371,8 +371,8 @@ class Llama: sorted=sorted, ) self._candidates = candidates - self._token_nl = Llama.token_nl() - self._token_eos = Llama.token_eos() + self._token_nl = self.token_nl() + self._token_eos = self.token_eos() self._candidates_data_id = np.arange(self._n_vocab, dtype=np.intc) # type: ignore self._candidates_data_p = np.zeros(self._n_vocab, dtype=np.single) @@ -450,10 +450,14 @@ class Llama: """ assert self.ctx is not None output = b"" + buffer_size = 32 + buffer = (ctypes.c_char * buffer_size)() for token in tokens: - output += llama_cpp.llama_token_to_str( - self.ctx, llama_cpp.llama_token(token) + n = llama_cpp.llama_token_to_str( + self.ctx, llama_cpp.llama_token(token), buffer, buffer_size ) + assert n <= buffer_size + output += bytes(buffer[:n]) return output def set_cache(self, cache: Optional[BaseLlamaCache]): @@ -1681,20 +1685,20 @@ class Llama: assert self.ctx is not None return LlamaTokenizer(self) - @staticmethod - def token_eos() -> int: + def token_eos(self) -> int: """Return the end-of-sequence token.""" - return llama_cpp.llama_token_eos() + assert self.ctx is not None + return llama_cpp.llama_token_eos(self.ctx) - @staticmethod - def token_bos() -> int: + def token_bos(self) -> int: """Return the beginning-of-sequence token.""" - return llama_cpp.llama_token_bos() + assert self.ctx is not None + return llama_cpp.llama_token_bos(self.ctx) - @staticmethod - def token_nl() -> int: + def token_nl(self) -> int: """Return the newline token.""" - return llama_cpp.llama_token_nl() + assert self.ctx is not None + return llama_cpp.llama_token_nl(self.ctx) @staticmethod def logits_to_logprobs(logits: List[float]) -> List[float]: diff --git a/llama_cpp/llama_cpp.py b/llama_cpp/llama_cpp.py index 0fd3209..c9200c6 100644 --- a/llama_cpp/llama_cpp.py +++ b/llama_cpp/llama_cpp.py @@ -90,26 +90,17 @@ GGML_USE_CUBLAS = hasattr(_lib, "ggml_init_cublas") GGML_CUDA_MAX_DEVICES = ctypes.c_int(16) LLAMA_MAX_DEVICES = GGML_CUDA_MAX_DEVICES if GGML_USE_CUBLAS else ctypes.c_int(1) -# #define LLAMA_FILE_MAGIC_GGJT 0x67676a74u // 'ggjt' -LLAMA_FILE_MAGIC_GGJT = ctypes.c_uint(0x67676A74) -# #define LLAMA_FILE_MAGIC_GGLA 0x67676c61u // 'ggla' -LLAMA_FILE_MAGIC_GGLA = ctypes.c_uint(0x67676C61) -# #define LLAMA_FILE_MAGIC_GGMF 0x67676d66u // 'ggmf' -LLAMA_FILE_MAGIC_GGMF = ctypes.c_uint(0x67676D66) -# #define LLAMA_FILE_MAGIC_GGML 0x67676d6cu // 'ggml' -LLAMA_FILE_MAGIC_GGML = ctypes.c_uint(0x67676D6C) -# #define LLAMA_FILE_MAGIC_GGSN 0x6767736eu // 'ggsn' +# define LLAMA_DEFAULT_SEED 0xFFFFFFFF +LLAMA_DEFAULT_SEED = ctypes.c_int(0xFFFFFFFF) + +# define LLAMA_FILE_MAGIC_GGSN 0x6767736eu // 'ggsn' LLAMA_FILE_MAGIC_GGSN = ctypes.c_uint(0x6767736E) -# #define LLAMA_FILE_VERSION 3 -LLAMA_FILE_VERSION = c_int(3) -LLAMA_FILE_MAGIC = LLAMA_FILE_MAGIC_GGJT -LLAMA_FILE_MAGIC_UNVERSIONED = LLAMA_FILE_MAGIC_GGML +# define LLAMA_SESSION_MAGIC LLAMA_FILE_MAGIC_GGSN LLAMA_SESSION_MAGIC = LLAMA_FILE_MAGIC_GGSN -LLAMA_SESSION_VERSION = c_int(1) +# define LLAMA_SESSION_VERSION 1 +LLAMA_SESSION_VERSION = ctypes.c_int(1) -# #define 
LLAMA_DEFAULT_SEED 0xFFFFFFFF -LLAMA_DEFAULT_SEED = c_int(0xFFFFFFFF) # struct llama_model; llama_model_p = c_void_p @@ -122,6 +113,82 @@ llama_context_p = c_void_p llama_token = c_int llama_token_p = POINTER(llama_token) +# enum llama_log_level { +# LLAMA_LOG_LEVEL_ERROR = 2, +# LLAMA_LOG_LEVEL_WARN = 3, +# LLAMA_LOG_LEVEL_INFO = 4 +# }; +LLAMA_LOG_LEVEL_ERROR = c_int(2) +LLAMA_LOG_LEVEL_WARN = c_int(3) +LLAMA_LOG_LEVEL_INFO = c_int(4) + +# enum llama_vocab_type { +# LLAMA_VOCAB_TYPE_SPM = 0, // SentencePiece +# LLAMA_VOCAB_TYPE_BPE = 1, // Byte Pair Encoding +# }; +LLAMA_VOCAB_TYPE_SPM = c_int(0) +LLAMA_VOCAB_TYPE_BPE = c_int(1) + + +# enum llama_token_type { +# LLAMA_TOKEN_TYPE_UNDEFINED = 0, +# LLAMA_TOKEN_TYPE_NORMAL = 1, +# LLAMA_TOKEN_TYPE_UNKNOWN = 2, +# LLAMA_TOKEN_TYPE_CONTROL = 3, +# LLAMA_TOKEN_TYPE_USER_DEFINED = 4, +# LLAMA_TOKEN_TYPE_UNUSED = 5, +# LLAMA_TOKEN_TYPE_BYTE = 6, +# }; +LLAMA_TOKEN_TYPE_UNDEFINED = c_int(0) +LLAMA_TOKEN_TYPE_NORMAL = c_int(1) +LLAMA_TOKEN_TYPE_UNKNOWN = c_int(2) +LLAMA_TOKEN_TYPE_CONTROL = c_int(3) +LLAMA_TOKEN_TYPE_USER_DEFINED = c_int(4) +LLAMA_TOKEN_TYPE_UNUSED = c_int(5) +LLAMA_TOKEN_TYPE_BYTE = c_int(6) + +# enum llama_ftype { +# LLAMA_FTYPE_ALL_F32 = 0, +# LLAMA_FTYPE_MOSTLY_F16 = 1, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_Q4_0 = 2, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_Q4_1 = 3, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16 +# // LLAMA_FTYPE_MOSTLY_Q4_2 = 5, // support has been removed +# // LLAMA_FTYPE_MOSTLY_Q4_3 = 6, // support has been removed +# LLAMA_FTYPE_MOSTLY_Q8_0 = 7, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_Q5_0 = 8, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_Q5_1 = 9, // except 1d tensors +# LLAMA_FTYPE_MOSTLY_Q2_K = 10,// except 1d tensors +# LLAMA_FTYPE_MOSTLY_Q3_K_S = 11,// except 1d tensors +# LLAMA_FTYPE_MOSTLY_Q3_K_M = 12,// except 1d tensors +# LLAMA_FTYPE_MOSTLY_Q3_K_L = 13,// except 1d tensors +# LLAMA_FTYPE_MOSTLY_Q4_K_S = 14,// except 1d tensors +# LLAMA_FTYPE_MOSTLY_Q4_K_M = 15,// except 1d tensors +# LLAMA_FTYPE_MOSTLY_Q5_K_S = 16,// except 1d tensors +# LLAMA_FTYPE_MOSTLY_Q5_K_M = 17,// except 1d tensors +# LLAMA_FTYPE_MOSTLY_Q6_K = 18,// except 1d tensors +# +# LLAMA_FTYPE_GUESSED = 1024, // not specified in the model file +# }; +LLAMA_FTYPE_ALL_F32 = c_int(0) +LLAMA_FTYPE_MOSTLY_F16 = c_int(1) +LLAMA_FTYPE_MOSTLY_Q4_0 = c_int(2) +LLAMA_FTYPE_MOSTLY_Q4_1 = c_int(3) +LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = c_int(4) +LLAMA_FTYPE_MOSTLY_Q8_0 = c_int(7) +LLAMA_FTYPE_MOSTLY_Q5_0 = c_int(8) +LLAMA_FTYPE_MOSTLY_Q5_1 = c_int(9) +LLAMA_FTYPE_MOSTLY_Q2_K = c_int(10) +LLAMA_FTYPE_MOSTLY_Q3_K_S = c_int(11) +LLAMA_FTYPE_MOSTLY_Q3_K_M = c_int(12) +LLAMA_FTYPE_MOSTLY_Q3_K_L = c_int(13) +LLAMA_FTYPE_MOSTLY_Q4_K_S = c_int(14) +LLAMA_FTYPE_MOSTLY_Q4_K_M = c_int(15) +LLAMA_FTYPE_MOSTLY_Q5_K_S = c_int(16) +LLAMA_FTYPE_MOSTLY_Q5_K_M = c_int(17) +LLAMA_FTYPE_MOSTLY_Q6_K = c_int(18) +LLAMA_FTYPE_GUESSED = c_int(1024) + # typedef struct llama_token_data { # llama_token id; // token id @@ -157,35 +224,13 @@ llama_token_data_array_p = POINTER(llama_token_data_array) # typedef void (*llama_progress_callback)(float progress, void *ctx); llama_progress_callback = ctypes.CFUNCTYPE(None, c_float, c_void_p) - -# enum llama_log_level { -# LLAMA_LOG_LEVEL_ERROR = 2, -# LLAMA_LOG_LEVEL_WARN = 3, -# LLAMA_LOG_LEVEL_INFO = 4 -# }; -LLAMA_LOG_LEVEL_ERROR = c_int(2) -LLAMA_LOG_LEVEL_WARN = c_int(3) -LLAMA_LOG_LEVEL_INFO = c_int(4) - - -# // Signature for logging events -# // Note 
that text includes the new line character at the end for most events. -# // If your logging mechanism cannot handle that, check if the last character is '\n' and strip it -# // if it exists. -# // It might not exist for progress report where '.' is output repeatedly. -# typedef void (*llama_log_callback)(enum llama_log_level level, const char * text, void * user_data); -llama_log_callback = ctypes.CFUNCTYPE(None, c_int, c_char_p, c_void_p) - - # struct llama_context_params { # uint32_t seed; // RNG seed, -1 for random # int32_t n_ctx; // text context # int32_t n_batch; // prompt processing batch size -# int32_t n_gqa; // grouped-query attention (TEMP - will be moved to model hparams) -# float rms_norm_eps; // rms norm epsilon (TEMP - will be moved to model hparams) # int32_t n_gpu_layers; // number of layers to store in VRAM # int32_t main_gpu; // the GPU that is used for scratch and small tensors -# + # const float * tensor_split; // how to split layers across multiple GPUs (size: LLAMA_MAX_DEVICES) # // ref: https://github.com/ggerganov/llama.cpp/pull/2054 @@ -213,11 +258,9 @@ class llama_context_params(Structure): ("seed", c_uint32), ("n_ctx", c_int32), ("n_batch", c_int32), - ("n_gqa", c_int32), - ("rms_norm_eps", c_float), ("n_gpu_layers", c_int32), ("main_gpu", c_int32), - ("tensor_split", POINTER(c_float)), + ("tensor_split", c_float_p), ("rope_freq_base", c_float), ("rope_freq_scale", c_float), ("progress_callback", llama_progress_callback), @@ -235,50 +278,20 @@ class llama_context_params(Structure): llama_context_params_p = POINTER(llama_context_params) -# enum llama_ftype { -# LLAMA_FTYPE_ALL_F32 = 0, -# LLAMA_FTYPE_MOSTLY_F16 = 1, // except 1d tensors -# LLAMA_FTYPE_MOSTLY_Q4_0 = 2, // except 1d tensors -# LLAMA_FTYPE_MOSTLY_Q4_1 = 3, // except 1d tensors -# LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16 -# // LLAMA_FTYPE_MOSTLY_Q4_2 = 5, // support has been removed -# // LLAMA_FTYPE_MOSTLY_Q4_3 = 6, // support has been removed -# LLAMA_FTYPE_MOSTLY_Q8_0 = 7, // except 1d tensors -# LLAMA_FTYPE_MOSTLY_Q5_0 = 8, // except 1d tensors -# LLAMA_FTYPE_MOSTLY_Q5_1 = 9, // except 1d tensors -# LLAMA_FTYPE_MOSTLY_Q2_K = 10,// except 1d tensors -# LLAMA_FTYPE_MOSTLY_Q3_K_S = 11,// except 1d tensors -# LLAMA_FTYPE_MOSTLY_Q3_K_M = 12,// except 1d tensors -# LLAMA_FTYPE_MOSTLY_Q3_K_L = 13,// except 1d tensors -# LLAMA_FTYPE_MOSTLY_Q4_K_S = 14,// except 1d tensors -# LLAMA_FTYPE_MOSTLY_Q4_K_M = 15,// except 1d tensors -# LLAMA_FTYPE_MOSTLY_Q5_K_S = 16,// except 1d tensors -# LLAMA_FTYPE_MOSTLY_Q5_K_M = 17,// except 1d tensors -# LLAMA_FTYPE_MOSTLY_Q6_K = 18,// except 1d tensors -# }; -LLAMA_FTYPE_ALL_F32 = c_int(0) -LLAMA_FTYPE_MOSTLY_F16 = c_int(1) -LLAMA_FTYPE_MOSTLY_Q4_0 = c_int(2) -LLAMA_FTYPE_MOSTLY_Q4_1 = c_int(3) -LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = c_int(4) -LLAMA_FTYPE_MOSTLY_Q8_0 = c_int(7) -LLAMA_FTYPE_MOSTLY_Q5_0 = c_int(8) -LLAMA_FTYPE_MOSTLY_Q5_1 = c_int(9) -LLAMA_FTYPE_MOSTLY_Q2_K = c_int(10) -LLAMA_FTYPE_MOSTLY_Q3_K_S = c_int(11) -LLAMA_FTYPE_MOSTLY_Q3_K_M = c_int(12) -LLAMA_FTYPE_MOSTLY_Q3_K_L = c_int(13) -LLAMA_FTYPE_MOSTLY_Q4_K_S = c_int(14) -LLAMA_FTYPE_MOSTLY_Q4_K_M = c_int(15) -LLAMA_FTYPE_MOSTLY_Q5_K_S = c_int(16) -LLAMA_FTYPE_MOSTLY_Q5_K_M = c_int(17) -LLAMA_FTYPE_MOSTLY_Q6_K = c_int(18) + +# // Signature for logging events +# // Note that text includes the new line character at the end for most events. +# // If your logging mechanism cannot handle that, check if the last character is '\n' and strip it +# // if it exists. 
+# // It might not exist for progress report where '.' is output repeatedly. +# typedef void (*llama_log_callback)(enum llama_log_level level, const char * text, void * user_data); +llama_log_callback = ctypes.CFUNCTYPE(None, c_int, c_char_p, c_void_p) # // model quantization parameters # typedef struct llama_model_quantize_params { # int nthread; // number of threads to use for quantizing, if <=0 will use std::thread::hardware_concurrency() -# enum llama_ftype ftype; // quantize to this llama_ftype +# enum llama_ftype ftype; // quantize to this llama_ftype # bool allow_requantize; // allow quantizing non-f32/f16 tensors # bool quantize_output_tensor; // quantize output.weight # } llama_model_quantize_params; @@ -370,29 +383,7 @@ class llama_timings(Structure): ] -# // Set callback for all future logging events. -# // If this is not called, or NULL is supplied, everything is output on stderr. -# LLAMA_API void llama_log_set(llama_log_callback log_callback, void * user_data); -def llama_log_set( - log_callback: "ctypes._FuncPointer", user_data: c_void_p # type: ignore -): - return _lib.llama_log_set(log_callback, user_data) - - -_lib.llama_log_set.argtypes = [llama_log_callback, c_void_p] -_lib.llama_log_set.restype = None - - -# LLAMA_API int llama_max_devices(); -def llama_max_devices() -> int: - return _lib.llama_max_devices() - - -_lib.llama_max_devices.argtypes = [] -_lib.llama_max_devices.restype = c_int - - -# LLAMA_API struct llama_context_params llama_context_default_params(); +# LLAMA_API struct llama_context_params llama_context_default_params(void); def llama_context_default_params() -> llama_context_params: return _lib.llama_context_default_params() @@ -401,7 +392,7 @@ _lib.llama_context_default_params.argtypes = [] _lib.llama_context_default_params.restype = llama_context_params -# LLAMA_API struct llama_model_quantize_params llama_model_quantize_default_params(); +# LLAMA_API struct llama_model_quantize_params llama_model_quantize_default_params(void); def llama_model_quantize_default_params() -> llama_model_quantize_params: return _lib.llama_model_quantize_default_params() @@ -410,25 +401,6 @@ _lib.llama_model_quantize_default_params.argtypes = [] _lib.llama_model_quantize_default_params.restype = llama_model_quantize_params -# LLAMA_API bool llama_mmap_supported(); -def llama_mmap_supported() -> bool: - return _lib.llama_mmap_supported() - - -_lib.llama_mmap_supported.argtypes = [] -_lib.llama_mmap_supported.restype = c_bool - - -# LLAMA_API bool llama_mlock_supported(); -def llama_mlock_supported() -> bool: - return _lib.llama_mlock_supported() - - -_lib.llama_mlock_supported.argtypes = [] -_lib.llama_mlock_supported.restype = c_bool - - -# // TODO: not great API - very likely to change # // Initialize the llama + ggml backend # // If numa is true, use NUMA optimizations # // Call once at the start of the program @@ -442,7 +414,7 @@ _lib.llama_backend_init.restype = None # // Call once at the end of the program - currently only used for MPI -# LLAMA_API void llama_backend_free(); +# LLAMA_API void llama_backend_free(void); def llama_backend_free(): return _lib.llama_backend_free() @@ -452,7 +424,7 @@ _lib.llama_backend_free.restype = None # LLAMA_API struct llama_model * llama_load_model_from_file( -# const char * path_model, +# const char * path_model, # struct llama_context_params params); def llama_load_model_from_file( path_model: bytes, params: llama_context_params @@ -474,7 +446,7 @@ _lib.llama_free_model.restype = None # LLAMA_API struct llama_context * 
llama_new_context_with_model( -# struct llama_model * model, +# struct llama_model * model, # struct llama_context_params params); def llama_new_context_with_model( model: llama_model_p, params: llama_context_params @@ -486,7 +458,17 @@ _lib.llama_new_context_with_model.argtypes = [llama_model_p, llama_context_param _lib.llama_new_context_with_model.restype = llama_context_p -# LLAMA_API int64_t llama_time_us(); +# // Frees all allocated memory +# LLAMA_API void llama_free(struct llama_context * ctx); +def llama_free(ctx: llama_context_p): + return _lib.llama_free(ctx) + + +_lib.llama_free.argtypes = [llama_context_p] +_lib.llama_free.restype = None + + +# LLAMA_API int64_t llama_time_us(void); def llama_time_us() -> int: return _lib.llama_time_us() @@ -495,30 +477,95 @@ _lib.llama_time_us.argtypes = [] _lib.llama_time_us.restype = ctypes.c_int64 -# // Various functions for loading a ggml llama model. -# // Allocate (almost) all memory needed for the model. -# // Return NULL on failure -# LLAMA_API struct llama_context * llama_init_from_file( -# const char * path_model, -# struct llama_context_params params); -def llama_init_from_file( - path_model: bytes, params: llama_context_params -) -> llama_context_p: - return _lib.llama_init_from_file(path_model, params) +# LLAMA_API int llama_max_devices (void); +def llama_max_devices() -> int: + return _lib.llama_max_devices() -_lib.llama_init_from_file.argtypes = [c_char_p, llama_context_params] -_lib.llama_init_from_file.restype = llama_context_p +_lib.llama_max_devices.argtypes = [] +_lib.llama_max_devices.restype = c_int -# Frees all allocated memory -# LLAMA_API void llama_free(struct llama_context * ctx); -def llama_free(ctx: llama_context_p): - return _lib.llama_free(ctx) +# LLAMA_API bool llama_mmap_supported (void); +def llama_mmap_supported() -> bool: + return _lib.llama_mmap_supported() -_lib.llama_free.argtypes = [llama_context_p] -_lib.llama_free.restype = None +_lib.llama_mmap_supported.argtypes = [] +_lib.llama_mmap_supported.restype = c_bool + + +# LLAMA_API bool llama_mlock_supported(void); +def llama_mlock_supported() -> bool: + return _lib.llama_mlock_supported() + + +_lib.llama_mlock_supported.argtypes = [] +_lib.llama_mlock_supported.restype = c_bool + + +# LLAMA_API int llama_n_vocab(const struct llama_context * ctx); +def llama_n_vocab(ctx: llama_context_p) -> int: + return _lib.llama_n_vocab(ctx) + + +_lib.llama_n_vocab.argtypes = [llama_context_p] +_lib.llama_n_vocab.restype = c_int + + +# LLAMA_API int llama_n_ctx (const struct llama_context * ctx); +def llama_n_ctx(ctx: llama_context_p) -> int: + return _lib.llama_n_ctx(ctx) + + +_lib.llama_n_ctx.argtypes = [llama_context_p] +_lib.llama_n_ctx.restype = c_int + + +# LLAMA_API int llama_n_embd (const struct llama_context * ctx); +def llama_n_embd(ctx: llama_context_p) -> int: + return _lib.llama_n_embd(ctx) + + +_lib.llama_n_embd.argtypes = [llama_context_p] +_lib.llama_n_embd.restype = c_int + + +# LLAMA_API int llama_model_n_vocab(const struct llama_model * model); +def llama_model_n_vocab(model: llama_model_p) -> int: + return _lib.llama_model_n_vocab(model) + + +_lib.llama_model_n_vocab.argtypes = [llama_model_p] +_lib.llama_model_n_vocab.restype = c_int + + +# LLAMA_API int llama_model_n_ctx (const struct llama_model * model); +def llama_model_n_ctx(model: llama_model_p) -> int: + return _lib.llama_model_n_ctx(model) + + +_lib.llama_model_n_ctx.argtypes = [llama_model_p] +_lib.llama_model_n_ctx.restype = c_int + + +# LLAMA_API int llama_model_n_embd (const struct 
llama_model * model); +def llama_model_n_embd(model: llama_model_p) -> int: + return _lib.llama_model_n_embd(model) + + +_lib.llama_model_n_embd.argtypes = [llama_model_p] +_lib.llama_model_n_embd.restype = c_int + + +# // Get a string describing the model type +# LLAMA_API int llama_model_type(const struct llama_model * model, char * buf, size_t buf_size); +def llama_model_type(model: llama_model_p, buf: bytes, buf_size: c_size_t) -> int: + return _lib.llama_model_type(model, buf, buf_size) + + +_lib.llama_model_type.argtypes = [llama_model_p, c_char_p, c_size_t] +_lib.llama_model_type.restype = c_int # // Returns 0 on success @@ -737,147 +784,17 @@ _lib.llama_eval_embd.argtypes = [llama_context_p, c_float_p, c_int, c_int, c_int _lib.llama_eval_embd.restype = c_int -# Convert the provided text into tokens. -# The tokens pointer must be large enough to hold the resulting tokens. -# Returns the number of tokens on success, no more than n_max_tokens -# Returns a negative number on failure - the number of tokens that would have been returned -# TODO: not sure if correct -# LLAMA_API int llama_tokenize( -# struct llama_context * ctx, -# const char * text, -# llama_token * tokens, -# int n_max_tokens, -# bool add_bos); -def llama_tokenize( - ctx: llama_context_p, - text: bytes, - tokens, # type: Array[llama_token] - n_max_tokens: c_int, - add_bos: c_bool, -) -> int: - return _lib.llama_tokenize(ctx, text, tokens, n_max_tokens, add_bos) +# // Export a static computation graph for context of 511 and batch size of 1 +# // NOTE: since this functionality is mostly for debugging and demonstration purposes, we hardcode these +# // parameters here to keep things simple +# // IMPORTANT: do not use for anything else other than debugging and testing! +# LLAMA_API int llama_eval_export(struct llama_context * ctx, const char * fname); +def llama_eval_export(ctx: llama_context_p, fname: bytes) -> int: + return _lib.llama_eval_export(ctx, fname) -_lib.llama_tokenize.argtypes = [llama_context_p, c_char_p, llama_token_p, c_int, c_bool] -_lib.llama_tokenize.restype = c_int - - -# LLAMA_API int llama_tokenize_with_model( -# const struct llama_model * model, -# const char * text, -# llama_token * tokens, -# int n_max_tokens, -# bool add_bos); -def llama_tokenize_with_model( - model: llama_model_p, - text: bytes, - tokens, # type: Array[llama_token] - n_max_tokens: c_int, - add_bos: c_bool, -) -> int: - return _lib.llama_tokenize_with_model(model, text, tokens, n_max_tokens, add_bos) - - -# LLAMA_API int llama_n_vocab(const struct llama_context * ctx); -def llama_n_vocab(ctx: llama_context_p) -> int: - return _lib.llama_n_vocab(ctx) - - -_lib.llama_n_vocab.argtypes = [llama_context_p] -_lib.llama_n_vocab.restype = c_int - - -# LLAMA_API int llama_n_ctx (const struct llama_context * ctx); -def llama_n_ctx(ctx: llama_context_p) -> int: - return _lib.llama_n_ctx(ctx) - - -_lib.llama_n_ctx.argtypes = [llama_context_p] -_lib.llama_n_ctx.restype = c_int - - -# LLAMA_API int llama_n_embd (const struct llama_context * ctx); -def llama_n_embd(ctx: llama_context_p) -> int: - return _lib.llama_n_embd(ctx) - - -_lib.llama_n_embd.argtypes = [llama_context_p] -_lib.llama_n_embd.restype = c_int - - -# LLAMA_API int llama_n_vocab_from_model(const struct llama_model * model); -def llama_n_vocab_from_model(model: llama_model_p) -> int: - return _lib.llama_n_vocab_from_model(model) - - -_lib.llama_n_vocab_from_model.argtypes = [llama_model_p] -_lib.llama_n_vocab_from_model.restype = c_int - - -# LLAMA_API int llama_n_ctx_from_model 
(const struct llama_model * model); -def llama_n_ctx_from_model(model: llama_model_p) -> int: - return _lib.llama_n_ctx_from_model(model) - - -_lib.llama_n_ctx_from_model.argtypes = [llama_model_p] -_lib.llama_n_ctx_from_model.restype = c_int - - -# LLAMA_API int llama_n_embd_from_model (const struct llama_model * model); -def llama_n_embd_from_model(model: llama_model_p) -> int: - return _lib.llama_n_embd_from_model(model) - - -_lib.llama_n_embd_from_model.argtypes = [llama_model_p] -_lib.llama_n_embd_from_model.restype = c_int - - -# // Get the vocabulary as output parameters. -# // Returns number of results. -# LLAMA_API int llama_get_vocab( -# const struct llama_context * ctx, -# const char * * strings, -# float * scores, -# int capacity); -def llama_get_vocab( - ctx: llama_context_p, - strings, # type: Array[c_char_p] # type: ignore - scores, # type: Array[c_float] # type: ignore - capacity: c_int, -) -> int: - return _lib.llama_get_vocab(ctx, strings, scores, capacity) - - -_lib.llama_get_vocab.argtypes = [ - llama_context_p, - POINTER(c_char_p), - POINTER(c_float), - c_int, -] -_lib.llama_get_vocab.restype = c_int - - -# LLAMA_API int llama_get_vocab_from_model( -# const struct llama_model * model, -# const char * * strings, -# float * scores, -# int capacity); -def llama_get_vocab_from_model( - model: llama_model_p, - strings, # type: Array[c_char_p] # type: ignore - scores, # type: Array[c_float] # type: ignore - capacity: c_int, -) -> int: - return _lib.llama_get_vocab_from_model(model, strings, scores, capacity) - - -_lib.llama_get_vocab_from_model.argtypes = [ - llama_model_p, - POINTER(c_char_p), - POINTER(c_float), - c_int, -] -_lib.llama_get_vocab_from_model.restype = c_int +_lib.llama_eval_export.argtypes = [llama_context_p, c_char_p] +_lib.llama_eval_export.restype = c_int # Token logits obtained from the last call to llama_eval() @@ -909,16 +826,186 @@ _lib.llama_get_embeddings.argtypes = [llama_context_p] _lib.llama_get_embeddings.restype = c_float_p +# // +# // Vocab +# // + + +# LLAMA_API const char * llama_token_get_text(const struct llama_context * ctx, llama_token token); +def llama_token_get_text(ctx: llama_context_p, token: llama_token) -> bytes: + return _lib.llama_token_get_text(ctx, token) + + +_lib.llama_token_get_text.argtypes = [llama_context_p, llama_token] +_lib.llama_token_get_text.restype = c_char_p + + +# LLAMA_API float llama_token_get_score(const struct llama_context * ctx, llama_token token); +def llama_token_get_score(ctx: llama_context_p, token: llama_token) -> float: + return _lib.llama_token_get_score(ctx, token) + + +_lib.llama_token_get_score.argtypes = [llama_context_p, llama_token] +_lib.llama_token_get_score.restype = c_float + + +# LLAMA_API llama_token_type llama_token_get_type(const struct llama_context * ctx, llama_token token); +def llama_token_get_type(ctx: llama_context_p, token: llama_token) -> int: + return _lib.llama_token_get_type(ctx, token) + + +_lib.llama_token_get_type.argtypes = [llama_context_p, llama_token] +_lib.llama_token_get_type.restype = ctypes.c_int + + +# // Special tokens + + +# LLAMA_API llama_token llama_token_bos(const struct llama_context * ctx); // beginning-of-sentence +def llama_token_bos(ctx: llama_context_p) -> llama_token: + return _lib.llama_token_bos(ctx) + + +_lib.llama_token_bos.argtypes = [llama_context_p] +_lib.llama_token_bos.restype = llama_token + + +# LLAMA_API llama_token llama_token_eos(const struct llama_context * ctx); // end-of-sentence +def llama_token_eos(ctx: llama_context_p) -> 
llama_token: + return _lib.llama_token_eos(ctx) + + +_lib.llama_token_eos.argtypes = [llama_context_p] +_lib.llama_token_eos.restype = llama_token + + +# LLAMA_API llama_token llama_token_nl (const struct llama_context * ctx); // next-line +def llama_token_nl(ctx: llama_context_p) -> llama_token: + return _lib.llama_token_nl(ctx) + + +_lib.llama_token_nl.argtypes = [llama_context_p] +_lib.llama_token_nl.restype = llama_token + + +# // +# // Tokenization +# // + + +# Convert the provided text into tokens. +# The tokens pointer must be large enough to hold the resulting tokens. +# Returns the number of tokens on success, no more than n_max_tokens +# Returns a negative number on failure - the number of tokens that would have been returned +# TODO: not sure if correct +# LLAMA_API int llama_tokenize( +# struct llama_context * ctx, +# const char * text, +# llama_token * tokens, +# int n_max_tokens, +# bool add_bos); +def llama_tokenize( + ctx: llama_context_p, + text: bytes, + tokens, # type: Array[llama_token] + n_max_tokens: c_int, + add_bos: c_bool, +) -> int: + return _lib.llama_tokenize(ctx, text, tokens, n_max_tokens, add_bos) + + +_lib.llama_tokenize.argtypes = [llama_context_p, c_char_p, llama_token_p, c_int, c_bool] +_lib.llama_tokenize.restype = c_int + + +# LLAMA_API int llama_tokenize_bpe( +# struct llama_context * ctx, +# const char * text, +# llama_token * tokens, +# int n_max_tokens, +# bool add_bos); +def llama_tokenize_bpe( + ctx: llama_context_p, + text: bytes, + tokens, # type: Array[llama_token] + n_max_tokens: c_int, + add_bos: c_bool, +) -> int: + return _lib.llama_tokenize_bpe(ctx, text, tokens, n_max_tokens, add_bos) + + +_lib.llama_tokenize_bpe.argtypes = [ + llama_context_p, + c_char_p, + llama_token_p, + c_int, + c_bool, +] +_lib.llama_tokenize_bpe.restype = c_int + + +# LLAMA_API int llama_tokenize_with_model( +# const struct llama_model * model, +# const char * text, +# llama_token * tokens, +# int n_max_tokens, +# bool add_bos); +def llama_tokenize_with_model( + model: llama_model_p, + text: bytes, + tokens, # type: Array[llama_token] + n_max_tokens: c_int, + add_bos: c_bool, +) -> int: + return _lib.llama_tokenize_with_model(model, text, tokens, n_max_tokens, add_bos) + + +_lib.llama_tokenize_with_model.argtypes = [ + llama_model_p, + c_char_p, + llama_token_p, + c_int, + c_bool, +] +_lib.llama_tokenize_with_model.restype = c_int + + # // Token Id -> String. 
Uses the vocabulary in the provided context -# LLAMA_API const char * llama_token_to_str( +# // Does not write null terminator to the buffer +# LLAMA_API int llama_token_to_str( # const struct llama_context * ctx, -# llama_token token); -def llama_token_to_str(ctx: llama_context_p, token: llama_token) -> bytes: - return _lib.llama_token_to_str(ctx, token) +# llama_token token, +# char * buf, +# int length); +def llama_token_to_str( + ctx: llama_context_p, token: llama_token, buf: bytes, length: c_int +) -> int: + return _lib.llama_token_to_str(ctx, token, buf, length) -_lib.llama_token_to_str.argtypes = [llama_context_p, llama_token] -_lib.llama_token_to_str.restype = c_char_p +_lib.llama_tokenize_with_model.argtypes = [ + llama_model_p, + c_char_p, + llama_token_p, + c_int, + c_bool, +] +_lib.llama_tokenize_with_model.restype = c_int + + +# LLAMA_API int llama_token_to_str_bpe( +# const struct llama_context * ctx, +# llama_token token, +# char * buf, +# int length); +def llama_token_to_str_bpe( + ctx: llama_context_p, token: llama_token, buf: bytes, length: c_int +) -> int: + return _lib.llama_token_to_str_bpe(ctx, token, buf, length) + + +_lib.llama_token_to_str_bpe.argtypes = [llama_context_p, llama_token, c_char_p, c_int] +_lib.llama_token_to_str_bpe.restype = c_int # LLAMA_API const char * llama_token_to_str_with_model( @@ -931,38 +1018,12 @@ def llama_token_to_str_with_model(model: llama_model_p, token: llama_token) -> b _lib.llama_token_to_str_with_model.argtypes = [llama_model_p, llama_token] _lib.llama_token_to_str_with_model.restype = c_char_p -# Special tokens - - -# LLAMA_API llama_token llama_token_bos(); // beginning-of-sentence -def llama_token_bos() -> int: - return _lib.llama_token_bos() - - -_lib.llama_token_bos.argtypes = [] -_lib.llama_token_bos.restype = llama_token - - -# LLAMA_API llama_token llama_token_eos(); // end-of-sentence -def llama_token_eos() -> int: - return _lib.llama_token_eos() - - -_lib.llama_token_eos.argtypes = [] -_lib.llama_token_eos.restype = llama_token - - -# LLAMA_API llama_token llama_token_nl(); // next-line -def llama_token_nl() -> int: - return _lib.llama_token_nl() - - -_lib.llama_token_nl.argtypes = [] -_lib.llama_token_nl.restype = llama_token - +# // # // Grammar # // + + # LLAMA_API struct llama_grammar * llama_grammar_init( # const llama_grammar_element ** rules, # size_t n_rules, @@ -992,7 +1053,9 @@ _lib.llama_grammar_free.argtypes = [llama_grammar_p] _lib.llama_grammar_free.restype = None -# Sampling functions +# // +# // Sampling functions +# // # @details Repetition penalty described in CTRL academic paper https://arxiv.org/abs/1909.05858, with negative logit fix. @@ -1351,6 +1414,19 @@ def llama_print_system_info() -> bytes: _lib.llama_print_system_info.argtypes = [] _lib.llama_print_system_info.restype = c_char_p + +# // Set callback for all future logging events. +# // If this is not called, or NULL is supplied, everything is output on stderr. 
+# LLAMA_API void llama_log_set(llama_log_callback log_callback, void * user_data); +def llama_log_set( + log_callback: "ctypes._FuncPointer", user_data: c_void_p # type: ignore +): + return _lib.llama_log_set(log_callback, user_data) + + +_lib.llama_log_set.argtypes = [llama_log_callback, c_void_p] +_lib.llama_log_set.restype = None + ################################################################################################### diff --git a/vendor/llama.cpp b/vendor/llama.cpp index 604b8bd..f5fe98d 160000 --- a/vendor/llama.cpp +++ b/vendor/llama.cpp @@ -1 +1 @@ -Subproject commit 604b8bdfa6320bbcb018eebcc1252dfede603c6b +Subproject commit f5fe98d11bdf9e7797bcfb05c0c3601ffc4b9d26 From 4ed632c4b352f80a8cd19ad5923e2981a2bb3c53 Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Thu, 24 Aug 2023 01:01:05 -0400 Subject: [PATCH 02/11] Remove deprecated params --- llama_cpp/llama.py | 25 +++++-------------------- 1 file changed, 5 insertions(+), 20 deletions(-) diff --git a/llama_cpp/llama.py b/llama_cpp/llama.py index bfcae18..d6fd830 100644 --- a/llama_cpp/llama.py +++ b/llama_cpp/llama.py @@ -228,7 +228,7 @@ class Llama: rope_freq_scale: float = 1.0, n_gqa: Optional[int] = None, # (TEMPORARY) must be 8 for llama2 70b rms_norm_eps: Optional[float] = None, # (TEMPORARY) - mul_mat_q: Optional[bool] = None, # (TEMPORARY) + mul_mat_q: Optional[bool] = None, verbose: bool = True, ): """Load a llama.cpp model from `model_path`. @@ -290,11 +290,6 @@ class Llama: self.params.rope_freq_base = rope_freq_base self.params.rope_freq_scale = rope_freq_scale - if n_gqa is not None: - self.params.n_gqa = n_gqa - - if rms_norm_eps is not None: - self.params.rms_norm_eps = rms_norm_eps if mul_mat_q is not None: self.params.mul_mat_q = mul_mat_q @@ -453,6 +448,8 @@ class Llama: buffer_size = 32 buffer = (ctypes.c_char * buffer_size)() for token in tokens: + if token == llama_cpp.llama_token_bos(self.ctx): + continue n = llama_cpp.llama_token_to_str( self.ctx, llama_cpp.llama_token(token), buffer, buffer_size ) @@ -1585,13 +1582,7 @@ class Llama: lora_base=self.lora_base, lora_path=self.lora_path, tensor_split=self.tensor_split, - ### TEMPORARY ### - n_gqa=self.params.n_gqa, - rms_norm_eps=self.params.rms_norm_eps, - ### TEMPORARY ### - ### DEPRECATED ### - n_parts=self.n_parts, - ### DEPRECATED ### + mul_mat_q=self.params.mul_mat_q, ) def __setstate__(self, state): @@ -1613,14 +1604,8 @@ class Llama: lora_base=state["lora_base"], lora_path=state["lora_path"], tensor_split=state["tensor_split"], + mul_mat_q=state["mul_mat_q"], verbose=state["verbose"], - ### TEMPORARY ### - n_gqa=state["n_gqa"], - rms_norm_eps=state["rms_norm_eps"], - ### TEMPORARY ### - ### DEPRECATED ### - n_parts=state["n_parts"], - ### DEPRECATED ### ) def save_state(self) -> LlamaState: From db982a861fe74ffa88af901cf8d3df07ae68f0e1 Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Thu, 24 Aug 2023 01:01:12 -0400 Subject: [PATCH 03/11] Fix --- llama_cpp/llama_cpp.py | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/llama_cpp/llama_cpp.py b/llama_cpp/llama_cpp.py index c9200c6..5442708 100644 --- a/llama_cpp/llama_cpp.py +++ b/llama_cpp/llama_cpp.py @@ -1008,15 +1008,24 @@ _lib.llama_token_to_str_bpe.argtypes = [llama_context_p, llama_token, c_char_p, _lib.llama_token_to_str_bpe.restype = c_int -# LLAMA_API const char * llama_token_to_str_with_model( -# const struct llama_model * model, -# llama_token token); -def llama_token_to_str_with_model(model: llama_model_p, token: llama_token) -> bytes: - 
return _lib.llama_token_to_str_with_model(model, token) +# LLAMA_API int llama_token_to_str_with_model( +# const struct llama_model * model, +# llama_token token, +# char * buf, +# int length); +def llama_token_to_str_with_model( + model: llama_model_p, token: llama_token, buf: bytes, length: c_int +) -> int: + return _lib.llama_token_to_str_with_model(model, token, buf, length) -_lib.llama_token_to_str_with_model.argtypes = [llama_model_p, llama_token] -_lib.llama_token_to_str_with_model.restype = c_char_p +_lib.llama_token_to_str_with_model.argtypes = [ + llama_model_p, + llama_token, + c_char_p, + c_int, +] +_lib.llama_token_to_str_with_model.restype = c_int # // From 3674e5ed4ef2f0271b30eb92d982d8b7524d2f34 Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Thu, 24 Aug 2023 01:01:20 -0400 Subject: [PATCH 04/11] Update model path --- tests/test_llama.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_llama.py b/tests/test_llama.py index 941287d..9701321 100644 --- a/tests/test_llama.py +++ b/tests/test_llama.py @@ -1,6 +1,6 @@ import llama_cpp -MODEL = "./vendor/llama.cpp/models/ggml-vocab.bin" +MODEL = "./vendor/llama.cpp/models/ggml-vocab-llama.gguf" def test_llama(): From c2d1deaa8a759518eb9a8486e44ae7a6059d131d Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Thu, 24 Aug 2023 18:01:42 -0400 Subject: [PATCH 05/11] Update llama.cpp --- llama_cpp/llama_cpp.py | 52 ++++++++---------------------------------- vendor/llama.cpp | 2 +- 2 files changed, 11 insertions(+), 43 deletions(-) diff --git a/llama_cpp/llama_cpp.py b/llama_cpp/llama_cpp.py index 5442708..62ddbf4 100644 --- a/llama_cpp/llama_cpp.py +++ b/llama_cpp/llama_cpp.py @@ -531,6 +531,15 @@ _lib.llama_n_embd.argtypes = [llama_context_p] _lib.llama_n_embd.restype = c_int +# LLAMA_API enum llama_vocab_type llama_vocab_type(const struct llama_context * ctx); +def llama_vocab_type(ctx: llama_context_p) -> int: + return _lib.llama_vocab_type(ctx) + + +_lib.llama_vocab_type.argtypes = [llama_context_p] +_lib.llama_vocab_type.restype = c_int + + # LLAMA_API int llama_model_n_vocab(const struct llama_model * model); def llama_model_n_vocab(model: llama_model_p) -> int: return _lib.llama_model_n_vocab(model) @@ -849,7 +858,7 @@ _lib.llama_token_get_score.argtypes = [llama_context_p, llama_token] _lib.llama_token_get_score.restype = c_float -# LLAMA_API llama_token_type llama_token_get_type(const struct llama_context * ctx, llama_token token); +# LLAMA_API enum llama_token_type llama_token_get_type(const struct llama_context * ctx, llama_token token); def llama_token_get_type(ctx: llama_context_p, token: llama_token) -> int: return _lib.llama_token_get_type(ctx, token) @@ -918,32 +927,6 @@ _lib.llama_tokenize.argtypes = [llama_context_p, c_char_p, llama_token_p, c_int, _lib.llama_tokenize.restype = c_int -# LLAMA_API int llama_tokenize_bpe( -# struct llama_context * ctx, -# const char * text, -# llama_token * tokens, -# int n_max_tokens, -# bool add_bos); -def llama_tokenize_bpe( - ctx: llama_context_p, - text: bytes, - tokens, # type: Array[llama_token] - n_max_tokens: c_int, - add_bos: c_bool, -) -> int: - return _lib.llama_tokenize_bpe(ctx, text, tokens, n_max_tokens, add_bos) - - -_lib.llama_tokenize_bpe.argtypes = [ - llama_context_p, - c_char_p, - llama_token_p, - c_int, - c_bool, -] -_lib.llama_tokenize_bpe.restype = c_int - - # LLAMA_API int llama_tokenize_with_model( # const struct llama_model * model, # const char * text, @@ -993,21 +976,6 @@ _lib.llama_tokenize_with_model.argtypes = [ 
_lib.llama_tokenize_with_model.restype = c_int -# LLAMA_API int llama_token_to_str_bpe( -# const struct llama_context * ctx, -# llama_token token, -# char * buf, -# int length); -def llama_token_to_str_bpe( - ctx: llama_context_p, token: llama_token, buf: bytes, length: c_int -) -> int: - return _lib.llama_token_to_str_bpe(ctx, token, buf, length) - - -_lib.llama_token_to_str_bpe.argtypes = [llama_context_p, llama_token, c_char_p, c_int] -_lib.llama_token_to_str_bpe.restype = c_int - - # LLAMA_API int llama_token_to_str_with_model( # const struct llama_model * model, # llama_token token, diff --git a/vendor/llama.cpp b/vendor/llama.cpp index f5fe98d..2e5f70a 160000 --- a/vendor/llama.cpp +++ b/vendor/llama.cpp @@ -1 +1 @@ -Subproject commit f5fe98d11bdf9e7797bcfb05c0c3601ffc4b9d26 +Subproject commit 2e5f70a25fc4576e9ed78603fe493eb7702c37a3 From 8ac59465b9b5c39fb6cc833dc5c3319664e60ec0 Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Fri, 25 Aug 2023 04:56:48 -0400 Subject: [PATCH 06/11] Strip leading space when de-tokenizing. --- llama_cpp/llama.py | 10 +++++----- tests/test_llama.py | 20 ++++++++++++++++---- 2 files changed, 21 insertions(+), 9 deletions(-) diff --git a/llama_cpp/llama.py b/llama_cpp/llama.py index d6fd830..b8f76e9 100644 --- a/llama_cpp/llama.py +++ b/llama_cpp/llama.py @@ -445,17 +445,17 @@ class Llama: """ assert self.ctx is not None output = b"" - buffer_size = 32 + buffer_size = 8 buffer = (ctypes.c_char * buffer_size)() for token in tokens: - if token == llama_cpp.llama_token_bos(self.ctx): - continue n = llama_cpp.llama_token_to_str( self.ctx, llama_cpp.llama_token(token), buffer, buffer_size ) assert n <= buffer_size output += bytes(buffer[:n]) - return output + # NOTE: Llama1 models automatically added a space at the start of the prompt + # this line removes a leading space if the first token is a beginning of sentence token + return output[1:] if len(tokens) > 0 and tokens[0] == self.token_bos() else output def set_cache(self, cache: Optional[BaseLlamaCache]): """Set the cache. 
@@ -886,7 +886,7 @@ class Llama: created: int = int(time.time()) completion_tokens: List[int] = [] # Add blank space to start of prompt to match OG llama tokenizer - prompt_tokens: List[int] = self.tokenize(b" " + prompt.encode("utf-8")) + prompt_tokens: List[int] = self.tokenize(prompt.encode("utf-8")) if prompt != "" else [self.token_bos()] text: bytes = b"" returned_tokens: int = 0 stop = ( diff --git a/tests/test_llama.py b/tests/test_llama.py index 9701321..c240122 100644 --- a/tests/test_llama.py +++ b/tests/test_llama.py @@ -1,20 +1,32 @@ +import pytest import llama_cpp MODEL = "./vendor/llama.cpp/models/ggml-vocab-llama.gguf" -def test_llama(): - llama = llama_cpp.Llama(model_path=MODEL, vocab_only=True) +def test_llama_cpp_tokenization(): + llama = llama_cpp.Llama(model_path=MODEL, vocab_only=True, verbose=False) assert llama assert llama.ctx is not None text = b"Hello World" - assert llama.detokenize(llama.tokenize(text)) == text + tokens = llama.tokenize(text) + assert tokens[0] == llama.token_bos() + assert tokens == [1, 15043, 2787] + detokenized = llama.detokenize(tokens) + assert detokenized == text + + tokens = llama.tokenize(text, add_bos=False) + assert tokens[0] != llama.token_bos() + assert tokens == [15043, 2787] + + detokenized = llama.detokenize(tokens) + assert detokenized != text -# @pytest.mark.skip(reason="need to update sample mocking") +@pytest.mark.skip(reason="bug in tokenization where leading space is always inserted even if not after eos") def test_llama_patch(monkeypatch): llama = llama_cpp.Llama(model_path=MODEL, vocab_only=True) n_vocab = llama_cpp.llama_n_vocab(llama.ctx) From 80389f71da6dc8688af0859c6b93812821abb181 Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Fri, 25 Aug 2023 05:02:48 -0400 Subject: [PATCH 07/11] Update README --- README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.md b/README.md index 7c515d0..0901b63 100644 --- a/README.md +++ b/README.md @@ -17,6 +17,9 @@ This package provides: Documentation is available at [https://llama-cpp-python.readthedocs.io/en/latest](https://llama-cpp-python.readthedocs.io/en/latest). +> [!WARNING] +> Starting with version 0.1.79 the model format has changed from `ggmlv3` to `gguf` + ## Installation from PyPI (recommended) From 48cf43b4270a95ac875fc2ffc24bb28196ac3014 Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Fri, 25 Aug 2023 13:43:16 -0400 Subject: [PATCH 08/11] Use _with_model variants for tokenization --- llama_cpp/llama.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/llama_cpp/llama.py b/llama_cpp/llama.py index b8f76e9..fd3e2c4 100644 --- a/llama_cpp/llama.py +++ b/llama_cpp/llama.py @@ -408,11 +408,11 @@ class Llama: Returns: A list of tokens. """ - assert self.ctx is not None + assert self.model is not None n_ctx = self._n_ctx tokens = (llama_cpp.llama_token * n_ctx)() - n_tokens = llama_cpp.llama_tokenize( - self.ctx, + n_tokens = llama_cpp.llama_tokenize_with_model( + self.model, text, tokens, llama_cpp.c_int(n_ctx), @@ -421,8 +421,8 @@ class Llama: if n_tokens < 0: n_tokens = abs(n_tokens) tokens = (llama_cpp.llama_token * n_tokens)() - n_tokens = llama_cpp.llama_tokenize( - self.ctx, + n_tokens = llama_cpp.llama_tokenize_with_model( + self.model, text, tokens, llama_cpp.c_int(n_tokens), @@ -443,15 +443,15 @@ class Llama: Returns: The detokenized string. 
""" - assert self.ctx is not None + assert self.model is not None output = b"" - buffer_size = 8 - buffer = (ctypes.c_char * buffer_size)() + size = 8 + buffer = (ctypes.c_char * size)() for token in tokens: - n = llama_cpp.llama_token_to_str( - self.ctx, llama_cpp.llama_token(token), buffer, buffer_size + n = llama_cpp.llama_token_to_str_with_model( + self.model, llama_cpp.llama_token(token), buffer, size ) - assert n <= buffer_size + assert n <= size output += bytes(buffer[:n]) # NOTE: Llama1 models automatically added a space at the start of the prompt # this line removes a leading space if the first token is a beginning of sentence token From c8a7637978436538d26a5d7d79b8c8e3c3fab4da Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Fri, 25 Aug 2023 14:35:27 -0400 Subject: [PATCH 09/11] Ignore vendor directory for tests --- pyproject.toml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index 8735b60..11d38b4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -35,6 +35,9 @@ scikit-build = "0.17.6" [tool.poetry.extras] server = ["uvicorn", "fastapi", "pydantic-settings", "sse-starlette"] +[tool.pytest.ini_options] +addopts = "--ignore=vendor" + [build-system] requires = [ "setuptools>=42", From ef23d1e545a1db51a6fe110d1f6b1317374a7de3 Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Fri, 25 Aug 2023 14:35:53 -0400 Subject: [PATCH 10/11] Update llama.cpp --- llama_cpp/llama_cpp.py | 98 +++++++++++++++++++++++++++++++++++++++--- vendor/llama.cpp | 2 +- 2 files changed, 94 insertions(+), 6 deletions(-) diff --git a/llama_cpp/llama_cpp.py b/llama_cpp/llama_cpp.py index 62ddbf4..1731878 100644 --- a/llama_cpp/llama_cpp.py +++ b/llama_cpp/llama_cpp.py @@ -568,13 +568,33 @@ _lib.llama_model_n_embd.restype = c_int # // Get a string describing the model type -# LLAMA_API int llama_model_type(const struct llama_model * model, char * buf, size_t buf_size); -def llama_model_type(model: llama_model_p, buf: bytes, buf_size: c_size_t) -> int: - return _lib.llama_model_type(model, buf, buf_size) +# LLAMA_API int llama_model_desc(const struct llama_model * model, char * buf, size_t buf_size); +def llama_model_desc(model: llama_model_p, buf: bytes, buf_size: c_size_t) -> int: + return _lib.llama_model_desc(model, buf, buf_size) -_lib.llama_model_type.argtypes = [llama_model_p, c_char_p, c_size_t] -_lib.llama_model_type.restype = c_int +_lib.llama_model_desc.argtypes = [llama_model_p, c_char_p, c_size_t] +_lib.llama_model_desc.restype = c_int + + +# // Returns the total size of all the tensors in the model in bytes +# LLAMA_API uint64_t llama_model_size(const struct llama_model * model); +def llama_model_size(model: llama_model_p) -> int: + return _lib.llama_model_size(model) + + +_lib.llama_model_size.argtypes = [llama_model_p] +_lib.llama_model_size.restype = ctypes.c_uint64 + + +# // Returns the total number of parameters in the model +# LLAMA_API uint64_t llama_model_n_params(const struct llama_model * model); +def llama_model_n_params(model: llama_model_p) -> int: + return _lib.llama_model_n_params(model) + + +_lib.llama_model_n_params.argtypes = [llama_model_p] +_lib.llama_model_n_params.restype = ctypes.c_uint64 # // Returns 0 on success @@ -1029,6 +1049,74 @@ def llama_grammar_free(grammar: llama_grammar_p): _lib.llama_grammar_free.argtypes = [llama_grammar_p] _lib.llama_grammar_free.restype = None +# // +# // Beam search +# // + + +# struct llama_beam_view { +# const llama_token * tokens; +# size_t n_tokens; +# float p; // Cumulative beam probability 
(renormalized relative to all beams) +# bool eob; // Callback should set this to true when a beam is at end-of-beam. +# }; +class llama_beam_view(ctypes.Structure): + _fields_ = [ + ("tokens", llama_token_p), + ("n_tokens", c_size_t), + ("p", c_float), + ("eob", c_bool), + ] + + +# // Passed to beam_search_callback function. +# // Whenever 0 < common_prefix_length, this number of tokens should be copied from any of the beams +# // (e.g. beams[0]) as they will be removed (shifted) from all beams in all subsequent callbacks. +# // These pointers are valid only during the synchronous callback, so should not be saved. +# struct llama_beams_state { +# struct llama_beam_view * beam_views; +# size_t n_beams; // Number of elements in beam_views[]. +# size_t common_prefix_length; // Current max length of prefix tokens shared by all beams. +# bool last_call; // True iff this is the last callback invocation. +# }; +class llama_beams_state(ctypes.Structure): + _fields_ = [ + ("beam_views", POINTER(llama_beam_view)), + ("n_beams", c_size_t), + ("common_prefix_length", c_size_t), + ("last_call", c_bool), + ] + + +# // Type of pointer to the beam_search_callback function. +# // void* callback_data is any custom data passed to llama_beam_search, that is subsequently +# // passed back to beam_search_callback. This avoids having to use global variables in the callback. +# typedef void (*llama_beam_search_callback_fn_t)(void * callback_data, llama_beams_state); +llama_beam_search_callback_fn_t = ctypes.CFUNCTYPE(None, c_void_p, llama_beams_state) + + +# /// @details Deterministically returns entire sentence constructed by a beam search. +# /// @param ctx Pointer to the llama_context. +# /// @param callback Invoked for each iteration of the beam_search loop, passing in beams_state. +# /// @param callback_data A pointer that is simply passed back to callback. +# /// @param n_beams Number of beams to use. +# /// @param n_past Number of tokens already evaluated. +# /// @param n_predict Maximum number of tokens to predict. EOS may occur earlier. +# /// @param n_threads Number of threads as passed to llama_eval(). +# LLAMA_API void llama_beam_search(struct llama_context * ctx, llama_beam_search_callback_fn_t callback, void * callback_data, size_t n_beams, int n_past, int n_predict, int n_threads); +def llama_beam_search( + ctx: llama_context_p, + callback: "ctypes._CFuncPtr[None, c_void_p, llama_beams_state]", # type: ignore + callback_data: c_void_p, + n_beams: c_size_t, + n_past: c_int, + n_predict: c_int, + n_threads: c_int, +): + return _lib.llama_beam_search( + ctx, callback, callback_data, n_beams, n_past, n_predict, n_threads + ) + # // # // Sampling functions diff --git a/vendor/llama.cpp b/vendor/llama.cpp index 2e5f70a..232caf3 160000 --- a/vendor/llama.cpp +++ b/vendor/llama.cpp @@ -1 +1 @@ -Subproject commit 2e5f70a25fc4576e9ed78603fe493eb7702c37a3 +Subproject commit 232caf3c1581a6cb023571780ff41dc2d66d1ca0 From ac37ea562bb9286bd222e5bd83e11d34f91256b1 Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Fri, 25 Aug 2023 15:11:08 -0400 Subject: [PATCH 11/11] Add temporary docs for GGUF model conversion --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 0901b63..b9d72f9 100644 --- a/README.md +++ b/README.md @@ -18,7 +18,7 @@ This package provides: Documentation is available at [https://llama-cpp-python.readthedocs.io/en/latest](https://llama-cpp-python.readthedocs.io/en/latest). 
> [!WARNING] -> Starting with version 0.1.79 the model format has changed from `ggmlv3` to `gguf` +> Starting with version 0.1.79 the model format has changed from `ggmlv3` to `gguf`. Old model files can be converted using the `convert-llama-ggmlv3-to-gguf.py` script in [`llama.cpp`](https://github.com/ggerganov/llama.cpp) ## Installation from PyPI (recommended)
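
For reference, patches 01, 03, and 08 above change the token-to-string API: `llama_token_to_str` and `llama_token_to_str_with_model` no longer return a `const char *`; they write into a caller-supplied buffer and return the number of bytes written, with no NUL terminator. The sketch below illustrates the new calling convention and is not part of the patch series itself; the model path and `vocab_only` loading are taken from the updated test, while the 32-byte buffer size is an assumption.

```python
import ctypes
import llama_cpp

# Vocab-only load, as in tests/test_llama.py after patch 04.
llama = llama_cpp.Llama(
    model_path="./vendor/llama.cpp/models/ggml-vocab-llama.gguf",
    vocab_only=True,
)

tokens = llama.tokenize(b"Hello World")  # BOS token is prepended by default

# Low-level round trip using the _with_model variants adopted in patch 08.
buf_size = 32  # assumption: large enough for any single token piece
buf = (ctypes.c_char * buf_size)()
pieces = b""
for tok in tokens:
    # Returns the number of bytes written into buf (no NUL terminator).
    n = llama_cpp.llama_token_to_str_with_model(
        llama.model, llama_cpp.llama_token(tok), buf, buf_size
    )
    pieces += bytes(buf[:n])

# Expect b" Hello World": the BOS token contributes nothing and the first
# piece carries a leading space, which the high-level detokenize() strips
# when the first token is BOS (patch 06).
print(pieces)
```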