import sys
import os
import ctypes
from ctypes import (
    c_int,
    c_float,
    c_char_p,
    c_void_p,
    c_bool,
    POINTER,
    _Pointer,  # type: ignore
    Structure,
    Array,
    c_uint8,
    c_size_t,
)
import pathlib


# Load the library
def _load_shared_library(lib_base_name: str):
    # Determine the file extension based on the platform
    if sys.platform.startswith("linux"):
        lib_ext = ".so"
    elif sys.platform == "darwin":
        lib_ext = ".so"
    elif sys.platform == "win32":
        lib_ext = ".dll"
    else:
        raise RuntimeError("Unsupported platform")

    # Construct the paths to the possible shared library names
    _base_path = pathlib.Path(__file__).parent.resolve()
    # Searching for the library in the current directory under the name "libllama" (default name
    # for llamacpp) and "llama" (default name for this repo)
    _lib_paths = [
        _base_path / f"lib{lib_base_name}{lib_ext}",
        _base_path / f"{lib_base_name}{lib_ext}",
    ]

    if "LLAMA_CPP_LIB" in os.environ:
        lib_base_name = os.environ["LLAMA_CPP_LIB"]
        _lib = pathlib.Path(lib_base_name)
        _base_path = _lib.parent.resolve()
        _lib_paths = [_lib.resolve()]

    # Add the library directory to the DLL search path on Windows (if needed)
    if sys.platform == "win32" and sys.version_info >= (3, 8):
        os.add_dll_directory(str(_base_path))

    # Try to load the shared library, handling potential errors
    for _lib_path in _lib_paths:
        if _lib_path.exists():
            try:
                return ctypes.CDLL(str(_lib_path), winmode=0)
            except Exception as e:
                raise RuntimeError(f"Failed to load shared library '{_lib_path}': {e}")

    raise FileNotFoundError(
        f"Shared library with base name '{lib_base_name}' not found"
    )


# Specify the base name of the shared library to load
_lib_base_name = "llama"

# Load the library
_lib = _load_shared_library(_lib_base_name)
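
# A minimal usage sketch of the loader above: point LLAMA_CPP_LIB at a custom
# build of the shared library *before* importing this module. The path below
# is a hypothetical example, not a file this package ships.
#
#     import os
#     os.environ["LLAMA_CPP_LIB"] = "/opt/llama.cpp/libllama.so"
#     import llama_cpp  # _load_shared_library picks up the override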

# C types
LLAMA_FILE_VERSION = c_int(2)
LLAMA_FILE_MAGIC = b"ggjt"
LLAMA_FILE_MAGIC_UNVERSIONED = b"ggml"
LLAMA_SESSION_MAGIC = b"ggsn"
LLAMA_SESSION_VERSION = c_int(1)

llama_context_p = c_void_p

llama_token = c_int
llama_token_p = POINTER(llama_token)


class llama_token_data(Structure):
    _fields_ = [
        ("id", llama_token),  # token id
        ("logit", c_float),  # log-odds of the token
        ("p", c_float),  # probability of the token
    ]


llama_token_data_p = POINTER(llama_token_data)


class llama_token_data_array(Structure):
    _fields_ = [
        ("data", llama_token_data_p),
        ("size", c_size_t),
        ("sorted", c_bool),
    ]


llama_token_data_array_p = POINTER(llama_token_data_array)

llama_progress_callback = ctypes.CFUNCTYPE(None, c_float, c_void_p)
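
# Hypothetical sketch: wrapping a Python function as the progress callback
# carried by llama_context_params. The callback receives a float in [0, 1]
# and the user-data pointer; keep a reference to the wrapped function alive
# for as long as the context uses it.
#
#     @llama_progress_callback
#     def _print_progress(progress, user_data):
#         print(f"load progress: {progress:.0%}")
#
#     params = llama_context_default_params()
#     params.progress_callback = _print_progress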


class llama_context_params(Structure):
    _fields_ = [
        ("n_ctx", c_int),  # text context
        ("n_parts", c_int),  # -1 for default
        ("n_gpu_layers", c_int),  # number of layers to store in VRAM
        ("seed", c_int),  # RNG seed, 0 for random
        ("f16_kv", c_bool),  # use fp16 for KV cache
        (
            "logits_all",
            c_bool,
        ),  # the llama_eval() call computes all logits, not just the last one
        ("vocab_only", c_bool),  # only load the vocabulary, no weights
        ("use_mmap", c_bool),  # use mmap if possible
        ("use_mlock", c_bool),  # force system to keep model in RAM
        ("embedding", c_bool),  # embedding mode only
        # called with a progress value between 0 and 1, pass NULL to disable
        ("progress_callback", llama_progress_callback),
        # context pointer passed to the progress callback
        ("progress_callback_user_data", c_void_p),
    ]


llama_context_params_p = POINTER(llama_context_params)

LLAMA_FTYPE_ALL_F32 = c_int(0)
LLAMA_FTYPE_MOSTLY_F16 = c_int(1)  # except 1d tensors
LLAMA_FTYPE_MOSTLY_Q4_0 = c_int(2)  # except 1d tensors
LLAMA_FTYPE_MOSTLY_Q4_1 = c_int(3)  # except 1d tensors
LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = c_int(
    4
)  # tok_embeddings.weight and output.weight are F16
# LLAMA_FTYPE_MOSTLY_Q4_2 = c_int(5)  # except 1d tensors
# LLAMA_FTYPE_MOSTLY_Q4_3 = c_int(6)  # except 1d tensors
LLAMA_FTYPE_MOSTLY_Q8_0 = c_int(7)  # except 1d tensors
LLAMA_FTYPE_MOSTLY_Q5_0 = c_int(8)  # except 1d tensors
LLAMA_FTYPE_MOSTLY_Q5_1 = c_int(9)  # except 1d tensors


# Misc
c_float_p = POINTER(c_float)
c_uint8_p = POINTER(c_uint8)
c_size_t_p = POINTER(c_size_t)

# Functions


def llama_context_default_params() -> llama_context_params:
    return _lib.llama_context_default_params()


_lib.llama_context_default_params.argtypes = []
_lib.llama_context_default_params.restype = llama_context_params


def llama_mmap_supported() -> bool:
    return _lib.llama_mmap_supported()


_lib.llama_mmap_supported.argtypes = []
_lib.llama_mmap_supported.restype = c_bool


def llama_mlock_supported() -> bool:
    return _lib.llama_mlock_supported()


_lib.llama_mlock_supported.argtypes = []
_lib.llama_mlock_supported.restype = c_bool


# Various functions for loading a ggml llama model.
# Allocate (almost) all memory needed for the model.
# Return NULL on failure
def llama_init_from_file(
    path_model: bytes, params: llama_context_params
) -> llama_context_p:
    return _lib.llama_init_from_file(path_model, params)


_lib.llama_init_from_file.argtypes = [c_char_p, llama_context_params]
_lib.llama_init_from_file.restype = llama_context_p
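
# A minimal load sketch using the two calls above. The model path is a
# hypothetical placeholder; it is passed as bytes because the C API takes a
# char pointer.
#
#     params = llama_context_default_params()
#     params.n_ctx = 512
#     ctx = llama_init_from_file(b"./models/7B/ggml-model.bin", params)
#     if not ctx:
#         raise RuntimeError("llama_init_from_file returned NULL")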


# Frees all allocated memory
def llama_free(ctx: llama_context_p):
    _lib.llama_free(ctx)


_lib.llama_free.argtypes = [llama_context_p]
_lib.llama_free.restype = None


# TODO: not great API - very likely to change
# Returns 0 on success
# nthread - how many threads to use. If <=0, will use std::thread::hardware_concurrency(), else the number given
def llama_model_quantize(
    fname_inp: bytes, fname_out: bytes, ftype: c_int, nthread: c_int
) -> c_int:
    return _lib.llama_model_quantize(fname_inp, fname_out, ftype, nthread)


_lib.llama_model_quantize.argtypes = [c_char_p, c_char_p, c_int, c_int]
_lib.llama_model_quantize.restype = c_int
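
# Quantization sketch with hypothetical input/output paths.
# LLAMA_FTYPE_MOSTLY_Q4_0 selects 4-bit quantization, and nthread=0 lets the
# C side pick a thread count.
#
#     ret = llama_model_quantize(
#         b"./models/7B/ggml-model-f16.bin",
#         b"./models/7B/ggml-model-q4_0.bin",
#         LLAMA_FTYPE_MOSTLY_Q4_0,
#         c_int(0),
#     )
#     assert ret == 0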


# Apply a LoRA adapter to a loaded model
# path_base_model is the path to a higher quality model to use as a base for
# the layers modified by the adapter. Can be NULL to use the current loaded model.
# The model needs to be reloaded before applying a new adapter, otherwise the adapter
# will be applied on top of the previous one
# Returns 0 on success
def llama_apply_lora_from_file(
    ctx: llama_context_p,
    path_lora: c_char_p,
    path_base_model: c_char_p,
    n_threads: c_int,
) -> c_int:
    return _lib.llama_apply_lora_from_file(ctx, path_lora, path_base_model, n_threads)


_lib.llama_apply_lora_from_file.argtypes = [llama_context_p, c_char_p, c_char_p, c_int]
_lib.llama_apply_lora_from_file.restype = c_int
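
# LoRA sketch with a hypothetical adapter path; passing None for
# path_base_model maps to NULL, i.e. apply the adapter over the currently
# loaded weights.
#
#     ret = llama_apply_lora_from_file(ctx, b"./lora/adapter.bin", None, c_int(4))
#     assert ret == 0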


# Returns the number of tokens in the KV cache
def llama_get_kv_cache_token_count(ctx: llama_context_p) -> c_int:
    return _lib.llama_get_kv_cache_token_count(ctx)


_lib.llama_get_kv_cache_token_count.argtypes = [llama_context_p]
_lib.llama_get_kv_cache_token_count.restype = c_int


# Sets the current rng seed.
def llama_set_rng_seed(ctx: llama_context_p, seed: c_int):
    return _lib.llama_set_rng_seed(ctx, seed)


_lib.llama_set_rng_seed.argtypes = [llama_context_p, c_int]
_lib.llama_set_rng_seed.restype = None


# Returns the maximum size in bytes of the state (rng, logits, embedding
# and kv_cache) - will often be smaller after compacting tokens
def llama_get_state_size(ctx: llama_context_p) -> c_size_t:
    return _lib.llama_get_state_size(ctx)


_lib.llama_get_state_size.argtypes = [llama_context_p]
_lib.llama_get_state_size.restype = c_size_t


# Copies the state to the specified destination address.
# Destination needs to have allocated enough memory.
# Returns the number of bytes copied
def llama_copy_state_data(
    ctx: llama_context_p, dst  # type: Array[c_uint8]
) -> int:
    return _lib.llama_copy_state_data(ctx, dst)


_lib.llama_copy_state_data.argtypes = [llama_context_p, c_uint8_p]
_lib.llama_copy_state_data.restype = c_size_t


# Set the state reading from the specified address
# Returns the number of bytes read
def llama_set_state_data(
    ctx: llama_context_p, src  # type: Array[c_uint8]
) -> int:
    return _lib.llama_set_state_data(ctx, src)


_lib.llama_set_state_data.argtypes = [llama_context_p, c_uint8_p]
_lib.llama_set_state_data.restype = c_size_t
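
# Sketch of a state round-trip with the three calls above, assuming ctx is a
# live context. The buffer size comes from llama_get_state_size.
#
#     n = llama_get_state_size(ctx)
#     buf = (c_uint8 * n)()
#     n_copied = llama_copy_state_data(ctx, buf)  # snapshot
#     ...                                         # run more inference
#     n_read = llama_set_state_data(ctx, buf)     # restore the snapshot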


# Save/load session file
def llama_load_session_file(
    ctx: llama_context_p,
    path_session: bytes,
    tokens_out,  # type: Array[llama_token]
    n_token_capacity: c_size_t,
    n_token_count_out,  # type: _Pointer[c_size_t]
) -> c_size_t:
    return _lib.llama_load_session_file(
        ctx, path_session, tokens_out, n_token_capacity, n_token_count_out
    )


_lib.llama_load_session_file.argtypes = [
    llama_context_p,
    c_char_p,
    llama_token_p,
    c_size_t,
    c_size_t_p,
]
_lib.llama_load_session_file.restype = c_size_t


def llama_save_session_file(
    ctx: llama_context_p,
    path_session: bytes,
    tokens,  # type: Array[llama_token]
    n_token_count: c_size_t,
) -> c_size_t:
    return _lib.llama_save_session_file(ctx, path_session, tokens, n_token_count)


_lib.llama_save_session_file.argtypes = [
    llama_context_p,
    c_char_p,
    llama_token_p,
    c_size_t,
]
_lib.llama_save_session_file.restype = c_size_t
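
# Session round-trip sketch; the path and capacity are illustrative. tokens is
# an Array[llama_token] holding prompt tokens already evaluated on ctx, and
# n_out receives the number of tokens actually restored.
#
#     llama_save_session_file(ctx, b"./session.bin", tokens, c_size_t(n_tokens))
#     n_out = c_size_t(0)
#     tokens_out = (llama_token * n_ctx)()
#     llama_load_session_file(
#         ctx, b"./session.bin", tokens_out, c_size_t(n_ctx), ctypes.byref(n_out)
#     )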


# Run the llama inference to obtain the logits and probabilities for the next token.
# tokens + n_tokens is the provided batch of new tokens to process
# n_past is the number of tokens to use from previous eval calls
# Returns 0 on success
def llama_eval(
    ctx: llama_context_p,
    tokens,  # type: Array[llama_token]
    n_tokens: c_int,
    n_past: c_int,
    n_threads: c_int,
) -> c_int:
    return _lib.llama_eval(ctx, tokens, n_tokens, n_past, n_threads)


_lib.llama_eval.argtypes = [llama_context_p, llama_token_p, c_int, c_int, c_int]
_lib.llama_eval.restype = c_int


# Convert the provided text into tokens.
# The tokens pointer must be large enough to hold the resulting tokens.
# Returns the number of tokens on success, no more than n_max_tokens
# Returns a negative number on failure - the number of tokens that would have been returned
# TODO: not sure if correct
def llama_tokenize(
    ctx: llama_context_p,
    text: bytes,
    tokens,  # type: Array[llama_token]
    n_max_tokens: c_int,
    add_bos: c_bool,
) -> int:
    return _lib.llama_tokenize(ctx, text, tokens, n_max_tokens, add_bos)


_lib.llama_tokenize.argtypes = [llama_context_p, c_char_p, llama_token_p, c_int, c_bool]
_lib.llama_tokenize.restype = c_int
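
# Tokenize-then-eval sketch, assuming ctx was created as above. The token
# buffer is sized to the context length; n_threads=4 is an arbitrary choice.
#
#     n_ctx = llama_n_ctx(ctx)
#     tokens = (llama_token * n_ctx)()
#     n_tokens = llama_tokenize(
#         ctx, b"Hello, world", tokens, c_int(n_ctx), c_bool(True)
#     )
#     ret = llama_eval(ctx, tokens, c_int(n_tokens), c_int(0), c_int(4))
#     assert ret == 0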


def llama_n_vocab(ctx: llama_context_p) -> c_int:
    return _lib.llama_n_vocab(ctx)


_lib.llama_n_vocab.argtypes = [llama_context_p]
_lib.llama_n_vocab.restype = c_int


def llama_n_ctx(ctx: llama_context_p) -> c_int:
    return _lib.llama_n_ctx(ctx)


_lib.llama_n_ctx.argtypes = [llama_context_p]
_lib.llama_n_ctx.restype = c_int


def llama_n_embd(ctx: llama_context_p) -> c_int:
    return _lib.llama_n_embd(ctx)


_lib.llama_n_embd.argtypes = [llama_context_p]
_lib.llama_n_embd.restype = c_int


# Token logits obtained from the last call to llama_eval()
# The logits for the last token are stored in the last row
# Can be mutated in order to change the probabilities of the next token
# Rows: n_tokens
# Cols: n_vocab
def llama_get_logits(
    ctx: llama_context_p,
):  # type: (...) -> Array[float] # type: ignore
    return _lib.llama_get_logits(ctx)


_lib.llama_get_logits.argtypes = [llama_context_p]
_lib.llama_get_logits.restype = c_float_p
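
# Sketch: turn the raw logits pointer into a llama_token_data_array for the
# sampling functions below. Assumes a prior successful llama_eval on ctx.
#
#     n_vocab = llama_n_vocab(ctx)
#     logits = llama_get_logits(ctx)  # logits for the next token
#     data = (llama_token_data * n_vocab)()
#     for i in range(n_vocab):
#         data[i] = llama_token_data(i, logits[i], 0.0)
#     candidates = llama_token_data_array(data, c_size_t(n_vocab), c_bool(False))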


# Get the embeddings for the input
# shape: [n_embd] (1-dimensional)
def llama_get_embeddings(
    ctx: llama_context_p,
):  # type: (...) -> Array[float] # type: ignore
    return _lib.llama_get_embeddings(ctx)


_lib.llama_get_embeddings.argtypes = [llama_context_p]
_lib.llama_get_embeddings.restype = c_float_p


# Token Id -> String. Uses the vocabulary in the provided context
def llama_token_to_str(ctx: llama_context_p, token: llama_token) -> bytes:
    return _lib.llama_token_to_str(ctx, token)


_lib.llama_token_to_str.argtypes = [llama_context_p, llama_token]
_lib.llama_token_to_str.restype = c_char_p


# Special tokens
def llama_token_bos() -> llama_token:
    return _lib.llama_token_bos()


_lib.llama_token_bos.argtypes = []
_lib.llama_token_bos.restype = llama_token


def llama_token_eos() -> llama_token:
    return _lib.llama_token_eos()


_lib.llama_token_eos.argtypes = []
_lib.llama_token_eos.restype = llama_token


def llama_token_nl() -> llama_token:
    return _lib.llama_token_nl()


_lib.llama_token_nl.argtypes = []
_lib.llama_token_nl.restype = llama_token


# Sampling functions


# @details Repetition penalty described in CTRL academic paper https://arxiv.org/abs/1909.05858, with negative logit fix.
def llama_sample_repetition_penalty(
    ctx: llama_context_p,
    candidates,  # type: _Pointer[llama_token_data_array]
    last_tokens_data,  # type: Array[llama_token]
    last_tokens_size: c_int,
    penalty: c_float,
):
    return _lib.llama_sample_repetition_penalty(
        ctx, candidates, last_tokens_data, last_tokens_size, penalty
    )


_lib.llama_sample_repetition_penalty.argtypes = [
    llama_context_p,
    llama_token_data_array_p,
    llama_token_p,
    c_int,
    c_float,
]
_lib.llama_sample_repetition_penalty.restype = None


# @details Frequency and presence penalties described in OpenAI API https://platform.openai.com/docs/api-reference/parameter-details.
def llama_sample_frequency_and_presence_penalties(
    ctx: llama_context_p,
    candidates,  # type: _Pointer[llama_token_data_array]
    last_tokens_data,  # type: Array[llama_token]
    last_tokens_size: c_int,
    alpha_frequency: c_float,
    alpha_presence: c_float,
):
    return _lib.llama_sample_frequency_and_presence_penalties(
        ctx,
        candidates,
        last_tokens_data,
        last_tokens_size,
        alpha_frequency,
        alpha_presence,
    )


_lib.llama_sample_frequency_and_presence_penalties.argtypes = [
    llama_context_p,
    llama_token_data_array_p,
    llama_token_p,
    c_int,
    c_float,
    c_float,
]
_lib.llama_sample_frequency_and_presence_penalties.restype = None


# @details Sorts candidate tokens by their logits in descending order and calculates probabilities based on logits.
def llama_sample_softmax(
    ctx: llama_context_p, candidates  # type: _Pointer[llama_token_data_array]
):
    return _lib.llama_sample_softmax(ctx, candidates)


_lib.llama_sample_softmax.argtypes = [
    llama_context_p,
    llama_token_data_array_p,
]
_lib.llama_sample_softmax.restype = None


# @details Top-K sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751
def llama_sample_top_k(
    ctx: llama_context_p,
    candidates,  # type: _Pointer[llama_token_data_array]
    k: c_int,
    min_keep: c_size_t,
):
    return _lib.llama_sample_top_k(ctx, candidates, k, min_keep)


_lib.llama_sample_top_k.argtypes = [
    llama_context_p,
    llama_token_data_array_p,
    c_int,
    c_size_t,
]
_lib.llama_sample_top_k.restype = None


# @details Nucleus sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751
def llama_sample_top_p(
    ctx: llama_context_p,
    candidates,  # type: _Pointer[llama_token_data_array]
    p: c_float,
    min_keep: c_size_t,
):
    return _lib.llama_sample_top_p(ctx, candidates, p, min_keep)


_lib.llama_sample_top_p.argtypes = [
    llama_context_p,
    llama_token_data_array_p,
    c_float,
    c_size_t,
]
_lib.llama_sample_top_p.restype = None


# @details Tail Free Sampling described in https://www.trentonbricken.com/Tail-Free-Sampling/.
def llama_sample_tail_free(
    ctx: llama_context_p,
    candidates,  # type: _Pointer[llama_token_data_array]
    z: c_float,
    min_keep: c_size_t,
):
    return _lib.llama_sample_tail_free(ctx, candidates, z, min_keep)


_lib.llama_sample_tail_free.argtypes = [
    llama_context_p,
    llama_token_data_array_p,
    c_float,
    c_size_t,
]
_lib.llama_sample_tail_free.restype = None


# @details Locally Typical Sampling implementation described in the paper https://arxiv.org/abs/2202.00666.
def llama_sample_typical(
    ctx: llama_context_p,
    candidates,  # type: _Pointer[llama_token_data_array]
    p: c_float,
    min_keep: c_size_t,
):
    return _lib.llama_sample_typical(ctx, candidates, p, min_keep)


_lib.llama_sample_typical.argtypes = [
    llama_context_p,
    llama_token_data_array_p,
    c_float,
    c_size_t,
]
_lib.llama_sample_typical.restype = None


def llama_sample_temperature(
    ctx: llama_context_p,
    candidates,  # type: _Pointer[llama_token_data_array]
    temp: c_float,
):
    return _lib.llama_sample_temperature(ctx, candidates, temp)


_lib.llama_sample_temperature.argtypes = [
    llama_context_p,
    llama_token_data_array_p,
    c_float,
]
_lib.llama_sample_temperature.restype = None


# @details Mirostat 1.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.
# @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.
# @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.
# @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.
# @param m The number of tokens considered in the estimation of `s_hat`. This is an arbitrary value that is used to calculate `s_hat`, which in turn helps to calculate the value of `k`. In the paper, they use `m = 100`, but you can experiment with different values to see how it affects the performance of the algorithm.
# @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal.
def llama_sample_token_mirostat(
    ctx: llama_context_p,
    candidates,  # type: _Pointer[llama_token_data_array]
    tau: c_float,
    eta: c_float,
    m: c_int,
    mu,  # type: _Pointer[c_float]
) -> llama_token:
    return _lib.llama_sample_token_mirostat(ctx, candidates, tau, eta, m, mu)


_lib.llama_sample_token_mirostat.argtypes = [
    llama_context_p,
    llama_token_data_array_p,
    c_float,
    c_float,
    c_int,
    c_float_p,
]
_lib.llama_sample_token_mirostat.restype = llama_token


# @details Mirostat 2.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.
# @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.
# @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.
# @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.
# @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal.
def llama_sample_token_mirostat_v2(
    ctx: llama_context_p,
    candidates,  # type: _Pointer[llama_token_data_array]
    tau: c_float,
    eta: c_float,
    mu,  # type: _Pointer[c_float]
) -> llama_token:
    return _lib.llama_sample_token_mirostat_v2(ctx, candidates, tau, eta, mu)


_lib.llama_sample_token_mirostat_v2.argtypes = [
    llama_context_p,
    llama_token_data_array_p,
    c_float,
    c_float,
    c_float_p,
]
_lib.llama_sample_token_mirostat_v2.restype = llama_token
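
# Mirostat 2.0 usage sketch: mu must persist across sampling steps, so keep a
# c_float initialized to 2 * tau and pass it by reference on every call. The
# tau/eta values are illustrative.
#
#     tau, eta = 5.0, 0.1
#     mu = c_float(2.0 * tau)
#     token = llama_sample_token_mirostat_v2(
#         ctx, ctypes.byref(candidates), c_float(tau), c_float(eta), ctypes.byref(mu)
#     )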


# @details Selects the token with the highest probability.
def llama_sample_token_greedy(
    ctx: llama_context_p,
    candidates,  # type: _Pointer[llama_token_data_array]
) -> llama_token:
    return _lib.llama_sample_token_greedy(ctx, candidates)


_lib.llama_sample_token_greedy.argtypes = [
    llama_context_p,
    llama_token_data_array_p,
]
_lib.llama_sample_token_greedy.restype = llama_token


# @details Randomly selects a token from the candidates based on their probabilities.
def llama_sample_token(
    ctx: llama_context_p,
    candidates,  # type: _Pointer[llama_token_data_array]
) -> llama_token:
    return _lib.llama_sample_token(ctx, candidates)


_lib.llama_sample_token.argtypes = [
    llama_context_p,
    llama_token_data_array_p,
]
_lib.llama_sample_token.restype = llama_token
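
# Sketch of a typical sampling chain over the candidates array built from
# llama_get_logits (see the sketch above); ctypes.byref passes the struct by
# pointer. The k/p/temp values are illustrative defaults, not prescribed
# settings.
#
#     candidates_p = ctypes.byref(candidates)
#     llama_sample_top_k(ctx, candidates_p, c_int(40), c_size_t(1))
#     llama_sample_top_p(ctx, candidates_p, c_float(0.95), c_size_t(1))
#     llama_sample_temperature(ctx, candidates_p, c_float(0.8))
#     token = llama_sample_token(ctx, candidates_p)
#     print(llama_token_to_str(ctx, token).decode("utf-8", errors="ignore"))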


# Performance information
def llama_print_timings(ctx: llama_context_p):
    _lib.llama_print_timings(ctx)


_lib.llama_print_timings.argtypes = [llama_context_p]
_lib.llama_print_timings.restype = None


def llama_reset_timings(ctx: llama_context_p):
    _lib.llama_reset_timings(ctx)


_lib.llama_reset_timings.argtypes = [llama_context_p]
_lib.llama_reset_timings.restype = None


# Print system information
def llama_print_system_info() -> bytes:
    return _lib.llama_print_system_info()


_lib.llama_print_system_info.argtypes = []
_lib.llama_print_system_info.restype = c_char_p