docs: Add docstrings from llama.cpp

Andrei Betlen 2023-11-23 00:26:26 -05:00
parent 31cf0ec680
commit be1f64d569


@@ -272,6 +272,19 @@ llama_progress_callback = ctypes.CFUNCTYPE(None, c_float, c_void_p)
# llama_seq_id all_seq_id; // used if seq_id == NULL
# } llama_batch;
class llama_batch(Structure):
"""Input data for llama_decode
A llama_batch object can contain input about one or many sequences
The provided arrays (i.e. token, embd, pos, etc.) must have size of n_tokens
Attributes:
token (ctypes.Array[llama_token]): the token ids of the input (used when embd is NULL)
embd (ctypes.Array[ctypes.c_float]): token embeddings (i.e. float vector of size n_embd) (used when token is NULL)
pos (ctypes.Array[llama_pos]): the positions of the respective token in the sequence
seq_id (ctypes.Array[ctypes.Array[llama_seq_id]]): the sequence to which the respective token belongs
"""
_fields_ = [
("n_tokens", c_int32),
("token", POINTER(llama_token)),
@@ -368,6 +381,11 @@ class llama_context_params(Structure):
# // It might not exist for progress report where '.' is output repeatedly.
# typedef void (*llama_log_callback)(enum llama_log_level level, const char * text, void * user_data);
llama_log_callback = ctypes.CFUNCTYPE(None, c_int, c_char_p, c_void_p)
"""Signature for logging events
Note that text includes the new line character at the end for most events.
If your logging mechanism cannot handle that, check if the last character is '\n' and strip it
if it exists.
It might not exist for progress report where '.' is output repeatedly."""
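
A minimal sketch of routing these log events to Python. It assumes the low-level bindings are importable as llama_cpp.llama_cpp and that llama_log_set from the same module is used to register the callback; the callback object must stay referenced so ctypes does not garbage-collect it:

    import ctypes
    import llama_cpp.llama_cpp as llama_cpp

    @llama_cpp.llama_log_callback
    def _py_log_callback(level, text, user_data):
        # text is bytes; most events end with a newline, progress dots may not
        message = text.decode("utf-8").rstrip("\n")
        if message:
            print(f"[llama level={level}] {message}")

    # Keep _py_log_callback referenced for the life of the program, then register it.
    llama_cpp.llama_log_set(_py_log_callback, ctypes.c_void_p(0))
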
# // model quantization parameters
@@ -501,6 +519,9 @@ _lib.llama_model_quantize_default_params.restype = llama_model_quantize_params
# // Call once at the start of the program
# LLAMA_API void llama_backend_init(bool numa);
def llama_backend_init(numa: Union[c_bool, bool]):
"""Initialize the llama + ggml backend
If numa is true, use NUMA optimizations
Call once at the start of the program"""
return _lib.llama_backend_init(numa)
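
A minimal program-lifecycle sketch (llama_cpp below refers to the low-level llama_cpp.llama_cpp module):

    import llama_cpp.llama_cpp as llama_cpp

    llama_cpp.llama_backend_init(numa=False)   # once, at program start
    try:
        ...                                    # load models, create contexts, decode
    finally:
        llama_cpp.llama_backend_free()         # once, at program end (MPI cleanup only)
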
@@ -511,6 +532,7 @@ _lib.llama_backend_init.restype = None
# // Call once at the end of the program - currently only used for MPI
# LLAMA_API void llama_backend_free(void);
def llama_backend_free():
"""Call once at the end of the program - currently only used for MPI"""
return _lib.llama_backend_free()
@@ -556,6 +578,7 @@ _lib.llama_new_context_with_model.restype = llama_context_p
# // Frees all allocated memory
# LLAMA_API void llama_free(struct llama_context * ctx);
def llama_free(ctx: llama_context_p):
"""Frees all allocated memory"""
return _lib.llama_free(ctx)
@@ -656,6 +679,7 @@ _lib.llama_n_embd.restype = c_int
# // Get the model's RoPE frequency scaling factor
# LLAMA_API float llama_rope_freq_scale_train(const struct llama_model * model);
def llama_rope_freq_scale_train(model: llama_model_p) -> float:
"""Get the model's RoPE frequency scaling factor"""
return _lib.llama_rope_freq_scale_train(model)
@@ -673,6 +697,7 @@ _lib.llama_rope_freq_scale_train.restype = c_float
def llama_model_meta_val_str(
model: llama_model_p, key: Union[c_char_p, bytes], buf: bytes, buf_size: int
) -> int:
"""Get metadata value as a string by key name"""
return _lib.llama_model_meta_val_str(model, key, buf, buf_size)
@@ -683,6 +708,7 @@ _lib.llama_model_meta_val_str.restype = c_int
# // Get the number of metadata key/value pairs
# LLAMA_API int llama_model_meta_count(const struct llama_model * model);
def llama_model_meta_count(model: llama_model_p) -> int:
"""Get the number of metadata key/value pairs"""
return _lib.llama_model_meta_count(model)
@@ -695,6 +721,7 @@ _lib.llama_model_meta_count.restype = c_int
def llama_model_meta_key_by_index(
model: llama_model_p, i: Union[c_int, int], buf: bytes, buf_size: int
) -> int:
"""Get metadata key name by index"""
return _lib.llama_model_meta_key_by_index(model, i, buf, buf_size)
@@ -707,6 +734,7 @@ _lib.llama_model_meta_key_by_index.restype = c_int
def llama_model_meta_val_str_by_index(
model: llama_model_p, i: Union[c_int, int], buf: bytes, buf_size: int
) -> int:
"""Get metadata value as a string by index"""
return _lib.llama_model_meta_val_str_by_index(model, i, buf, buf_size)
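
A sketch of enumerating the metadata with these calls, assuming model is an already-loaded llama_model_p and using an arbitrary 1 KiB scratch buffer:

    import ctypes
    import llama_cpp.llama_cpp as llama_cpp

    buf = ctypes.create_string_buffer(1024)  # scratch buffer reused for keys and values
    for i in range(llama_cpp.llama_model_meta_count(model)):
        llama_cpp.llama_model_meta_key_by_index(model, i, buf, ctypes.sizeof(buf))
        key = buf.value.decode("utf-8")
        llama_cpp.llama_model_meta_val_str_by_index(model, i, buf, ctypes.sizeof(buf))
        print(key, "=", buf.value.decode("utf-8"))
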
@@ -724,6 +752,7 @@ _lib.llama_model_meta_val_str_by_index.restype = c_int
def llama_model_desc(
model: llama_model_p, buf: bytes, buf_size: Union[c_size_t, int]
) -> int:
"""Get a string describing the model type"""
return _lib.llama_model_desc(model, buf, buf_size)
@@ -734,6 +763,7 @@ _lib.llama_model_desc.restype = c_int
# // Returns the total size of all the tensors in the model in bytes
# LLAMA_API uint64_t llama_model_size(const struct llama_model * model);
def llama_model_size(model: llama_model_p) -> int:
"""Returns the total size of all the tensors in the model in bytes"""
return _lib.llama_model_size(model)
@@ -744,6 +774,7 @@ _lib.llama_model_size.restype = ctypes.c_uint64
# // Returns the total number of parameters in the model
# LLAMA_API uint64_t llama_model_n_params(const struct llama_model * model);
def llama_model_n_params(model: llama_model_p) -> int:
"""Returns the total number of parameters in the model"""
return _lib.llama_model_n_params(model)
@@ -756,6 +787,7 @@ _lib.llama_model_n_params.restype = ctypes.c_uint64
def llama_get_model_tensor(
model: llama_model_p, name: Union[c_char_p, bytes]
) -> c_void_p:
"""Get a llama model tensor"""
return _lib.llama_get_model_tensor(model, name)
@@ -773,6 +805,7 @@ def llama_model_quantize(
fname_out: bytes,
params, # type: POINTER(llama_model_quantize_params) # type: ignore
) -> int:
"""Returns 0 on success"""
return _lib.llama_model_quantize(fname_inp, fname_out, params)
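
A hedged quantization sketch; the file paths are placeholders and the params come from llama_model_quantize_default_params() referenced earlier in this file:

    import ctypes
    import llama_cpp.llama_cpp as llama_cpp

    params = llama_cpp.llama_model_quantize_default_params()
    params.nthread = 4  # 0 lets llama.cpp pick the hardware concurrency
    params.ftype = 2    # value of LLAMA_FTYPE_MOSTLY_Q4_0 in llama.h
    ret = llama_cpp.llama_model_quantize(
        b"models/7B/ggml-model-f16.gguf",   # placeholder input path
        b"models/7B/ggml-model-q4_0.gguf",  # placeholder output path
        ctypes.byref(params),
    )
    if ret != 0:
        raise RuntimeError(f"quantization failed with code {ret}")
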
@@ -804,6 +837,12 @@ def llama_apply_lora_from_file(
path_base_model: Union[c_char_p, bytes],
n_threads: Union[c_int, int],
) -> int:
"""Apply a LoRA adapter to a loaded model
path_base_model is the path to a higher quality model to use as a base for
the layers modified by the adapter. Can be NULL to use the current loaded model.
The model needs to be reloaded before applying a new adapter, otherwise the adapter
will be applied on top of the previous one
Returns 0 on success"""
return _lib.llama_apply_lora_from_file(
ctx, path_lora, scale, path_base_model, n_threads
)
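
A sketch of applying an adapter through this wrapper; ctx is assumed to be a freshly created llama_context_p and the adapter path is a placeholder:

    import llama_cpp.llama_cpp as llama_cpp

    ret = llama_cpp.llama_apply_lora_from_file(
        ctx,
        b"lora/adapter.bin",  # placeholder adapter path
        1.0,                  # scale
        None,                 # NULL base model: patch the currently loaded weights
        4,                    # n_threads
    )
    if ret != 0:
        raise RuntimeError("failed to apply LoRA adapter")
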
@@ -855,6 +894,7 @@ _lib.llama_model_apply_lora_from_file.restype = c_int
# LLAMA_API DEPRECATED(int llama_get_kv_cache_token_count(const struct llama_context * ctx),
# "avoid using this, it will be removed in the future, instead - count the tokens in user code");
def llama_get_kv_cache_token_count(ctx: llama_context_p) -> int:
"""Returns the number of tokens in the KV cache"""
return _lib.llama_get_kv_cache_token_count(ctx)
@@ -866,6 +906,7 @@ _lib.llama_get_kv_cache_token_count.restype = c_int
# LLAMA_API void llama_kv_cache_clear(
# struct llama_context * ctx);
def llama_kv_cache_clear(ctx: llama_context_p):
"""Clear the KV cache"""
return _lib.llama_kv_cache_clear(ctx)
@@ -888,6 +929,10 @@ def llama_kv_cache_seq_rm(
p0: Union[llama_pos, int],
p1: Union[llama_pos, int],
):
"""Removes all tokens that belong to the specified sequence and have positions in [p0, p1)
seq_id < 0 : match any sequence
p0 < 0 : [0, p1]
p1 < 0 : [p0, inf)"""
return _lib.llama_kv_cache_seq_rm(ctx, seq_id, p0, p1)
@@ -917,6 +962,10 @@ def llama_kv_cache_seq_cp(
p0: Union[llama_pos, int],
p1: Union[llama_pos, int],
):
"""Copy all tokens that belong to the specified sequence to another sequence
Note that this does not allocate extra KV cache memory - it simply assigns the tokens to the new sequence
p0 < 0 : [0, p1]
p1 < 0 : [p0, inf)"""
return _lib.llama_kv_cache_seq_cp(ctx, seq_id_src, seq_id_dst, p0, p1)
@@ -938,6 +987,7 @@ def llama_kv_cache_seq_keep(
ctx: llama_context_p,
seq_id: Union[llama_seq_id, int],
):
"""Removes all tokens that do not belong to the specified sequence"""
return _lib.llama_kv_cache_seq_keep(ctx, seq_id)
@@ -962,6 +1012,10 @@ def llama_kv_cache_seq_shift(
p1: Union[llama_pos, int],
delta: Union[llama_pos, int],
):
"""Adds relative position "delta" to all tokens that belong to the specified sequence and have positions in [p0, p1)
If the KV cache is RoPEd, the KV data is updated accordingly
p0 < 0 : [0, p1]
p1 < 0 : [p0, inf)"""
return _lib.llama_kv_cache_seq_shift(ctx, seq_id, p0, p1, delta)
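
A sketch combining the sequence-level cache calls above; ctx is an existing llama_context_p that already holds decoded tokens for sequence 0, and the positions are arbitrary:

    import llama_cpp.llama_cpp as llama_cpp

    llama_cpp.llama_kv_cache_seq_cp(ctx, 0, 1, 0, -1)      # share seq 0's cells with seq 1 over [0, inf)
    llama_cpp.llama_kv_cache_seq_rm(ctx, 0, 32, -1)        # drop seq 0 positions [32, inf)
    llama_cpp.llama_kv_cache_seq_shift(ctx, 1, 16, -1, -8) # slide seq 1 positions [16, inf) back by 8
    llama_cpp.llama_kv_cache_seq_keep(ctx, 1)              # keep only seq 1 in the cache
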
@@ -983,6 +1037,8 @@ _lib.llama_kv_cache_seq_shift.restype = None
# and kv_cache) - will often be smaller after compacting tokens
# LLAMA_API size_t llama_get_state_size(const struct llama_context * ctx);
def llama_get_state_size(ctx: llama_context_p) -> int:
"""Returns the maximum size in bytes of the state (rng, logits, embedding
and kv_cache) - will often be smaller after compacting tokens"""
return _lib.llama_get_state_size(ctx)
@@ -999,6 +1055,9 @@ _lib.llama_get_state_size.restype = c_size_t
def llama_copy_state_data(
ctx: llama_context_p, dst # type: Array[c_uint8]
) -> int:
"""Copies the state to the specified destination address.
Destination needs to have allocated enough memory.
Returns the number of bytes copied"""
return _lib.llama_copy_state_data(ctx, dst)
@@ -1014,6 +1073,7 @@ _lib.llama_copy_state_data.restype = c_size_t
def llama_set_state_data(
ctx: llama_context_p, src # type: Array[c_uint8]
) -> int:
"""Set the state reading from the specified address"""
return _lib.llama_set_state_data(ctx, src)
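
A snapshot/restore sketch built from the three calls above; ctx is an existing llama_context_p:

    import ctypes
    import llama_cpp.llama_cpp as llama_cpp

    state_size = llama_cpp.llama_get_state_size(ctx)
    state_buf = (ctypes.c_uint8 * state_size)()
    n_copied = llama_cpp.llama_copy_state_data(ctx, state_buf)  # bytes actually written

    # ... decode more tokens, then roll the same context back to the snapshot ...
    llama_cpp.llama_set_state_data(ctx, state_buf)
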
@@ -1094,6 +1154,11 @@ def llama_eval(
n_tokens: Union[c_int, int],
n_past: Union[c_int, int],
) -> int:
"""Run the llama inference to obtain the logits and probabilities for the next token(s).
tokens + n_tokens is the provided batch of new tokens to process
n_past is the number of tokens to use from previous eval calls
Returns 0 on success
DEPRECATED: use llama_decode() instead"""
return _lib.llama_eval(ctx, tokens, n_tokens, n_past)
@@ -1115,6 +1180,8 @@ def llama_eval_embd(
n_tokens: Union[c_int, int],
n_past: Union[c_int, int],
) -> int:
"""Same as llama_eval, but use float matrix input directly.
DEPRECATED: use llama_decode() instead"""
return _lib.llama_eval_embd(ctx, embd, n_tokens, n_past)
@@ -1137,6 +1204,9 @@ def llama_batch_get_one(
pos_0: Union[llama_pos, int],
seq_id: llama_seq_id,
) -> llama_batch:
"""Return batch for single sequence of tokens starting at pos_0
NOTE: this is a helper function to facilitate transition to the new batch API - avoid using it"""
return _lib.llama_batch_get_one(tokens, n_tokens, pos_0, seq_id)
@@ -1165,6 +1235,13 @@ def llama_batch_init(
embd: Union[c_int32, int],
n_seq_max: Union[c_int32, int],
) -> llama_batch:
"""Allocates a batch of tokens on the heap that can hold a maximum of n_tokens
Each token can be assigned up to n_seq_max sequence ids
The batch has to be freed with llama_batch_free()
If embd != 0, llama_batch.embd will be allocated with size of n_tokens * embd * sizeof(float)
Otherwise, llama_batch.token will be allocated to store n_tokens llama_token
The rest of the llama_batch members are allocated with size n_tokens
All members are left uninitialized"""
return _lib.llama_batch_init(n_tokens, embd, n_seq_max)
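
A sketch of allocating and filling a token batch as described above; the token ids are placeholders (in practice they come from llama_tokenize) and sequence 0 is used throughout:

    import llama_cpp.llama_cpp as llama_cpp

    prompt_tokens = [1, 15043, 3186]  # placeholder token ids
    n_tokens = len(prompt_tokens)

    batch = llama_cpp.llama_batch_init(n_tokens, 0, 1)  # token input, one seq id per token
    batch.n_tokens = n_tokens
    for i, tok in enumerate(prompt_tokens):
        batch.token[i] = tok
        batch.pos[i] = i
        batch.n_seq_id[i] = 1
        batch.seq_id[i][0] = 0
        batch.logits[i] = 1 if i == n_tokens - 1 else 0  # request logits only for the last token
    # ... pass the batch to llama_decode(), then release it ...
    llama_cpp.llama_batch_free(batch)
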
@@ -1175,6 +1252,7 @@ _lib.llama_batch_init.restype = llama_batch
# // Frees a batch of tokens allocated with llama_batch_init()
# LLAMA_API void llama_batch_free(struct llama_batch batch);
def llama_batch_free(batch: llama_batch):
"""Frees a batch of tokens allocated with llama_batch_init()"""
return _lib.llama_batch_free(batch)
@@ -1190,6 +1268,10 @@ _lib.llama_batch_free.restype = None
# struct llama_context * ctx,
# struct llama_batch batch);
def llama_decode(ctx: llama_context_p, batch: llama_batch) -> int:
"""Positive return values does not mean a fatal error, but rather a warning.
0 - success
1 - could not find a KV slot for the batch (try reducing the size of the batch or increase the context)
< 0 - error"""
return _lib.llama_decode(ctx, batch)
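
A sketch of checking these return codes; ctx is an existing llama_context_p and batch a filled llama_batch (see the llama_batch_init sketch above):

    import llama_cpp.llama_cpp as llama_cpp

    ret = llama_cpp.llama_decode(ctx, batch)
    if ret == 1:
        print("no KV slot found - reduce the batch size or enlarge the context")
    elif ret < 0:
        raise RuntimeError(f"llama_decode failed with code {ret}")
    # ret == 0: success, the requested logits are now available
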
@@ -1206,6 +1288,9 @@ def llama_set_n_threads(
n_threads: Union[c_uint32, int],
n_threads_batch: Union[c_uint32, int],
):
"""Set the number of threads used for decoding
n_threads is the number of threads used for generation (single token)
n_threads_batch is the number of threads used for prompt and batch processing (multiple tokens)"""
return _lib.llama_set_n_threads(ctx, n_threads, n_threads_batch)
@@ -1222,6 +1307,11 @@ _lib.llama_set_n_threads.restype = None
def llama_get_logits(
ctx: llama_context_p,
): # type: (...) -> Array[float] # type: ignore
"""Token logits obtained from the last call to llama_eval()
The logits for the last token are stored in the last row
Logits for which llama_batch.logits[i] == 0 are undefined
Rows: n_tokens provided with llama_batch
Cols: n_vocab"""
return _lib.llama_get_logits(ctx)
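
A sketch of reading the last row, assuming model/ctx are existing handles, batch is the llama_batch that was just decoded, and llama_n_vocab(model) from the same bindings gives the row width:

    import llama_cpp.llama_cpp as llama_cpp

    n_vocab = llama_cpp.llama_n_vocab(model)
    logits = llama_cpp.llama_get_logits_ith(ctx, batch.n_tokens - 1)  # last token's row
    last_row = [logits[j] for j in range(n_vocab)]                    # copy out of the C buffer
    greedy_token = max(range(n_vocab), key=lambda j: last_row[j])
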
@@ -1235,6 +1325,8 @@ _lib.llama_get_logits.restype = c_float_p
def llama_get_logits_ith(
ctx: llama_context_p, i: Union[c_int32, int]
): # type: (...) -> Array[float] # type: ignore
"""Logits for the ith token. Equivalent to:
llama_get_logits(ctx) + i*n_vocab"""
return _lib.llama_get_logits_ith(ctx, i)
@@ -1248,6 +1340,8 @@ _lib.llama_get_logits_ith.restype = c_float_p
def llama_get_embeddings(
ctx: llama_context_p,
): # type: (...) -> Array[float] # type: ignore
"""Get the embeddings for the input
shape: [n_embd] (1-dimensional)"""
return _lib.llama_get_embeddings(ctx)
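
A sketch of copying the embedding out, assuming the context was created with embeddings enabled and llama_n_embd(model) gives the vector length:

    import llama_cpp.llama_cpp as llama_cpp

    n_embd = llama_cpp.llama_n_embd(model)
    embd_ptr = llama_cpp.llama_get_embeddings(ctx)
    embedding = [embd_ptr[j] for j in range(n_embd)]  # n_embd floats, 1-dimensional
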
@@ -1294,6 +1388,7 @@ _lib.llama_token_get_type.restype = ctypes.c_int
# LLAMA_API llama_token llama_token_bos(const struct llama_model * model); // beginning-of-sentence
def llama_token_bos(model: llama_model_p) -> int:
"""beginning-of-sentence"""
return _lib.llama_token_bos(model)
@@ -1303,6 +1398,7 @@ _lib.llama_token_bos.restype = llama_token
# LLAMA_API llama_token llama_token_eos(const struct llama_model * model); // end-of-sentence
def llama_token_eos(model: llama_model_p) -> int:
"""end-of-sentence"""
return _lib.llama_token_eos(model)
@@ -1312,6 +1408,7 @@ _lib.llama_token_eos.restype = llama_token
# LLAMA_API llama_token llama_token_nl (const struct llama_model * model); // next-line
def llama_token_nl(model: llama_model_p) -> int:
"""next-line"""
return _lib.llama_token_nl(model)
@@ -1322,6 +1419,7 @@ _lib.llama_token_nl.restype = llama_token
# // Returns -1 if unknown, 1 for true or 0 for false.
# LLAMA_API int llama_add_bos_token(const struct llama_model * model);
def llama_add_bos_token(model: llama_model_p) -> int:
"""Returns -1 if unknown, 1 for true or 0 for false."""
return _lib.llama_add_bos_token(model)
@@ -1332,6 +1430,7 @@ _lib.llama_add_bos_token.restype = c_int
# // Returns -1 if unknown, 1 for true or 0 for false.
# LLAMA_API int llama_add_eos_token(const struct llama_model * model);
def llama_add_eos_token(model: llama_model_p) -> int:
"""Returns -1 if unknown, 1 for true or 0 for false."""
return _lib.llama_add_eos_token(model)
@@ -1342,6 +1441,7 @@ _lib.llama_add_eos_token.restype = c_int
# // codellama infill tokens
# LLAMA_API llama_token llama_token_prefix(const struct llama_model * model); // Beginning of infill prefix
def llama_token_prefix(model: llama_model_p) -> int:
"""codellama infill tokens"""
return _lib.llama_token_prefix(model)
@@ -1404,6 +1504,7 @@ def llama_tokenize(
add_bos: Union[c_bool, bool],
special: Union[c_bool, bool],
) -> int:
"""Convert the provided text into tokens."""
return _lib.llama_tokenize(
model, text, text_len, tokens, n_max_tokens, add_bos, special
)
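
A tokenization sketch; model is an already-loaded llama_model_p and the buffer bound is a rough upper estimate:

    import llama_cpp.llama_cpp as llama_cpp

    text = b"Hello, world!"
    n_max_tokens = len(text) + 1                        # rough upper bound for this sketch
    tokens = (llama_cpp.llama_token * n_max_tokens)()
    n_tokens = llama_cpp.llama_tokenize(
        model, text, len(text), tokens, n_max_tokens, True, False  # add_bos=True, special=False
    )
    if n_tokens < 0:
        raise RuntimeError(f"buffer too small, {-n_tokens} tokens needed")
    prompt_tokens = list(tokens[:n_tokens])
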
@@ -1436,6 +1537,10 @@ def llama_token_to_piece(
buf: Union[c_char_p, bytes],
length: Union[c_int, int],
) -> int:
"""Token Id -> Piece.
Uses the vocabulary in the provided context.
Does not write null terminator to the buffer.
User code is responsible to remove the leading whitespace of the first non-BOS token when decoding multiple tokens."""
return _lib.llama_token_to_piece(model, token, buf, length)
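
A detokenization sketch built on this call; the 32-byte scratch buffer is an assumption that is comfortably larger than any single piece:

    import ctypes
    import llama_cpp.llama_cpp as llama_cpp

    def detokenize(model, token_ids):
        buf = ctypes.create_string_buffer(32)  # scratch space for one piece at a time
        out = b""
        for tok in token_ids:
            n = llama_cpp.llama_token_to_piece(model, tok, buf, ctypes.sizeof(buf))
            out += buf.raw[:n]
        # caller may still want to strip the leading space of the first non-BOS piece
        return out.decode("utf-8", errors="replace")
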
@@ -1493,6 +1598,7 @@ _lib.llama_grammar_copy.restype = llama_grammar_p
# // Sets the current rng seed.
# LLAMA_API void llama_set_rng_seed(struct llama_context * ctx, uint32_t seed);
def llama_set_rng_seed(ctx: llama_context_p, seed: Union[c_uint32, int]):
"""Sets the current rng seed."""
return _lib.llama_set_rng_seed(ctx, seed)
@@ -1519,6 +1625,8 @@ def llama_sample_repetition_penalties(
penalty_freq: Union[c_float, float],
penalty_present: Union[c_float, float],
):
"""Repetition penalty described in CTRL academic paper https://arxiv.org/abs/1909.05858, with negative logit fix.
Frequency and presence penalties described in OpenAI API https://platform.openai.com/docs/api-reference/parameter-details."""
return _lib.llama_sample_repetition_penalties(
ctx,
candidates,
@@ -1557,6 +1665,7 @@ def llama_sample_classifier_free_guidance(
guidance_ctx: llama_context_p,
scale: Union[c_float, float],
):
"""Apply classifier-free guidance to the logits as described in academic paper "Stay on topic with Classifier-Free Guidance" https://arxiv.org/abs/2306.17806"""
return _lib.llama_sample_classifier_free_guidance(
ctx, candidates, guidance_ctx, scale
)
@@ -1578,6 +1687,7 @@ _lib.llama_sample_classifier_free_guidance.restype = None
def llama_sample_softmax(
ctx: llama_context_p, candidates # type: _Pointer[llama_token_data]
):
"""Sorts candidate tokens by their logits in descending order and calculate probabilities based on logits."""
return _lib.llama_sample_softmax(ctx, candidates)
@@ -1600,6 +1710,7 @@ def llama_sample_top_k(
k: Union[c_int, int],
min_keep: Union[c_size_t, int],
):
"""Top-K sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751"""
return _lib.llama_sample_top_k(ctx, candidates, k, min_keep)
@@ -1624,6 +1735,7 @@ def llama_sample_top_p(
p: Union[c_float, float],
min_keep: Union[c_size_t, int],
):
"""Nucleus sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751"""
return _lib.llama_sample_top_p(ctx, candidates, p, min_keep)
@@ -1648,6 +1760,7 @@ def llama_sample_min_p(
p: Union[c_float, float],
min_keep: Union[c_size_t, int],
):
"""Minimum P sampling as described in https://github.com/ggerganov/llama.cpp/pull/3841"""
return _lib.llama_sample_min_p(ctx, candidates, p, min_keep)
@@ -1672,6 +1785,7 @@ def llama_sample_tail_free(
z: Union[c_float, float],
min_keep: Union[c_size_t, int],
):
"""Tail Free Sampling described in https://www.trentonbricken.com/Tail-Free-Sampling/."""
return _lib.llama_sample_tail_free(ctx, candidates, z, min_keep)
@@ -1696,6 +1810,7 @@ def llama_sample_typical(
p: Union[c_float, float],
min_keep: Union[c_size_t, int],
):
"""Locally Typical Sampling implementation described in the paper https://arxiv.org/abs/2202.00666."""
return _lib.llama_sample_typical(ctx, candidates, p, min_keep)
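
A sketch of a small sampling chain over these helpers, assuming model/ctx are existing handles, logits is the pointer returned by llama_get_logits_ith(), and llama_token_data, llama_token_data_array and llama_sample_token from the same bindings hold and pick the candidates:

    import ctypes
    import llama_cpp.llama_cpp as llama_cpp

    n_vocab = llama_cpp.llama_n_vocab(model)
    candidates_data = (llama_cpp.llama_token_data * n_vocab)()
    for token_id in range(n_vocab):
        candidates_data[token_id].id = token_id
        candidates_data[token_id].logit = logits[token_id]
        candidates_data[token_id].p = 0.0
    candidates = llama_cpp.llama_token_data_array(
        ctypes.cast(candidates_data, ctypes.POINTER(llama_cpp.llama_token_data)),
        n_vocab,
        False,  # not sorted yet
    )

    llama_cpp.llama_sample_top_k(ctx, ctypes.byref(candidates), 40, 1)
    llama_cpp.llama_sample_top_p(ctx, ctypes.byref(candidates), 0.95, 1)
    new_token = llama_cpp.llama_sample_token(ctx, ctypes.byref(candidates))
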