# llama.cpp/llama_cpp/llama_cpp.py

import sys
import os
import ctypes
from ctypes import (
c_int,
c_float,
c_char_p,
c_void_p,
c_bool,
POINTER,
Structure,
Array,
c_uint8,
c_size_t,
)
import pathlib
# Load the library
def _load_shared_library(lib_base_name):
# Determine the file extension based on the platform
if sys.platform.startswith("linux"):
lib_ext = ".so"
elif sys.platform == "darwin":
lib_ext = ".so"
elif sys.platform == "win32":
lib_ext = ".dll"
else:
raise RuntimeError("Unsupported platform")
# Construct the paths to the possible shared library names
_base_path = pathlib.Path(__file__).parent.resolve()
# Search for the library next to this module under the name "libllama"
# (default name for llama.cpp) and "llama" (default name for this repo)
_lib_paths = [
_base_path / f"lib{lib_base_name}{lib_ext}",
_base_path / f"{lib_base_name}{lib_ext}",
]
if "LLAMA_CPP_LIB" in os.environ:
lib_base_name = os.environ["LLAMA_CPP_LIB"]
_lib = pathlib.Path(lib_base_name)
_base_path = _lib.parent.resolve()
_lib_paths = [_lib.resolve()]
# Add the library directory to the DLL search path on Windows (if needed)
if sys.platform == "win32" and sys.version_info >= (3, 8):
os.add_dll_directory(str(_base_path))
# Try to load the shared library, handling potential errors
for _lib_path in _lib_paths:
if _lib_path.exists():
try:
return ctypes.CDLL(str(_lib_path))
except Exception as e:
raise RuntimeError(f"Failed to load shared library '{_lib_path}': {e}")
raise FileNotFoundError(
f"Shared library with base name '{lib_base_name}' not found"
)
# Specify the base name of the shared library to load
_lib_base_name = "llama"
# Load the library
_lib = _load_shared_library(_lib_base_name)
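
# Note: a minimal override sketch (the path below is hypothetical, not part
# of this module): setting LLAMA_CPP_LIB before this module is imported makes
# the loader use that exact file instead of searching next to this package.
#
#     os.environ["LLAMA_CPP_LIB"] = "/opt/llama/libllama.so"
#     import llama_cpp.llama_cpp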
# C types
LLAMA_FILE_VERSION = ctypes.c_int(1)
LLAMA_FILE_MAGIC = b"ggjt"
LLAMA_FILE_MAGIC_UNVERSIONED = b"ggml"
LLAMA_SESSION_MAGIC = b"ggsn"
LLAMA_SESSION_VERSION = ctypes.c_int(0)
llama_context_p = c_void_p
llama_token = c_int
llama_token_p = POINTER(llama_token)
class llama_token_data(Structure):
_fields_ = [
("id", llama_token), # token id
("logit", c_float), # log-odds of the token
("p", c_float), # probability of the token
]
llama_token_data_p = POINTER(llama_token_data)
class llama_token_data_array(Structure):
_fields_ = [
("data", llama_token_data_p),
("size", c_size_t),
("sorted", c_bool),
]
llama_token_data_array_p = POINTER(llama_token_data_array)
llama_progress_callback = ctypes.CFUNCTYPE(None, c_float, c_void_p)
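
# Illustrative sketch (not part of the upstream API): a plain Python function
# can be wrapped in llama_progress_callback and placed into the params below.
# The wrapper object must stay referenced for the lifetime of the load, or
# ctypes will garbage-collect the trampoline out from under the C code.
def _example_progress(progress: float, user_data) -> None:
    # progress is in [0, 1]; user_data is the raw c_void_p payload
    print(f"model load progress: {progress:.2%}")


_example_progress_cb = llama_progress_callback(_example_progress)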
class llama_context_params(Structure):
_fields_ = [
("n_ctx", c_int), # text context
("n_parts", c_int), # -1 for default
("seed", c_int), # RNG seed, 0 for random
("f16_kv", c_bool), # use fp16 for KV cache
(
"logits_all",
c_bool,
), # the llama_eval() call computes all logits, not just the last one
("vocab_only", c_bool), # only load the vocabulary, no weights
("use_mmap", c_bool), # use mmap if possible
("use_mlock", c_bool), # force system to keep model in RAM
("embedding", c_bool), # embedding mode only
# called with a progress value between 0 and 1, pass NULL to disable
("progress_callback", llama_progress_callback),
# context pointer passed to the progress callback
("progress_callback_user_data", c_void_p),
]
llama_context_params_p = POINTER(llama_context_params)
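
# Illustrative sketch (helper name and values are ours, not upstream's):
# start from the library defaults and adjust fields before creating a context.
def _example_make_params() -> llama_context_params:
    params = llama_context_default_params()
    params.n_ctx = 512       # shrink the text context
    params.seed = 42         # deterministic RNG instead of 0 (random)
    params.embedding = True  # enable embedding extraction
    return params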
LLAMA_FTYPE_ALL_F32 = ctypes.c_int(0)
LLAMA_FTYPE_MOSTLY_F16 = ctypes.c_int(1) # except 1d tensors
LLAMA_FTYPE_MOSTLY_Q4_0 = ctypes.c_int(2) # except 1d tensors
LLAMA_FTYPE_MOSTLY_Q4_1 = ctypes.c_int(3) # except 1d tensors
LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = ctypes.c_int(
4
) # tok_embeddings.weight and output.weight are F16
LLAMA_FTYPE_MOSTLY_Q4_2 = ctypes.c_int(5)  # except 1d tensors
# LLAMA_FTYPE_MOSTLY_Q4_3 = ctypes.c_int(6)  # except 1d tensors
LLAMA_FTYPE_MOSTLY_Q8_0 = ctypes.c_int(7)  # except 1d tensors
LLAMA_FTYPE_MOSTLY_Q5_0 = ctypes.c_int(8)  # except 1d tensors
LLAMA_FTYPE_MOSTLY_Q5_1 = ctypes.c_int(9)  # except 1d tensors
# Functions
def llama_context_default_params() -> llama_context_params:
return _lib.llama_context_default_params()
_lib.llama_context_default_params.argtypes = []
_lib.llama_context_default_params.restype = llama_context_params
def llama_mmap_supported() -> c_bool:
return _lib.llama_mmap_supported()
_lib.llama_mmap_supported.argtypes = []
_lib.llama_mmap_supported.restype = c_bool
def llama_mlock_supported() -> c_bool:
return _lib.llama_mlock_supported()
_lib.llama_mlock_supported.argtypes = []
_lib.llama_mlock_supported.restype = c_bool
# Various functions for loading a ggml llama model.
# Allocate (almost) all memory needed for the model.
# Return NULL on failure
def llama_init_from_file(
path_model: bytes, params: llama_context_params
) -> llama_context_p:
return _lib.llama_init_from_file(path_model, params)
_lib.llama_init_from_file.argtypes = [c_char_p, llama_context_params]
_lib.llama_init_from_file.restype = llama_context_p
# Frees all allocated memory
def llama_free(ctx: llama_context_p):
_lib.llama_free(ctx)
_lib.llama_free.argtypes = [llama_context_p]
_lib.llama_free.restype = None
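
# Illustrative usage sketch (the model path is a placeholder assumption):
# create a context from a ggml model file and release it when finished.
def _example_init_and_free(model_path: bytes = b"./models/7B/ggml-model.bin"):
    ctx = llama_init_from_file(model_path, llama_context_default_params())
    if not ctx:  # llama_init_from_file returns NULL on failure
        raise RuntimeError(f"failed to load model from {model_path!r}")
    llama_free(ctx)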
# TODO: not great API - very likely to change
# Returns 0 on success
# nthread - how many threads to use. If <=0, will use std::thread::hardware_concurrency(), else the number given
def llama_model_quantize(
fname_inp: bytes, fname_out: bytes, ftype: c_int, nthread: c_int
) -> c_int:
return _lib.llama_model_quantize(fname_inp, fname_out, ftype, nthread)
_lib.llama_model_quantize.argtypes = [c_char_p, c_char_p, c_int, c_int]
_lib.llama_model_quantize.restype = c_int
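
# Illustrative sketch (both paths are hypothetical): quantize an F16 ggml
# file down to Q4_0, letting llama.cpp pick the thread count (nthread <= 0).
def _example_quantize() -> None:
    ret = llama_model_quantize(
        b"./models/7B/ggml-model-f16.bin",
        b"./models/7B/ggml-model-q4_0.bin",
        LLAMA_FTYPE_MOSTLY_Q4_0,
        0,
    )
    if ret != 0:
        raise RuntimeError("llama_model_quantize failed")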
# Apply a LoRA adapter to a loaded model
# path_base_model is the path to a higher quality model to use as a base for
# the layers modified by the adapter. Can be NULL to use the current loaded model.
# The model needs to be reloaded before applying a new adapter, otherwise the adapter
# will be applied on top of the previous one
# Returns 0 on success
def llama_apply_lora_from_file(
ctx: llama_context_p,
path_lora: ctypes.c_char_p,
path_base_model: ctypes.c_char_p,
n_threads: c_int,
) -> c_int:
return _lib.llama_apply_lora_from_file(ctx, path_lora, path_base_model, n_threads)
_lib.llama_apply_lora_from_file.argtypes = [llama_context_p, c_char_p, c_char_p, c_int]
_lib.llama_apply_lora_from_file.restype = c_int
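
# Illustrative sketch (the adapter path is hypothetical): patch the loaded
# model with a LoRA adapter; pass None to modify the current weights in place.
def _example_apply_lora(ctx: llama_context_p) -> None:
    if llama_apply_lora_from_file(ctx, b"./lora/adapter.bin", None, 4) != 0:
        raise RuntimeError("llama_apply_lora_from_file failed")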
# Returns the number of tokens in the KV cache
def llama_get_kv_cache_token_count(ctx: llama_context_p) -> c_int:
return _lib.llama_get_kv_cache_token_count(ctx)
_lib.llama_get_kv_cache_token_count.argtypes = [llama_context_p]
_lib.llama_get_kv_cache_token_count.restype = c_int
# Sets the current rng seed.
def llama_set_rng_seed(ctx: llama_context_p, seed: c_int):
return _lib.llama_set_rng_seed(ctx, seed)
_lib.llama_set_rng_seed.argtypes = [llama_context_p, c_int]
_lib.llama_set_rng_seed.restype = None
# Returns the size in bytes of the state (rng, logits, embedding and kv_cache)
def llama_get_state_size(ctx: llama_context_p) -> c_size_t:
return _lib.llama_get_state_size(ctx)
_lib.llama_get_state_size.argtypes = [llama_context_p]
_lib.llama_get_state_size.restype = c_size_t
# Copies the state to the specified destination address.
# Destination needs to have allocated enough memory.
# Returns the number of bytes copied
def llama_copy_state_data(ctx: llama_context_p, dest) -> c_size_t:
return _lib.llama_copy_state_data(ctx, dest)
_lib.llama_copy_state_data.argtypes = [llama_context_p, POINTER(c_uint8)]
_lib.llama_copy_state_data.restype = c_size_t
# Set the state reading from the specified address
# Returns the number of bytes read
def llama_set_state_data(ctx: llama_context_p, src) -> c_size_t:
return _lib.llama_set_state_data(ctx, src)
_lib.llama_set_state_data.argtypes = [llama_context_p, POINTER(c_uint8)]
_lib.llama_set_state_data.restype = c_size_t
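
# Illustrative sketch: snapshot the whole context state (rng, logits,
# embedding and kv_cache) into a Python-owned buffer and roll back later.
def _example_state_roundtrip(ctx: llama_context_p):
    size = llama_get_state_size(ctx)
    buf = (c_uint8 * size)()  # destination with enough allocated memory
    llama_copy_state_data(ctx, buf)
    # ... evaluate more tokens, then restore the snapshot:
    llama_set_state_data(ctx, buf)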
# Save/load session file
def llama_load_session_file(
ctx: llama_context_p,
path_session: bytes,
tokens_out,
n_token_capacity: c_size_t,
n_token_count_out,
) -> c_bool:
return _lib.llama_load_session_file(
ctx, path_session, tokens_out, n_token_capacity, n_token_count_out
)
_lib.llama_load_session_file.argtypes = [
llama_context_p,
c_char_p,
llama_token_p,
c_size_t,
POINTER(c_size_t),
]
_lib.llama_load_session_file.restype = c_bool
def llama_save_session_file(
ctx: llama_context_p, path_session: bytes, tokens, n_token_count: c_size_t
) -> c_bool:
return _lib.llama_save_session_file(ctx, path_session, tokens, n_token_count)
_lib.llama_save_session_file.argtypes = [
llama_context_p,
c_char_p,
llama_token_p,
c_size_t,
]
_lib.llama_save_session_file.restype = c_bool
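
# Illustrative sketch (the session path is hypothetical): persist evaluated
# prompt tokens so a later run can skip re-evaluating them.
def _example_session_roundtrip(ctx: llama_context_p, tokens, n_tokens: int):
    llama_save_session_file(ctx, b"./prompt.session", tokens, n_tokens)
    capacity = llama_n_ctx(ctx)
    tokens_out = (llama_token * capacity)()
    n_out = c_size_t(0)
    llama_load_session_file(
        ctx, b"./prompt.session", tokens_out, capacity, ctypes.byref(n_out)
    )
    return tokens_out, n_out.value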
# Run the llama inference to obtain the logits and probabilities for the next token.
# tokens + n_tokens is the provided batch of new tokens to process
# n_past is the number of tokens to use from previous eval calls
# Returns 0 on success
def llama_eval(
ctx: llama_context_p,
tokens, # type: Array[llama_token]
n_tokens: c_int,
n_past: c_int,
n_threads: c_int,
) -> c_int:
return _lib.llama_eval(ctx, tokens, n_tokens, n_past, n_threads)
_lib.llama_eval.argtypes = [llama_context_p, llama_token_p, c_int, c_int, c_int]
_lib.llama_eval.restype = c_int
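
# Illustrative sketch: process a freshly tokenized batch from position 0
# (n_past=0) with four threads; llama_eval returns 0 on success.
def _example_eval(ctx: llama_context_p, tokens, n_tokens: int) -> None:
    if llama_eval(ctx, tokens, n_tokens, 0, 4) != 0:
        raise RuntimeError("llama_eval failed")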
# Convert the provided text into tokens.
# The tokens pointer must be large enough to hold the resulting tokens.
# Returns the number of tokens on success, no more than n_max_tokens
# Returns a negative number on failure - the number of tokens that would have been returned
# TODO: not sure if correct
2023-03-24 18:35:41 +00:00
def llama_tokenize(
ctx: llama_context_p,
text: bytes,
tokens, # type: Array[llama_token]
n_max_tokens: c_int,
add_bos: c_bool,
) -> c_int:
return _lib.llama_tokenize(ctx, text, tokens, n_max_tokens, add_bos)
_lib.llama_tokenize.argtypes = [llama_context_p, c_char_p, llama_token_p, c_int, c_bool]
_lib.llama_tokenize.restype = c_int
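
# Illustrative sketch: tokenize a prompt into a buffer sized to the context
# window; a negative return value means the buffer was too small.
def _example_tokenize(ctx: llama_context_p, text: bytes = b"Hello, world"):
    n_max_tokens = llama_n_ctx(ctx)
    tokens = (llama_token * n_max_tokens)()
    n_tokens = llama_tokenize(ctx, text, tokens, n_max_tokens, c_bool(True))
    if n_tokens < 0:
        raise RuntimeError(f"needed {-n_tokens} token slots")
    return tokens, n_tokens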
def llama_n_vocab(ctx: llama_context_p) -> c_int:
return _lib.llama_n_vocab(ctx)
_lib.llama_n_vocab.argtypes = [llama_context_p]
_lib.llama_n_vocab.restype = c_int
def llama_n_ctx(ctx: llama_context_p) -> c_int:
return _lib.llama_n_ctx(ctx)
_lib.llama_n_ctx.argtypes = [llama_context_p]
_lib.llama_n_ctx.restype = c_int
def llama_n_embd(ctx: llama_context_p) -> c_int:
return _lib.llama_n_embd(ctx)
_lib.llama_n_embd.argtypes = [llama_context_p]
_lib.llama_n_embd.restype = c_int
# Token logits obtained from the last call to llama_eval()
# The logits for the last token are stored in the last row
# Can be mutated in order to change the probabilities of the next token
# Rows: n_tokens
# Cols: n_vocab
def llama_get_logits(ctx: llama_context_p):
return _lib.llama_get_logits(ctx)
_lib.llama_get_logits.argtypes = [llama_context_p]
_lib.llama_get_logits.restype = POINTER(c_float)
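
# Illustrative sketch: with logits_all=False only the last row is populated,
# so the returned pointer holds n_vocab floats for the most recent token.
def _example_last_logits(ctx: llama_context_p):
    logits = llama_get_logits(ctx)
    return [logits[i] for i in range(llama_n_vocab(ctx))]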
# Get the embeddings for the input
# shape: [n_embd] (1-dimensional)
def llama_get_embeddings(ctx: llama_context_p):
return _lib.llama_get_embeddings(ctx)
_lib.llama_get_embeddings.argtypes = [llama_context_p]
_lib.llama_get_embeddings.restype = POINTER(c_float)
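
# Illustrative sketch: read the [n_embd] embedding vector; this requires a
# context created with embedding=True in llama_context_params.
def _example_embeddings(ctx: llama_context_p):
    embd = llama_get_embeddings(ctx)
    return [embd[i] for i in range(llama_n_embd(ctx))]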
# Token Id -> String. Uses the vocabulary in the provided context
def llama_token_to_str(ctx: llama_context_p, token: llama_token) -> bytes:
return _lib.llama_token_to_str(ctx, token)
_lib.llama_token_to_str.argtypes = [llama_context_p, llama_token]
_lib.llama_token_to_str.restype = c_char_p
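
# Illustrative sketch: map token ids back to bytes and join them; the result
# may still need utf-8 decoding (multi-byte characters can span tokens).
def _example_detokenize(ctx: llama_context_p, tokens, n_tokens: int) -> bytes:
    return b"".join(llama_token_to_str(ctx, tokens[i]) for i in range(n_tokens))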
# Special tokens
def llama_token_bos() -> llama_token:
return _lib.llama_token_bos()
_lib.llama_token_bos.argtypes = []
_lib.llama_token_bos.restype = llama_token
def llama_token_eos() -> llama_token:
return _lib.llama_token_eos()
_lib.llama_token_eos.argtypes = []
_lib.llama_token_eos.restype = llama_token
def llama_token_nl() -> llama_token:
return _lib.llama_token_nl()
_lib.llama_token_nl.argtypes = []
_lib.llama_token_nl.restype = llama_token
# Sampling functions
def llama_sample_repetition_penalty(
    ctx: llama_context_p,
    candidates,
    last_tokens_data,
    last_tokens_size: c_size_t,
    penalty: c_float,
):
    return _lib.llama_sample_repetition_penalty(
        ctx, candidates, last_tokens_data, last_tokens_size, penalty
    )


_lib.llama_sample_repetition_penalty.argtypes = [
    llama_context_p,
    llama_token_data_array_p,
    llama_token_p,
    c_size_t,
    c_float,
]
_lib.llama_sample_repetition_penalty.restype = None
# LLAMA_API void llama_sample_frequency_and_presence_penalties(struct llama_context * ctx, llama_token_data_array * candidates, llama_token * last_tokens, size_t last_tokens_size, float alpha_frequency, float alpha_presence);
def llama_sample_frequency_and_presence_penalties(
    ctx: llama_context_p,
    candidates,
    last_tokens_data,
    last_tokens_size: c_size_t,
    alpha_frequency: c_float,
    alpha_presence: c_float,
):
    return _lib.llama_sample_frequency_and_presence_penalties(
        ctx,
        candidates,
        last_tokens_data,
        last_tokens_size,
        alpha_frequency,
        alpha_presence,
    )


_lib.llama_sample_frequency_and_presence_penalties.argtypes = [
    llama_context_p,
    llama_token_data_array_p,
    llama_token_p,
    c_size_t,
    c_float,
    c_float,
]
_lib.llama_sample_frequency_and_presence_penalties.restype = None
# LLAMA_API void llama_sample_softmax(struct llama_context * ctx, llama_token_data_array * candidates);
def llama_sample_softmax(ctx: llama_context_p, candidates):
    return _lib.llama_sample_softmax(ctx, candidates)


_lib.llama_sample_softmax.argtypes = [
    llama_context_p,
    llama_token_data_array_p,
]
_lib.llama_sample_softmax.restype = None
# LLAMA_API void llama_sample_top_k(struct llama_context * ctx, llama_token_data_array * candidates, int k, size_t min_keep = 1);
def llama_sample_top_k(
    ctx: llama_context_p, candidates, k: c_int, min_keep: c_size_t
):
    return _lib.llama_sample_top_k(ctx, candidates, k, min_keep)


_lib.llama_sample_top_k.argtypes = [
    llama_context_p,
    llama_token_data_array_p,
    c_int,
    c_size_t,
]
_lib.llama_sample_top_k.restype = None
# LLAMA_API void llama_sample_top_p(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep = 1);
def llama_sample_top_p(
    ctx: llama_context_p, candidates, p: c_float, min_keep: c_size_t
):
    return _lib.llama_sample_top_p(ctx, candidates, p, min_keep)


_lib.llama_sample_top_p.argtypes = [
    llama_context_p,
    llama_token_data_array_p,
    c_float,
    c_size_t,
]
_lib.llama_sample_top_p.restype = None
# LLAMA_API void llama_sample_tail_free(struct llama_context * ctx, llama_token_data_array * candidates, float z, size_t min_keep = 1);
def llama_sample_tail_free(
    ctx: llama_context_p, candidates, z: c_float, min_keep: c_size_t
):
    return _lib.llama_sample_tail_free(ctx, candidates, z, min_keep)


_lib.llama_sample_tail_free.argtypes = [
    llama_context_p,
    llama_token_data_array_p,
    c_float,
    c_size_t,
]
_lib.llama_sample_tail_free.restype = None
# LLAMA_API void llama_sample_typical(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep = 1);
def llama_sample_typical(
    ctx: llama_context_p, candidates, p: c_float, min_keep: c_size_t
):
    return _lib.llama_sample_typical(ctx, candidates, p, min_keep)


_lib.llama_sample_typical.argtypes = [
    llama_context_p,
    llama_token_data_array_p,
    c_float,
    c_size_t,
]
_lib.llama_sample_typical.restype = None
# LLAMA_API void llama_sample_temperature(struct llama_context * ctx, llama_token_data_array * candidates, float temp);
def llama_sample_temperature(ctx: llama_context_p, candidates, temp: c_float):
    return _lib.llama_sample_temperature(ctx, candidates, temp)


_lib.llama_sample_temperature.argtypes = [
    llama_context_p,
    llama_token_data_array_p,
    c_float,
]
_lib.llama_sample_temperature.restype = None
# LLAMA_API llama_token llama_sample_token_mirostat(struct llama_context * ctx, llama_token_data_array * candidates, float tau, float eta, int m, float * mu);
def llama_sample_token_mirostat(
ctx: llama_context_p, candidates, tau: c_float, eta: c_float, m: c_int, mu
) -> llama_token:
return _lib.llama_sample_token_mirostat(ctx, candidates, tau, eta, m, mu)
_lib.llama_sample_token_mirostat.argtypes = [
llama_context_p,
llama_token_data_array_p,
c_float,
c_float,
c_int,
POINTER(c_float),
]
_lib.llama_sample_token_mirostat.restype = llama_token
# LLAMA_API llama_token llama_sample_token_mirostat_v2(struct llama_context * ctx, llama_token_data_array * candidates, float tau, float eta, float * mu);
def llama_sample_token_mirostat_v2(
ctx: llama_context_p, candidates, tau: c_float, eta: c_float, mu
) -> llama_token:
return _lib.llama_sample_token_mirostat_v2(ctx, candidates, tau, eta, mu)
_lib.llama_sample_token_mirostat_v2.argtypes = [
llama_context_p,
llama_token_data_array_p,
c_float,
c_float,
POINTER(c_float),
]
_lib.llama_sample_token_mirostat_v2.restype = llama_token
# LLAMA_API llama_token llama_sample_token_greedy(struct llama_context * ctx, llama_token_data_array * candidates);
def llama_sample_token_greedy(ctx: llama_context_p, candidates) -> llama_token:
return _lib.llama_sample_token_greedy(ctx, candidates)
_lib.llama_sample_token_greedy.argtypes = [
llama_context_p,
llama_token_data_array_p,
]
_lib.llama_sample_token_greedy.restype = llama_token
# LLAMA_API llama_token llama_sample_token(struct llama_context * ctx, llama_token_data_array * candidates);
def llama_sample_token(ctx: llama_context_p, candidates) -> llama_token:
return _lib.llama_sample_token(ctx, candidates)
_lib.llama_sample_token.argtypes = [
llama_context_p,
llama_token_data_array_p,
]
_lib.llama_sample_token.restype = llama_token
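
# Illustrative end-to-end sketch: build a llama_token_data_array over the
# last logits, apply top-k / top-p / temperature in sequence, then sample.
# The hyperparameter values are placeholders, not recommendations.
def _example_sample_next_token(ctx: llama_context_p) -> int:
    n_vocab = llama_n_vocab(ctx)
    logits = llama_get_logits(ctx)
    data = (llama_token_data * n_vocab)()
    for i in range(n_vocab):
        data[i].id = i
        data[i].logit = logits[i]
        data[i].p = 0.0
    # `data` must stay alive for as long as `candidates` points into it
    candidates = llama_token_data_array(
        ctypes.cast(data, llama_token_data_p), c_size_t(n_vocab), c_bool(False)
    )
    candidates_p = ctypes.byref(candidates)
    llama_sample_top_k(ctx, candidates_p, 40, 1)
    llama_sample_top_p(ctx, candidates_p, 0.95, 1)
    llama_sample_temperature(ctx, candidates_p, 0.8)
    return llama_sample_token(ctx, candidates_p)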
# Performance information
def llama_print_timings(ctx: llama_context_p):
_lib.llama_print_timings(ctx)
_lib.llama_print_timings.argtypes = [llama_context_p]
_lib.llama_print_timings.restype = None
def llama_reset_timings(ctx: llama_context_p):
_lib.llama_reset_timings(ctx)
_lib.llama_reset_timings.argtypes = [llama_context_p]
_lib.llama_reset_timings.restype = None
# Print system information
def llama_print_system_info() -> bytes:
return _lib.llama_print_system_info()
_lib.llama_print_system_info.argtypes = []
_lib.llama_print_system_info.restype = c_char_p