from __future__ import annotations

import sys
import os
import ctypes
import functools
import pathlib

from typing import (
    Any,
    Callable,
    List,
    Union,
    NewType,
    Optional,
    TYPE_CHECKING,
    TypeVar,
    Generic,
)

from typing_extensions import TypeAlias


# Load the library
def _load_shared_library(lib_base_name: str):
    # Construct the paths to the possible shared library names
    _base_path = pathlib.Path(os.path.abspath(os.path.dirname(__file__)))
    # Searching for the library in the current directory under the name "libllama" (default name
    # for llamacpp) and "llama" (default name for this repo)
    _lib_paths: List[pathlib.Path] = []
    # Determine the file extension based on the platform
    if sys.platform.startswith("linux"):
        _lib_paths += [
            _base_path / f"lib{lib_base_name}.so",
        ]
    elif sys.platform == "darwin":
        _lib_paths += [
            _base_path / f"lib{lib_base_name}.so",
            _base_path / f"lib{lib_base_name}.dylib",
        ]
    elif sys.platform == "win32":
        _lib_paths += [
            _base_path / f"{lib_base_name}.dll",
            _base_path / f"lib{lib_base_name}.dll",
        ]
    else:
        raise RuntimeError("Unsupported platform")

    if "LLAMA_CPP_LIB" in os.environ:
        lib_base_name = os.environ["LLAMA_CPP_LIB"]
        _lib = pathlib.Path(lib_base_name)
        _base_path = _lib.parent.resolve()
        _lib_paths = [_lib.resolve()]

    cdll_args = dict()  # type: ignore

    # Add the library directory to the DLL search path on Windows (if needed)
    if sys.platform == "win32" and sys.version_info >= (3, 8):
        os.add_dll_directory(str(_base_path))
        if "CUDA_PATH" in os.environ:
            os.add_dll_directory(os.path.join(os.environ["CUDA_PATH"], "bin"))
            os.add_dll_directory(os.path.join(os.environ["CUDA_PATH"], "lib"))
        if "HIP_PATH" in os.environ:
            os.add_dll_directory(os.path.join(os.environ["HIP_PATH"], "bin"))
            os.add_dll_directory(os.path.join(os.environ["HIP_PATH"], "lib"))
        cdll_args["winmode"] = ctypes.RTLD_GLOBAL

    # Try to load the shared library, handling potential errors
    for _lib_path in _lib_paths:
        if _lib_path.exists():
            try:
                return ctypes.CDLL(str(_lib_path), **cdll_args)  # type: ignore
            except Exception as e:
                raise RuntimeError(f"Failed to load shared library '{_lib_path}': {e}")

    raise FileNotFoundError(
        f"Shared library with base name '{lib_base_name}' not found"
    )


# Specify the base name of the shared library to load
_lib_base_name = "llama"

# Load the library
_lib = _load_shared_library(_lib_base_name)
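# A minimal usage sketch (not executed here): setting the LLAMA_CPP_LIB environment
# variable to an absolute path makes the loader above use that exact shared library
# instead of the one bundled next to this file, e.g.
#
#   LLAMA_CPP_LIB=/opt/llama.cpp/libllama.so python your_script.py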

# ctypes sane type hint helpers
#
# - Generic Pointer and Array types
# - PointerOrRef type with a type hinted byref function
#
# NOTE: Only use these for static type checking not for runtime checks
# no good will come of that
if TYPE_CHECKING:
    CtypesCData = TypeVar("CtypesCData", bound=ctypes._CData)  # type: ignore

    CtypesArray: TypeAlias = ctypes.Array[CtypesCData]  # type: ignore

    CtypesPointer: TypeAlias = ctypes._Pointer[CtypesCData]  # type: ignore

    CtypesVoidPointer: TypeAlias = ctypes.c_void_p

    class CtypesRef(Generic[CtypesCData]):
        pass

    CtypesPointerOrRef: TypeAlias = Union[
        CtypesPointer[CtypesCData], CtypesRef[CtypesCData]
    ]

    CtypesFuncPointer: TypeAlias = ctypes._FuncPointer  # type: ignore

F = TypeVar("F", bound=Callable[..., Any])


def ctypes_function_for_shared_library(lib: ctypes.CDLL):
    def ctypes_function(
        name: str, argtypes: List[Any], restype: Any, enabled: bool = True
    ):
        def decorator(f: F) -> F:
            if enabled:
                func = getattr(lib, name)
                func.argtypes = argtypes
                func.restype = restype
                functools.wraps(f)(func)
                return func
            else:
                return f

        return decorator

    return ctypes_function


ctypes_function = ctypes_function_for_shared_library(_lib)


def byref(obj: CtypesCData, offset: Optional[int] = None) -> CtypesRef[CtypesCData]:
    """Type-annotated version of ctypes.byref"""
    ...


byref = ctypes.byref  # type: ignore
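# A minimal sketch of how the decorator factory above is meant to be used; the
# symbol name "llama_example_fn" is hypothetical (the real bindings follow below),
# and the decorated Python body is only a type stub while calls go to the C symbol:
#
#   @ctypes_function("llama_example_fn", [ctypes.c_void_p], ctypes.c_int32)
#   def llama_example_fn(ctx: ctypes.c_void_p, /) -> int:
#       ...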
# from ggml.h
# // NOTE: always add types at the end of the enum to keep backward compatibility
# enum ggml_type {
# GGML_TYPE_F32 = 0,
# GGML_TYPE_F16 = 1,
# GGML_TYPE_Q4_0 = 2,
# GGML_TYPE_Q4_1 = 3,
# // GGML_TYPE_Q4_2 = 4, support has been removed
# // GGML_TYPE_Q4_3 = 5, support has been removed
# GGML_TYPE_Q5_0 = 6,
# GGML_TYPE_Q5_1 = 7,
# GGML_TYPE_Q8_0 = 8,
# GGML_TYPE_Q8_1 = 9,
# GGML_TYPE_Q2_K = 10,
# GGML_TYPE_Q3_K = 11,
# GGML_TYPE_Q4_K = 12,
# GGML_TYPE_Q5_K = 13,
# GGML_TYPE_Q6_K = 14,
# GGML_TYPE_Q8_K = 15,
# GGML_TYPE_IQ2_XXS = 16,
# GGML_TYPE_IQ2_XS = 17,
# GGML_TYPE_IQ3_XXS = 18,
# GGML_TYPE_IQ1_S = 19,
# GGML_TYPE_IQ4_NL = 20,
# GGML_TYPE_IQ3_S = 21,
# GGML_TYPE_IQ2_S = 22,
# GGML_TYPE_IQ4_XS = 23,
# GGML_TYPE_I8 = 24,
# GGML_TYPE_I16 = 25,
# GGML_TYPE_I32 = 26,
# GGML_TYPE_I64 = 27,
# GGML_TYPE_F64 = 28,
# GGML_TYPE_IQ1_M = 29,
# GGML_TYPE_COUNT,
# };
GGML_TYPE_F32 = 0
GGML_TYPE_F16 = 1
GGML_TYPE_Q4_0 = 2
GGML_TYPE_Q4_1 = 3
GGML_TYPE_Q5_0 = 6
GGML_TYPE_Q5_1 = 7
GGML_TYPE_Q8_0 = 8
GGML_TYPE_Q8_1 = 9
GGML_TYPE_Q2_K = 10
GGML_TYPE_Q3_K = 11
GGML_TYPE_Q4_K = 12
GGML_TYPE_Q5_K = 13
GGML_TYPE_Q6_K = 14
GGML_TYPE_Q8_K = 15
GGML_TYPE_IQ2_XXS = 16
GGML_TYPE_IQ2_XS = 17
GGML_TYPE_IQ3_XXS = 18
GGML_TYPE_IQ1_S = 19
GGML_TYPE_IQ4_NL = 20
GGML_TYPE_IQ3_S = 21
GGML_TYPE_IQ2_S = 22
GGML_TYPE_IQ4_XS = 23
GGML_TYPE_I8 = 24
GGML_TYPE_I16 = 25
GGML_TYPE_I32 = 26
GGML_TYPE_I64 = 27
GGML_TYPE_F64 = 28
GGML_TYPE_IQ1_M = 29
GGML_TYPE_COUNT = 30
# from ggml-backend.h
# typedef bool (*ggml_backend_sched_eval_callback)(struct ggml_tensor * t, bool ask, void * user_data);
ggml_backend_sched_eval_callback = ctypes.CFUNCTYPE(
    ctypes.c_bool, ctypes.c_void_p, ctypes.c_bool, ctypes.c_void_p
)

# // Abort callback
# // If not NULL, called before ggml computation
# // If it returns true, the computation is aborted
# typedef bool (*ggml_abort_callback)(void * data);
ggml_abort_callback = ctypes.CFUNCTYPE(ctypes.c_bool, ctypes.c_void_p)

# llama.h bindings
_lib.llama_max_devices.argtypes = []
_lib.llama_max_devices.restype = ctypes.c_size_t

LLAMA_MAX_DEVICES = _lib.llama_max_devices()

# define LLAMA_DEFAULT_SEED 0xFFFFFFFF
LLAMA_DEFAULT_SEED = 0xFFFFFFFF

# define LLAMA_MAX_RNG_STATE (64*1024)
LLAMA_MAX_RNG_STATE = 64 * 1024

# define LLAMA_FILE_MAGIC_GGLA 0x67676c61u // 'ggla'
LLAMA_FILE_MAGIC_GGLA = 0x67676C61

# define LLAMA_FILE_MAGIC_GGSN 0x6767736eu // 'ggsn'
LLAMA_FILE_MAGIC_GGSN = 0x6767736E

# define LLAMA_FILE_MAGIC_GGSQ 0x67677371u // 'ggsq'
LLAMA_FILE_MAGIC_GGSQ = 0x67677371

# define LLAMA_SESSION_MAGIC LLAMA_FILE_MAGIC_GGSN
LLAMA_SESSION_MAGIC = LLAMA_FILE_MAGIC_GGSN
# define LLAMA_SESSION_VERSION 6
LLAMA_SESSION_VERSION = 6

# define LLAMA_STATE_SEQ_MAGIC LLAMA_FILE_MAGIC_GGSQ
LLAMA_STATE_SEQ_MAGIC = LLAMA_FILE_MAGIC_GGSQ
# define LLAMA_STATE_SEQ_VERSION 1
LLAMA_STATE_SEQ_VERSION = 1


# struct llama_model;
llama_model_p = NewType("llama_model_p", int)
llama_model_p_ctypes = ctypes.c_void_p

# struct llama_context;
llama_context_p = NewType("llama_context_p", int)
llama_context_p_ctypes = ctypes.c_void_p


# typedef int32_t llama_pos;
llama_pos = ctypes.c_int32
# typedef int32_t llama_token;
llama_token = ctypes.c_int32
llama_token_p = ctypes.POINTER(llama_token)
# typedef int32_t llama_seq_id;
llama_seq_id = ctypes.c_int32

# enum llama_vocab_type {
# LLAMA_VOCAB_TYPE_NONE = 0, // For models without vocab
# LLAMA_VOCAB_TYPE_SPM = 1, // LLaMA tokenizer based on byte-level BPE with byte fallback
# LLAMA_VOCAB_TYPE_BPE = 2, // GPT-2 tokenizer based on byte-level BPE
# LLAMA_VOCAB_TYPE_WPM = 3, // BERT tokenizer based on WordPiece
# };
LLAMA_VOCAB_TYPE_NONE = 0
"""For models without vocab"""
LLAMA_VOCAB_TYPE_SPM = 1
"""LLaMA tokenizer based on byte-level BPE with byte fallback"""
LLAMA_VOCAB_TYPE_BPE = 2
"""GPT-2 tokenizer based on byte-level BPE"""
LLAMA_VOCAB_TYPE_WPM = 3
"""BERT tokenizer based on WordPiece"""


# // pre-tokenization types
# enum llama_vocab_pre_type {
# LLAMA_VOCAB_PRE_TYPE_DEFAULT = 0,
# LLAMA_VOCAB_PRE_TYPE_LLAMA3 = 1,
# LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_LLM = 2,
# LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_CODER = 3,
# LLAMA_VOCAB_PRE_TYPE_FALCON = 4,
# LLAMA_VOCAB_PRE_TYPE_MPT = 5,
# LLAMA_VOCAB_PRE_TYPE_STARCODER = 6,
# LLAMA_VOCAB_PRE_TYPE_GPT2 = 7,
# LLAMA_VOCAB_PRE_TYPE_REFACT = 8,
# LLAMA_VOCAB_PRE_TYPE_COMMAND_R = 9,
# LLAMA_VOCAB_PRE_TYPE_STABLELM2 = 10,
# LLAMA_VOCAB_PRE_TYPE_QWEN2 = 11,
# LLAMA_VOCAB_PRE_TYPE_OLMO = 12,
# LLAMA_VOCAB_PRE_TYPE_DBRX = 13,
# LLAMA_VOCAB_PRE_TYPE_SMAUG = 14,
# };
LLAMA_VOCAB_PRE_TYPE_DEFAULT = 0
LLAMA_VOCAB_PRE_TYPE_LLAMA3 = 1
LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_LLM = 2
LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_CODER = 3
LLAMA_VOCAB_PRE_TYPE_FALCON = 4
LLAMA_VOCAB_PRE_TYPE_MPT = 5
LLAMA_VOCAB_PRE_TYPE_STARCODER = 6
LLAMA_VOCAB_PRE_TYPE_GPT2 = 7
LLAMA_VOCAB_PRE_TYPE_REFACT = 8
LLAMA_VOCAB_PRE_TYPE_COMMAND_R = 9
LLAMA_VOCAB_PRE_TYPE_STABLELM2 = 10
LLAMA_VOCAB_PRE_TYPE_QWEN2 = 11
LLAMA_VOCAB_PRE_TYPE_OLMO = 12
LLAMA_VOCAB_PRE_TYPE_DBRX = 13
LLAMA_VOCAB_PRE_TYPE_SMAUG = 14


# // note: these values should be synchronized with ggml_rope
# // TODO: maybe move this enum to ggml.h (ggml_rope_type)
# enum llama_rope_type {
# LLAMA_ROPE_TYPE_NONE = -1,
# LLAMA_ROPE_TYPE_NORM = 0,
# LLAMA_ROPE_TYPE_NEOX = 2,
# LLAMA_ROPE_TYPE_GLM = 4,
# };
LLAMA_ROPE_TYPE_NONE = -1
LLAMA_ROPE_TYPE_NORM = 0
LLAMA_ROPE_TYPE_NEOX = 2
LLAMA_ROPE_TYPE_GLM = 4


# enum llama_token_type { //TODO: remove, required until per token attributes are available from GGUF file
# LLAMA_TOKEN_TYPE_UNDEFINED = 0,
# LLAMA_TOKEN_TYPE_NORMAL = 1,
# LLAMA_TOKEN_TYPE_UNKNOWN = 2,
# LLAMA_TOKEN_TYPE_CONTROL = 3,
# LLAMA_TOKEN_TYPE_USER_DEFINED = 4,
# LLAMA_TOKEN_TYPE_UNUSED = 5,
# LLAMA_TOKEN_TYPE_BYTE = 6,
# };
LLAMA_TOKEN_TYPE_UNDEFINED = 0
LLAMA_TOKEN_TYPE_NORMAL = 1
LLAMA_TOKEN_TYPE_UNKNOWN = 2
LLAMA_TOKEN_TYPE_CONTROL = 3
LLAMA_TOKEN_TYPE_USER_DEFINED = 4
LLAMA_TOKEN_TYPE_UNUSED = 5
LLAMA_TOKEN_TYPE_BYTE = 6
# enum llama_token_attr {
# LLAMA_TOKEN_ATTR_UNDEFINED = 0,
# LLAMA_TOKEN_ATTR_UNKNOWN = 1 << 0,
# LLAMA_TOKEN_ATTR_UNUSED = 1 << 1,
# LLAMA_TOKEN_ATTR_NORMAL = 1 << 2,
# LLAMA_TOKEN_ATTR_CONTROL = 1 << 3, // SPECIAL?
# LLAMA_TOKEN_ATTR_USER_DEFINED = 1 << 4,
# LLAMA_TOKEN_ATTR_BYTE = 1 << 5,
# LLAMA_TOKEN_ATTR_NORMALIZED = 1 << 6,
# LLAMA_TOKEN_ATTR_LSTRIP = 1 << 7,
# LLAMA_TOKEN_ATTR_RSTRIP = 1 << 8,
# LLAMA_TOKEN_ATTR_SINGLE_WORD = 1 << 9,
# };
LLAMA_TOKEN_ATTR_UNDEFINED = 0
LLAMA_TOKEN_ATTR_UNKNOWN = 1 << 0
LLAMA_TOKEN_ATTR_UNUSED = 1 << 1
LLAMA_TOKEN_ATTR_NORMAL = 1 << 2
LLAMA_TOKEN_ATTR_CONTROL = 1 << 3
LLAMA_TOKEN_ATTR_USER_DEFINED = 1 << 4
LLAMA_TOKEN_ATTR_BYTE = 1 << 5
LLAMA_TOKEN_ATTR_NORMALIZED = 1 << 6
LLAMA_TOKEN_ATTR_LSTRIP = 1 << 7
LLAMA_TOKEN_ATTR_RSTRIP = 1 << 8
LLAMA_TOKEN_ATTR_SINGLE_WORD = 1 << 9
# // model file types
# enum llama_ftype {
# LLAMA_FTYPE_ALL_F32 = 0,
# LLAMA_FTYPE_MOSTLY_F16 = 1, // except 1d tensors
# LLAMA_FTYPE_MOSTLY_Q4_0 = 2, // except 1d tensors
# LLAMA_FTYPE_MOSTLY_Q4_1 = 3, // except 1d tensors
# LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16
# // LLAMA_FTYPE_MOSTLY_Q4_2 = 5, // support has been removed
# // LLAMA_FTYPE_MOSTLY_Q4_3 = 6, // support has been removed
# LLAMA_FTYPE_MOSTLY_Q8_0 = 7, // except 1d tensors
# LLAMA_FTYPE_MOSTLY_Q5_0 = 8, // except 1d tensors
# LLAMA_FTYPE_MOSTLY_Q5_1 = 9, // except 1d tensors
# LLAMA_FTYPE_MOSTLY_Q2_K = 10, // except 1d tensors
# LLAMA_FTYPE_MOSTLY_Q3_K_S = 11, // except 1d tensors
# LLAMA_FTYPE_MOSTLY_Q3_K_M = 12, // except 1d tensors
# LLAMA_FTYPE_MOSTLY_Q3_K_L = 13, // except 1d tensors
# LLAMA_FTYPE_MOSTLY_Q4_K_S = 14, // except 1d tensors
# LLAMA_FTYPE_MOSTLY_Q4_K_M = 15, // except 1d tensors
# LLAMA_FTYPE_MOSTLY_Q5_K_S = 16, // except 1d tensors
# LLAMA_FTYPE_MOSTLY_Q5_K_M = 17, // except 1d tensors
# LLAMA_FTYPE_MOSTLY_Q6_K = 18, // except 1d tensors
# LLAMA_FTYPE_MOSTLY_IQ2_XXS = 19, // except 1d tensors
# LLAMA_FTYPE_MOSTLY_IQ2_XS = 20, // except 1d tensors
# LLAMA_FTYPE_MOSTLY_Q2_K_S = 21, // except 1d tensors
# LLAMA_FTYPE_MOSTLY_IQ3_XS = 22, // except 1d tensors
# LLAMA_FTYPE_MOSTLY_IQ3_XXS = 23, // except 1d tensors
# LLAMA_FTYPE_MOSTLY_IQ1_S = 24, // except 1d tensors
# LLAMA_FTYPE_MOSTLY_IQ4_NL = 25, // except 1d tensors
# LLAMA_FTYPE_MOSTLY_IQ3_S = 26, // except 1d tensors
# LLAMA_FTYPE_MOSTLY_IQ3_M = 27, // except 1d tensors
# LLAMA_FTYPE_MOSTLY_IQ2_S = 28, // except 1d tensors
# LLAMA_FTYPE_MOSTLY_IQ2_M = 29, // except 1d tensors
# LLAMA_FTYPE_MOSTLY_IQ4_XS = 30, // except 1d tensors
# LLAMA_FTYPE_MOSTLY_IQ1_M = 31, // except 1d tensors
# LLAMA_FTYPE_MOSTLY_BF16 = 32, // except 1d tensors
#
# LLAMA_FTYPE_GUESSED = 1024, // not specified in the model file
# };
LLAMA_FTYPE_ALL_F32 = 0
LLAMA_FTYPE_MOSTLY_F16 = 1
LLAMA_FTYPE_MOSTLY_Q4_0 = 2
LLAMA_FTYPE_MOSTLY_Q4_1 = 3
LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4
LLAMA_FTYPE_MOSTLY_Q8_0 = 7
LLAMA_FTYPE_MOSTLY_Q5_0 = 8
LLAMA_FTYPE_MOSTLY_Q5_1 = 9
LLAMA_FTYPE_MOSTLY_Q2_K = 10
LLAMA_FTYPE_MOSTLY_Q3_K_S = 11
LLAMA_FTYPE_MOSTLY_Q3_K_M = 12
LLAMA_FTYPE_MOSTLY_Q3_K_L = 13
LLAMA_FTYPE_MOSTLY_Q4_K_S = 14
LLAMA_FTYPE_MOSTLY_Q4_K_M = 15
LLAMA_FTYPE_MOSTLY_Q5_K_S = 16
LLAMA_FTYPE_MOSTLY_Q5_K_M = 17
LLAMA_FTYPE_MOSTLY_Q6_K = 18
LLAMA_FTYPE_MOSTLY_IQ2_XXS = 19
LLAMA_FTYPE_MOSTLY_IQ2_XS = 20
LLAMA_FTYPE_MOSTLY_Q2_K_S = 21
LLAMA_FTYPE_MOSTLY_IQ3_XS = 22
LLAMA_FTYPE_MOSTLY_IQ3_XXS = 23
LLAMA_FTYPE_MOSTLY_IQ1_S = 24
LLAMA_FTYPE_MOSTLY_IQ4_NL = 25
LLAMA_FTYPE_MOSTLY_IQ3_S = 26
LLAMA_FTYPE_MOSTLY_IQ3_M = 27
LLAMA_FTYPE_MOSTLY_IQ2_S = 28
LLAMA_FTYPE_MOSTLY_IQ2_M = 29
LLAMA_FTYPE_MOSTLY_IQ4_XS = 30
LLAMA_FTYPE_MOSTLY_IQ1_M = 31
LLAMA_FTYPE_MOSTLY_BF16 = 32
LLAMA_FTYPE_GUESSED = 1024


# enum llama_rope_scaling_type {
# LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED = -1,
# LLAMA_ROPE_SCALING_TYPE_NONE = 0,
# LLAMA_ROPE_SCALING_TYPE_LINEAR = 1,
# LLAMA_ROPE_SCALING_TYPE_YARN = 2,
# LLAMA_ROPE_SCALING_TYPE_MAX_VALUE = LLAMA_ROPE_SCALING_TYPE_YARN,
# };
LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED = -1
LLAMA_ROPE_SCALING_TYPE_NONE = 0
LLAMA_ROPE_SCALING_TYPE_LINEAR = 1
LLAMA_ROPE_SCALING_TYPE_YARN = 2
LLAMA_ROPE_SCALING_TYPE_MAX_VALUE = LLAMA_ROPE_SCALING_TYPE_YARN


# enum llama_pooling_type {
# LLAMA_POOLING_TYPE_UNSPECIFIED = -1,
# LLAMA_POOLING_TYPE_NONE = 0,
# LLAMA_POOLING_TYPE_MEAN = 1,
# LLAMA_POOLING_TYPE_CLS = 2,
# };
LLAMA_POOLING_TYPE_UNSPECIFIED = -1
LLAMA_POOLING_TYPE_NONE = 0
LLAMA_POOLING_TYPE_MEAN = 1
LLAMA_POOLING_TYPE_CLS = 2

# enum llama_split_mode {
# LLAMA_SPLIT_MODE_NONE = 0, // single GPU
# LLAMA_SPLIT_MODE_LAYER = 1, // split layers and KV across GPUs
# LLAMA_SPLIT_MODE_ROW = 2, // split rows across GPUs
# };
LLAMA_SPLIT_MODE_NONE = 0
LLAMA_SPLIT_MODE_LAYER = 1
LLAMA_SPLIT_MODE_ROW = 2


# typedef struct llama_token_data {
# llama_token id; // token id
# float logit; // log-odds of the token
# float p; // probability of the token
# } llama_token_data;
class llama_token_data(ctypes.Structure):
    """Used to store token data

    Attributes:
        id (llama_token): token id
        logit (float): log-odds of the token
        p (float): probability of the token"""

    if TYPE_CHECKING:
        id: llama_token
        logit: float
        p: float

    _fields_ = [
        ("id", llama_token),
        ("logit", ctypes.c_float),
        ("p", ctypes.c_float),
    ]


llama_token_data_p = ctypes.POINTER(llama_token_data)


# typedef struct llama_token_data_array {
# llama_token_data * data;
# size_t size;
# bool sorted;
# } llama_token_data_array;
class llama_token_data_array(ctypes.Structure):
    """Used to sample tokens given logits

    Attributes:
        data (ctypes.Array[llama_token_data]): token data
        size (int): size of the array
        sorted (bool): whether the array is sorted"""

    if TYPE_CHECKING:
        data: CtypesArray[llama_token_data]
        size: int
        sorted: bool

    _fields_ = [
        ("data", llama_token_data_p),
        ("size", ctypes.c_size_t),
        ("sorted", ctypes.c_bool),
    ]


llama_token_data_array_p = ctypes.POINTER(llama_token_data_array)

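# A minimal sketch (not part of llama.h) of how a llama_token_data_array is
# typically assembled from raw logits before it is handed to the sampling
# functions; "n_vocab" and "logits" are assumed to come from the caller:
#
#   candidates = (llama_token_data * n_vocab)(
#       *[llama_token_data(i, logits[i], 0.0) for i in range(n_vocab)]
#   )
#   candidates_p = llama_token_data_array(
#       ctypes.cast(candidates, llama_token_data_p), len(candidates), False
#   )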
# typedef bool (*llama_progress_callback)(float progress, void * user_data);
llama_progress_callback = ctypes.CFUNCTYPE(
    ctypes.c_bool, ctypes.c_float, ctypes.c_void_p
)
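# A minimal sketch (an assumption, not part of llama.h) of wrapping a Python
# function as a llama_progress_callback; the CFUNCTYPE object must be kept alive
# for as long as the C side may call it:
#
#   def _print_progress(progress: float, user_data) -> bool:
#       print(f"loaded {progress * 100:.1f}%")
#       return True  # returning False aborts model loading
#
#   progress_cb = llama_progress_callback(_print_progress)
#   # model_params.progress_callback = progress_cb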
2023-09-29 02:42:03 +00:00
# // Input data for llama_decode
# // A llama_batch object can contain input about one or many sequences
# // The provided arrays (i.e. token, embd, pos, etc.) must have size of n_tokens
# //
# // - token : the token ids of the input (used when embd is NULL)
# // - embd : token embeddings (i.e. float vector of size n_embd) (used when token is NULL)
# // - pos : the positions of the respective token in the sequence
# // - seq_id : the sequence to which the respective token belongs
2024-03-06 06:32:00 +00:00
# // - logits : if zero, the logits (and/or the embeddings) for the respective token will not be output
2023-09-29 02:42:03 +00:00
# //
# typedef struct llama_batch {
# int32_t n_tokens;
2023-10-19 06:55:08 +00:00
# llama_token * token;
# float * embd;
# llama_pos * pos;
2023-11-01 01:29:35 +00:00
# int32_t * n_seq_id;
2023-10-19 06:55:08 +00:00
# llama_seq_id ** seq_id;
2024-03-06 06:32:00 +00:00
# int8_t * logits; // TODO: rename this to "output"
2023-09-29 02:42:03 +00:00
# // NOTE: helpers for smooth API transition - can be deprecated in the future
# // for future-proof code, use the above fields instead and ignore everything below
# //
# // pos[i] = all_pos_0 + i*all_pos_1
# //
# llama_pos all_pos_0; // used if pos == NULL
# llama_pos all_pos_1; // used if pos == NULL
# llama_seq_id all_seq_id; // used if seq_id == NULL
# } llama_batch;
2024-02-21 21:25:38 +00:00
class llama_batch ( ctypes . Structure ) :
2023-11-23 05:26:26 +00:00
""" Input data for llama_decode
2023-11-23 21:26:00 +00:00
2023-11-23 05:26:26 +00:00
A llama_batch object can contain input about one or many sequences
2023-11-23 21:26:00 +00:00
2023-11-23 05:26:26 +00:00
The provided arrays ( i . e . token , embd , pos , etc . ) must have size of n_tokens
2023-11-23 21:26:00 +00:00
2023-11-23 05:26:26 +00:00
Attributes :
2024-03-09 01:58:50 +00:00
n_tokens ( int ) : number of tokens
2023-11-23 05:26:26 +00:00
token ( ctypes . Array [ llama_token ] ) : the token ids of the input ( used when embd is NULL )
2024-02-21 21:25:38 +00:00
embd ( ctypes . Array [ ctypes . ctypes . c_float ] ) : token embeddings ( i . e . float vector of size n_embd ) ( used when token is NULL )
2023-11-23 05:26:26 +00:00
pos ( ctypes . Array [ ctypes . Array [ llama_pos ] ] ) : the positions of the respective token in the sequence
2024-01-08 19:51:29 +00:00
seq_id ( ctypes . Array [ ctypes . Array [ llama_seq_id ] ] ) : the sequence to which the respective token belongs
2024-03-09 01:58:50 +00:00
logits ( ctypes . Array [ ctypes . ctypes . c_int8 ] ) : if zero , the logits for the respective token will not be output
2024-01-08 19:51:29 +00:00
"""
2023-11-23 05:26:26 +00:00
2024-04-10 06:40:41 +00:00
if TYPE_CHECKING :
n_tokens : int
token : CtypesArray [ llama_token ]
embd : CtypesArray [ ctypes . c_float ]
pos : CtypesArray [ CtypesArray [ llama_pos ] ]
n_seq_id : CtypesArray [ ctypes . c_int ]
seq_id : CtypesArray [ CtypesArray [ llama_seq_id ] ]
logits : CtypesArray [ ctypes . c_int8 ]
2023-09-29 02:42:03 +00:00
_fields_ = [
2024-02-21 21:25:38 +00:00
( " n_tokens " , ctypes . c_int32 ) ,
( " token " , ctypes . POINTER ( llama_token ) ) ,
2024-02-22 07:00:09 +00:00
( " embd " , ctypes . POINTER ( ctypes . c_float ) ) ,
2024-02-21 21:25:38 +00:00
( " pos " , ctypes . POINTER ( llama_pos ) ) ,
( " n_seq_id " , ctypes . POINTER ( ctypes . c_int32 ) ) ,
( " seq_id " , ctypes . POINTER ( ctypes . POINTER ( llama_seq_id ) ) ) ,
( " logits " , ctypes . POINTER ( ctypes . c_int8 ) ) ,
2023-09-29 02:42:03 +00:00
( " all_pos_0 " , llama_pos ) ,
( " all_pos_1 " , llama_pos ) ,
( " all_seq_id " , llama_seq_id ) ,
]
2023-07-15 19:11:01 +00:00
2024-01-08 19:51:29 +00:00
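# A minimal sketch (an assumption; the llama_batch_init helper from llama.h is the
# usual way to build batches) of filling the struct by hand using the legacy
# all_pos_0/all_pos_1/all_seq_id helpers described above:
#
#   tokens = [1, 2, 3]
#   _arr = (llama_token * len(tokens))(*tokens)
#   batch = llama_batch(
#       n_tokens=len(tokens),
#       token=ctypes.cast(_arr, ctypes.POINTER(llama_token)),
#       all_pos_0=0,
#       all_pos_1=1,
#       all_seq_id=0,
#   )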

# enum llama_model_kv_override_type {
# LLAMA_KV_OVERRIDE_TYPE_INT,
# LLAMA_KV_OVERRIDE_TYPE_FLOAT,
# LLAMA_KV_OVERRIDE_TYPE_BOOL,
# LLAMA_KV_OVERRIDE_TYPE_STR,
# };
LLAMA_KV_OVERRIDE_TYPE_INT = 0
LLAMA_KV_OVERRIDE_TYPE_FLOAT = 1
LLAMA_KV_OVERRIDE_TYPE_BOOL = 2
LLAMA_KV_OVERRIDE_TYPE_STR = 3

# struct llama_model_kv_override {
# enum llama_model_kv_override_type tag;
# char key[128];
# union {
# int64_t val_i64;
# double val_f64;
# bool val_bool;
# char val_str[128];
# };
# };
class llama_model_kv_override_value(ctypes.Union):
    _fields_ = [
        ("val_i64", ctypes.c_int64),
        ("val_f64", ctypes.c_double),
        ("val_bool", ctypes.c_bool),
        ("val_str", ctypes.c_char * 128),
    ]

    if TYPE_CHECKING:
        val_i64: int
        val_f64: float
        val_bool: bool
        val_str: bytes


class llama_model_kv_override(ctypes.Structure):
    _fields_ = [
        ("tag", ctypes.c_int),
        ("key", ctypes.c_char * 128),
        ("value", llama_model_kv_override_value),
    ]

    if TYPE_CHECKING:
        tag: int
        key: bytes
        value: Union[int, float, bool, bytes]

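# A minimal sketch (assuming, as llama.cpp does, that the override array is
# terminated by an entry with an empty key) of building a kv_overrides array to
# pass through llama_model_params.kv_overrides:
#
#   overrides = (llama_model_kv_override * 2)()
#   overrides[0].tag = LLAMA_KV_OVERRIDE_TYPE_INT
#   overrides[0].key = b"some.integer.key"
#   overrides[0].value.val_i64 = 42
#   # overrides[1] is left zeroed and acts as the terminator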
# struct llama_model_params {
# int32_t n_gpu_layers; // number of layers to store in VRAM
# enum llama_split_mode split_mode; // how to split the model across multiple GPUs
# // main_gpu interpretation depends on split_mode:
# // LLAMA_SPLIT_NONE: the GPU that is used for the entire model
# // LLAMA_SPLIT_ROW: the GPU that is used for small tensors and intermediate results
# // LLAMA_SPLIT_LAYER: ignored
# int32_t main_gpu;

# // proportion of the model (layers or rows) to offload to each GPU, size: llama_max_devices()
# const float * tensor_split;

# // comma separated list of RPC servers to use for offloading
# const char * rpc_servers;

# // Called with a progress value between 0.0 and 1.0. Pass NULL to disable.
# // If the provided progress_callback returns true, model loading continues.
# // If it returns false, model loading is immediately aborted.
# llama_progress_callback progress_callback;

# // context pointer passed to the progress callback
# void * progress_callback_user_data;

# // override key-value pairs of the model meta data
# const struct llama_model_kv_override * kv_overrides;

# // Keep the booleans together to avoid misalignment during copy-by-value.
# bool vocab_only; // only load the vocabulary, no weights
# bool use_mmap; // use mmap if possible
# bool use_mlock; // force system to keep model in RAM
# bool check_tensors; // validate model tensor data
# };
class llama_model_params(ctypes.Structure):
    """Parameters for llama_model

    Attributes:
        n_gpu_layers (int): number of layers to store in VRAM
        split_mode (int): how to split the model across multiple GPUs
        main_gpu (int): the GPU that is used for the entire model. main_gpu interpretation depends on split_mode: LLAMA_SPLIT_NONE: the GPU that is used for the entire model LLAMA_SPLIT_ROW: the GPU that is used for small tensors and intermediate results LLAMA_SPLIT_LAYER: ignored
        tensor_split (ctypes.Array[ctypes.c_float]): proportion of the model (layers or rows) to offload to each GPU, size: llama_max_devices()
        rpc_servers (ctypes.c_char_p): comma separated list of RPC servers to use for offloading
        progress_callback (llama_progress_callback): called with a progress value between 0.0 and 1.0. Pass NULL to disable. If the provided progress_callback returns true, model loading continues. If it returns false, model loading is immediately aborted.
        progress_callback_user_data (ctypes.c_void_p): context pointer passed to the progress callback
        kv_overrides (ctypes.Array[llama_model_kv_override]): override key-value pairs of the model meta data
        vocab_only (bool): only load the vocabulary, no weights
        use_mmap (bool): use mmap if possible
        use_mlock (bool): force system to keep model in RAM
        check_tensors (bool): validate model tensor data"""

    if TYPE_CHECKING:
        n_gpu_layers: int
        split_mode: int
        main_gpu: int
        tensor_split: CtypesArray[ctypes.c_float]
        rpc_servers: ctypes.c_char_p
        progress_callback: Callable[[float, ctypes.c_void_p], bool]
        progress_callback_user_data: ctypes.c_void_p
        kv_overrides: CtypesArray[llama_model_kv_override]
        vocab_only: bool
        use_mmap: bool
        use_mlock: bool
        check_tensors: bool

    _fields_ = [
        ("n_gpu_layers", ctypes.c_int32),
        ("split_mode", ctypes.c_int),
        ("main_gpu", ctypes.c_int32),
        ("tensor_split", ctypes.POINTER(ctypes.c_float)),
        ("rpc_servers", ctypes.c_char_p),
        ("progress_callback", llama_progress_callback),
        ("progress_callback_user_data", ctypes.c_void_p),
        ("kv_overrides", ctypes.POINTER(llama_model_kv_override)),
        ("vocab_only", ctypes.c_bool),
        ("use_mmap", ctypes.c_bool),
        ("use_mlock", ctypes.c_bool),
        ("check_tensors", ctypes.c_bool),
    ]


# // NOTE: changing the default values of parameters marked as [EXPERIMENTAL] may cause crashes or incorrect results in certain configurations
# // https://github.com/ggerganov/llama.cpp/pull/7544
# struct llama_context_params {
# uint32_t seed; // RNG seed, -1 for random
# uint32_t n_ctx; // text context, 0 = from model
# uint32_t n_batch; // logical maximum batch size that can be submitted to llama_decode
# uint32_t n_ubatch; // physical maximum batch size
# uint32_t n_seq_max; // max number of sequences (i.e. distinct states for recurrent models)
# uint32_t n_threads; // number of threads to use for generation
# uint32_t n_threads_batch; // number of threads to use for batch processing
# enum llama_rope_scaling_type rope_scaling_type; // RoPE scaling type, from `enum llama_rope_scaling_type`
# enum llama_pooling_type pooling_type; // whether to pool (sum) embedding results by sequence id
# // (ignored if no pooling layer)
# // ref: https://github.com/ggerganov/llama.cpp/pull/2054
# float rope_freq_base; // RoPE base frequency, 0 = from model
# float rope_freq_scale; // RoPE frequency scaling factor, 0 = from model
# float yarn_ext_factor; // YaRN extrapolation mix factor, negative = from model
# float yarn_attn_factor; // YaRN magnitude scaling factor
# float yarn_beta_fast; // YaRN low correction dim
# float yarn_beta_slow; // YaRN high correction dim
# uint32_t yarn_orig_ctx; // YaRN original context size
# float defrag_thold; // defragment the KV cache if holes/size > thold, < 0 disabled (default)

# ggml_backend_sched_eval_callback cb_eval;
# void * cb_eval_user_data;

# enum ggml_type type_k; // data type for K cache [EXPERIMENTAL]
# enum ggml_type type_v; // data type for V cache [EXPERIMENTAL]

# // Keep the booleans together to avoid misalignment during copy-by-value.
# bool logits_all; // the llama_decode() call computes all logits, not just the last one (DEPRECATED - set llama_batch.logits instead)
# bool embeddings; // if true, extract embeddings (together with logits)
# bool offload_kqv; // whether to offload the KQV ops (including the KV cache) to GPU
# bool flash_attn; // whether to use flash attention [EXPERIMENTAL]

# // Abort callback
# // if it returns true, execution of llama_decode() will be aborted
# // currently works only with CPU execution
# ggml_abort_callback abort_callback;
# void * abort_callback_data;
# };
class llama_context_params(ctypes.Structure):
    """Parameters for llama_context

    Attributes:
        seed (int): RNG seed, -1 for random
        n_ctx (int): text context, 0 = from model
        n_batch (int): logical maximum batch size that can be submitted to llama_decode
        n_ubatch (int): physical maximum batch size
        n_seq_max (int): max number of sequences (i.e. distinct states for recurrent models)
        n_threads (int): number of threads to use for generation
        n_threads_batch (int): number of threads to use for batch processing
        rope_scaling_type (int): RoPE scaling type, from `enum llama_rope_scaling_type`
        pooling_type (int): whether to pool (sum) embedding results by sequence id (ignored if no pooling layer)
        rope_freq_base (float): RoPE base frequency, 0 = from model
        rope_freq_scale (float): RoPE frequency scaling factor, 0 = from model
        yarn_ext_factor (float): YaRN extrapolation mix factor, negative = from model
        yarn_attn_factor (float): YaRN magnitude scaling factor
        yarn_beta_fast (float): YaRN low correction dim
        yarn_beta_slow (float): YaRN high correction dim
        yarn_orig_ctx (int): YaRN original context size
        defrag_thold (float): defragment the KV cache if holes/size > thold, < 0 disabled (default)
        cb_eval (ggml_backend_sched_eval_callback): callback for scheduling eval
        cb_eval_user_data (ctypes.c_void_p): user data for cb_eval
        type_k (int): data type for K cache
        type_v (int): data type for V cache
        logits_all (bool): the llama_decode() call computes all logits, not just the last one (DEPRECATED - set llama_batch.logits instead)
        embeddings (bool): if true, extract embeddings (together with logits)
        offload_kqv (bool): whether to offload the KQV ops (including the KV cache) to GPU
        flash_attn (bool): whether to use flash attention
        abort_callback (ggml_abort_callback): abort callback if it returns true, execution of llama_decode() will be aborted
        abort_callback_data (ctypes.c_void_p): data for abort_callback
    """

    if TYPE_CHECKING:
        seed: int
        n_ctx: int
        n_batch: int
        n_ubatch: int
        n_seq_max: int
        n_threads: int
        n_threads_batch: int
        rope_scaling_type: int
        pooling_type: int
        rope_freq_base: float
        rope_freq_scale: float
        yarn_ext_factor: float
        yarn_attn_factor: float
        yarn_beta_fast: float
        yarn_beta_slow: float
        yarn_orig_ctx: int
        defrag_thold: float
        cb_eval: Callable[[ctypes.c_void_p, bool], bool]
        cb_eval_user_data: ctypes.c_void_p
        type_k: int
        type_v: int
        logits_all: bool
        embeddings: bool
        offload_kqv: bool
        flash_attn: bool
        abort_callback: Callable[[ctypes.c_void_p], bool]
        abort_callback_data: ctypes.c_void_p

    _fields_ = [
        ("seed", ctypes.c_uint32),
        ("n_ctx", ctypes.c_uint32),
        ("n_batch", ctypes.c_uint32),
        ("n_ubatch", ctypes.c_uint32),
        ("n_seq_max", ctypes.c_uint32),
        ("n_threads", ctypes.c_uint32),
        ("n_threads_batch", ctypes.c_uint32),
        ("rope_scaling_type", ctypes.c_int),
        ("pooling_type", ctypes.c_int),
        ("rope_freq_base", ctypes.c_float),
        ("rope_freq_scale", ctypes.c_float),
        ("yarn_ext_factor", ctypes.c_float),
        ("yarn_attn_factor", ctypes.c_float),
        ("yarn_beta_fast", ctypes.c_float),
        ("yarn_beta_slow", ctypes.c_float),
        ("yarn_orig_ctx", ctypes.c_uint32),
        ("defrag_thold", ctypes.c_float),
        ("cb_eval", ggml_backend_sched_eval_callback),
        ("cb_eval_user_data", ctypes.c_void_p),
        ("type_k", ctypes.c_int),
        ("type_v", ctypes.c_int),
        ("logits_all", ctypes.c_bool),
        ("embeddings", ctypes.c_bool),
        ("offload_kqv", ctypes.c_bool),
        ("flash_attn", ctypes.c_bool),
        ("abort_callback", ggml_abort_callback),
        ("abort_callback_data", ctypes.c_void_p),
    ]


# // Signature for logging events
# // Note that text includes the new line character at the end for most events.
# // If your logging mechanism cannot handle that, check if the last character is '\n' and strip it
# // if it exists.
# // It might not exist for progress report where '.' is output repeatedly.
# typedef void (*llama_log_callback)(enum llama_log_level level, const char * text, void * user_data);
llama_log_callback = ctypes.CFUNCTYPE(
    None, ctypes.c_int, ctypes.c_char_p, ctypes.c_void_p
)
"""Signature for logging events
Note that text includes the new line character at the end for most events.
If your logging mechanism cannot handle that, check if the last character is '\n' and strip it
if it exists.
It might not exist for progress report where '.' is output repeatedly."""

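# A minimal sketch (an assumption; registering the callback is done with
# llama_log_set, which is bound elsewhere in these bindings) of a Python-side
# log handler wrapped as a llama_log_callback:
#
#   def _log(level: int, text: bytes, user_data) -> None:
#       print(text.decode("utf-8"), end="")  # text usually already ends with '\n'
#
#   log_cb = llama_log_callback(_log)  # keep a reference alive while registered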
# // model quantization parameters
# typedef struct llama_model_quantize_params {
# int32_t nthread; // number of threads to use for quantizing, if <=0 will use std::thread::hardware_concurrency()
# enum llama_ftype ftype; // quantize to this llama_ftype
# enum ggml_type output_tensor_type; // output tensor type
# enum ggml_type token_embedding_type; // itoken embeddings tensor type
# bool allow_requantize; // allow quantizing non-f32/f16 tensors
# bool quantize_output_tensor; // quantize output.weight
# bool only_copy; // only copy tensors - ftype, allow_requantize and quantize_output_tensor are ignored
# bool pure; // quantize all tensors to the default type
# bool keep_split; // quantize to the same number of shards
# void * imatrix; // pointer to importance matrix data
# void * kv_overrides; // pointer to vector containing overrides
# } llama_model_quantize_params;
class llama_model_quantize_params(ctypes.Structure):
    """Parameters for llama_model_quantize

    Attributes:
        nthread (int): number of threads to use for quantizing, if <=0 will use std::thread::hardware_concurrency()
        ftype (int): quantize to this llama_ftype
        output_tensor_type (int): output tensor type
        token_embedding_type (int): token embeddings tensor type
        allow_requantize (bool): allow quantizing non-f32/f16 tensors
        quantize_output_tensor (bool): quantize output.weight
        only_copy (bool): only copy tensors - ftype, allow_requantize and quantize_output_tensor are ignored
        pure (bool): quantize all tensors to the default type
        keep_split (bool): quantize to the same number of shards
        imatrix (ctypes.c_void_p): pointer to importance matrix data
        kv_overrides (ctypes.c_void_p): pointer to vector containing overrides
    """

    if TYPE_CHECKING:
        nthread: int
        ftype: int
        output_tensor_type: int
        token_embedding_type: int
        allow_requantize: bool
        quantize_output_tensor: bool
        only_copy: bool
        pure: bool
        keep_split: bool
        imatrix: ctypes.c_void_p
        kv_overrides: ctypes.c_void_p

    _fields_ = [
        ("nthread", ctypes.c_int32),
        ("ftype", ctypes.c_int),
        ("output_tensor_type", ctypes.c_int),
        ("token_embedding_type", ctypes.c_int),
        ("allow_requantize", ctypes.c_bool),
        ("quantize_output_tensor", ctypes.c_bool),
        ("only_copy", ctypes.c_bool),
        ("pure", ctypes.c_bool),
        ("keep_split", ctypes.c_bool),
        ("imatrix", ctypes.c_void_p),
        ("kv_overrides", ctypes.c_void_p),
    ]


# // grammar types
# struct llama_grammar;
llama_grammar_p = ctypes.c_void_p

# // grammar element type
# enum llama_gretype {
# // end of rule definition
# LLAMA_GRETYPE_END = 0,
# // start of alternate definition for rule
# LLAMA_GRETYPE_ALT = 1,
# // non-terminal element: reference to rule
# LLAMA_GRETYPE_RULE_REF = 2,
# // terminal element: character (code point)
# LLAMA_GRETYPE_CHAR = 3,
# // inverse char(s) ([^a], [^a-b] [^abc])
# LLAMA_GRETYPE_CHAR_NOT = 4,
# // modifies a preceding LLAMA_GRETYPE_CHAR or LLAMA_GRETYPE_CHAR_ALT to
# // be an inclusive range ([a-z])
# LLAMA_GRETYPE_CHAR_RNG_UPPER = 5,
# // modifies a preceding LLAMA_GRETYPE_CHAR or
# // LLAMA_GRETYPE_CHAR_RNG_UPPER to add an alternate char to match ([ab], [a-zA])
# LLAMA_GRETYPE_CHAR_ALT = 6,
# // any character (.)
# LLAMA_GRETYPE_CHAR_ANY = 7,
# };
LLAMA_GRETYPE_END = 0
LLAMA_GRETYPE_ALT = 1
LLAMA_GRETYPE_RULE_REF = 2
LLAMA_GRETYPE_CHAR = 3
LLAMA_GRETYPE_CHAR_NOT = 4
LLAMA_GRETYPE_CHAR_RNG_UPPER = 5
LLAMA_GRETYPE_CHAR_ALT = 6
LLAMA_GRETYPE_CHAR_ANY = 7

# typedef struct llama_grammar_element {
# enum llama_gretype type;
# uint32_t value; // Unicode code point or rule ID
# } llama_grammar_element;
class llama_grammar_element(ctypes.Structure):
    if TYPE_CHECKING:
        type: int
        value: int

    _fields_ = [
        ("type", ctypes.c_int),
        ("value", ctypes.c_uint32),
    ]


llama_grammar_element_p = ctypes.POINTER(llama_grammar_element)


# // performance timing information
# struct llama_timings {
# double t_start_ms;
# double t_end_ms;
# double t_load_ms;
# double t_sample_ms;
# double t_p_eval_ms;
# double t_eval_ms;
# int32_t n_sample;
# int32_t n_p_eval;
# int32_t n_eval;
# };
class llama_timings(ctypes.Structure):
    if TYPE_CHECKING:
        t_start_ms: float
        t_end_ms: float
        t_load_ms: float
        t_sample_ms: float
        t_p_eval_ms: float
        t_eval_ms: float
        n_sample: int
        n_p_eval: int
        n_eval: int

    _fields_ = [
        ("t_start_ms", ctypes.c_double),
        ("t_end_ms", ctypes.c_double),
        ("t_load_ms", ctypes.c_double),
        ("t_sample_ms", ctypes.c_double),
        ("t_p_eval_ms", ctypes.c_double),
        ("t_eval_ms", ctypes.c_double),
        ("n_sample", ctypes.c_int32),
        ("n_p_eval", ctypes.c_int32),
        ("n_eval", ctypes.c_int32),
    ]


# // used in chat template
# typedef struct llama_chat_message {
# const char * role;
# const char * content;
# } llama_chat_message;
class llama_chat_message(ctypes.Structure):
    _fields_ = [
        ("role", ctypes.c_char_p),
        ("content", ctypes.c_char_p),
    ]

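# A minimal sketch (an assumption; llama_chat_apply_template, bound elsewhere in
# this file, consumes such an array) of building a llama_chat_message array:
#
#   messages = (llama_chat_message * 2)(
#       llama_chat_message(role=b"system", content=b"You are a helpful assistant."),
#       llama_chat_message(role=b"user", content=b"Hello!"),
#   )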
# // Helpers for getting default parameters
# LLAMA_API struct llama_model_params llama_model_default_params(void);
@ctypes_function(
    "llama_model_default_params",
    [],
    llama_model_params,
)
def llama_model_default_params() -> llama_model_params:
    """Get default parameters for llama_model"""
    ...


# LLAMA_API struct llama_context_params llama_context_default_params(void);
@ctypes_function(
    "llama_context_default_params",
    [],
    llama_context_params,
)
def llama_context_default_params() -> llama_context_params:
    """Get default parameters for llama_context"""
    ...


# LLAMA_API struct llama_model_quantize_params llama_model_quantize_default_params(void);
@ctypes_function(
    "llama_model_quantize_default_params",
    [],
    llama_model_quantize_params,
)
def llama_model_quantize_default_params() -> llama_model_quantize_params:
    """Get default parameters for llama_model_quantize"""
    ...


# // Initialize the llama + ggml backend
# // Call once at the start of the program
# LLAMA_API void llama_backend_init(void);
@ctypes_function(
    "llama_backend_init",
    [],
    None,
)
def llama_backend_init():
    """Initialize the llama + ggml backend
    Call once at the start of the program"""
    ...


# // numa strategies
# enum ggml_numa_strategy {
# GGML_NUMA_STRATEGY_DISABLED = 0,
# GGML_NUMA_STRATEGY_DISTRIBUTE = 1,
# GGML_NUMA_STRATEGY_ISOLATE = 2,
# GGML_NUMA_STRATEGY_NUMACTL = 3,
# GGML_NUMA_STRATEGY_MIRROR = 4,
# GGML_NUMA_STRATEGY_COUNT
# };
GGML_NUMA_STRATEGY_DISABLED = 0
GGML_NUMA_STRATEGY_DISTRIBUTE = 1
GGML_NUMA_STRATEGY_ISOLATE = 2
GGML_NUMA_STRATEGY_NUMACTL = 3
GGML_NUMA_STRATEGY_MIRROR = 4
GGML_NUMA_STRATEGY_COUNT = 5
# //optional:
# LLAMA_API void llama_numa_init(enum ggml_numa_strategy numa);
@ctypes_function(
    "llama_numa_init",
    [ctypes.c_int],
    None,
)
def llama_numa_init(numa: int, /): ...


# // Call once at the end of the program - currently only used for MPI
# LLAMA_API void llama_backend_free(void);
@ctypes_function(
    "llama_backend_free",
    [],
    None,
)
def llama_backend_free():
    """Call once at the end of the program - currently only used for MPI"""
    ...


# LLAMA_API struct llama_model * llama_load_model_from_file(
# const char * path_model,
# struct llama_model_params params);
@ctypes_function(
    "llama_load_model_from_file",
    [ctypes.c_char_p, llama_model_params],
    llama_model_p_ctypes,
)
def llama_load_model_from_file(
    path_model: bytes, params: llama_model_params, /
) -> Optional[llama_model_p]: ...


# LLAMA_API void llama_free_model(struct llama_model * model);
@ctypes_function(
    "llama_free_model",
    [llama_model_p_ctypes],
    None,
)
def llama_free_model(model: llama_model_p, /): ...


# LLAMA_API struct llama_context * llama_new_context_with_model(
# struct llama_model * model,
# struct llama_context_params params);
@ctypes_function(
    "llama_new_context_with_model",
    [llama_model_p_ctypes, llama_context_params],
    llama_context_p_ctypes,
)
def llama_new_context_with_model(
    model: llama_model_p, params: llama_context_params, /
) -> Optional[llama_context_p]: ...


# // Frees all allocated memory
# LLAMA_API void llama_free(struct llama_context * ctx);
@ctypes_function(
    "llama_free",
    [llama_context_p_ctypes],
    None,
)
def llama_free(ctx: llama_context_p, /):
    """Frees all allocated memory"""
    ...

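# A minimal end-to-end sketch (not executed here; the model path is a hypothetical
# placeholder) of loading a model, creating a context, and releasing both with the
# functions bound above:
#
#   model_params = llama_model_default_params()
#   ctx_params = llama_context_default_params()
#   ctx_params.n_ctx = 2048
#   model = llama_load_model_from_file(b"/path/to/model.gguf", model_params)
#   assert model is not None
#   ctx = llama_new_context_with_model(model, ctx_params)
#   assert ctx is not None
#   # ... run inference ...
#   llama_free(ctx)
#   llama_free_model(model)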
# LLAMA_API int64_t llama_time_us(void);
2024-02-23 08:39:38 +00:00
@ctypes_function (
" llama_time_us " ,
[ ] ,
ctypes . c_int64 ,
)
2024-04-28 03:41:54 +00:00
def llama_time_us ( ) - > int : . . .
2023-05-21 21:47:21 +00:00
2024-02-23 08:39:38 +00:00
# LLAMA_API size_t llama_max_devices(void);
@ctypes_function ( " llama_max_devices " , [ ] , ctypes . c_size_t )
2024-04-28 03:41:54 +00:00
def llama_max_devices ( ) - > int : . . .
2023-03-23 09:33:06 +00:00
2023-03-24 18:35:41 +00:00
2024-02-23 08:39:38 +00:00
# LLAMA_API bool llama_supports_mmap (void);
@ctypes_function ( " llama_supports_mmap " , [ ] , ctypes . c_bool )
2024-04-28 03:41:54 +00:00
def llama_supports_mmap ( ) - > bool : . . .
2024-01-31 15:41:42 +00:00
2024-02-23 08:39:38 +00:00
# LLAMA_API bool llama_supports_mlock (void);
@ctypes_function ( " llama_supports_mlock " , [ ] , ctypes . c_bool )
2024-04-28 03:41:54 +00:00
def llama_supports_mlock ( ) - > bool : . . .
2024-01-31 15:41:42 +00:00
2024-02-23 08:39:38 +00:00
# LLAMA_API bool llama_supports_gpu_offload(void);
@ctypes_function ( " llama_supports_gpu_offload " , [ ] , ctypes . c_bool )
2024-04-28 03:41:54 +00:00
def llama_supports_gpu_offload ( ) - > bool : . . .
2024-01-31 15:41:42 +00:00
2024-02-23 08:39:38 +00:00
# LLAMA_API const struct llama_model * llama_get_model(const struct llama_context * ctx);
@ctypes_function ( " llama_get_model " , [ llama_context_p_ctypes ] , llama_model_p_ctypes )
2024-04-28 03:41:54 +00:00
def llama_get_model ( ctx : llama_context_p , / ) - > Optional [ llama_model_p ] : . . .
2023-08-24 04:17:00 +00:00
2024-02-23 08:39:38 +00:00
# LLAMA_API uint32_t llama_n_ctx (const struct llama_context * ctx);
@ctypes_function ( " llama_n_ctx " , [ llama_context_p_ctypes ] , ctypes . c_uint32 )
2024-04-28 03:41:54 +00:00
def llama_n_ctx ( ctx : llama_context_p , / ) - > int : . . .
2023-08-24 04:17:00 +00:00
2024-02-23 08:39:38 +00:00
# LLAMA_API uint32_t llama_n_batch (const struct llama_context * ctx);
@ctypes_function ( " llama_n_batch " , [ llama_context_p_ctypes ] , ctypes . c_uint32 )
2024-04-28 03:41:54 +00:00
def llama_n_batch ( ctx : llama_context_p , / ) - > int : . . .
2024-03-13 19:57:35 +00:00
# LLAMA_API uint32_t llama_n_ubatch (const struct llama_context * ctx);
@ctypes_function ( " llama_n_ubatch " , [ llama_context_p_ctypes ] , ctypes . c_uint32 )
2024-04-28 03:41:54 +00:00
def llama_n_ubatch ( ctx : llama_context_p , / ) - > int : . . .
2024-03-13 19:57:35 +00:00
# LLAMA_API uint32_t llama_n_seq_max (const struct llama_context * ctx);
@ctypes_function ( " llama_n_seq_max " , [ llama_context_p_ctypes ] , ctypes . c_uint32 )
2024-04-28 03:41:54 +00:00
def llama_n_seq_max ( ctx : llama_context_p , / ) - > int : . . .
2023-12-22 05:12:37 +00:00
2024-01-08 19:51:29 +00:00
2024-04-25 06:48:26 +00:00
# LLAMA_API enum llama_pooling_type llama_pooling_type(const struct llama_context * ctx);
@ctypes_function ( " llama_pooling_type " , [ llama_context_p_ctypes ] , ctypes . c_int )
2024-04-28 03:41:54 +00:00
def llama_pooling_type ( ctx : llama_context_p , / ) - > int : . . .
2024-04-25 06:48:26 +00:00
2024-06-04 04:35:47 +00:00
# LLAMA_API enum llama_vocab_type llama_vocab_type (const struct llama_model * model);
2024-02-23 08:39:38 +00:00
@ctypes_function ( " llama_vocab_type " , [ llama_model_p_ctypes ] , ctypes . c_int )
2024-04-28 03:41:54 +00:00
def llama_vocab_type ( model : llama_model_p , / ) - > int : . . .
2023-09-09 16:12:32 +00:00
2023-08-24 04:17:00 +00:00
2024-06-04 04:35:47 +00:00
# LLAMA_API enum llama_rope_type llama_rope_type (const struct llama_model * model);
2024-02-26 01:52:14 +00:00
@ctypes_function ( " llama_rope_type " , [ llama_model_p_ctypes ] , ctypes . c_int )
2024-04-28 03:41:54 +00:00
def llama_rope_type ( model : llama_model_p , / ) - > int : . . .
2023-08-24 22:01:42 +00:00
2024-02-26 01:52:14 +00:00
# LLAMA_API int32_t llama_n_vocab (const struct llama_model * model);
2024-02-23 08:39:38 +00:00
@ctypes_function ( " llama_n_vocab " , [ llama_model_p_ctypes ] , ctypes . c_int32 )
2024-04-28 03:41:54 +00:00
def llama_n_vocab ( model : llama_model_p , / ) - > int : . . .
2023-08-24 04:17:00 +00:00
2024-02-23 08:39:38 +00:00
# LLAMA_API int32_t llama_n_ctx_train(const struct llama_model * model);
@ctypes_function ( " llama_n_ctx_train " , [ llama_model_p_ctypes ] , ctypes . c_int32 )
2024-04-28 03:41:54 +00:00
def llama_n_ctx_train ( model : llama_model_p , / ) - > int : . . .
2023-09-09 16:12:32 +00:00
2024-02-23 08:39:38 +00:00
# LLAMA_API int32_t llama_n_embd (const struct llama_model * model);
@ctypes_function ( " llama_n_embd " , [ llama_model_p_ctypes ] , ctypes . c_int32 )
2024-04-28 03:41:54 +00:00
def llama_n_embd ( model : llama_model_p , / ) - > int : . . .
2023-08-24 04:17:00 +00:00
2024-03-18 14:26:36 +00:00
# LLAMA_API int32_t llama_n_layer (const struct llama_model * model);
@ctypes_function ( " llama_n_layer " , [ llama_model_p_ctypes ] , ctypes . c_int32 )
2024-04-28 03:41:54 +00:00
def llama_n_layer ( model : llama_model_p , / ) - > int : . . .
2024-03-18 14:26:36 +00:00
2023-10-03 19:23:35 +00:00
# // Get the model's RoPE frequency scaling factor
# LLAMA_API float llama_rope_freq_scale_train(const struct llama_model * model);
2024-02-23 08:39:38 +00:00
@ctypes_function ( " llama_rope_freq_scale_train " , [ llama_model_p_ctypes ] , ctypes . c_float )
2024-02-21 21:25:38 +00:00
def llama_rope_freq_scale_train ( model : llama_model_p , / ) - > float :
2023-11-23 05:26:26 +00:00
""" Get the model ' s RoPE frequency scaling factor """
2024-02-21 21:25:38 +00:00
. . .


# // Functions to access the model's GGUF metadata scalar values
# //  - The functions return the length of the string on success, or -1 on failure
# //  - The output string is always null-terminated and cleared on failure
# //  - GGUF array values are not supported by these functions

# // Get metadata value as a string by key name
# LLAMA_API int32_t llama_model_meta_val_str(const struct llama_model * model, const char * key, char * buf, size_t buf_size);
@ctypes_function(
    "llama_model_meta_val_str",
    [
        llama_model_p_ctypes,
        ctypes.c_char_p,
        ctypes.c_char_p,
        ctypes.c_size_t,
    ],
    ctypes.c_int32,
)
def llama_model_meta_val_str(
    model: llama_model_p,
    key: Union[ctypes.c_char_p, bytes],
    buf: bytes,
    buf_size: int,
    /,
) -> int:
    """Get metadata value as a string by key name"""
    ...


# // Get the number of metadata key/value pairs
# LLAMA_API int32_t llama_model_meta_count(const struct llama_model * model);
@ctypes_function("llama_model_meta_count", [llama_model_p_ctypes], ctypes.c_int32)
def llama_model_meta_count(model: llama_model_p, /) -> int:
    """Get the number of metadata key/value pairs"""
    ...


# // Get metadata key name by index
# LLAMA_API int32_t llama_model_meta_key_by_index(const struct llama_model * model, int32_t i, char * buf, size_t buf_size);
@ctypes_function(
    "llama_model_meta_key_by_index",
    [
        llama_model_p_ctypes,
        ctypes.c_int32,
        ctypes.c_char_p,
        ctypes.c_size_t,
    ],
    ctypes.c_int32,
)
def llama_model_meta_key_by_index(
    model: llama_model_p,
    i: Union[ctypes.c_int, int],
    buf: Union[bytes, CtypesArray[ctypes.c_char]],
    buf_size: int,
    /,
) -> int:
    """Get metadata key name by index"""
    ...


# // Get metadata value as a string by index
# LLAMA_API int32_t llama_model_meta_val_str_by_index(const struct llama_model * model, int32_t i, char * buf, size_t buf_size);
@ctypes_function(
    "llama_model_meta_val_str_by_index",
    [
        llama_model_p_ctypes,
        ctypes.c_int32,
        ctypes.c_char_p,
        ctypes.c_size_t,
    ],
    ctypes.c_int32,
)
def llama_model_meta_val_str_by_index(
    model: llama_model_p,
    i: Union[ctypes.c_int, int],
    buf: Union[bytes, CtypesArray[ctypes.c_char]],
    buf_size: int,
    /,
) -> int:
    """Get metadata value as a string by index"""
    ...
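

# Illustrative sketch (not part of the upstream llama.h API): reading all GGUF
# metadata through the helpers above. The helper name, the 256-byte buffer size and
# the utf-8/"replace" decoding are local assumptions; `model` is expected to come
# from llama_load_model_from_file defined earlier in this module.
def _example_dump_metadata(model: llama_model_p) -> dict:
    meta = {}
    buf = ctypes.create_string_buffer(256)
    for i in range(llama_model_meta_count(model)):
        # Key name for the ith metadata entry; a negative return means failure.
        if llama_model_meta_key_by_index(model, i, buf, ctypes.sizeof(buf)) < 0:
            continue
        key = buf.value.decode("utf-8", errors="replace")
        # Value for the same entry, rendered as a string (truncated if > 255 bytes).
        if llama_model_meta_val_str_by_index(model, i, buf, ctypes.sizeof(buf)) < 0:
            continue
        meta[key] = buf.value.decode("utf-8", errors="replace")
    return meta
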


# // Get a string describing the model type
# LLAMA_API int32_t llama_model_desc(const struct llama_model * model, char * buf, size_t buf_size);
@ctypes_function(
    "llama_model_desc",
    [llama_model_p_ctypes, ctypes.c_char_p, ctypes.c_size_t],
    ctypes.c_int32,
)
def llama_model_desc(
    model: llama_model_p,
    buf: Union[bytes, CtypesArray[ctypes.c_char]],
    buf_size: Union[ctypes.c_size_t, int],
    /,
) -> int:
    """Get a string describing the model type"""
    ...


# // Returns the total size of all the tensors in the model in bytes
# LLAMA_API uint64_t llama_model_size(const struct llama_model * model);
@ctypes_function("llama_model_size", [llama_model_p_ctypes], ctypes.c_uint64)
def llama_model_size(model: llama_model_p, /) -> int:
    """Returns the total size of all the tensors in the model in bytes"""
    ...


# // Returns the total number of parameters in the model
# LLAMA_API uint64_t llama_model_n_params(const struct llama_model * model);
@ctypes_function("llama_model_n_params", [llama_model_p_ctypes], ctypes.c_uint64)
def llama_model_n_params(model: llama_model_p, /) -> int:
    """Returns the total number of parameters in the model"""
    ...


# // Get a llama model tensor
# LLAMA_API struct ggml_tensor * llama_get_model_tensor(struct llama_model * model, const char * name);
@ctypes_function(
    "llama_get_model_tensor", [llama_model_p_ctypes, ctypes.c_char_p], ctypes.c_void_p
)
def llama_get_model_tensor(
    model: llama_model_p, name: Union[ctypes.c_char_p, bytes], /
) -> ctypes.c_void_p:
    """Get a llama model tensor"""
    ...


# // Returns 0 on success
# LLAMA_API uint32_t llama_model_quantize(
#         const char * fname_inp,
#         const char * fname_out,
#         const llama_model_quantize_params * params);
@ctypes_function(
    "llama_model_quantize",
    [
        ctypes.c_char_p,
        ctypes.c_char_p,
        ctypes.POINTER(llama_model_quantize_params),
    ],
    ctypes.c_uint32,
)
def llama_model_quantize(
    fname_inp: bytes,
    fname_out: bytes,
    params: CtypesPointerOrRef[llama_model_quantize_params],
    /,
) -> int:
    """Returns 0 on success"""
    ...
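

# Illustrative sketch (not part of the upstream API): quantizing a GGUF file with
# llama_model_quantize. It assumes llama_model_quantize_default_params and the
# LLAMA_FTYPE_* constants are bound earlier in this module, mirroring llama.h; the
# helper name and file-path handling are placeholders.
def _example_quantize(fname_inp: bytes, fname_out: bytes, ftype: int) -> int:
    params = llama_model_quantize_default_params()
    params.ftype = ftype  # e.g. one of the LLAMA_FTYPE_* values
    # Returns 0 on success, non-zero on failure.
    return llama_model_quantize(fname_inp, fname_out, ctypes.byref(params))
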


# // Apply a LoRA adapter to a loaded model
# // path_base_model is the path to a higher quality model to use as a base for
# // the layers modified by the adapter. Can be NULL to use the current loaded model.
# // The model needs to be reloaded before applying a new adapter, otherwise the adapter
# // will be applied on top of the previous one
# // Returns 0 on success
# LLAMA_API int32_t llama_model_apply_lora_from_file(
#         const struct llama_model * model,
#                   const char * path_lora,
#                        float   scale,
#                   const char * path_base_model,
#                      int32_t   n_threads);
@ctypes_function(
    "llama_model_apply_lora_from_file",
    [
        llama_model_p_ctypes,
        ctypes.c_char_p,
        ctypes.c_float,
        ctypes.c_char_p,
        ctypes.c_int32,
    ],
    ctypes.c_int32,
)
def llama_model_apply_lora_from_file(
    model: llama_model_p,
    path_lora: Union[ctypes.c_char_p, bytes],
    scale: Union[ctypes.c_float, float],
    path_base_model: Union[ctypes.c_char_p, bytes, None],
    n_threads: Union[ctypes.c_int32, int],
    /,
) -> int:
    """Apply a LoRA adapter to a loaded model
    path_base_model is the path to a higher quality model to use as a base for
    the layers modified by the adapter. Can be NULL to use the current loaded model.
    The model needs to be reloaded before applying a new adapter, otherwise the adapter
    will be applied on top of the previous one
    Returns 0 on success"""
    ...


# // Apply a loaded control vector to a llama_context, or if data is NULL, clear
# // the currently loaded vector.
# // n_embd should be the size of a single layer's control, and data should point
# // to an n_embd x n_layers buffer starting from layer 1.
# // il_start and il_end are the layer range the vector should apply to (both inclusive)
# // See llama_control_vector_load in common to load a control vector.
# LLAMA_API int32_t llama_control_vector_apply(
#         struct llama_context * lctx,
#                  const float * data,
#                       size_t   len,
#                      int32_t   n_embd,
#                      int32_t   il_start,
#                      int32_t   il_end);
@ctypes_function(
    "llama_control_vector_apply",
    [
        llama_context_p_ctypes,
        ctypes.POINTER(ctypes.c_float),
        ctypes.c_size_t,
        ctypes.c_int32,
        ctypes.c_int32,
        ctypes.c_int32,
    ],
    ctypes.c_int32,
)
def llama_control_vector_apply(
    lctx: llama_context_p,
    data: CtypesPointerOrRef[ctypes.c_float],
    len: int,
    n_embd: int,
    il_start: int,
    il_end: int,
    /,
) -> int:
    """Apply a loaded control vector to a llama_context, or if data is NULL, clear
    the currently loaded vector.
    n_embd should be the size of a single layer's control, and data should point
    to an n_embd x n_layers buffer starting from layer 1.
    il_start and il_end are the layer range the vector should apply to (both inclusive)
    See llama_control_vector_load in common to load a control vector."""
    ...
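

# Illustrative sketch (not part of the upstream API): applying and clearing a control
# vector. The flat list of floats is assumed to hold n_embd values per layer for the
# layers il_start..il_end (inclusive), as the comment above describes; the helper
# names are local assumptions.
def _example_apply_control_vector(
    ctx: llama_context_p, values: List[float], n_embd: int, il_start: int, il_end: int
) -> int:
    data = (ctypes.c_float * len(values))(*values)
    return llama_control_vector_apply(ctx, data, len(values), n_embd, il_start, il_end)


def _example_clear_control_vector(ctx: llama_context_p) -> int:
    # Passing NULL for data clears the currently loaded control vector.
    return llama_control_vector_apply(ctx, None, 0, 0, 0, 0)
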


# //
# // KV cache
# //


# // Information associated with an individual cell in the KV cache view.
# struct llama_kv_cache_view_cell {
#     // The position for this cell. Takes KV cache shifts into account.
#     // May be negative if the cell is not populated.
#     llama_pos pos;
# };
class llama_kv_cache_view_cell(ctypes.Structure):
    """Information associated with an individual cell in the KV cache view.

    Attributes:
        pos (llama_pos): The position for this cell. Takes KV cache shifts into account.
            May be negative if the cell is not populated."""

    if TYPE_CHECKING:
        pos: llama_pos

    _fields_ = [("pos", llama_pos)]


# // An updateable view of the KV cache.
# struct llama_kv_cache_view {
#     // Number of KV cache cells. This will be the same as the context size.
#     int32_t n_cells;
#     // Maximum number of sequences that can exist in a cell. It's not an error
#     // if there are more sequences in a cell than this value, however they will
#     // not be visible in the view cells_sequences.
#     int32_t n_seq_max;
#     // Number of tokens in the cache. For example, if there are two populated
#     // cells, the first with 1 sequence id in it and the second with 2 sequence
#     // ids then you'll have 3 tokens.
#     int32_t token_count;
#     // Number of populated cache cells.
#     int32_t used_cells;
#     // Maximum contiguous empty slots in the cache.
#     int32_t max_contiguous;
#     // Index to the start of the max_contiguous slot range. Can be negative
#     // when cache is full.
#     int32_t max_contiguous_idx;
#     // Information for an individual cell.
#     struct llama_kv_cache_view_cell * cells;
#     // The sequences for each cell. There will be n_seq_max items per cell.
#     llama_seq_id * cells_sequences;
# };
class llama_kv_cache_view(ctypes.Structure):
    if TYPE_CHECKING:
        n_cells: int
        n_max_seq: int
        token_count: int
        used_cells: int
        max_contiguous: int
        max_contiguous_idx: int
        cells: CtypesArray[llama_kv_cache_view_cell]
        cells_sequences: CtypesArray[llama_seq_id]

    _fields_ = [
        ("n_cells", ctypes.c_int32),
        ("n_max_seq", ctypes.c_int32),
        ("token_count", ctypes.c_int32),
        ("used_cells", ctypes.c_int32),
        ("max_contiguous", ctypes.c_int32),
        ("max_contiguous_idx", ctypes.c_int32),
        ("cells", ctypes.POINTER(llama_kv_cache_view_cell)),
        ("cells_sequences", ctypes.POINTER(llama_seq_id)),
    ]


llama_kv_cache_view_p = ctypes.POINTER(llama_kv_cache_view)


# // Create an empty KV cache view. (use only for debugging purposes)
# LLAMA_API struct llama_kv_cache_view llama_kv_cache_view_init(const struct llama_context * ctx, int32_t n_seq_max);
@ctypes_function(
    "llama_kv_cache_view_init",
    [llama_context_p_ctypes, ctypes.c_int32],
    llama_kv_cache_view,
)
def llama_kv_cache_view_init(
    ctx: llama_context_p, n_seq_max: Union[ctypes.c_int32, int], /
) -> llama_kv_cache_view:
    """Create an empty KV cache view. (use only for debugging purposes)"""
    ...


# // Free a KV cache view. (use only for debugging purposes)
# LLAMA_API void llama_kv_cache_view_free(struct llama_kv_cache_view * view);
@ctypes_function("llama_kv_cache_view_free", [llama_kv_cache_view_p], None)
def llama_kv_cache_view_free(view: "ctypes.pointer[llama_kv_cache_view]", /):  # type: ignore
    """Free a KV cache view. (use only for debugging purposes)"""
    ...


# // Update the KV cache view structure with the current state of the KV cache. (use only for debugging purposes)
# LLAMA_API void llama_kv_cache_view_update(const struct llama_context * ctx, struct llama_kv_cache_view * view);
@ctypes_function(
    "llama_kv_cache_view_update", [llama_context_p_ctypes, llama_kv_cache_view_p], None
)
def llama_kv_cache_view_update(ctx: llama_context_p, view: CtypesPointerOrRef[llama_kv_cache_view], /):  # type: ignore
    """Update the KV cache view structure with the current state of the KV cache. (use only for debugging purposes)"""
    ...


# // Returns the number of tokens in the KV cache (slow, use only for debug)
# // If a KV cell has multiple sequences assigned to it, it will be counted multiple times
# LLAMA_API int32_t llama_get_kv_cache_token_count(const struct llama_context * ctx);
@ctypes_function(
    "llama_get_kv_cache_token_count", [llama_context_p_ctypes], ctypes.c_int32
)
def llama_get_kv_cache_token_count(ctx: llama_context_p, /) -> int:
    """Returns the number of tokens in the KV cache (slow, use only for debug)
    If a KV cell has multiple sequences assigned to it, it will be counted multiple times
    """
    ...
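

# Illustrative sketch (not part of the upstream API): inspecting the KV cache with the
# debug view above. n_seq_max=1 is an arbitrary choice for a single-sequence context;
# the helper name is a local assumption.
def _example_dump_kv_cache(ctx: llama_context_p) -> None:
    view = llama_kv_cache_view_init(ctx, 1)
    try:
        llama_kv_cache_view_update(ctx, ctypes.byref(view))
        print(
            f"cells={view.n_cells} tokens={view.token_count} "
            f"used={view.used_cells} max_contiguous={view.max_contiguous}"
        )
    finally:
        llama_kv_cache_view_free(ctypes.byref(view))
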


# // Returns the number of used KV cells (i.e. have at least one sequence assigned to them)
# LLAMA_API int32_t llama_get_kv_cache_used_cells(const struct llama_context * ctx);
@ctypes_function(
    "llama_get_kv_cache_used_cells", [llama_context_p_ctypes], ctypes.c_int32
)
def llama_get_kv_cache_used_cells(ctx: llama_context_p, /) -> int:
    """Returns the number of used KV cells (i.e. have at least one sequence assigned to them)"""
    ...


# // Clear the KV cache - both cell info is erased and KV data is zeroed
# LLAMA_API void llama_kv_cache_clear(
#         struct llama_context * ctx);
@ctypes_function("llama_kv_cache_clear", [llama_context_p_ctypes], None)
def llama_kv_cache_clear(ctx: llama_context_p, /):
    """Clear the KV cache"""
    ...


# // Removes all tokens that belong to the specified sequence and have positions in [p0, p1)
# // Returns false if a partial sequence cannot be removed. Removing a whole sequence never fails
# // seq_id < 0 : match any sequence
# // p0 < 0     : [0,  p1]
# // p1 < 0     : [p0, inf)
# LLAMA_API bool llama_kv_cache_seq_rm(
#         struct llama_context * ctx,
#                 llama_seq_id   seq_id,
#                    llama_pos   p0,
#                    llama_pos   p1);
@ctypes_function(
    "llama_kv_cache_seq_rm",
    [
        llama_context_p_ctypes,
        llama_seq_id,
        llama_pos,
        llama_pos,
    ],
    ctypes.c_bool,
)
def llama_kv_cache_seq_rm(
    ctx: llama_context_p,
    seq_id: Union[llama_seq_id, int],
    p0: Union[llama_pos, int],
    p1: Union[llama_pos, int],
    /,
) -> bool:
    """Removes all tokens that belong to the specified sequence and have positions in [p0, p1)
    Returns false if a partial sequence cannot be removed. Removing a whole sequence never fails
    seq_id < 0 : match any sequence
    p0 < 0     : [0,  p1]
    p1 < 0     : [p0, inf)"""
    ...


# // Copy all tokens that belong to the specified sequence to another sequence
# // Note that this does not allocate extra KV cache memory - it simply assigns the tokens to the new sequence
# // p0 < 0 : [0,  p1]
# // p1 < 0 : [p0, inf)
# LLAMA_API void llama_kv_cache_seq_cp(
#         struct llama_context * ctx,
#                 llama_seq_id   seq_id_src,
#                 llama_seq_id   seq_id_dst,
#                    llama_pos   p0,
#                    llama_pos   p1);
@ctypes_function(
    "llama_kv_cache_seq_cp",
    [
        llama_context_p_ctypes,
        llama_seq_id,
        llama_seq_id,
        llama_pos,
        llama_pos,
    ],
    None,
)
def llama_kv_cache_seq_cp(
    ctx: llama_context_p,
    seq_id_src: Union[llama_seq_id, int],
    seq_id_dst: Union[llama_seq_id, int],
    p0: Union[llama_pos, int],
    p1: Union[llama_pos, int],
    /,
):
    """Copy all tokens that belong to the specified sequence to another sequence
    Note that this does not allocate extra KV cache memory - it simply assigns the tokens to the new sequence
    p0 < 0 : [0,  p1]
    p1 < 0 : [p0, inf)"""
    ...


# // Removes all tokens that do not belong to the specified sequence
# LLAMA_API void llama_kv_cache_seq_keep(
#         struct llama_context * ctx,
#                 llama_seq_id   seq_id);
@ctypes_function(
    "llama_kv_cache_seq_keep", [llama_context_p_ctypes, llama_seq_id], None
)
def llama_kv_cache_seq_keep(ctx: llama_context_p, seq_id: Union[llama_seq_id, int], /):
    """Removes all tokens that do not belong to the specified sequence"""
    ...


# // Adds relative position "delta" to all tokens that belong to the specified sequence and have positions in [p0, p1)
# // If the KV cache is RoPEd, the KV data is updated accordingly:
# //   - lazily on next llama_decode()
# //   - explicitly with llama_kv_cache_update()
# // p0 < 0 : [0,  p1]
# // p1 < 0 : [p0, inf)
# LLAMA_API void llama_kv_cache_seq_add(
#         struct llama_context * ctx,
#                 llama_seq_id   seq_id,
#                    llama_pos   p0,
#                    llama_pos   p1,
#                    llama_pos   delta);
@ctypes_function(
    "llama_kv_cache_seq_add",
    [
        llama_context_p_ctypes,
        llama_seq_id,
        llama_pos,
        llama_pos,
        llama_pos,
    ],
    None,
)
def llama_kv_cache_seq_add(
    ctx: llama_context_p,
    seq_id: Union[llama_seq_id, int],
    p0: Union[llama_pos, int],
    p1: Union[llama_pos, int],
    delta: Union[llama_pos, int],
    /,
):
    """Adds relative position "delta" to all tokens that belong to the specified sequence and have positions in [p0, p1)
    If the KV cache is RoPEd, the KV data is updated accordingly:
      - lazily on next llama_decode()
      - explicitly with llama_kv_cache_update()
    p0 < 0 : [0,  p1]
    p1 < 0 : [p0, inf)"""
    ...


# // Integer division of the positions by factor of `d > 1`
# // If the KV cache is RoPEd, the KV data is updated accordingly
# // p0 < 0 : [0,  p1]
# // p1 < 0 : [p0, inf)
# LLAMA_API void llama_kv_cache_seq_div(
#         struct llama_context * ctx,
#                 llama_seq_id   seq_id,
#                    llama_pos   p0,
#                    llama_pos   p1,
#                          int   d);
@ctypes_function(
    "llama_kv_cache_seq_div",
    [
        llama_context_p_ctypes,
        llama_seq_id,
        llama_pos,
        llama_pos,
        ctypes.c_int,
    ],
    None,
)
def llama_kv_cache_seq_div(
    ctx: llama_context_p,
    seq_id: Union[llama_seq_id, int],
    p0: Union[llama_pos, int],
    p1: Union[llama_pos, int],
    d: Union[ctypes.c_int, int],
    /,
):
    """Integer division of the positions by factor of `d > 1`
    If the KV cache is RoPEd, the KV data is updated accordingly
    p0 < 0 : [0,  p1]
    p1 < 0 : [p0, inf)"""
    ...


# // Defragment the KV cache
# // This will be applied:
# //   - lazily on next llama_decode()
# //   - explicitly with llama_kv_cache_update()
# LLAMA_API void llama_kv_cache_defrag(struct llama_context * ctx);
@ctypes_function("llama_kv_cache_defrag", [llama_context_p_ctypes], None)
def llama_kv_cache_defrag(ctx: llama_context_p, /):
    """Defragment the KV cache
    This will be applied:
      - lazily on next llama_decode()
      - explicitly with llama_kv_cache_update()"""
    ...


# // Apply the KV cache updates (such as K-shifts, defragmentation, etc.)
# LLAMA_API void llama_kv_cache_update(struct llama_context * ctx);
@ctypes_function("llama_kv_cache_update", [llama_context_p_ctypes], None)
def llama_kv_cache_update(ctx: llama_context_p, /):
    """Apply the KV cache updates (such as K-shifts, defragmentation, etc.)"""
    ...
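

# Illustrative sketch (not part of the upstream API): the usual "context shifting"
# recipe built from the primitives above - drop the oldest n_discard tokens of
# sequence 0 (keeping the first n_keep tokens, e.g. a system prompt) and slide the
# remaining positions left so new tokens fit. The helper name and the assumption that
# the caller tracks n_past itself are local to this example.
def _example_shift_context(
    ctx: llama_context_p, n_keep: int, n_past: int, n_discard: int
) -> None:
    # Remove positions [n_keep, n_keep + n_discard) from sequence 0 ...
    llama_kv_cache_seq_rm(ctx, 0, n_keep, n_keep + n_discard)
    # ... and shift the remaining tokens down by n_discard positions.
    llama_kv_cache_seq_add(ctx, 0, n_keep + n_discard, n_past, -n_discard)
    # The shift is applied lazily on the next llama_decode(); force it explicitly here.
    llama_kv_cache_update(ctx)
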


# //
# // State / sessions
# //


# Returns the maximum size in bytes of the state (rng, logits, embedding
# and kv_cache) - will often be smaller after compacting tokens
# LLAMA_API size_t llama_state_get_size(const struct llama_context * ctx);
@ctypes_function("llama_state_get_size", [llama_context_p_ctypes], ctypes.c_size_t)
def llama_state_get_size(ctx: llama_context_p, /) -> int:
    """Returns the maximum size in bytes of the state (rng, logits, embedding
    and kv_cache) - will often be smaller after compacting tokens"""
    ...


# LLAMA_API DEPRECATED(size_t llama_get_state_size(const struct llama_context * ctx),
#     "use llama_state_get_size instead");
@ctypes_function("llama_get_state_size", [llama_context_p_ctypes], ctypes.c_size_t)
def llama_get_state_size(ctx: llama_context_p, /) -> int:
    """Returns the maximum size in bytes of the state (rng, logits, embedding
    and kv_cache) - will often be smaller after compacting tokens"""
    ...


# Copies the state to the specified destination address.
# Destination needs to have allocated enough memory.
# Returns the number of bytes copied
# LLAMA_API size_t llama_state_get_data(
#         struct llama_context * ctx,
#                      uint8_t * dst);
@ctypes_function(
    "llama_state_get_data",
    [
        llama_context_p_ctypes,
        ctypes.POINTER(ctypes.c_uint8),
    ],
    ctypes.c_size_t,
)
def llama_state_get_data(
    ctx: llama_context_p, dst: CtypesArray[ctypes.c_uint8], /
) -> int:
    """Copies the state to the specified destination address.
    Destination needs to have allocated enough memory.
    Returns the number of bytes copied"""
    ...


# LLAMA_API DEPRECATED(size_t llama_copy_state_data(
#         struct llama_context * ctx,
#                      uint8_t * dst),
#     "use llama_state_get_data instead");
@ctypes_function(
    "llama_copy_state_data",
    [
        llama_context_p_ctypes,
        ctypes.POINTER(ctypes.c_uint8),
    ],
    ctypes.c_size_t,
)
def llama_copy_state_data(
    ctx: llama_context_p, dst: CtypesArray[ctypes.c_uint8], /
) -> int:
    """Copies the state to the specified destination address.
    Destination needs to have allocated enough memory.
    Returns the number of bytes copied"""
    ...


# // Set the state reading from the specified address
# // Returns the number of bytes read
# LLAMA_API size_t llama_state_set_data(
#         struct llama_context * ctx,
#                const uint8_t * src);
@ctypes_function(
    "llama_state_set_data",
    [llama_context_p_ctypes, ctypes.POINTER(ctypes.c_uint8)],
    ctypes.c_size_t,
)
def llama_state_set_data(
    ctx: llama_context_p, src: CtypesArray[ctypes.c_uint8], /
) -> int:
    """Set the state reading from the specified address
    Returns the number of bytes read"""
    ...


# LLAMA_API DEPRECATED(size_t llama_set_state_data(
#         struct llama_context * ctx,
#                const uint8_t * src),
#     "use llama_state_set_data instead");
@ctypes_function(
    "llama_set_state_data",
    [llama_context_p_ctypes, ctypes.POINTER(ctypes.c_uint8)],
    ctypes.c_size_t,
)
def llama_set_state_data(
    ctx: llama_context_p, src: CtypesArray[ctypes.c_uint8], /
) -> int:
    """Set the state reading from the specified address
    Returns the number of bytes read"""
    ...
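

# Illustrative sketch (not part of the upstream API): snapshotting and restoring the
# full context state in memory with the non-deprecated llama_state_* functions above.
# The helper names are local assumptions.
def _example_snapshot_state(ctx: llama_context_p) -> CtypesArray[ctypes.c_uint8]:
    buf = (ctypes.c_uint8 * llama_state_get_size(ctx))()
    llama_state_get_data(ctx, buf)  # returns the number of bytes actually copied
    return buf


def _example_restore_state(ctx: llama_context_p, buf: CtypesArray[ctypes.c_uint8]) -> int:
    # Returns the number of bytes read from the buffer.
    return llama_state_set_data(ctx, buf)
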


# Save/load session file
# LLAMA_API bool llama_state_load_file(
#         struct llama_context * ctx,
#                   const char * path_session,
#                  llama_token * tokens_out,
#                       size_t   n_token_capacity,
#                       size_t * n_token_count_out);
@ctypes_function(
    "llama_state_load_file",
    [
        llama_context_p_ctypes,
        ctypes.c_char_p,
        llama_token_p,
        ctypes.c_size_t,
        ctypes.POINTER(ctypes.c_size_t),
    ],
    ctypes.c_bool,
)
def llama_state_load_file(
    ctx: llama_context_p,
    path_session: bytes,
    tokens_out: CtypesArray[llama_token],
    n_token_capacity: Union[ctypes.c_size_t, int],
    n_token_count_out: CtypesPointerOrRef[ctypes.c_size_t],
    /,
) -> bool: ...


# LLAMA_API DEPRECATED(bool llama_load_session_file(
#         struct llama_context * ctx,
#                   const char * path_session,
#                  llama_token * tokens_out,
#                       size_t   n_token_capacity,
#                       size_t * n_token_count_out),
#     "use llama_state_load_file instead");
@ctypes_function(
    "llama_load_session_file",
    [
        llama_context_p_ctypes,
        ctypes.c_char_p,
        llama_token_p,
        ctypes.c_size_t,
        ctypes.POINTER(ctypes.c_size_t),
    ],
    ctypes.c_size_t,
)
def llama_load_session_file(
    ctx: llama_context_p,
    path_session: bytes,
    tokens_out: CtypesArray[llama_token],
    n_token_capacity: Union[ctypes.c_size_t, int],
    n_token_count_out: CtypesPointerOrRef[ctypes.c_size_t],
    /,
) -> int: ...


# LLAMA_API bool llama_state_save_file(
#         struct llama_context * ctx,
#                   const char * path_session,
#            const llama_token * tokens,
#                       size_t   n_token_count);
@ctypes_function(
    "llama_state_save_file",
    [
        llama_context_p_ctypes,
        ctypes.c_char_p,
        llama_token_p,
        ctypes.c_size_t,
    ],
    ctypes.c_bool,
)
def llama_state_save_file(
    ctx: llama_context_p,
    path_session: bytes,
    tokens: CtypesArray[llama_token],
    n_token_count: Union[ctypes.c_size_t, int],
    /,
) -> bool: ...


# LLAMA_API DEPRECATED(bool llama_save_session_file(
#         struct llama_context * ctx,
#                   const char * path_session,
#            const llama_token * tokens,
#                       size_t   n_token_count),
#     "use llama_state_save_file instead");
@ctypes_function(
    "llama_save_session_file",
    [
        llama_context_p_ctypes,
        ctypes.c_char_p,
        llama_token_p,
        ctypes.c_size_t,
    ],
    ctypes.c_size_t,
)
def llama_save_session_file(
    ctx: llama_context_p,
    path_session: bytes,
    tokens: CtypesArray[llama_token],
    n_token_count: Union[ctypes.c_size_t, int],
    /,
) -> int: ...
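

# Illustrative sketch (not part of the upstream API): persisting a session to disk and
# restoring it with the llama_state_*_file functions above. `tokens` is the evaluated
# token list the caller is tracking; the 2048-token reload capacity and the helper
# names are local assumptions.
def _example_save_session(ctx: llama_context_p, path: bytes, tokens: List[int]) -> bool:
    arr = (llama_token * len(tokens))(*tokens)
    return llama_state_save_file(ctx, path, arr, len(tokens))


def _example_load_session(ctx: llama_context_p, path: bytes) -> List[int]:
    capacity = 2048
    tokens_out = (llama_token * capacity)()
    n_out = ctypes.c_size_t(0)
    if not llama_state_load_file(ctx, path, tokens_out, capacity, ctypes.byref(n_out)):
        return []
    return list(tokens_out[: n_out.value])
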


# // Get the exact size needed to copy the KV cache of a single sequence
# LLAMA_API size_t llama_state_seq_get_size(
#         struct llama_context * ctx,
#                 llama_seq_id   seq_id);
@ctypes_function(
    "llama_state_seq_get_size",
    [llama_context_p_ctypes, llama_seq_id],
    ctypes.c_size_t,
)
def llama_state_seq_get_size(ctx: llama_context_p, seq_id: llama_seq_id, /) -> int:
    """Get the exact size needed to copy the KV cache of a single sequence"""
    ...


# // Copy the KV cache of a single sequence into the specified buffer
# LLAMA_API size_t llama_state_seq_get_data(
#         struct llama_context * ctx,
#                      uint8_t * dst,
#                 llama_seq_id   seq_id);
@ctypes_function(
    "llama_state_seq_get_data",
    [llama_context_p_ctypes, ctypes.POINTER(ctypes.c_uint8), llama_seq_id],
    ctypes.c_size_t,
)
def llama_state_seq_get_data(
    ctx: llama_context_p, dst: CtypesArray[ctypes.c_uint8], seq_id: llama_seq_id, /
) -> int:
    """Copy the KV cache of a single sequence into the specified buffer"""
    ...


# // Copy the sequence data (originally copied with `llama_state_seq_get_data`) into the specified sequence
# // Returns:
# //  - Positive: Ok
# //  - Zero: Failed to load
# LLAMA_API size_t llama_state_seq_set_data(
#         struct llama_context * ctx,
#                const uint8_t * src,
#                 llama_seq_id   dest_seq_id);
@ctypes_function(
    "llama_state_seq_set_data",
    [llama_context_p_ctypes, ctypes.POINTER(ctypes.c_uint8), llama_seq_id],
    ctypes.c_size_t,
)
def llama_state_seq_set_data(
    ctx: llama_context_p, src: CtypesArray[ctypes.c_uint8], dest_seq_id: llama_seq_id, /
) -> int:
    """Copy the sequence data (originally copied with `llama_state_seq_get_data`) into the specified sequence
    Returns:
     - Positive: Ok
     - Zero: Failed to load"""
    ...


# LLAMA_API size_t llama_state_seq_save_file(
#         struct llama_context * ctx,
#                   const char * filepath,
#                 llama_seq_id   seq_id,
#            const llama_token * tokens,
#                       size_t   n_token_count);
@ctypes_function(
    "llama_state_seq_save_file",
    [
        llama_context_p_ctypes,
        ctypes.c_char_p,
        llama_seq_id,
        llama_token_p,
        ctypes.c_size_t,
    ],
    ctypes.c_size_t,
)
def llama_state_seq_save_file(
    ctx: llama_context_p,
    filepath: bytes,
    seq_id: llama_seq_id,
    tokens: CtypesArray[llama_token],
    n_token_count: Union[ctypes.c_size_t, int],
    /,
) -> int: ...


# LLAMA_API size_t llama_state_seq_load_file(
#         struct llama_context * ctx,
#                   const char * filepath,
#                 llama_seq_id   dest_seq_id,
#                  llama_token * tokens_out,
#                       size_t   n_token_capacity,
#                       size_t * n_token_count_out);
@ctypes_function(
    "llama_state_seq_load_file",
    [
        llama_context_p_ctypes,
        ctypes.c_char_p,
        llama_seq_id,
        llama_token_p,
        ctypes.c_size_t,
        ctypes.POINTER(ctypes.c_size_t),
    ],
    ctypes.c_size_t,
)
def llama_state_seq_load_file(
    ctx: llama_context_p,
    filepath: bytes,
    dest_seq_id: llama_seq_id,
    tokens_out: CtypesArray[llama_token],
    n_token_capacity: Union[ctypes.c_size_t, int],
    n_token_count_out: CtypesPointerOrRef[ctypes.c_size_t],
    /,
) -> int: ...


# //
# // Decoding
# //


# // Return batch for single sequence of tokens starting at pos_0
# //
# // NOTE: this is a helper function to facilitate transition to the new batch API - avoid using it
# //
# LLAMA_API struct llama_batch llama_batch_get_one(
#                   llama_token * tokens,
#                       int32_t   n_tokens,
#                     llama_pos   pos_0,
#                  llama_seq_id   seq_id);
@ctypes_function(
    "llama_batch_get_one",
    [
        llama_token_p,
        ctypes.c_int,
        llama_pos,
        llama_seq_id,
    ],
    llama_batch,
)
def llama_batch_get_one(
    tokens: CtypesArray[llama_token],
    n_tokens: Union[ctypes.c_int, int],
    pos_0: Union[llama_pos, int],
    seq_id: llama_seq_id,
    /,
) -> llama_batch:
    """Return batch for single sequence of tokens starting at pos_0

    NOTE: this is a helper function to facilitate transition to the new batch API - avoid using it
    """
    ...


# // Allocates a batch of tokens on the heap that can hold a maximum of n_tokens
# // Each token can be assigned up to n_seq_max sequence ids
# // The batch has to be freed with llama_batch_free()
# // If embd != 0, llama_batch.embd will be allocated with size of n_tokens * embd * sizeof(float)
# // Otherwise, llama_batch.token will be allocated to store n_tokens llama_token
# // The rest of the llama_batch members are allocated with size n_tokens
# // All members are left uninitialized
# LLAMA_API struct llama_batch llama_batch_init(
#         int32_t n_tokens,
#         int32_t embd,
#         int32_t n_seq_max);
@ctypes_function(
    "llama_batch_init", [ctypes.c_int32, ctypes.c_int32, ctypes.c_int32], llama_batch
)
def llama_batch_init(
    n_tokens: Union[ctypes.c_int32, int],
    embd: Union[ctypes.c_int32, int],
    n_seq_max: Union[ctypes.c_int32, int],
    /,
) -> llama_batch:
    """Allocates a batch of tokens on the heap that can hold a maximum of n_tokens
    Each token can be assigned up to n_seq_max sequence ids
    The batch has to be freed with llama_batch_free()
    If embd != 0, llama_batch.embd will be allocated with size of n_tokens * embd * sizeof(float)
    Otherwise, llama_batch.token will be allocated to store n_tokens llama_token
    The rest of the llama_batch members are allocated with size n_tokens
    All members are left uninitialized"""
    ...


# // Frees a batch of tokens allocated with llama_batch_init()
# LLAMA_API void llama_batch_free(struct llama_batch batch);
@ctypes_function("llama_batch_free", [llama_batch], None)
def llama_batch_free(batch: llama_batch, /):
    """Frees a batch of tokens allocated with llama_batch_init()"""
    ...
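

# Illustrative sketch (not part of the upstream API): filling a token batch allocated
# with llama_batch_init for a single sequence. The field names follow the llama_batch
# structure defined earlier in this module; the helper name and the "logits only for
# the last token" policy are assumptions for this example. The caller remains
# responsible for llama_batch_free.
def _example_fill_batch(batch: llama_batch, tokens: List[int], seq_id: int = 0) -> None:
    batch.n_tokens = len(tokens)
    for i, tok in enumerate(tokens):
        batch.token[i] = tok
        batch.pos[i] = i
        batch.n_seq_id[i] = 1
        batch.seq_id[i][0] = seq_id
        # Request logits only for the last token of the prompt.
        batch.logits[i] = i == len(tokens) - 1
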


# // Positive return values does not mean a fatal error, but rather a warning.
# //   0 - success
# //   1 - could not find a KV slot for the batch (try reducing the size of the batch or increase the context)
# // < 0 - error
# LLAMA_API int32_t llama_decode(
#         struct llama_context * ctx,
#           struct llama_batch   batch);
@ctypes_function("llama_decode", [llama_context_p_ctypes, llama_batch], ctypes.c_int32)
def llama_decode(ctx: llama_context_p, batch: llama_batch, /) -> int:
    """Positive return values do not mean a fatal error, but rather a warning.
      0 - success
      1 - could not find a KV slot for the batch (try reducing the size of the batch or increase the context)
    < 0 - error"""
    ...


# // Set the number of threads used for decoding
# // n_threads is the number of threads used for generation (single token)
# // n_threads_batch is the number of threads used for prompt and batch processing (multiple tokens)
# LLAMA_API void llama_set_n_threads(struct llama_context * ctx, uint32_t n_threads, uint32_t n_threads_batch);
@ctypes_function(
    "llama_set_n_threads",
    [
        llama_context_p_ctypes,
        ctypes.c_uint32,
        ctypes.c_uint32,
    ],
    None,
)
def llama_set_n_threads(
    ctx: llama_context_p,
    n_threads: Union[ctypes.c_uint32, int],
    n_threads_batch: Union[ctypes.c_uint32, int],
    /,
):
    """Set the number of threads used for decoding
    n_threads is the number of threads used for generation (single token)
    n_threads_batch is the number of threads used for prompt and batch processing (multiple tokens)
    """
    ...


# // Get the number of threads used for generation of a single token.
# LLAMA_API uint32_t llama_n_threads(struct llama_context * ctx);
@ctypes_function("llama_n_threads", [llama_context_p_ctypes], ctypes.c_uint32)
def llama_n_threads(ctx: llama_context_p, /) -> int:
    """Get the number of threads used for generation of a single token"""
    ...


# // Get the number of threads used for prompt and batch processing (multiple token).
# LLAMA_API uint32_t llama_n_threads_batch(struct llama_context * ctx);
@ctypes_function("llama_n_threads_batch", [llama_context_p_ctypes], ctypes.c_uint32)
def llama_n_threads_batch(ctx: llama_context_p, /) -> int:
    """Get the number of threads used for prompt and batch processing (multiple token)"""
    ...


# // Set whether to use causal attention or not
# // If set to true, the model will only attend to the past tokens
# LLAMA_API void llama_set_causal_attn(struct llama_context * ctx, bool causal_attn);
@ctypes_function("llama_set_causal_attn", [llama_context_p_ctypes, ctypes.c_bool], None)
def llama_set_causal_attn(ctx: llama_context_p, causal_attn: bool, /):
    """Set whether to use causal attention or not
    If set to true, the model will only attend to the past tokens"""
    ...


# // Set abort callback
# LLAMA_API void llama_set_abort_callback(struct llama_context * ctx, ggml_abort_callback abort_callback, void * abort_callback_data);
@ctypes_function(
    "llama_set_abort_callback",
    [llama_context_p_ctypes, ggml_abort_callback, ctypes.c_void_p],
    None,
)
def llama_set_abort_callback(
    ctx: llama_context_p,
    abort_callback: Callable[[ctypes.c_void_p], None],
    abort_callback_data: ctypes.c_void_p,
    /,
):
    """Set abort callback"""
    ...


# // Wait until all computations are finished
# // This is automatically done when using one of the functions below to obtain the computation results
# // and is not necessary to call it explicitly in most cases
# LLAMA_API void llama_synchronize(struct llama_context * ctx);
@ctypes_function("llama_synchronize", [llama_context_p_ctypes], None)
def llama_synchronize(ctx: llama_context_p, /):
    """Wait until all computations are finished
    This is automatically done when using one of the functions below to obtain the computation results
    and is not necessary to call it explicitly in most cases"""
    ...


# // Token logits obtained from the last call to llama_decode()
# // The logits for which llama_batch.logits[i] != 0 are stored contiguously
# // in the order they have appeared in the batch.
# // Rows: number of tokens for which llama_batch.logits[i] != 0
# // Cols: n_vocab
# LLAMA_API float * llama_get_logits(struct llama_context * ctx);
@ctypes_function(
    "llama_get_logits", [llama_context_p_ctypes], ctypes.POINTER(ctypes.c_float)
)
def llama_get_logits(ctx: llama_context_p, /) -> CtypesArray[ctypes.c_float]:
    """Token logits obtained from the last call to llama_decode()
    The logits for which llama_batch.logits[i] != 0 are stored contiguously
    in the order they have appeared in the batch.
    Rows: number of tokens for which llama_batch.logits[i] != 0
    Cols: n_vocab

    Returns:
        Pointer to the logits buffer of shape (n_tokens, n_vocab)"""
    ...
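

# Illustrative sketch (not part of the upstream API): evaluating a chunk of tokens and
# greedily picking the next token from the logits of the last evaluated position.
# llama_get_logits_ith (declared just below) accepts -1 for the last logit row, per the
# header comment; the helper name and the strict treatment of non-zero llama_decode
# return values are assumptions for this example.
def _example_decode_and_pick(
    ctx: llama_context_p, model: llama_model_p, tokens: List[int], n_past: int
) -> int:
    arr = (llama_token * len(tokens))(*tokens)
    batch = llama_batch_get_one(arr, len(tokens), n_past, 0)
    # 0 = success; positive values are recoverable warnings (see llama_decode above).
    if llama_decode(ctx, batch) != 0:
        raise RuntimeError("llama_decode failed")
    logits = llama_get_logits_ith(ctx, -1)
    n_vocab = llama_n_vocab(model)
    # Greedy argmax over the vocabulary.
    return max(range(n_vocab), key=lambda i: logits[i])
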


# // Logits for the ith token. For positive indices, Equivalent to:
# // llama_get_logits(ctx) + ctx->output_ids[i]*n_vocab
# // Negative indicies can be used to access logits in reverse order, -1 is the last logit.
# // returns NULL for invalid ids.
# LLAMA_API float * llama_get_logits_ith(struct llama_context * ctx, int32_t i);
@ctypes_function(
    "llama_get_logits_ith",
    [llama_context_p_ctypes, ctypes.c_int32],
    ctypes.POINTER(ctypes.c_float),
)
def llama_get_logits_ith(
    ctx: llama_context_p, i: Union[ctypes.c_int32, int], /
) -> CtypesArray[ctypes.c_float]:
    """Logits for the ith token. For positive indices, equivalent to:
    llama_get_logits(ctx) + ctx->output_ids[i]*n_vocab
    Negative indices can be used to access logits in reverse order, -1 is the last logit.
    Returns NULL for invalid ids."""
    ...


# // Get all output token embeddings.
# // when pooling_type == LLAMA_POOLING_TYPE_NONE or when using a generative model,
# // the embeddings for which llama_batch.logits[i] != 0 are stored contiguously
# // in the order they have appeared in the batch.
# // shape: [n_outputs*n_embd]
# // Otherwise, returns NULL.
# LLAMA_API float * llama_get_embeddings(struct llama_context * ctx);
@ctypes_function(
    "llama_get_embeddings", [llama_context_p_ctypes], ctypes.POINTER(ctypes.c_float)
)
def llama_get_embeddings(ctx: llama_context_p, /) -> CtypesArray[ctypes.c_float]:
    """Get all output token embeddings.
    When pooling_type == LLAMA_POOLING_TYPE_NONE or when using a generative model,
    the embeddings for which llama_batch.logits[i] != 0 are stored contiguously
    in the order they have appeared in the batch.
    shape: [n_outputs*n_embd]
    Otherwise, returns NULL."""
    ...


# // Get the embeddings for the ith token. For positive indices, Equivalent to:
# // llama_get_embeddings(ctx) + ctx->output_ids[i]*n_embd
# // Negative indicies can be used to access embeddings in reverse order, -1 is the last embedding.
# // shape: [n_embd] (1-dimensional)
# // returns NULL for invalid ids.
# LLAMA_API float * llama_get_embeddings_ith(struct llama_context * ctx, int32_t i);
@ctypes_function(
    "llama_get_embeddings_ith",
    [llama_context_p_ctypes, ctypes.c_int32],
    ctypes.POINTER(ctypes.c_float),
)
def llama_get_embeddings_ith(
    ctx: llama_context_p, i: Union[ctypes.c_int32, int], /
) -> CtypesArray[ctypes.c_float]:
    """Get the embeddings for the ith token. For positive indices, equivalent to:
    llama_get_embeddings(ctx) + ctx->output_ids[i]*n_embd
    Negative indices can be used to access embeddings in reverse order, -1 is the last embedding.
    shape: [n_embd] (1-dimensional)
    Returns NULL for invalid ids."""
    ...


# // Get the embeddings for a sequence id
# // Returns NULL if pooling_type is LLAMA_POOLING_TYPE_NONE
# // shape: [n_embd] (1-dimensional)
# LLAMA_API float * llama_get_embeddings_seq(struct llama_context * ctx, llama_seq_id seq_id);
@ctypes_function(
    "llama_get_embeddings_seq",
    [llama_context_p_ctypes, llama_seq_id],
    ctypes.POINTER(ctypes.c_float),
)
def llama_get_embeddings_seq(
    ctx: llama_context_p, seq_id: Union[llama_seq_id, int], /
) -> CtypesArray[ctypes.c_float]:
    """Get the embeddings for a sequence id
    Returns NULL if pooling_type is LLAMA_POOLING_TYPE_NONE
    shape: [n_embd] (1-dimensional)"""
    ...
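

# Illustrative sketch (not part of the upstream API): reading a pooled sequence
# embedding as a plain Python list. It returns None when llama_get_embeddings_seq
# returns NULL (e.g. pooling_type is LLAMA_POOLING_TYPE_NONE); the helper name is a
# local assumption.
def _example_sequence_embedding(
    ctx: llama_context_p, model: llama_model_p, seq_id: int = 0
) -> Optional[List[float]]:
    ptr = llama_get_embeddings_seq(ctx, seq_id)
    if not ptr:
        return None
    return ptr[: llama_n_embd(model)]
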


# //
# // Vocab
# //


# LLAMA_API const char * llama_token_get_text(const struct llama_model * model, llama_token token);
@ctypes_function(
    "llama_token_get_text", [llama_model_p_ctypes, llama_token], ctypes.c_char_p
)
def llama_token_get_text(
    model: llama_model_p, token: Union[llama_token, int], /
) -> bytes: ...


# LLAMA_API float llama_token_get_score(const struct llama_model * model, llama_token token);
@ctypes_function(
    "llama_token_get_score", [llama_model_p_ctypes, llama_token], ctypes.c_float
)
def llama_token_get_score(
    model: llama_model_p, token: Union[llama_token, int], /
) -> float: ...


# LLAMA_API enum llama_token_attr llama_token_get_attr(const struct llama_model * model, llama_token token);
@ctypes_function(
    "llama_token_get_attr", [llama_model_p_ctypes, llama_token], ctypes.c_int
)
def llama_token_get_attr(
    model: llama_model_p, token: Union[llama_token, int], /
) -> int: ...


# // Check if the token is supposed to end generation (end-of-generation, eg. EOS, EOT, etc.)
# LLAMA_API bool llama_token_is_eog(const struct llama_model * model, llama_token token);
@ctypes_function(
    "llama_token_is_eog", [llama_model_p_ctypes, llama_token], ctypes.c_bool
)
def llama_token_is_eog(model: llama_model_p, token: Union[llama_token, int], /) -> bool:
    """Check if the token is supposed to end generation (end-of-generation, eg. EOS, EOT, etc.)"""
    ...


# // Identify if Token Id is a control token or a render-able token
# LLAMA_API bool llama_token_is_control(const struct llama_model * model, llama_token token);
@ctypes_function(
    "llama_token_is_control", [llama_model_p_ctypes, llama_token], ctypes.c_bool
)
def llama_token_is_control(
    model: llama_model_p, token: Union[llama_token, int], /
) -> bool:
    """Identify if Token Id is a control token or a render-able token"""
    ...


# // Special tokens


# LLAMA_API llama_token llama_token_bos(const struct llama_model * model); // beginning-of-sentence
@ctypes_function("llama_token_bos", [llama_model_p_ctypes], llama_token)
def llama_token_bos(model: llama_model_p, /) -> int:
    """beginning-of-sentence"""
    ...


# LLAMA_API llama_token llama_token_eos(const struct llama_model * model); // end-of-sentence
@ctypes_function("llama_token_eos", [llama_model_p_ctypes], llama_token)
def llama_token_eos(model: llama_model_p, /) -> int:
    """end-of-sentence"""
    ...


# LLAMA_API llama_token llama_token_cls(const struct llama_model * model); // classification
@ctypes_function("llama_token_cls", [llama_model_p_ctypes], llama_token)
def llama_token_cls(model: llama_model_p, /) -> int:
    """classification"""
    ...


# LLAMA_API llama_token llama_token_sep(const struct llama_model * model); // sentence separator
@ctypes_function("llama_token_sep", [llama_model_p_ctypes], llama_token)
def llama_token_sep(model: llama_model_p, /) -> int:
    """sentence separator"""
    ...


# LLAMA_API llama_token llama_token_nl (const struct llama_model * model); // next-line
@ctypes_function("llama_token_nl", [llama_model_p_ctypes], llama_token)
def llama_token_nl(model: llama_model_p, /) -> int:
    """next-line"""
    ...


# // Returns -1 if unknown, 1 for true or 0 for false.
# LLAMA_API int32_t llama_add_bos_token(const struct llama_model * model);
@ctypes_function("llama_add_bos_token", [llama_model_p_ctypes], ctypes.c_int32)
def llama_add_bos_token(model: llama_model_p, /) -> int:
    """Returns -1 if unknown, 1 for true or 0 for false."""
    ...


# // Returns -1 if unknown, 1 for true or 0 for false.
# LLAMA_API int32_t llama_add_eos_token(const struct llama_model * model);
@ctypes_function("llama_add_eos_token", [llama_model_p_ctypes], ctypes.c_int32)
def llama_add_eos_token(model: llama_model_p, /) -> int:
    """Returns -1 if unknown, 1 for true or 0 for false."""
    ...


# // Codellama infill tokens
# LLAMA_API llama_token llama_token_prefix(const struct llama_model * model); // Beginning of infill prefix
@ctypes_function("llama_token_prefix", [llama_model_p_ctypes], llama_token)
def llama_token_prefix(model: llama_model_p, /) -> int:
    """codellama infill tokens"""
    ...


# LLAMA_API llama_token llama_token_middle(const struct llama_model * model); // Beginning of infill middle
@ctypes_function("llama_token_middle", [llama_model_p_ctypes], llama_token)
def llama_token_middle(model: llama_model_p, /) -> int: ...


# LLAMA_API llama_token llama_token_suffix(const struct llama_model * model); // Beginning of infill suffix
@ctypes_function("llama_token_suffix", [llama_model_p_ctypes], llama_token)
def llama_token_suffix(model: llama_model_p, /) -> int: ...


# LLAMA_API llama_token llama_token_eot (const struct llama_model * model); // End of infill middle
@ctypes_function("llama_token_eot", [llama_model_p_ctypes], llama_token)
def llama_token_eot(model: llama_model_p, /) -> int: ...
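

# Illustrative sketch (not part of the upstream API): collecting the special tokens a
# prompt builder typically needs from the helpers above. The helper name and the
# dictionary keys are local assumptions.
def _example_special_tokens(model: llama_model_p) -> dict:
    return {
        "bos": llama_token_bos(model),
        "eos": llama_token_eos(model),
        "nl": llama_token_nl(model),
        # -1 if unknown, 1 for true, 0 for false
        "add_bos": llama_add_bos_token(model),
        "add_eos": llama_add_eos_token(model),
    }
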
2023-10-03 19:23:35 +00:00
2023-08-24 04:17:00 +00:00
# //
# // Tokenization
# //
2023-10-19 06:55:08 +00:00
# /// @details Convert the provided text into tokens.
# /// @param tokens The tokens pointer must be large enough to hold the resulting tokens.
2024-03-13 19:57:35 +00:00
# /// @return Returns the number of tokens on success, no more than n_tokens_max
2023-10-19 06:55:08 +00:00
# /// @return Returns a negative number on failure - the number of tokens that would have been returned
2024-04-10 06:25:58 +00:00
# /// @param parse_special Allow tokenizing special and/or control tokens which otherwise are not exposed and treated
# /// as plaintext. Does not insert a leading space.
2024-01-04 03:04:04 +00:00
# LLAMA_API int32_t llama_tokenize(
2023-10-19 06:55:08 +00:00
# const struct llama_model * model,
# const char * text,
2024-01-04 03:04:04 +00:00
# int32_t text_len,
2023-10-19 06:55:08 +00:00
# llama_token * tokens,
2024-03-13 19:57:35 +00:00
# int32_t n_tokens_max,
2024-04-10 06:25:58 +00:00
# bool add_special,
# bool parse_special);
2024-02-23 08:39:38 +00:00
@ctypes_function(
    "llama_tokenize",
    [
        llama_model_p_ctypes,
        ctypes.c_char_p,
        ctypes.c_int32,
        llama_token_p,
        ctypes.c_int32,
        ctypes.c_bool,
        ctypes.c_bool,
    ],
    ctypes.c_int32,
)
def llama_tokenize(
    model: llama_model_p,
    text: bytes,
    text_len: Union[ctypes.c_int, int],
    tokens: CtypesArray[llama_token],
    n_tokens_max: Union[ctypes.c_int, int],
    add_special: Union[ctypes.c_bool, bool],
    parse_special: Union[ctypes.c_bool, bool],
    /,
) -> int:
    """Convert the provided text into tokens.

    Args:
        model: The model to use for tokenization.
        text: The text to tokenize.
        text_len: The length of the text.
        tokens: The tokens pointer must be large enough to hold the resulting tokens.
        n_tokens_max: The maximum number of tokens to return.
        add_special: Allow adding special tokens (e.g. BOS) if the model is configured to do so.
        parse_special: Allow tokenizing special and/or control tokens which otherwise are not exposed and treated as plaintext. Does not insert a leading space.

    Returns:
        Returns the number of tokens on success, no more than n_tokens_max.
        Returns a negative number on failure - the number of tokens that would have been returned.
    """
    ...

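# Illustrative helper (a sketch, not part of the bindings): tokenizing a UTF-8 byte string with
# llama_tokenize. The initial buffer size and the retry-on-negative-return pattern follow the
# documented contract above; `model` is assumed to be a loaded llama_model_p handle.
def _example_tokenize(model: llama_model_p, text: bytes, add_special: bool = True) -> List[int]:
    # Start with a generous guess; a negative return value reports how many tokens were needed.
    n_tokens_max = len(text) + 1
    tokens = (llama_token * n_tokens_max)()
    n = llama_tokenize(model, text, len(text), tokens, n_tokens_max, add_special, False)
    if n < 0:
        n_tokens_max = -n
        tokens = (llama_token * n_tokens_max)()
        n = llama_tokenize(model, text, len(text), tokens, n_tokens_max, add_special, False)
    return [tokens[i] for i in range(n)]
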
# // Token Id -> Piece.
# // Uses the vocabulary in the provided context.
# // Does not write null terminator to the buffer.
# // User code is responsible to remove the leading whitespace of the first non-BOS token when decoding multiple tokens.
# // @param special If true, special tokens are rendered in the output.
# LLAMA_API int32_t llama_token_to_piece(
#     const struct llama_model * model,
#     llama_token token,
#     char * buf,
#     int32_t length,
#     bool special);
@ctypes_function(
    "llama_token_to_piece",
    [
        llama_model_p_ctypes,
        llama_token,
        ctypes.c_char_p,
        ctypes.c_int32,
        ctypes.c_bool,
    ],
    ctypes.c_int32,
)
def llama_token_to_piece(
    model: llama_model_p,
    token: Union[llama_token, int],
    buf: Union[ctypes.c_char_p, bytes, CtypesArray[ctypes.c_char]],
    length: Union[ctypes.c_int, int],
    special: Union[ctypes.c_bool, bool],
    /,
) -> int:
    """Token Id -> Piece.
    Uses the vocabulary in the provided context.
    Does not write null terminator to the buffer.
    User code is responsible to remove the leading whitespace of the first non-BOS token when decoding multiple tokens.

    Args:
        model: The model to use for tokenization.
        token: The token to convert.
        buf: The buffer to write the token to.
        length: The length of the buffer.
        special: If true, special tokens are rendered in the output."""
    ...

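# Illustrative sketch (not part of the bindings): converting a single token id to its text piece.
# The 32-byte scratch buffer and the assumption that a negative return value is the required size
# are this sketch's choices; `model` is assumed to be a loaded llama_model_p.
def _example_token_to_piece(model: llama_model_p, token_id: int) -> bytes:
    size = 32
    while True:
        buf = ctypes.create_string_buffer(size)
        n = llama_token_to_piece(model, token_id, buf, size, False)
        if n < 0:
            size = -n  # buffer too small in this sketch's assumed contract; retry
            continue
        return buf.raw[:n]
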
# /// Apply chat template. Inspired by hf apply_chat_template() on python.
# /// Both "model" and "custom_template" are optional, but at least one is required. "custom_template" has higher precedence than "model"
# /// NOTE: This function does not use a jinja parser. It only support a pre-defined list of template. See more: https://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template
# /// @param tmpl A Jinja template to use for this chat. If this is nullptr, the model's default chat template will be used instead.
# /// @param chat Pointer to a list of multiple llama_chat_message
# /// @param n_msg Number of llama_chat_message in this chat
# /// @param add_ass Whether to end the prompt with the token(s) that indicate the start of an assistant message.
# /// @param buf A buffer to hold the output formatted prompt. The recommended alloc size is 2 * (total number of characters of all messages)
# /// @param length The size of the allocated buffer
# /// @return The total number of bytes of the formatted prompt. If is it larger than the size of buffer, you may need to re-alloc it and then re-apply the template.
# LLAMA_API int32_t llama_chat_apply_template(
#     const struct llama_model * model,
#     const char * tmpl,
#     const struct llama_chat_message * chat,
#     size_t n_msg,
#     bool add_ass,
#     char * buf,
#     int32_t length);
@ctypes_function(
    "llama_chat_apply_template",
    [
        ctypes.c_void_p,
        ctypes.c_char_p,
        ctypes.POINTER(llama_chat_message),
        ctypes.c_size_t,
        ctypes.c_bool,
        ctypes.c_char_p,
        ctypes.c_int32,
    ],
    ctypes.c_int32,
)
def llama_chat_apply_template(
    model: llama_model_p,
    tmpl: bytes,
    chat: CtypesArray[llama_chat_message],
    n_msg: Union[ctypes.c_size_t, int],
    add_ass: Union[ctypes.c_bool, bool],
    buf: Union[ctypes.c_char_p, bytes, CtypesArray[ctypes.c_char]],
    length: Union[ctypes.c_int32, int],
    /,
) -> int:
    """Apply chat template to a list of messages.

    Mirrors the C declaration above: add_ass ends the prompt with the assistant prefix, and
    buf/length describe the output buffer. Returns the total number of bytes of the formatted
    prompt; if it is larger than length, re-alloc the buffer and re-apply the template.
    """
    ...

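# Illustrative sketch (not part of the bindings): formatting a chat with the model's built-in
# template. Passing None as the template, add_ass=True, and the "2 * total characters" buffer
# sizing are example choices taken from the header comment above; `model` is assumed loaded.
def _example_chat_apply_template(
    model: llama_model_p, roles: List[bytes], contents: List[bytes]
) -> bytes:
    n_msg = len(roles)
    chat = (llama_chat_message * n_msg)()
    for i in range(n_msg):
        chat[i].role = roles[i]
        chat[i].content = contents[i]
    buf_len = 2 * sum(len(r) + len(c) for r, c in zip(roles, contents))
    buf = ctypes.create_string_buffer(buf_len)
    n = llama_chat_apply_template(model, None, chat, n_msg, True, buf, buf_len)
    if n > buf_len:
        # Output was larger than the buffer: re-alloc and re-apply as the header suggests.
        buf = ctypes.create_string_buffer(n)
        n = llama_chat_apply_template(model, None, chat, n_msg, True, buf, n)
    return buf.raw[:n]
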
# //
# // Grammar
# //


# LLAMA_API struct llama_grammar * llama_grammar_init(
#     const llama_grammar_element ** rules,
#     size_t n_rules,
#     size_t start_rule_index);
@ctypes_function(
    "llama_grammar_init",
    [
        ctypes.POINTER(llama_grammar_element_p),
        ctypes.c_size_t,
        ctypes.c_size_t,
    ],
    llama_grammar_p,
)
def llama_grammar_init(
    rules: CtypesArray[
        CtypesPointer[llama_grammar_element]
    ],  # NOTE: This might be wrong type sig
    n_rules: Union[ctypes.c_size_t, int],
    start_rule_index: Union[ctypes.c_size_t, int],
    /,
) -> llama_grammar_p:
    """Initialize a grammar from a set of rules."""
    ...


# LLAMA_API void llama_grammar_free(struct llama_grammar * grammar);
@ctypes_function(
    "llama_grammar_free",
    [llama_grammar_p],
    None,
)
def llama_grammar_free(grammar: llama_grammar_p, /):
    """Free a grammar."""
    ...


# LLAMA_API struct llama_grammar * llama_grammar_copy(const struct llama_grammar * grammar);
@ctypes_function(
    "llama_grammar_copy",
    [llama_grammar_p],
    llama_grammar_p,
)
def llama_grammar_copy(grammar: llama_grammar_p, /) -> llama_grammar_p:
    """Copy a grammar."""
    ...

# //
# // Sampling functions
# //


# // Sets the current rng seed.
# LLAMA_API void llama_set_rng_seed(struct llama_context * ctx, uint32_t seed);
@ctypes_function(
    "llama_set_rng_seed",
    [llama_context_p_ctypes, ctypes.c_uint32],
    None,
)
def llama_set_rng_seed(ctx: llama_context_p, seed: Union[ctypes.c_uint32, int], /):
    """Sets the current rng seed."""
    ...

# /// @details Repetition penalty described in CTRL academic paper https://arxiv.org/abs/1909.05858, with negative logit fix.
# /// @details Frequency and presence penalties described in OpenAI API https://platform.openai.com/docs/api-reference/parameter-details.
# LLAMA_API void llama_sample_repetition_penalties(
#     struct llama_context * ctx,
#     llama_token_data_array * candidates,
#     const llama_token * last_tokens,
#     size_t penalty_last_n,
#     float penalty_repeat,
#     float penalty_freq,
#     float penalty_present);
@ctypes_function(
    "llama_sample_repetition_penalties",
    [
        llama_context_p_ctypes,
        llama_token_data_array_p,
        llama_token_p,
        ctypes.c_size_t,
        ctypes.c_float,
        ctypes.c_float,
        ctypes.c_float,
    ],
    None,
)
def llama_sample_repetition_penalties(
    ctx: llama_context_p,
    candidates: Union[
        CtypesArray[llama_token_data_array], CtypesPointerOrRef[llama_token_data_array]
    ],
    last_tokens_data: CtypesArray[llama_token],
    penalty_last_n: Union[ctypes.c_size_t, int],
    penalty_repeat: Union[ctypes.c_float, float],
    penalty_freq: Union[ctypes.c_float, float],
    penalty_present: Union[ctypes.c_float, float],
    /,
):
    """Repetition penalty described in CTRL academic paper https://arxiv.org/abs/1909.05858, with negative logit fix.
    Frequency and presence penalties described in OpenAI API https://platform.openai.com/docs/api-reference/parameter-details.
    """
    ...

# /// @details Apply classifier-free guidance to the logits as described in academic paper "Stay on topic with Classifier-Free Guidance" https://arxiv.org/abs/2306.17806
# /// @param logits Logits extracted from the original generation context.
# /// @param logits_guidance Logits extracted from a separate context from the same model. Other than a negative prompt at the beginning, it should have all generated and user input tokens copied from the main context.
# /// @param scale Guidance strength. 1.0f means no guidance. Higher values mean stronger guidance.
# LLAMA_API void llama_sample_apply_guidance(
#     struct llama_context * ctx,
#     float * logits,
#     float * logits_guidance,
#     float scale);
@ctypes_function(
    "llama_sample_apply_guidance",
    [
        llama_context_p_ctypes,
        ctypes.POINTER(ctypes.c_float),
        ctypes.POINTER(ctypes.c_float),
        ctypes.c_float,
    ],
    None,
)
def llama_sample_apply_guidance(
    ctx: llama_context_p,
    logits: CtypesArray[ctypes.c_float],
    logits_guidance: CtypesArray[ctypes.c_float],
    scale: Union[ctypes.c_float, float],
    /,
):
    """Apply classifier-free guidance to the logits as described in academic paper "Stay on topic with Classifier-Free Guidance" https://arxiv.org/abs/2306.17806"""
    ...

# /// @details Sorts candidate tokens by their logits in descending order and calculate probabilities based on logits.
# LLAMA_API void llama_sample_softmax(
#     struct llama_context * ctx,
#     llama_token_data_array * candidates);
@ctypes_function(
    "llama_sample_softmax",
    [llama_context_p_ctypes, llama_token_data_array_p],
    None,
)
def llama_sample_softmax(
    ctx: llama_context_p,
    candidates: Union[
        CtypesArray[llama_token_data_array], CtypesPointerOrRef[llama_token_data_array]
    ],
    /,
):
    """Sorts candidate tokens by their logits in descending order and calculates probabilities based on logits."""
    ...

# /// @details Top-K sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751
# LLAMA_API void llama_sample_top_k(
#     struct llama_context * ctx,
#     llama_token_data_array * candidates,
#     int32_t k,
#     size_t min_keep);
@ctypes_function(
    "llama_sample_top_k",
    [llama_context_p_ctypes, llama_token_data_array_p, ctypes.c_int32, ctypes.c_size_t],
    None,
)
def llama_sample_top_k(
    ctx: llama_context_p,
    candidates: Union[
        CtypesArray[llama_token_data_array], CtypesPointerOrRef[llama_token_data_array]
    ],
    k: Union[ctypes.c_int, int],
    min_keep: Union[ctypes.c_size_t, int],
    /,
):
    """Top-K sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751"""
    ...

# /// @details Nucleus sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751
# LLAMA_API void llama_sample_top_p(
#     struct llama_context * ctx,
#     llama_token_data_array * candidates,
#     float p,
#     size_t min_keep);
@ctypes_function(
    "llama_sample_top_p",
    [llama_context_p_ctypes, llama_token_data_array_p, ctypes.c_float, ctypes.c_size_t],
    None,
)
def llama_sample_top_p(
    ctx: llama_context_p,
    candidates: Union[
        CtypesArray[llama_token_data_array], CtypesPointerOrRef[llama_token_data_array]
    ],
    p: Union[ctypes.c_float, float],
    min_keep: Union[ctypes.c_size_t, int],
    /,
):
    """Nucleus sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751"""
    ...

# /// @details Minimum P sampling as described in https://github.com/ggerganov/llama.cpp/pull/3841
# LLAMA_API void llama_sample_min_p(
#     struct llama_context * ctx,
#     llama_token_data_array * candidates,
#     float p,
#     size_t min_keep);
@ctypes_function(
    "llama_sample_min_p",
    [llama_context_p_ctypes, llama_token_data_array_p, ctypes.c_float, ctypes.c_size_t],
    None,
)
def llama_sample_min_p(
    ctx: llama_context_p,
    candidates: Union[
        CtypesArray[llama_token_data_array], CtypesPointerOrRef[llama_token_data_array]
    ],
    p: Union[ctypes.c_float, float],
    min_keep: Union[ctypes.c_size_t, int],
    /,
):
    """Minimum P sampling as described in https://github.com/ggerganov/llama.cpp/pull/3841"""
    ...

# /// @details Tail Free Sampling described in https://www.trentonbricken.com/Tail-Free-Sampling/.
# LLAMA_API void llama_sample_tail_free(
#     struct llama_context * ctx,
#     llama_token_data_array * candidates,
#     float z,
#     size_t min_keep);
@ctypes_function(
    "llama_sample_tail_free",
    [llama_context_p_ctypes, llama_token_data_array_p, ctypes.c_float, ctypes.c_size_t],
    None,
)
def llama_sample_tail_free(
    ctx: llama_context_p,
    candidates: Union[
        CtypesArray[llama_token_data_array], CtypesPointerOrRef[llama_token_data_array]
    ],
    z: Union[ctypes.c_float, float],
    min_keep: Union[ctypes.c_size_t, int],
    /,
):
    """Tail Free Sampling described in https://www.trentonbricken.com/Tail-Free-Sampling/."""
    ...

# /// @details Locally Typical Sampling implementation described in the paper https://arxiv.org/abs/2202.00666.
# LLAMA_API void llama_sample_typical(
#     struct llama_context * ctx,
#     llama_token_data_array * candidates,
#     float p,
#     size_t min_keep);
@ctypes_function(
    "llama_sample_typical",
    [llama_context_p_ctypes, llama_token_data_array_p, ctypes.c_float, ctypes.c_size_t],
    None,
)
def llama_sample_typical(
    ctx: llama_context_p,
    candidates: Union[
        CtypesArray[llama_token_data_array], CtypesPointerOrRef[llama_token_data_array]
    ],
    p: Union[ctypes.c_float, float],
    min_keep: Union[ctypes.c_size_t, int],
    /,
):
    """Locally Typical Sampling implementation described in the paper https://arxiv.org/abs/2202.00666."""
    ...

# /// @details Dynamic temperature implementation described in the paper https://arxiv.org/abs/2309.02772.
# LLAMA_API void llama_sample_entropy(
#     struct llama_context * ctx,
#     llama_token_data_array * candidates_p,
#     float min_temp,
#     float max_temp,
#     float exponent_val);
@ctypes_function(
    "llama_sample_entropy",
    [
        llama_context_p_ctypes,
        llama_token_data_array_p,
        ctypes.c_float,
        ctypes.c_float,
        ctypes.c_float,
    ],
    None,
)
def llama_sample_entropy(
    ctx: llama_context_p,
    candidates: Union[
        CtypesArray[llama_token_data_array], CtypesPointerOrRef[llama_token_data_array]
    ],
    min_temp: Union[ctypes.c_float, float],
    max_temp: Union[ctypes.c_float, float],
    exponent_val: Union[ctypes.c_float, float],
    /,
):
    """Dynamic temperature implementation described in the paper https://arxiv.org/abs/2309.02772."""
    ...

# LLAMA_API void llama_sample_temp(
#     struct llama_context * ctx,
#     llama_token_data_array * candidates,
#     float temp);
@ctypes_function(
    "llama_sample_temp",
    [llama_context_p_ctypes, llama_token_data_array_p, ctypes.c_float],
    None,
)
def llama_sample_temp(
    ctx: llama_context_p,
    candidates: Union[
        CtypesArray[llama_token_data_array], CtypesPointerOrRef[llama_token_data_array]
    ],
    temp: Union[ctypes.c_float, float],
    /,
):
    """Temperature sampling described in academic paper "Generating Long Sequences with Sparse Transformers" https://arxiv.org/abs/1904.10509

    Parameters:
        candidates: A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.
        temp: The temperature value to use for the sampling. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.
    """
    ...

# /// @details Apply constraints from grammar
# LLAMA_API void llama_sample_grammar(
#     struct llama_context * ctx,
#     llama_token_data_array * candidates,
#     const struct llama_grammar * grammar);
@ctypes_function(
    "llama_sample_grammar",
    [llama_context_p_ctypes, llama_token_data_array_p, llama_grammar_p],
    None,
)
def llama_sample_grammar(
    ctx: llama_context_p,
    candidates: Union[
        CtypesArray[llama_token_data_array], CtypesPointerOrRef[llama_token_data_array]
    ],
    grammar,  # type: llama_grammar_p
    /,
):
    """Apply constraints from grammar

    Parameters:
        candidates: A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.
        grammar: A grammar object containing the rules and constraints to apply to the generated text.
    """
    ...

# /// @details Mirostat 1.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.
# /// @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.
# /// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.
# /// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.
# /// @param m The number of tokens considered in the estimation of `s_hat`. This is an arbitrary value that is used to calculate `s_hat`, which in turn helps to calculate the value of `k`. In the paper, they use `m = 100`, but you can experiment with different values to see how it affects the performance of the algorithm.
# /// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal.
# LLAMA_API llama_token llama_sample_token_mirostat(
#     struct llama_context * ctx,
#     llama_token_data_array * candidates,
#     float tau,
#     float eta,
#     int32_t m,
#     float * mu);
@ctypes_function(
    "llama_sample_token_mirostat",
    [
        llama_context_p_ctypes,
        llama_token_data_array_p,
        ctypes.c_float,
        ctypes.c_float,
        ctypes.c_int32,
        ctypes.POINTER(ctypes.c_float),
    ],
    llama_token,
)
def llama_sample_token_mirostat(
    ctx: llama_context_p,
    candidates: Union[
        CtypesArray[llama_token_data_array], CtypesPointerOrRef[llama_token_data_array]
    ],
    tau: Union[ctypes.c_float, float],
    eta: Union[ctypes.c_float, float],
    m: Union[ctypes.c_int, int],
    mu: CtypesPointerOrRef[ctypes.c_float],
    /,
) -> int:
    """Mirostat 1.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.

    Parameters:
        candidates: A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.
        tau: The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.
        eta: The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.
        m: The number of tokens considered in the estimation of `s_hat`. This is an arbitrary value that is used to calculate `s_hat`, which in turn helps to calculate the value of `k`. In the paper, they use `m = 100`, but you can experiment with different values to see how it affects the performance of the algorithm.
        mu: Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal.
    """
    ...

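# Illustrative sketch (not part of the bindings): keeping the Mirostat `mu` state alive across
# sampling calls. The 2 * tau initialization follows the parameter description above; tau, eta
# and m values are example choices, and `ctx`/`candidates` are assumed to be prepared by the
# caller (a llama_token_data_array passed by reference).
def _example_sample_mirostat(
    ctx: llama_context_p,
    candidates,  # llama_token_data_array, passed via ctypes.byref
    mu: ctypes.c_float,  # persistent state, e.g. ctypes.c_float(2.0 * tau), reused every call
    tau: float = 5.0,
    eta: float = 0.1,
    m: int = 100,
) -> int:
    # The C function updates *mu in place, so the same c_float must be passed on every call.
    return llama_sample_token_mirostat(
        ctx, ctypes.byref(candidates), tau, eta, m, ctypes.byref(mu)
    )
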
# /// @details Mirostat 2.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.
# /// @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.
# /// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.
# /// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.
# /// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal.
# LLAMA_API llama_token llama_sample_token_mirostat_v2(
#     struct llama_context * ctx,
#     llama_token_data_array * candidates,
#     float tau,
#     float eta,
#     float * mu);
@ctypes_function(
    "llama_sample_token_mirostat_v2",
    [
        llama_context_p_ctypes,
        llama_token_data_array_p,
        ctypes.c_float,
        ctypes.c_float,
        ctypes.POINTER(ctypes.c_float),
    ],
    llama_token,
)
def llama_sample_token_mirostat_v2(
    ctx: llama_context_p,
    candidates: Union[
        CtypesArray[llama_token_data_array], CtypesPointerOrRef[llama_token_data_array]
    ],
    tau: Union[ctypes.c_float, float],
    eta: Union[ctypes.c_float, float],
    mu: CtypesPointerOrRef[ctypes.c_float],
    /,
) -> int:
    """Mirostat 2.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.

    Parameters:
        candidates: A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.
        tau: The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.
        eta: The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.
        mu: Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal.
    """
    ...

# /// @details Selects the token with the highest probability.
# /// Does not compute the token probabilities. Use llama_sample_softmax() instead.
# LLAMA_API llama_token llama_sample_token_greedy(
#     struct llama_context * ctx,
#     llama_token_data_array * candidates);
@ctypes_function(
    "llama_sample_token_greedy",
    [llama_context_p_ctypes, llama_token_data_array_p],
    llama_token,
)
def llama_sample_token_greedy(
    ctx: llama_context_p,
    candidates: Union[
        CtypesArray[llama_token_data_array], CtypesPointerOrRef[llama_token_data_array]
    ],
    /,
) -> int:
    """Selects the token with the highest probability."""
    ...

# /// @details Randomly selects a token from the candidates based on their probabilities using the RNG of ctx.
# LLAMA_API llama_token llama_sample_token(
#     struct llama_context * ctx,
#     llama_token_data_array * candidates);
@ctypes_function(
    "llama_sample_token",
    [llama_context_p_ctypes, llama_token_data_array_p],
    llama_token,
)
def llama_sample_token(
    ctx: llama_context_p,
    candidates: Union[
        CtypesArray[llama_token_data_array], CtypesPointerOrRef[llama_token_data_array]
    ],
    /,
) -> int:
    """Randomly selects a token from the candidates based on their probabilities using the RNG of ctx."""
    ...

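# Illustrative sketch (not part of the bindings): a minimal sampling step that builds a
# llama_token_data_array from the most recent logits and applies top-k, top-p and temperature
# before drawing a token. The parameter defaults and min_keep=1 are example choices; `ctx` and
# `model` are assumed to come from the context/model constructors bound earlier in this module.
def _example_sample_next_token(
    ctx: llama_context_p,
    model: llama_model_p,
    top_k: int = 40,
    top_p: float = 0.95,
    temp: float = 0.8,
) -> int:
    n_vocab = llama_n_vocab(model)
    logits = llama_get_logits(ctx)
    # Copy the logits into a candidates array; probabilities are filled in by the samplers.
    data = (llama_token_data * n_vocab)()
    for token_id in range(n_vocab):
        data[token_id].id = token_id
        data[token_id].logit = logits[token_id]
        data[token_id].p = 0.0
    candidates = llama_token_data_array(data, n_vocab, False)
    llama_sample_top_k(ctx, ctypes.byref(candidates), top_k, 1)
    llama_sample_top_p(ctx, ctypes.byref(candidates), top_p, 1)
    llama_sample_temp(ctx, ctypes.byref(candidates), temp)
    return llama_sample_token(ctx, ctypes.byref(candidates))
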
# /// @details Accepts the sampled token into the grammar
# LLAMA_API void llama_grammar_accept_token(
#     struct llama_context * ctx,
#     struct llama_grammar * grammar,
#     llama_token token);
@ctypes_function(
    "llama_grammar_accept_token",
    [llama_context_p_ctypes, llama_grammar_p, llama_token],
    None,
)
def llama_grammar_accept_token(
    ctx: llama_context_p, grammar: llama_grammar_p, token: Union[llama_token, int], /
) -> None:
    """Accepts the sampled token into the grammar"""
    ...

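# Illustrative sketch (not part of the bindings): grammar-constrained sampling. The grammar is
# applied to the candidates before a token is drawn, and the chosen token is then fed back into
# the grammar state; `ctx`, `candidates` and `grammar` are assumed to be set up by the caller
# (see llama_grammar_init above).
def _example_sample_with_grammar(
    ctx: llama_context_p,
    candidates,  # llama_token_data_array, passed via ctypes.byref
    grammar: llama_grammar_p,
) -> int:
    llama_sample_grammar(ctx, ctypes.byref(candidates), grammar)
    token = llama_sample_token(ctx, ctypes.byref(candidates))
    llama_grammar_accept_token(ctx, grammar, token)
    return token
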
# //
# // Model split
# //


# /// @details Build a split GGUF final path for this chunk.
# /// llama_split_path(split_path, sizeof(split_path), "/models/ggml-model-q4_0", 2, 4) => split_path = "/models/ggml-model-q4_0-00002-of-00004.gguf"
# // Returns the split_path length.
# LLAMA_API int llama_split_path(char * split_path, size_t maxlen, const char * path_prefix, int split_no, int split_count);
@ctypes_function(
    "llama_split_path",
    [ctypes.c_char_p, ctypes.c_size_t, ctypes.c_char_p, ctypes.c_int, ctypes.c_int],
    ctypes.c_int,
)
def llama_split_path(
    split_path: bytes,
    maxlen: Union[ctypes.c_size_t, int],
    path_prefix: bytes,
    split_no: Union[ctypes.c_int, int],
    split_count: Union[ctypes.c_int, int],
    /,
) -> int:
    """Build a split GGUF final path for this chunk."""
    ...


# /// @details Extract the path prefix from the split_path if and only if the split_no and split_count match.
# /// llama_split_prefix(split_prefix, 64, "/models/ggml-model-q4_0-00002-of-00004.gguf", 2, 4) => split_prefix = "/models/ggml-model-q4_0"
# // Returns the split_prefix length.
# LLAMA_API int llama_split_prefix(char * split_prefix, size_t maxlen, const char * split_path, int split_no, int split_count);
@ctypes_function(
    "llama_split_prefix",
    [ctypes.c_char_p, ctypes.c_size_t, ctypes.c_char_p, ctypes.c_int, ctypes.c_int],
    ctypes.c_int,
)
def llama_split_prefix(
    split_prefix: bytes,
    maxlen: Union[ctypes.c_size_t, int],
    split_path: bytes,
    split_no: Union[ctypes.c_int, int],
    split_count: Union[ctypes.c_int, int],
    /,
) -> int:
    """Extract the path prefix from the split_path if and only if the split_no and split_count match."""
    ...

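# Illustrative sketch (not part of the bindings): building the path of one GGUF split, mirroring
# the example in the header comment above. The 1024-byte output buffer is an assumption.
def _example_split_path(path_prefix: bytes, split_no: int, split_count: int) -> bytes:
    buf = ctypes.create_string_buffer(1024)
    n = llama_split_path(buf, ctypes.sizeof(buf), path_prefix, split_no, split_count)
    return buf.raw[:n]


# With the header's own example inputs, _example_split_path(b"/models/ggml-model-q4_0", 2, 4)
# should yield b"/models/ggml-model-q4_0-00002-of-00004.gguf".
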
# Performance information


# LLAMA_API struct llama_timings llama_get_timings(struct llama_context * ctx);
@ctypes_function(
    "llama_get_timings",
    [llama_context_p_ctypes],
    llama_timings,
)
def llama_get_timings(ctx: llama_context_p, /) -> llama_timings:
    """Get performance information"""
    ...

# LLAMA_API void llama_print_timings(struct llama_context * ctx);
@ctypes_function(
    "llama_print_timings",
    [llama_context_p_ctypes],
    None,
)
def llama_print_timings(ctx: llama_context_p, /):
    """Print performance information"""
    ...

# LLAMA_API void llama_reset_timings(struct llama_context * ctx);
@ctypes_function(
    "llama_reset_timings",
    [llama_context_p_ctypes],
    None,
)
def llama_reset_timings(ctx: llama_context_p, /):
    """Reset performance information"""
    ...

# Print system information
# LLAMA_API const char * llama_print_system_info(void);
@ctypes_function(
    "llama_print_system_info",
    [],
    ctypes.c_char_p,
)
def llama_print_system_info() -> bytes:
    """Print system information"""
    ...

# NOTE: THIS IS CURRENTLY BROKEN AS ggml_log_callback IS NOT EXPOSED IN LLAMA.H
# // Set callback for all future logging events.
# // If this is not called, or NULL is supplied, everything is output on stderr.
# LLAMA_API void llama_log_set(ggml_log_callback log_callback, void * user_data);
@ctypes_function(
    "llama_log_set",
    [ctypes.c_void_p, ctypes.c_void_p],
    None,
)
def llama_log_set(
    log_callback: Optional[CtypesFuncPointer],
    user_data: ctypes.c_void_p,
    /,
):
    """Set callback for all future logging events.

    If this is not called, or NULL is supplied, everything is output on stderr."""
    ...

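# Illustrative sketch (not part of the bindings, and subject to the NOTE above): routing llama.cpp
# log output through a Python callable. The (level, text, user_data) callback shape follows the
# ggml_log_callback typedef and is an assumption here; the CFUNCTYPE object must be kept alive by
# the caller for as long as the library may invoke it.
_example_log_callback_t = ctypes.CFUNCTYPE(None, ctypes.c_int, ctypes.c_char_p, ctypes.c_void_p)


def _example_install_log_handler():
    """Install a stderr-forwarding log handler and return the callback to keep it alive."""

    def _handler(level: int, text: bytes, user_data: ctypes.c_void_p) -> None:
        sys.stderr.write(text.decode("utf-8", errors="replace"))

    cb = _example_log_callback_t(_handler)
    llama_log_set(ctypes.cast(cb, ctypes.c_void_p), ctypes.c_void_p(0))
    return cb
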
# LLAMA_API void llama_dump_timing_info_yaml(FILE * stream, const struct llama_context * ctx);
@ctypes_function(
    "llama_dump_timing_info_yaml",
    [ctypes.c_void_p, llama_context_p_ctypes],
    None,
)
def llama_dump_timing_info_yaml(stream: ctypes.c_void_p, ctx: llama_context_p, /): ...