Update llama.cpp

commit 2c2afa320f (parent 6dde6bd09c)
3 changed files with 86 additions and 5 deletions
llama_cpp/llama_cpp.py

@@ -205,6 +205,7 @@ LLAMA_ROPE_SCALING_LINEAR = 1
 LLAMA_ROPE_SCALING_YARN = 2
+LLAMA_ROPE_SCALING_MAX_VALUE = LLAMA_ROPE_SCALING_YARN

 # typedef struct llama_token_data {
 #     llama_token id;  // token id
 #     float logit;     // log-odds of the token
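The new `LLAMA_ROPE_SCALING_MAX_VALUE` alias tracks the highest defined scaling type, so callers can range-check a configured value without enumerating every constant. A minimal sketch (the `LLAMA_ROPE_SCALING_NONE` lower bound is an assumption about the surrounding enum, which this hunk does not show):

from llama_cpp import llama_cpp

def is_known_rope_scaling(value: int) -> bool:
    # Assumes LLAMA_ROPE_SCALING_NONE is the lowest defined type; the
    # MAX_VALUE alias added in this hunk is the upper bound.
    return llama_cpp.LLAMA_ROPE_SCALING_NONE <= value <= llama_cpp.LLAMA_ROPE_SCALING_MAX_VALUE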
@@ -661,6 +662,62 @@ def llama_rope_freq_scale_train(model: llama_model_p) -> float:
 _lib.llama_rope_freq_scale_train.argtypes = [llama_model_p]
 _lib.llama_rope_freq_scale_train.restype = c_float

+# // Functions to access the model's GGUF metadata scalar values
+# // - The functions return the length of the string on success, or -1 on failure
+# // - The output string is always null-terminated and cleared on failure
+# // - GGUF array values are not supported by these functions
+
+
+# // Get metadata value as a string by key name
+# LLAMA_API int llama_model_meta_val_str(const struct llama_model * model, const char * key, char * buf, size_t buf_size);
+def llama_model_meta_val_str(
+    model: llama_model_p, key: Union[c_char_p, bytes], buf: bytes, buf_size: int
+) -> int:
+    return _lib.llama_model_meta_val_str(model, key, buf, buf_size)
+
+
+_lib.llama_model_meta_val_str.argtypes = [llama_model_p, c_char_p, c_char_p, c_size_t]
+_lib.llama_model_meta_val_str.restype = c_int
+
+
+# // Get the number of metadata key/value pairs
+# LLAMA_API int llama_model_meta_count(const struct llama_model * model);
+def llama_model_meta_count(model: llama_model_p) -> int:
+    return _lib.llama_model_meta_count(model)
+
+
+_lib.llama_model_meta_count.argtypes = [llama_model_p]
+_lib.llama_model_meta_count.restype = c_int
+
+
+# // Get metadata key name by index
+# LLAMA_API int llama_model_meta_key_by_index(const struct llama_model * model, int i, char * buf, size_t buf_size);
+def llama_model_meta_key_by_index(
+    model: llama_model_p, i: Union[c_int, int], buf: bytes, buf_size: int
+) -> int:
+    return _lib.llama_model_meta_key_by_index(model, i, buf, buf_size)
+
+
+_lib.llama_model_meta_key_by_index.argtypes = [llama_model_p, c_int, c_char_p, c_size_t]
+_lib.llama_model_meta_key_by_index.restype = c_int
+
+
+# // Get metadata value as a string by index
+# LLAMA_API int llama_model_meta_val_str_by_index(const struct llama_model * model, int i, char * buf, size_t buf_size);
+def llama_model_meta_val_str_by_index(
+    model: llama_model_p, i: Union[c_int, int], buf: bytes, buf_size: int
+) -> int:
+    return _lib.llama_model_meta_val_str_by_index(model, i, buf, buf_size)
+
+
+_lib.llama_model_meta_val_str_by_index.argtypes = [
+    llama_model_p,
+    c_int,
+    c_char_p,
+    c_size_t,
+]
+_lib.llama_model_meta_val_str_by_index.restype = c_int
+
+
 # // Get a string describing the model type
 # LLAMA_API int llama_model_desc(const struct llama_model * model, char * buf, size_t buf_size);
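The new metadata accessors all follow the same ctypes pattern: the caller supplies a writable buffer, the function fills it with a null-terminated string, and the return value is the string length or -1 on failure. A minimal usage sketch, not part of this diff, assuming `model` was obtained from the library's model-loading call:

import ctypes
from llama_cpp import llama_cpp

def dump_gguf_metadata(model) -> None:
    # Reusable output buffer; per the comments above, the result is always
    # null-terminated and cleared on failure.
    buf = ctypes.create_string_buffer(1024)
    for i in range(llama_cpp.llama_model_meta_count(model)):
        if llama_cpp.llama_model_meta_key_by_index(model, i, buf, ctypes.sizeof(buf)) < 0:
            continue  # -1 signals failure for this index
        key = buf.value.decode("utf-8")
        if llama_cpp.llama_model_meta_val_str_by_index(model, i, buf, ctypes.sizeof(buf)) < 0:
            continue
        print(f"{key} = {buf.value.decode('utf-8')}")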
@@ -1213,7 +1270,9 @@ _lib.llama_token_get_text.restype = c_char_p


 # LLAMA_API float llama_token_get_score(const struct llama_model * model, llama_token token);
-def llama_token_get_score(model: llama_model_p, token: Union[llama_token, int]) -> float:
+def llama_token_get_score(
+    model: llama_model_p, token: Union[llama_token, int]
+) -> float:
     return _lib.llama_token_get_score(model, token)
@@ -1260,6 +1319,26 @@ _lib.llama_token_nl.argtypes = [llama_model_p]
 _lib.llama_token_nl.restype = llama_token


+# // Returns -1 if unknown, 1 for true or 0 for false.
+# LLAMA_API int llama_add_bos_token(const struct llama_model * model);
+def llama_add_bos_token(model: llama_model_p) -> int:
+    return _lib.llama_add_bos_token(model)
+
+
+_lib.llama_add_bos_token.argtypes = [llama_model_p]
+_lib.llama_add_bos_token.restype = c_int
+
+
+# // Returns -1 if unknown, 1 for true or 0 for false.
+# LLAMA_API int llama_add_eos_token(const struct llama_model * model);
+def llama_add_eos_token(model: llama_model_p) -> int:
+    return _lib.llama_add_eos_token(model)
+
+
+_lib.llama_add_eos_token.argtypes = [llama_model_p]
+_lib.llama_add_eos_token.restype = c_int
+
+
 # // codellama infill tokens
 # LLAMA_API llama_token llama_token_prefix(const struct llama_model * model); // Beginning of infill prefix
 def llama_token_prefix(model: llama_model_p) -> int:
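Note the tri-state return: these wrappers surface llama.cpp's -1/0/1 convention directly rather than coercing to a bool. A sketch of caller-side handling (the True fallback for the unknown case is an assumption of this sketch, not something the binding decides):

from llama_cpp import llama_cpp

def should_add_bos(model) -> bool:
    flag = llama_cpp.llama_add_bos_token(model)
    if flag != -1:
        return flag == 1  # 1 = true, 0 = false
    # -1 = unknown; this sketch assumes BOS by default, but a real caller
    # might instead inspect the model's vocabulary type.
    return True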
tests/test_llama.py

@@ -35,7 +35,7 @@ def test_llama_cpp_tokenization():

     tokens = llama.tokenize(text, special=True)
     assert tokens[-1] == llama.token_eos()
-    assert tokens == [1, 10994, 2787, 2]
+    assert tokens == [1, 15043, 2787, 2]


 def test_llama_patch(monkeypatch):
@@ -55,8 +55,11 @@ def test_llama_patch(monkeypatch):
     monkeypatch.setattr("llama_cpp.llama_cpp.llama_decode", mock_decode)
     monkeypatch.setattr("llama_cpp.llama_cpp.llama_get_logits", mock_get_logits)

+    text = "The quick brown fox"
+    text_tokens = llama.tokenize(text.encode("utf-8"), add_bos=True, special=True)
     output_text = " jumps over the lazy dog."
-    output_tokens = llama.tokenize(output_text.encode("utf-8"), add_bos=False, special=True)
+    all_text_tokens = llama.tokenize((text + output_text).encode("utf-8"), add_bos=True, special=True)
+    output_tokens = all_text_tokens[len(text_tokens):]
     token_eos = llama.token_eos()
     n = 0
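The rewritten setup tokenizes the prompt and the full prompt-plus-output string, then slices off the prompt's tokens, instead of tokenizing the output on its own; with BPE-style vocabularies the standalone encoding can differ at the boundary, where characters from both sides may merge into a single token. The same idiom in isolation (a sketch; `tokenize_fn` is a stand-in for any tokenizer with this signature):

def continuation_tokens(tokenize_fn, prompt: bytes, continuation: bytes) -> list:
    prompt_tokens = tokenize_fn(prompt, add_bos=True, special=True)
    all_tokens = tokenize_fn(prompt + continuation, add_bos=True, special=True)
    # Slicing guarantees the continuation tokens match what the model sees
    # after the prompt, even when a merge spans the boundary.
    return all_tokens[len(prompt_tokens):]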
@@ -70,7 +73,6 @@ def test_llama_patch(monkeypatch):

     monkeypatch.setattr("llama_cpp.llama_cpp.llama_sample_token", mock_sample)

-    text = "The quick brown fox"

     ## Test basic completion until eos
     n = 0  # reset
vendor/llama.cpp (vendored)

@@ -1 +1 @@
-Subproject commit 8da46278e1a57107591653275f8e03a281de94f0
+Subproject commit dfc7cd48b1cc31d759c093e917a18c0efe03d0e8