feat: Update llama.cpp
parent c50d3300d2
commit 2a9979fce1

2 changed files with 10 additions and 4 deletions
@@ -1167,7 +1167,13 @@ def llama_n_seq_max(ctx: llama_context_p, /) -> int:
     ...


-# LLAMA_API enum llama_vocab_type llama_vocab_type(const struct llama_model * model);
+# LLAMA_API enum llama_pooling_type llama_pooling_type(const struct llama_context * ctx);
+@ctypes_function("llama_pooling_type", [llama_context_p_ctypes], ctypes.c_int)
+def llama_pooling_type(ctx: llama_context_p, /) -> int:
+    ...
+
+
+# LLAMA_API enum llama_vocab_type llama_vocab_type (const struct llama_model * model);
 @ctypes_function("llama_vocab_type", [llama_model_p_ctypes], ctypes.c_int)
 def llama_vocab_type(model: llama_model_p, /) -> int:
     ...
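The new llama_pooling_type stub added above follows the same @ctypes_function pattern as the rest of the bindings file (presumably llama_cpp/llama_cpp.py; the filename was not preserved in this capture): the decorated Python function body is just "...", and the decorator wires the stub to the C symbol with the declared argument and result types. Below is a minimal, self-contained sketch of how such a decorator can work, using libc's abs as a stand-in symbol; ctypes_function_sketch, c_abs, and the use of libc are illustrative assumptions, not the project's actual helper.

# Illustrative sketch only, not the project's ctypes_function helper.
import ctypes
import ctypes.util

# Assumes a libc can be located (typically true on Linux/macOS).
_libc = ctypes.CDLL(ctypes.util.find_library("c"))

def ctypes_function_sketch(name, argtypes, restype, lib=_libc):
    def decorator(stub):
        cfunc = getattr(lib, name)   # look up the C symbol by name
        cfunc.argtypes = argtypes    # attach the declared argument types
        cfunc.restype = restype      # attach the declared return type
        def wrapper(*args):
            return cfunc(*args)      # the stub body ("...") is never executed
        wrapper.__name__ = stub.__name__
        wrapper.__doc__ = stub.__doc__
        return wrapper
    return decorator

@ctypes_function_sketch("abs", [ctypes.c_int], ctypes.c_int)
def c_abs(n: int, /) -> int:
    ...

print(c_abs(-7))  # prints 7

Under this pattern, calling the decorated llama_pooling_type(ctx) dispatches straight into the shared library and returns the raw C enum value as a plain int.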
@@ -3091,7 +3097,7 @@ def llama_sample_token_greedy(
     ...


-# /// @details Randomly selects a token from the candidates based on their probabilities.
+# /// @details Randomly selects a token from the candidates based on their probabilities using the RNG of ctx.
 # LLAMA_API llama_token llama_sample_token(
 #            struct llama_context * ctx,
 #          llama_token_data_array * candidates);
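The only change in this hunk is the doxygen @details line: it now states explicitly that llama_sample_token draws a token from the candidates' probability distribution using the RNG owned by the context, so a fixed context seed yields reproducible sampling. Purely as illustration of that behavior, here is a self-contained Python sketch of categorical sampling with a context-owned RNG; FakeContext and sample_token below are stand-ins for exposition, not part of llama-cpp-python.

# Illustrative only: the real sampling happens in C inside llama.cpp.
import math
import random

class FakeContext:
    """Stand-in for the RNG state that llama_context owns (hypothetical)."""
    def __init__(self, seed: int):
        self.rng = random.Random(seed)

def sample_token(ctx: FakeContext, logits: list) -> int:
    # Softmax over the candidate logits.
    m = max(logits)
    exps = [math.exp(x - m) for x in logits]
    total = sum(exps)
    probs = [e / total for e in exps]
    # Draw from the categorical distribution using the context's RNG,
    # so the same seed reproduces the same token choices.
    r = ctx.rng.random()
    acc = 0.0
    for tok_id, p in enumerate(probs):
        acc += p
        if r < acc:
            return tok_id
    return len(probs) - 1

ctx = FakeContext(seed=1234)
print(sample_token(ctx, [2.0, 1.0, 0.1]))  # deterministic for a given seed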
vendor/llama.cpp (vendored, 2 lines changed)
@@ -1 +1 @@
-Subproject commit 4e96a812b3ce7322a29a3008db2ed73d9087b176
+Subproject commit 784e11dea1f5ce9638851b2b0dddb107e2a609c8