Update low_level_api_llama_cpp.py to match current API (#1023)

Jonathan Soma 2023-12-18 15:59:11 -05:00 committed by GitHub
parent 095c650006
commit cfd698c75c


@@ -73,7 +73,7 @@ while remaining_tokens > 0:
     embd = []
     if len(embd_inp) <= input_consumed:
         logits = llama_cpp.llama_get_logits(ctx)
-        n_vocab = llama_cpp.llama_n_vocab(ctx)
+        n_vocab = llama_cpp.llama_n_vocab(model)

         _arr = (llama_cpp.llama_token_data * n_vocab)(*[
             llama_cpp.llama_token_data(token_id, logits[token_id], 0.0)
@@ -83,12 +83,12 @@ while remaining_tokens > 0:
             llama_cpp.llama_token_data_array(_arr, len(_arr), False))

         _arr = (llama_cpp.c_int * len(last_n_tokens_data))(*last_n_tokens_data)
-        llama_cpp.llama_sample_repetition_penalty(ctx, candidates_p,
-            _arr,
-            last_n_repeat, repeat_penalty)
-        llama_cpp.llama_sample_frequency_and_presence_penalties(ctx, candidates_p,
-            _arr,
-            last_n_repeat, frequency_penalty, presence_penalty)
+        llama_cpp.llama_sample_repetition_penalties(ctx, candidates_p,
+            _arr,
+            penalty_last_n=last_n_repeat,
+            penalty_repeat=repeat_penalty,
+            penalty_freq=frequency_penalty,
+            penalty_present=presence_penalty)

         llama_cpp.llama_sample_top_k(ctx, candidates_p, k=40, min_keep=1)
         llama_cpp.llama_sample_top_p(ctx, candidates_p, p=0.8, min_keep=1)
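For reference, a minimal sketch of the sampling path after this change: llama_n_vocab now takes the model rather than the context, and the separate repetition and frequency/presence penalty calls are folded into a single llama_sample_repetition_penalties call. This assumes ctx and model were created earlier, and that last_n_tokens_data, last_n_repeat, repeat_penalty, frequency_penalty, and presence_penalty are set up as in the surrounding example script.

# Sketch only: assumes ctx and model already exist (e.g. from
# llama_cpp.llama_load_model_from_file / llama_cpp.llama_new_context_with_model),
# and that last_n_tokens_data, last_n_repeat, repeat_penalty, frequency_penalty,
# and presence_penalty are defined as in low_level_api_llama_cpp.py.
import ctypes
import llama_cpp

logits = llama_cpp.llama_get_logits(ctx)
n_vocab = llama_cpp.llama_n_vocab(model)  # now takes the model, not the context

# Build the candidate array from the raw logits.
_arr = (llama_cpp.llama_token_data * n_vocab)(*[
    llama_cpp.llama_token_data(token_id, logits[token_id], 0.0)
    for token_id in range(n_vocab)
])
candidates_p = ctypes.pointer(
    llama_cpp.llama_token_data_array(_arr, len(_arr), False))

# Repetition, frequency, and presence penalties are now applied in one call.
_arr = (llama_cpp.c_int * len(last_n_tokens_data))(*last_n_tokens_data)
llama_cpp.llama_sample_repetition_penalties(ctx, candidates_p,
    _arr,
    penalty_last_n=last_n_repeat,
    penalty_repeat=repeat_penalty,
    penalty_freq=frequency_penalty,
    penalty_present=presence_penalty)

llama_cpp.llama_sample_top_k(ctx, candidates_p, k=40, min_keep=1)
llama_cpp.llama_sample_top_p(ctx, candidates_p, p=0.8, min_keep=1)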