Formatting

commit c928e0afc8
parent 8d9560ed66
Author: Andrei Betlen
Date:   2023-03-31 00:00:27 -04:00


@@ -47,7 +47,6 @@ class llama_context_params(Structure):
        ("n_ctx", c_int),  # text context
        ("n_parts", c_int),  # -1 for default
        ("seed", c_int),  # RNG seed, 0 for random
        ("f16_kv", c_bool),  # use fp16 for KV cache
        (
            "logits_all",
@@ -56,7 +55,6 @@ class llama_context_params(Structure):
        ("vocab_only", c_bool),  # only load the vocabulary, no weights
        ("use_mlock", c_bool),  # force system to keep model in RAM
        ("embedding", c_bool),  # embedding mode only
        # called with a progress value between 0 and 1, pass NULL to disable
        ("progress_callback", llama_progress_callback),
        # context pointer passed to the progress callback
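For reference, below is a minimal, self-contained sketch of the ctypes structure these context lines belong to. Field order and the comments follow the hunks above; the llama_progress_callback signature (float progress, void *ctx), the logits_all comment, and the trailing progress_callback_user_data field are assumptions based on the llama.cpp C header of this period and are not shown in the diff itself.

# Hypothetical sketch of the surrounding binding, not the exact file contents.
from ctypes import Structure, CFUNCTYPE, c_int, c_bool, c_float, c_void_p

# Assumed callback signature: void (*)(float progress, void *ctx)
llama_progress_callback = CFUNCTYPE(None, c_float, c_void_p)

class llama_context_params(Structure):
    _fields_ = [
        ("n_ctx", c_int),        # text context
        ("n_parts", c_int),      # -1 for default
        ("seed", c_int),         # RNG seed, 0 for random
        ("f16_kv", c_bool),      # use fp16 for KV cache
        ("logits_all", c_bool),  # compute all logits, not just the last one (assumed comment)
        ("vocab_only", c_bool),  # only load the vocabulary, no weights
        ("use_mlock", c_bool),   # force system to keep model in RAM
        ("embedding", c_bool),   # embedding mode only
        # called with a progress value between 0 and 1, pass NULL to disable
        ("progress_callback", llama_progress_callback),
        # context pointer passed to the progress callback (assumed field name)
        ("progress_callback_user_data", c_void_p),
    ]

# Example usage: populate the params and attach a Python progress callback.
if __name__ == "__main__":
    params = llama_context_params()
    params.n_ctx = 512
    params.seed = 0

    @llama_progress_callback
    def on_progress(progress, ctx):
        print(f"load progress: {progress:.2%}")

    params.progress_callback = on_progress

Because CFUNCTYPE objects keep a reference to the wrapped Python function only through the ctypes pointer, the callback should be kept alive (for example, stored on an object) for as long as the C side may invoke it.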