Andrei Betlen 2024-06-01 18:10:13 -04:00
commit a6457ba74b
7 changed files with 51 additions and 24 deletions

.github/workflows/build-and-release.yaml

@@ -29,7 +29,7 @@ jobs:
          python -m pip install -e .[all]

       - name: Build wheels
-        uses: pypa/cibuildwheel@v2.18.0
+        uses: pypa/cibuildwheel@v2.18.1
         env:
           # disable repair
           CIBW_REPAIR_WHEEL_COMMAND: ""
@@ -56,7 +56,7 @@ jobs:
           platforms: linux/arm64

       - name: Build wheels
-        uses: pypa/cibuildwheel@v2.18.0
+        uses: pypa/cibuildwheel@v2.18.1
         env:
           CIBW_SKIP: "*musllinux* pp*"
           CIBW_REPAIR_WHEEL_COMMAND: ""

Makefile

@@ -13,7 +13,13 @@ build:
 	python3 -m pip install --verbose -e .

 build.debug:
-	CMAKE_ARGS="-DCMAKE_BUILD_TYPE=Debug" python3 -m pip install --verbose --config-settings=cmake.verbose=true --config-settings=logging.level=INFO --config-settings=install.strip=false --editable .
+	python3 -m pip install \
+		--verbose \
+		--config-settings=cmake.verbose=true \
+		--config-settings=logging.level=INFO \
+		--config-settings=install.strip=false \
+		--config-settings=cmake.args="-DCMAKE_BUILD_TYPE=Debug;-DCMAKE_C_FLAGS='-ggdb -O0';-DCMAKE_CXX_FLAGS='-ggdb -O0'" \
+		--editable .

 build.cuda:
 	CMAKE_ARGS="-DLLAMA_CUDA=on" python3 -m pip install --verbose -e .

llama_cpp/llama.py

@@ -6,6 +6,7 @@ import uuid
 import time
 import json
 import ctypes
+import typing
 import fnmatch
 import multiprocessing
@@ -249,13 +250,13 @@ class Llama:
                 self._kv_overrides_array[i].key = k.encode("utf-8")
                 if isinstance(v, bool):
                     self._kv_overrides_array[i].tag = llama_cpp.LLAMA_KV_OVERRIDE_TYPE_BOOL
-                    self._kv_overrides_array[i].value.bool_value = v
+                    self._kv_overrides_array[i].value.val_bool = v
                 elif isinstance(v, int):
                     self._kv_overrides_array[i].tag = llama_cpp.LLAMA_KV_OVERRIDE_TYPE_INT
-                    self._kv_overrides_array[i].value.int_value = v
+                    self._kv_overrides_array[i].value.val_i64 = v
                 elif isinstance(v, float):
                     self._kv_overrides_array[i].tag = llama_cpp.LLAMA_KV_OVERRIDE_TYPE_FLOAT
-                    self._kv_overrides_array[i].value.float_value = v
+                    self._kv_overrides_array[i].value.val_f64 = v
                 elif isinstance(v, str):  # type: ignore
                     v_bytes = v.encode("utf-8")
                     if len(v_bytes) > 128:  # TODO: Make this a constant
@@ -263,10 +264,12 @@ class Llama:
                     v_bytes = v_bytes.ljust(128, b"\0")
                     self._kv_overrides_array[i].tag = llama_cpp.LLAMA_KV_OVERRIDE_TYPE_STR
                     # copy min(v_bytes, 128) to str_value
+                    address = typing.cast(int, ctypes.addressof(self._kv_overrides_array[i].value) + llama_cpp.llama_model_kv_override_value.val_str.offset)
+                    buffer_start = ctypes.cast(address, ctypes.POINTER(ctypes.c_char))
                     ctypes.memmove(
-                        self._kv_overrides_array[i].value.str_value,
+                        buffer_start,
                         v_bytes,
-                        min(len(v_bytes), 128),
+                        128,
                     )
                 else:
                     raise ValueError(f"Unknown value type for {k}: {v}")
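
The memmove change above fixes a subtle ctypes pitfall: reading a c_char * 128 field through the union attribute returns a temporary bytes copy, so the old code copied the string into that throwaway object instead of into the underlying struct. Below is a minimal, self-contained sketch of the corrected pattern; the union here is an illustrative stand-in, not the library's actual class:

import ctypes

class OverrideValue(ctypes.Union):
    # illustrative stand-in for llama_model_kv_override_value
    _fields_ = [
        ("val_i64", ctypes.c_int64),
        ("val_str", ctypes.c_char * 128),
    ]

value = OverrideValue()
payload = b"llama-3".ljust(128, b"\0")  # pad to the fixed field size

# Broken: value.val_str yields a temporary bytes copy, so
# ctypes.memmove(value.val_str, payload, 128) would write into that
# copy and leave the union itself untouched.

# Correct: compute the field's real address inside the union and copy there.
address = ctypes.addressof(value) + OverrideValue.val_str.offset
ctypes.memmove(address, payload, 128)

assert value.val_str == b"llama-3"  # attribute access reads up to the first NUL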

llama_cpp/llama_chat_format.py

@@ -3098,7 +3098,7 @@ class NanoLlavaChatHandler(Llava15ChatHandler):
     "{% endif %}"
 )

-class Llama3VisionAlpha(Llava15ChatHandler):
+class Llama3VisionAlphaChatHandler(Llava15ChatHandler):
     # question = "<image>" + q
     # prompt = f"<|start_header_id|>user<|end_header_id|>\n\n{question}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n"
@@ -3159,6 +3159,10 @@ class Llama3VisionAlpha(Llava15ChatHandler):
     "{% endif %}"
 )

+
+# alias
+Llama3VisionAlpha = Llama3VisionAlphaChatHandler
+
 @register_chat_completion_handler("chatml-function-calling")
 def chatml_function_calling(
     llama: llama.Llama,
@@ -3193,7 +3197,6 @@ def chatml_function_calling(
     llama_types.CreateChatCompletionResponse,
     Iterator[llama_types.CreateChatCompletionStreamResponse],
 ]:
-    print(logprobs)
     function_calling_template = (
         "{% for message in messages %}"
         "<|im_start|>{{ message.role }}\n"

llama_cpp/llama_cpp.py

@@ -300,6 +300,7 @@ LLAMA_VOCAB_TYPE_WPM = 3
 #     LLAMA_VOCAB_PRE_TYPE_QWEN2          = 11,
 #     LLAMA_VOCAB_PRE_TYPE_OLMO           = 12,
 #     LLAMA_VOCAB_PRE_TYPE_DBRX           = 13,
+#     LLAMA_VOCAB_PRE_TYPE_SMAUG          = 14,
 # };
 LLAMA_VOCAB_PRE_TYPE_DEFAULT = 0
 LLAMA_VOCAB_PRE_TYPE_LLAMA3 = 1
@@ -315,6 +316,7 @@ LLAMA_VOCAB_PRE_TYPE_STABLELM2 = 10
 LLAMA_VOCAB_PRE_TYPE_QWEN2 = 11
 LLAMA_VOCAB_PRE_TYPE_OLMO = 12
 LLAMA_VOCAB_PRE_TYPE_DBRX = 13
+LLAMA_VOCAB_PRE_TYPE_SMAUG = 14

 # // note: these values should be synchronized with ggml_rope
@@ -611,17 +613,17 @@ LLAMA_KV_OVERRIDE_TYPE_STR = 3
 # };
 class llama_model_kv_override_value(ctypes.Union):
     _fields_ = [
-        ("int_value", ctypes.c_int64),
-        ("float_value", ctypes.c_double),
-        ("bool_value", ctypes.c_bool),
-        ("str_value", ctypes.c_char * 128),
+        ("val_i64", ctypes.c_int64),
+        ("val_f64", ctypes.c_double),
+        ("val_bool", ctypes.c_bool),
+        ("val_str", ctypes.c_char * 128),
     ]

     if TYPE_CHECKING:
-        int_value: int
-        float_value: float
-        bool_value: bool
-        str_value: bytes
+        val_i64: int
+        val_f64: float
+        val_bool: bool
+        val_str: bytes


 class llama_model_kv_override(ctypes.Structure):
@@ -718,6 +720,8 @@ class llama_model_params(ctypes.Structure):
     ]

+# // NOTE: changing the default values of parameters marked as [EXPERIMENTAL] may cause crashes or incorrect results in certain configurations
+# //       https://github.com/ggerganov/llama.cpp/pull/7544

 # struct llama_context_params {
 #     uint32_t seed;              // RNG seed, -1 for random
 #     uint32_t n_ctx;             // text context, 0 = from model
@@ -744,15 +748,14 @@ class llama_model_params(ctypes.Structure):
 #     ggml_backend_sched_eval_callback cb_eval;
 #     void * cb_eval_user_data;

-#     enum ggml_type type_k; // data type for K cache
-#     enum ggml_type type_v; // data type for V cache
+#     enum ggml_type type_k; // data type for K cache [EXPERIMENTAL]
+#     enum ggml_type type_v; // data type for V cache [EXPERIMENTAL]

 #     // Keep the booleans together to avoid misalignment during copy-by-value.
 #     bool logits_all;  // the llama_decode() call computes all logits, not just the last one (DEPRECATED - set llama_batch.logits instead)
 #     bool embeddings;  // if true, extract embeddings (together with logits)
 #     bool offload_kqv; // whether to offload the KQV ops (including the KV cache) to GPU
-#     bool flash_attn;  // whether to use flash attention
+#     bool flash_attn;  // whether to use flash attention [EXPERIMENTAL]

 #     // Abort callback
 #     // if it returns true, execution of llama_decode() will be aborted
@@ -2454,6 +2457,16 @@ def llama_token_is_eog(model: llama_model_p, token: Union[llama_token, int], /)
     ...


+# // Identify if Token Id is a control token or a render-able token
+# LLAMA_API bool llama_token_is_control(const struct llama_model * model, llama_token token);
+@ctypes_function(
+    "llama_token_is_control", [llama_model_p_ctypes, llama_token], ctypes.c_bool
+)
+def llama_token_is_control(model: llama_model_p, token: Union[llama_token, int], /) -> bool:
+    """Identify if Token Id is a control token or a render-able token"""
+    ...
+
+
 # // Special tokens
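
A hedged usage sketch for the new llama_token_is_control binding. It assumes the function is re-exported at the package level (as the other low-level bindings are) and that model is a llama_model_p handle from llama_load_model_from_file; the helper name is invented for illustration:

import llama_cpp

def strip_control_tokens(model, token_ids):
    # keep only render-able tokens; drops BOS/EOS and other control tokens
    return [
        token for token in token_ids
        if not llama_cpp.llama_token_is_control(model, token)
    ]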

llama_cpp/server/model.py

@@ -183,7 +183,7 @@ class LlamaProxy:
                 num_pred_tokens=settings.draft_model_num_pred_tokens
             )

-        kv_overrides: Optional[Dict[str, Union[bool, int, float]]] = None
+        kv_overrides: Optional[Dict[str, Union[bool, int, float, str]]] = None
         if settings.kv_overrides is not None:
             assert isinstance(settings.kv_overrides, list)
             kv_overrides = {}
@@ -197,6 +197,8 @@ class LlamaProxy:
                     kv_overrides[key] = int(value)
                 elif value_type == "float":
                     kv_overrides[key] = float(value)
+                elif value_type == "str":
+                    kv_overrides[key] = value
                 else:
                     raise ValueError(f"Unknown value type {value_type}")
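
For context on the branch added above: each server-side override arrives as a single string that the surrounding code splits into key, type, and value (the "key=type:value" format is assumed from the settings documentation). A standalone sketch of that parsing, including the new str case:

from typing import Tuple, Union

def parse_kv_override(item: str) -> Tuple[str, Union[bool, int, float, str]]:
    # assumed format: "key=type:value", e.g. "tokenizer.ggml.pre=str:llama3"
    key, value = item.split("=", 1)
    value_type, value = value.split(":", 1)
    if value_type == "bool":
        return key, value == "true"
    if value_type == "int":
        return key, int(value)
    if value_type == "float":
        return key, float(value)
    if value_type == "str":  # newly supported
        return key, value
    raise ValueError(f"Unknown value type {value_type}")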

vendor/llama.cpp (vendored submodule)

@@ -1 +1 @@
-Subproject commit 0df0aa8e43c3378975269a51f9b876c8692e70da
+Subproject commit 504f0c340f6b5e04de682f6ddefdd3b81208df5d