Fix cache implementation breaking changes

commit 0c42168508
parent 90874c01cd

2 changed files with 247 additions and 212 deletions
llama_cpp/llama.py

@@ -4,7 +4,7 @@ import uuid
 import time
 import math
 import multiprocessing
-from abc import ABC
+from abc import ABC, abstractmethod
 from typing import (
     List,
     Optional,
@@ -27,33 +27,37 @@ import numpy as np
 import numpy.typing as npt
 
 
-class LlamaCache(ABC):
+class BaseLlamaCache(ABC):
     """Base cache class for a llama.cpp model."""
 
     def __init__(self, capacity_bytes: int = (2 << 30)):
-        pass
+        self.capacity_bytes = capacity_bytes
 
     @property
-    def cache_size(self):
-        return 0
+    @abstractmethod
+    def cache_size(self) -> int:
+        raise NotImplementedError
 
     def _find_longest_prefix_key(
         self,
         key: Tuple[int, ...],
     ) -> Optional[Tuple[int, ...]]:
         pass
 
+    @abstractmethod
     def __getitem__(self, key: Sequence[int]) -> "LlamaState":
-        pass
+        raise NotImplementedError
 
+    @abstractmethod
     def __contains__(self, key: Sequence[int]) -> bool:
-        pass
+        raise NotImplementedError
 
-    def __setitem__(self, key: Sequence[int], value: "LlamaState"):
-        pass
+    @abstractmethod
+    def __setitem__(self, key: Sequence[int], value: "LlamaState") -> None:
+        raise NotImplementedError
 
 
-class LlamaRAMCache(LlamaCache):
+class LlamaRAMCache(BaseLlamaCache):
     """Cache for a llama.cpp model using RAM."""
 
     def __init__(self, capacity_bytes: int = (2 << 30)):
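The base class is now a real abstract interface: `cache_size`, `__getitem__`, `__contains__`, and `__setitem__` must be provided by every backend, while `_find_longest_prefix_key` stays concrete (a no-op in the base) so backends override it for prefix matching, as both built-in caches do. A minimal sketch of a conforming third-party backend — the `DictCache` name and its plain-dict storage are illustrative only, not part of this commit:

class DictCache(BaseLlamaCache):
    """Toy in-memory backend; LlamaRAMCache is the real equivalent."""

    def __init__(self, capacity_bytes: int = (2 << 30)):
        super().__init__(capacity_bytes)
        self.state = {}  # maps Tuple[int, ...] token keys to LlamaState values

    @property
    def cache_size(self) -> int:
        # Total size of the serialized llama states currently held.
        return sum(s.llama_state_size for s in self.state.values())

    def _find_longest_prefix_key(
        self, key: Tuple[int, ...]
    ) -> Optional[Tuple[int, ...]]:
        # Return the stored key sharing the longest token prefix with `key`.
        best = max(
            ((k, Llama.longest_token_prefix(k, key)) for k in self.state),
            key=lambda kv: kv[1],
            default=(None, 0),
        )
        return best[0] if best[1] > 0 else None

    def __getitem__(self, key: Sequence[int]) -> "LlamaState":
        _key = self._find_longest_prefix_key(tuple(key))
        if _key is None:
            raise KeyError("Key not found")
        return self.state[_key]

    def __contains__(self, key: Sequence[int]) -> bool:
        return self._find_longest_prefix_key(tuple(key)) is not None

    def __setitem__(self, key: Sequence[int], value: "LlamaState") -> None:
        self.state[tuple(key)] = value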
@@ -66,8 +70,8 @@ class LlamaRAMCache(LlamaCache):
         return sum([state.llama_state_size for state in self.cache_state.values()])
 
     def _find_longest_prefix_key(
         self,
         key: Tuple[int, ...],
     ) -> Optional[Tuple[int, ...]]:
         min_len = 0
         min_key = None
@@ -97,32 +101,38 @@ class LlamaRAMCache(LlamaCache):
         if key in self.cache_state:
             del self.cache_state[key]
         self.cache_state[key] = value
-        while self.cache_size > self.capacity_bytes:
+        while self.cache_size > self.capacity_bytes and len(self.cache_state) > 0:
             self.cache_state.popitem(last=False)
 
 
-class LlamaDiskCache(LlamaCache):
+# Alias for backwards compatibility
+LlamaCache = LlamaRAMCache
+
+
+class LlamaDiskCache(BaseLlamaCache):
     """Cache for a llama.cpp model using disk."""
 
-    def __init__(self, cache_dir="./llama_cache", capacity_bytes: int = (2 << 30)):
+    def __init__(
+        self, cache_dir: str = ".cache/llama_cache", capacity_bytes: int = (2 << 30)
+    ):
         super().__init__(capacity_bytes)
         self.cache = diskcache.Cache(cache_dir)
 
     @property
     def cache_size(self):
-        return self.cache.volume()
+        return int(self.cache.volume())  # type: ignore
 
     def _find_longest_prefix_key(
         self,
         key: Tuple[int, ...],
     ) -> Optional[Tuple[int, ...]]:
         min_len = 0
-        min_key = None
-        for k in self.cache.iterkeys():
+        min_key: Optional[Tuple[int, ...]] = None
+        for k in self.cache.iterkeys():  # type: ignore
             prefix_len = Llama.longest_token_prefix(k, key)
             if prefix_len > min_len:
                 min_len = prefix_len
-                min_key = k
+                min_key = k  # type: ignore
         return min_key
 
     def __getitem__(self, key: Sequence[int]) -> "LlamaState":
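Two behavioral notes on this hunk: the eviction loop now also checks `len(self.cache_state) > 0`, so a single state larger than `capacity_bytes` can no longer spin the loop forever, and the `LlamaCache = LlamaRAMCache` alias keeps the old name importable. A sketch of how old and new call sites resolve (the model path is a placeholder):

import llama_cpp

llama = llama_cpp.Llama(model_path="./models/7B/ggml-model.bin")  # placeholder path

# Pre-commit spelling still works: LlamaCache now aliases LlamaRAMCache.
llama.set_cache(llama_cpp.LlamaCache(capacity_bytes=2 << 30))

# Post-commit spellings name the backend explicitly.
llama.set_cache(llama_cpp.LlamaRAMCache(capacity_bytes=2 << 30))
llama.set_cache(llama_cpp.LlamaDiskCache(cache_dir=".cache/llama_cache"))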
@@ -130,29 +140,36 @@ class LlamaDiskCache(LlamaCache):
         _key = self._find_longest_prefix_key(key)
         if _key is None:
             raise KeyError("Key not found")
-        value = self.cache.pop(_key)
-        self.cache.push(_key)
+        value: "LlamaState" = self.cache.pop(_key)  # type: ignore
+        self.cache.push(_key, side="front")  # type: ignore
         return value
 
+    def __contains__(self, key: Sequence[int]) -> bool:
+        return self._find_longest_prefix_key(tuple(key)) is not None
+
     def __setitem__(self, key: Sequence[int], value: "LlamaState"):
+        print("LlamaDiskCache.__setitem__: called", file=sys.stderr)
         key = tuple(key)
         if key in self.cache:
+            print("LlamaDiskCache.__setitem__: delete", file=sys.stderr)
             del self.cache[key]
         self.cache[key] = value
-        while self.cache_size > self.capacity_bytes:
+        print("LlamaDiskCache.__setitem__: set", file=sys.stderr)
+        while self.cache_size > self.capacity_bytes and len(self.cache) > 0:
             key_to_remove = next(iter(self.cache))
             del self.cache[key_to_remove]
+        print("LlamaDiskCache.__setitem__: trim", file=sys.stderr)
 
 
 class LlamaState:
     def __init__(
         self,
         eval_tokens: Deque[int],
         eval_logits: Deque[List[float]],
         input_ids: npt.NDArray[np.intc],
         scores: npt.NDArray[np.single],
         llama_state,  # type: llama_cpp.Array[llama_cpp.c_uint8]
         llama_state_size: int,
     ):
         self.eval_tokens = eval_tokens
         self.eval_logits = eval_logits
@@ -184,25 +201,25 @@ class Llama:
     """High-level Python wrapper for a llama.cpp model."""
 
     def __init__(
         self,
         model_path: str,
         # NOTE: These parameters are likely to change in the future.
         n_ctx: int = 512,
         n_parts: int = -1,
         n_gpu_layers: int = 0,
         seed: int = 1337,
         f16_kv: bool = True,
         logits_all: bool = False,
         vocab_only: bool = False,
         use_mmap: bool = True,
         use_mlock: bool = False,
         embedding: bool = False,
         n_threads: Optional[int] = None,
         n_batch: int = 512,
         last_n_tokens_size: int = 64,
         lora_base: Optional[str] = None,
         lora_path: Optional[str] = None,
         verbose: bool = True,
     ):
         """Load a llama.cpp model from `model_path`.
 
@@ -249,7 +266,7 @@ class Llama:
         self.eval_tokens: Deque[int] = deque(maxlen=n_ctx)
         self.eval_logits: Deque[List[float]] = deque(maxlen=n_ctx if logits_all else 1)
 
-        self.cache: Optional[LlamaCache] = None
+        self.cache: Optional[BaseLlamaCache] = None
 
         self.n_threads = n_threads or max(multiprocessing.cpu_count() // 2, 1)
 
@@ -271,12 +288,12 @@ class Llama:
 
         if self.lora_path:
             if llama_cpp.llama_apply_lora_from_file(
                 self.ctx,
                 llama_cpp.c_char_p(self.lora_path.encode("utf-8")),
                 llama_cpp.c_char_p(self.lora_base.encode("utf-8"))
                 if self.lora_base is not None
                 else llama_cpp.c_char_p(0),
                 llama_cpp.c_int(self.n_threads),
             ):
                 raise RuntimeError(
                     f"Failed to apply LoRA from lora path: {self.lora_path} to base path: {self.lora_base}"
@@ -363,7 +380,7 @@ class Llama:
         )
         return output
 
-    def set_cache(self, cache: Optional[LlamaCache]):
+    def set_cache(self, cache: Optional[BaseLlamaCache]):
         """Set the cache.
 
         Args:
@@ -387,7 +404,7 @@ class Llama:
         assert self.ctx is not None
         n_ctx = self._n_ctx
         for i in range(0, len(tokens), self.n_batch):
-            batch = tokens[i: min(len(tokens), i + self.n_batch)]
+            batch = tokens[i : min(len(tokens), i + self.n_batch)]
             n_past = min(n_ctx - len(batch), len(self._input_ids))
             n_tokens = len(batch)
             return_code = llama_cpp.llama_eval(
@@ -409,28 +426,28 @@ class Llama:
             n_vocab = self._n_vocab
             cols = n_vocab
             logits_view = llama_cpp.llama_get_logits(self.ctx)
-            logits = [logits_view[i * cols: (i + 1) * cols] for i in range(rows)]
+            logits = [logits_view[i * cols : (i + 1) * cols] for i in range(rows)]
             self.eval_logits.extend(logits)
             self._scores: npt.NDArray[np.single] = np.concatenate(
                 (self._scores, np.array(logits, dtype=np.single)), axis=0
             )
 
     def _sample(
         self,
         last_n_tokens_data,  # type: llama_cpp.Array[llama_cpp.llama_token]
         last_n_tokens_size: llama_cpp.c_int,
         top_k: llama_cpp.c_int,
         top_p: llama_cpp.c_float,
         temp: llama_cpp.c_float,
         tfs_z: llama_cpp.c_float,
         repeat_penalty: llama_cpp.c_float,
         frequency_penalty: llama_cpp.c_float,
         presence_penalty: llama_cpp.c_float,
         mirostat_mode: llama_cpp.c_int,
         mirostat_tau: llama_cpp.c_float,
         mirostat_eta: llama_cpp.c_float,
         penalize_nl: bool = True,
         logits_processor: Optional[LogitsProcessorList] = None,
     ):
         assert self.ctx is not None
         assert len(self.eval_logits) > 0
@@ -550,19 +567,19 @@ class Llama:
         )
 
     def sample(
         self,
         top_k: int = 40,
         top_p: float = 0.95,
         temp: float = 0.80,
         repeat_penalty: float = 1.1,
         frequency_penalty: float = 0.0,
         presence_penalty: float = 0.0,
         tfs_z: float = 1.0,
         mirostat_mode: int = 0,
         mirostat_eta: float = 0.1,
         mirostat_tau: float = 5.0,
         penalize_nl: bool = True,
         logits_processor: Optional[LogitsProcessorList] = None,
     ):
         """Sample a token from the model.
 
@@ -578,7 +595,7 @@ class Llama:
         assert self.ctx is not None
         last_n_tokens_data = [llama_cpp.llama_token(0)] * max(
             0, self.last_n_tokens_size - len(self._input_ids)
-        ) + self._input_ids[-self.last_n_tokens_size:].tolist()
+        ) + self._input_ids[-self.last_n_tokens_size :].tolist()
         return self._sample(
             last_n_tokens_data=(llama_cpp.llama_token * self.last_n_tokens_size)(
                 *last_n_tokens_data
@@ -599,21 +616,21 @@ class Llama:
         )
 
     def generate(
         self,
         tokens: Sequence[int],
         top_k: int = 40,
         top_p: float = 0.95,
         temp: float = 0.80,
         repeat_penalty: float = 1.1,
         reset: bool = True,
         frequency_penalty: float = 0.0,
         presence_penalty: float = 0.0,
         tfs_z: float = 1.0,
         mirostat_mode: int = 0,
         mirostat_tau: float = 5.0,
         mirostat_eta: float = 0.1,
         logits_processor: Optional[LogitsProcessorList] = None,
         stopping_criteria: Optional[StoppingCriteriaList] = None,
     ) -> Generator[int, Optional[Sequence[int]], None]:
         """Create a generator of tokens from a prompt.
 
@@ -676,7 +693,7 @@ class Llama:
                 logits_processor=logits_processor,
             )
             if stopping_criteria is not None and stopping_criteria(
                 self._input_ids.tolist(), self._scores[-1, :].tolist()
             ):
                 return
             tokens_or_none = yield token
@@ -685,7 +702,7 @@ class Llama:
                 tokens.extend(tokens_or_none)
 
     def create_embedding(
         self, input: Union[str, List[str]], model: Optional[str] = None
     ) -> Embedding:
         """Embed a string.
 
@@ -720,8 +737,8 @@ class Llama:
             n_tokens = len(tokens)
             total_tokens += n_tokens
             embedding = llama_cpp.llama_get_embeddings(self.ctx)[
                 : llama_cpp.llama_n_embd(self.ctx)
             ]
 
             data.append(
                 {
@@ -755,27 +772,27 @@ class Llama:
         return list(map(float, self.create_embedding(input)["data"][0]["embedding"]))
 
     def _create_completion(
         self,
         prompt: str,
         suffix: Optional[str] = None,
         max_tokens: int = 16,
         temperature: float = 0.8,
         top_p: float = 0.95,
         logprobs: Optional[int] = None,
         echo: bool = False,
         stop: Optional[Union[str, List[str]]] = [],
         frequency_penalty: float = 0.0,
         presence_penalty: float = 0.0,
         repeat_penalty: float = 1.1,
         top_k: int = 40,
         stream: bool = False,
         tfs_z: float = 1.0,
         mirostat_mode: int = 0,
         mirostat_tau: float = 5.0,
         mirostat_eta: float = 0.1,
         model: Optional[str] = None,
         stopping_criteria: Optional[StoppingCriteriaList] = None,
         logits_processor: Optional[LogitsProcessorList] = None,
     ) -> Union[Iterator[Completion], Iterator[CompletionChunk]]:
         assert self.ctx is not None
 
@@ -827,19 +844,19 @@ class Llama:
         finish_reason = "length"
         multibyte_fix = 0
         for token in self.generate(
             prompt_tokens,
             top_k=top_k,
             top_p=top_p,
             temp=temperature,
             tfs_z=tfs_z,
             mirostat_mode=mirostat_mode,
             mirostat_tau=mirostat_tau,
             mirostat_eta=mirostat_eta,
             frequency_penalty=frequency_penalty,
             presence_penalty=presence_penalty,
             repeat_penalty=repeat_penalty,
             stopping_criteria=stopping_criteria,
             logits_processor=logits_processor,
         ):
             if token == self._token_eos:
                 text = self.detokenize(completion_tokens)
@@ -891,7 +908,7 @@ class Llama:
                 token_end_position += len(self.detokenize([token]))
                 # Check if stop sequence is in the token
                 if token_end_position >= (
                     remaining_length - first_stop_position - 1
                 ):
                     break
                 logprobs_or_none: Optional[CompletionLogprobs] = None
@@ -952,7 +969,7 @@ class Llama:
                     break
 
         if stopping_criteria is not None and stopping_criteria(
             self._input_ids.tolist(), self._scores[-1, :].tolist()
         ):
             text = self.detokenize(completion_tokens)
             finish_reason = "stop"
@@ -1017,8 +1034,8 @@ class Llama:
                 "choices": [
                     {
                         "text": last_text[
                             : len(last_text) - (token_end_position - end)
                         ].decode("utf-8", errors="ignore"),
                         "index": 0,
                         "logprobs": logprobs_or_none,
                         "finish_reason": finish_reason,
@@ -1049,6 +1066,7 @@ class Llama:
             if self.verbose:
                 print("Llama._create_completion: cache save", file=sys.stderr)
             self.cache[prompt_tokens + completion_tokens] = self.save_state()
+            print("Llama._create_completion: cache saved", file=sys.stderr)
             return
 
         if self.cache:
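For context, this is the writer side of the cache: a finished generation is stored under the full `prompt_tokens + completion_tokens` key, so a later request whose prompt extends that sequence is found by the longest-prefix lookup. The same round trip done by hand, as a sketch (model path and prompt are placeholders):

llama = llama_cpp.Llama(model_path="./models/7B/ggml-model.bin")  # placeholder path
cache = llama_cpp.LlamaRAMCache()
llama.set_cache(cache)

tokens = llama.tokenize(b"Q: Name the planets in the solar system. A: ")
llama.eval(tokens)
cache[tokens] = llama.save_state()   # what _create_completion does on finish

assert tokens in cache               # __contains__ via longest-prefix match
state = cache[tokens]                # retrieves the saved LlamaState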
@@ -1084,10 +1102,10 @@ class Llama:
                 for token in all_tokens
             ]
             all_logprobs = [
                 Llama.logits_to_logprobs(row.tolist()) for row in self._scores
             ][token_offset:]
             for token, token_str, logprobs_token in zip(
                 all_tokens, all_token_strs, all_logprobs
             ):
                 text_offsets.append(text_offset)
                 text_offset += len(token_str)
@@ -1138,27 +1156,27 @@ class Llama:
         }
 
     def create_completion(
         self,
         prompt: str,
         suffix: Optional[str] = None,
         max_tokens: int = 128,
         temperature: float = 0.8,
         top_p: float = 0.95,
         logprobs: Optional[int] = None,
         echo: bool = False,
         stop: Optional[Union[str, List[str]]] = [],
         frequency_penalty: float = 0.0,
         presence_penalty: float = 0.0,
         repeat_penalty: float = 1.1,
         top_k: int = 40,
         stream: bool = False,
         tfs_z: float = 1.0,
         mirostat_mode: int = 0,
         mirostat_tau: float = 5.0,
         mirostat_eta: float = 0.1,
         model: Optional[str] = None,
         stopping_criteria: Optional[StoppingCriteriaList] = None,
         logits_processor: Optional[LogitsProcessorList] = None,
     ) -> Union[Completion, Iterator[CompletionChunk]]:
         """Generate text from a prompt.
 
@@ -1211,27 +1229,27 @@ class Llama:
         return completion
 
     def __call__(
         self,
         prompt: str,
         suffix: Optional[str] = None,
         max_tokens: int = 128,
         temperature: float = 0.8,
         top_p: float = 0.95,
         logprobs: Optional[int] = None,
         echo: bool = False,
         stop: Optional[Union[str, List[str]]] = [],
         frequency_penalty: float = 0.0,
         presence_penalty: float = 0.0,
         repeat_penalty: float = 1.1,
         top_k: int = 40,
         stream: bool = False,
         tfs_z: float = 1.0,
         mirostat_mode: int = 0,
         mirostat_tau: float = 5.0,
         mirostat_eta: float = 0.1,
         model: Optional[str] = None,
         stopping_criteria: Optional[StoppingCriteriaList] = None,
         logits_processor: Optional[LogitsProcessorList] = None,
     ) -> Union[Completion, Iterator[CompletionChunk]]:
         """Generate text from a prompt.
 
@@ -1279,7 +1297,7 @@ class Llama:
         )
 
     def _convert_text_completion_to_chat(
         self, completion: Completion
     ) -> ChatCompletion:
         return {
             "id": "chat" + completion["id"],
@@ -1300,8 +1318,8 @@ class Llama:
         }
 
     def _convert_text_completion_chunks_to_chat(
         self,
         chunks: Iterator[CompletionChunk],
     ) -> Iterator[ChatCompletionChunk]:
         for i, chunk in enumerate(chunks):
             if i == 0:
@@ -1337,22 +1355,22 @@ class Llama:
         }
 
     def create_chat_completion(
         self,
         messages: List[ChatCompletionMessage],
         temperature: float = 0.2,
         top_p: float = 0.95,
         top_k: int = 40,
         stream: bool = False,
         stop: Optional[Union[str, List[str]]] = [],
         max_tokens: int = 256,
         presence_penalty: float = 0.0,
         frequency_penalty: float = 0.0,
         repeat_penalty: float = 1.1,
         tfs_z: float = 1.0,
         mirostat_mode: int = 0,
         mirostat_tau: float = 5.0,
         mirostat_eta: float = 0.1,
         model: Optional[str] = None,
     ) -> Union[ChatCompletion, Iterator[ChatCompletionChunk]]:
         """Generate a chat completion from a list of messages.
 
@@ -1453,9 +1471,17 @@ class Llama:
 
     def save_state(self) -> LlamaState:
         assert self.ctx is not None
+        if self.verbose:
+            print("Llama.save_state: saving llama state", file=sys.stderr)
         state_size = llama_cpp.llama_get_state_size(self.ctx)
+        if self.verbose:
+            print(f"Llama.save_state: got state size: {state_size}", file=sys.stderr)
         llama_state = (llama_cpp.c_uint8 * int(state_size))()
+        if self.verbose:
+            print("Llama.save_state: allocated state", file=sys.stderr)
         n_bytes = llama_cpp.llama_copy_state_data(self.ctx, llama_state)
+        if self.verbose:
+            print(f"Llama.save_state: copied llama state: {n_bytes}", file=sys.stderr)
         if int(n_bytes) > int(state_size):
             raise RuntimeError("Failed to copy llama state data")
         llama_state_compact = (llama_cpp.c_uint8 * int(n_bytes))()
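The new `verbose` gates bracket each stage of state serialization, which helps localize stalls when `state_size` is large. A round-trip sketch — it assumes the companion `Llama.load_state` method, which this commit does not touch, and a placeholder model path:

llama = llama_cpp.Llama(model_path="./models/7B/ggml-model.bin", verbose=True)
llama.eval(llama.tokenize(b"The quick brown fox"))

state = llama.save_state()  # emits the Llama.save_state traces on stderr
llama.load_state(state)     # later: restore the saved KV cache and logits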
llama_cpp/server/app.py

@@ -58,6 +58,10 @@ class Settings(BaseSettings):
         default=False,
         description="Use a cache to reduce processing times for evaluated prompts.",
     )
+    cache_type: Literal["ram", "disk"] = Field(
+        default="ram",
+        description="The type of cache to use. Only used if cache is True.",
+    )
     cache_size: int = Field(
         default=2 << 30,
         description="The size of the cache in bytes. Only used if cache is True.",
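`Settings` is a pydantic `BaseSettings`, so the new `cache_type` field can be set programmatically or come from the environment like the other server options. A sketch of wiring it up — the `uvicorn` runner and the model path are assumptions, not part of this diff:

import uvicorn
from llama_cpp.server.app import create_app, Settings

settings = Settings(
    model="./models/7B/ggml-model.bin",  # placeholder model path
    cache=True,
    cache_type="disk",   # "ram" (default) or "disk"
    cache_size=2 << 30,  # 2 GiB budget, matching the field default
)
uvicorn.run(create_app(settings=settings))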
@@ -108,6 +112,11 @@ def create_app(settings: Optional[Settings] = None):
         verbose=settings.verbose,
     )
     if settings.cache:
+        if settings.cache_type == "disk":
+            cache = llama_cpp.LlamaDiskCache(capacity_bytes=settings.cache_size)
+        else:
+            cache = llama_cpp.LlamaRAMCache(capacity_bytes=settings.cache_size)
+
         cache = llama_cpp.LlamaCache(capacity_bytes=settings.cache_size)
         llama.set_cache(cache)
 