Fix cache implementation breaking changes
parent 90874c01cd
commit 0c42168508
2 changed files with 247 additions and 212 deletions
@@ -4,7 +4,7 @@ import uuid
 import time
 import math
 import multiprocessing
-from abc import ABC
+from abc import ABC, abstractmethod
 from typing import (
     List,
     Optional,
@@ -27,15 +27,16 @@ import numpy as np
 import numpy.typing as npt
 
 
-class LlamaCache(ABC):
+class BaseLlamaCache(ABC):
     """Base cache class for a llama.cpp model."""
 
     def __init__(self, capacity_bytes: int = (2 << 30)):
-        pass
+        self.capacity_bytes = capacity_bytes
 
     @property
-    def cache_size(self):
-        return 0
+    @abstractmethod
+    def cache_size(self) -> int:
+        raise NotImplementedError
 
     def _find_longest_prefix_key(
         self,
@@ -43,17 +44,20 @@ class LlamaCache(ABC):
     ) -> Optional[Tuple[int, ...]]:
         pass
 
+    @abstractmethod
     def __getitem__(self, key: Sequence[int]) -> "LlamaState":
-        pass
+        raise NotImplementedError
 
+    @abstractmethod
     def __contains__(self, key: Sequence[int]) -> bool:
-        pass
+        raise NotImplementedError
 
-    def __setitem__(self, key: Sequence[int], value: "LlamaState"):
-        pass
+    @abstractmethod
+    def __setitem__(self, key: Sequence[int], value: "LlamaState") -> None:
+        raise NotImplementedError
 
 
-class LlamaRAMCache(LlamaCache):
+class LlamaRAMCache(BaseLlamaCache):
     """Cache for a llama.cpp model using RAM."""
 
     def __init__(self, capacity_bytes: int = (2 << 30)):
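For reference, here is a minimal sketch of a third-party cache written against the abstract interface defined above. It assumes BaseLlamaCache, LlamaState and Llama are importable from the llama_cpp package as in this diff, and that LlamaState carries a llama_state_size attribute for size accounting; the DictCache name and its plain-dict storage (no eviction) are illustrative only, not part of this commit.

# Sketch: a toy cache implementing the BaseLlamaCache interface above.
from typing import Dict, Optional, Sequence, Tuple

from llama_cpp import BaseLlamaCache, Llama, LlamaState


class DictCache(BaseLlamaCache):
    """Toy cache that keeps every saved state in a plain dict (no eviction)."""

    def __init__(self, capacity_bytes: int = (2 << 30)):
        super().__init__(capacity_bytes)
        self.states: Dict[Tuple[int, ...], LlamaState] = {}

    @property
    def cache_size(self) -> int:
        # Sum of saved state sizes (llama_state_size is an assumption, not shown in this diff).
        return sum(state.llama_state_size for state in self.states.values())

    def _find_longest_prefix_key(
        self, key: Tuple[int, ...]
    ) -> Optional[Tuple[int, ...]]:
        # Pick the stored key sharing the longest token prefix with `key`.
        best_key, best_len = None, 0
        for k in self.states:
            prefix_len = Llama.longest_token_prefix(k, key)
            if prefix_len > best_len:
                best_key, best_len = k, prefix_len
        return best_key

    def __getitem__(self, key: Sequence[int]) -> LlamaState:
        _key = self._find_longest_prefix_key(tuple(key))
        if _key is None:
            raise KeyError("Key not found")
        return self.states[_key]

    def __contains__(self, key: Sequence[int]) -> bool:
        return self._find_longest_prefix_key(tuple(key)) is not None

    def __setitem__(self, key: Sequence[int], value: LlamaState) -> None:
        self.states[tuple(key)] = value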
@@ -97,32 +101,38 @@ class LlamaRAMCache(LlamaCache):
         if key in self.cache_state:
             del self.cache_state[key]
         self.cache_state[key] = value
-        while self.cache_size > self.capacity_bytes:
+        while self.cache_size > self.capacity_bytes and len(self.cache_state) > 0:
             self.cache_state.popitem(last=False)
 
 
-class LlamaDiskCache(LlamaCache):
+# Alias for backwards compatibility
+LlamaCache = LlamaRAMCache
+
+
+class LlamaDiskCache(BaseLlamaCache):
     """Cache for a llama.cpp model using disk."""
 
-    def __init__(self, cache_dir="./llama_cache", capacity_bytes: int = (2 << 30)):
+    def __init__(
+        self, cache_dir: str = ".cache/llama_cache", capacity_bytes: int = (2 << 30)
+    ):
         super().__init__(capacity_bytes)
         self.cache = diskcache.Cache(cache_dir)
 
     @property
     def cache_size(self):
-        return self.cache.volume()
+        return int(self.cache.volume())  # type: ignore
 
     def _find_longest_prefix_key(
         self,
         key: Tuple[int, ...],
     ) -> Optional[Tuple[int, ...]]:
         min_len = 0
-        min_key = None
-        for k in self.cache.iterkeys():
+        min_key: Optional[Tuple[int, ...]] = None
+        for k in self.cache.iterkeys():  # type: ignore
             prefix_len = Llama.longest_token_prefix(k, key)
             if prefix_len > min_len:
                 min_len = prefix_len
-                min_key = k
+                min_key = k  # type: ignore
         return min_key
 
     def __getitem__(self, key: Sequence[int]) -> "LlamaState":
@@ -130,18 +140,25 @@ class LlamaDiskCache(LlamaCache):
         _key = self._find_longest_prefix_key(key)
         if _key is None:
             raise KeyError("Key not found")
-        value = self.cache.pop(_key)
-        self.cache.push(_key)
+        value: "LlamaState" = self.cache.pop(_key)  # type: ignore
+        self.cache.push(_key, side="front")  # type: ignore
         return value
 
     def __contains__(self, key: Sequence[int]) -> bool:
         return self._find_longest_prefix_key(tuple(key)) is not None
 
     def __setitem__(self, key: Sequence[int], value: "LlamaState"):
+        print("LlamaDiskCache.__setitem__: called", file=sys.stderr)
         key = tuple(key)
         if key in self.cache:
+            print("LlamaDiskCache.__setitem__: delete", file=sys.stderr)
             del self.cache[key]
         self.cache[key] = value
-        while self.cache_size > self.capacity_bytes:
+        print("LlamaDiskCache.__setitem__: set", file=sys.stderr)
+        while self.cache_size > self.capacity_bytes and len(self.cache) > 0:
             key_to_remove = next(iter(self.cache))
             del self.cache[key_to_remove]
+        print("LlamaDiskCache.__setitem__: trim", file=sys.stderr)
 
 
 class LlamaState:
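Since existing code may still instantiate LlamaCache directly, the alias above keeps that name pointing at the RAM implementation. A quick sketch of the intended equivalence, assuming the class names are exported from the llama_cpp package as the server code in this diff expects:

# Sketch: after this change, the old LlamaCache name constructs a RAM cache.
import llama_cpp

cache = llama_cpp.LlamaCache(capacity_bytes=2 << 30)  # old spelling still works
assert isinstance(cache, llama_cpp.LlamaRAMCache)
assert issubclass(llama_cpp.LlamaRAMCache, llama_cpp.BaseLlamaCache)
assert issubclass(llama_cpp.LlamaDiskCache, llama_cpp.BaseLlamaCache)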
@@ -249,7 +266,7 @@ class Llama:
         self.eval_tokens: Deque[int] = deque(maxlen=n_ctx)
         self.eval_logits: Deque[List[float]] = deque(maxlen=n_ctx if logits_all else 1)
 
-        self.cache: Optional[LlamaCache] = None
+        self.cache: Optional[BaseLlamaCache] = None
 
         self.n_threads = n_threads or max(multiprocessing.cpu_count() // 2, 1)
@@ -363,7 +380,7 @@ class Llama:
         )
         return output
 
-    def set_cache(self, cache: Optional[LlamaCache]):
+    def set_cache(self, cache: Optional[BaseLlamaCache]):
         """Set the cache.
 
         Args:
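A short usage sketch for the updated set_cache signature; the model path is a placeholder, and the cache_dir/capacity values simply echo the defaults shown earlier in this diff:

# Sketch: attaching one of the cache implementations to a model.
import llama_cpp

llama = llama_cpp.Llama(model_path="./models/ggml-model.bin")  # placeholder path

# In-RAM cache (evicts oldest entries once capacity_bytes is exceeded)...
llama.set_cache(llama_cpp.LlamaRAMCache(capacity_bytes=2 << 30))

# ...or a disk-backed cache that persists between runs.
llama.set_cache(
    llama_cpp.LlamaDiskCache(cache_dir=".cache/llama_cache", capacity_bytes=2 << 30)
)

# set_cache accepts Optional[BaseLlamaCache], so None turns caching off again.
llama.set_cache(None)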
@@ -387,7 +404,7 @@ class Llama:
         assert self.ctx is not None
         n_ctx = self._n_ctx
         for i in range(0, len(tokens), self.n_batch):
-            batch = tokens[i: min(len(tokens), i + self.n_batch)]
+            batch = tokens[i : min(len(tokens), i + self.n_batch)]
             n_past = min(n_ctx - len(batch), len(self._input_ids))
             n_tokens = len(batch)
             return_code = llama_cpp.llama_eval(
@@ -409,7 +426,7 @@ class Llama:
             n_vocab = self._n_vocab
             cols = n_vocab
             logits_view = llama_cpp.llama_get_logits(self.ctx)
-            logits = [logits_view[i * cols: (i + 1) * cols] for i in range(rows)]
+            logits = [logits_view[i * cols : (i + 1) * cols] for i in range(rows)]
             self.eval_logits.extend(logits)
             self._scores: npt.NDArray[np.single] = np.concatenate(
                 (self._scores, np.array(logits, dtype=np.single)), axis=0
@@ -578,7 +595,7 @@ class Llama:
         assert self.ctx is not None
         last_n_tokens_data = [llama_cpp.llama_token(0)] * max(
             0, self.last_n_tokens_size - len(self._input_ids)
-        ) + self._input_ids[-self.last_n_tokens_size:].tolist()
+        ) + self._input_ids[-self.last_n_tokens_size :].tolist()
         return self._sample(
             last_n_tokens_data=(llama_cpp.llama_token * self.last_n_tokens_size)(
                 *last_n_tokens_data
@@ -1049,6 +1066,7 @@ class Llama:
             if self.verbose:
                 print("Llama._create_completion: cache save", file=sys.stderr)
             self.cache[prompt_tokens + completion_tokens] = self.save_state()
+            print("Llama._create_completion: cache saved", file=sys.stderr)
             return
 
         if self.cache:
@@ -1453,9 +1471,17 @@ class Llama:
 
     def save_state(self) -> LlamaState:
         assert self.ctx is not None
+        if self.verbose:
+            print("Llama.save_state: saving llama state", file=sys.stderr)
         state_size = llama_cpp.llama_get_state_size(self.ctx)
+        if self.verbose:
+            print(f"Llama.save_state: got state size: {state_size}", file=sys.stderr)
         llama_state = (llama_cpp.c_uint8 * int(state_size))()
+        if self.verbose:
+            print("Llama.save_state: allocated state", file=sys.stderr)
         n_bytes = llama_cpp.llama_copy_state_data(self.ctx, llama_state)
+        if self.verbose:
+            print(f"Llama.save_state: copied llama state: {n_bytes}", file=sys.stderr)
         if int(n_bytes) > int(state_size):
            raise RuntimeError("Failed to copy llama state data")
         llama_state_compact = (llama_cpp.c_uint8 * int(n_bytes))()
@@ -58,6 +58,10 @@ class Settings(BaseSettings):
         default=False,
         description="Use a cache to reduce processing times for evaluated prompts.",
     )
+    cache_type: Literal["ram", "disk"] = Field(
+        default="ram",
+        description="The type of cache to use. Only used if cache is True.",
+    )
     cache_size: int = Field(
         default=2 << 30,
         description="The size of the cache in bytes. Only used if cache is True.",
@@ -108,6 +112,11 @@ def create_app(settings: Optional[Settings] = None):
         verbose=settings.verbose,
     )
     if settings.cache:
+        if settings.cache_type == "disk":
+            cache = llama_cpp.LlamaDiskCache(capacity_bytes=settings.cache_size)
+        else:
+            cache = llama_cpp.LlamaRAMCache(capacity_bytes=settings.cache_size)
+
         cache = llama_cpp.LlamaCache(capacity_bytes=settings.cache_size)
         llama.set_cache(cache)
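For completeness, a sketch of driving the new settings from code. It assumes Settings and create_app live in the llama_cpp.server.app module (the create_app shown in the hunk above) and uses a placeholder model path; neither detail is spelled out in this diff:

# Sketch: building the server app with the disk cache enabled.
from llama_cpp.server.app import Settings, create_app

settings = Settings(
    model="./models/ggml-model.bin",  # placeholder path
    cache=True,
    cache_type="disk",   # "ram" (default) or "disk"
    cache_size=2 << 30,  # capacity in bytes
)
app = create_app(settings=settings)  # ASGI app; serve it with e.g. uvicorn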