Re-enable cache
commit 21acd7901f
parent 6639371407
1 changed file with 3 additions and 5 deletions
@@ -831,9 +831,7 @@ class Llama:
                 "logprobs is not supported for models created with logits_all=False"
             )
 
-        # Temporarily disable usage of the cache
-        # See: https://github.com/abetlen/llama-cpp-python/issues/348#issuecomment-1583072408
-        if self.cache and False:
+        if self.cache:
             try:
                 cache_item = self.cache[prompt_tokens]
                 cache_prefix_len = Llama.longest_token_prefix(
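The restored branch keys the cache by prompt tokens and reuses the longest shared token prefix via Llama.longest_token_prefix. A minimal sketch of what such a prefix comparison does (a standalone reimplementation for illustration, not the library's exact code):

# Sketch: count how many leading tokens two sequences share, so cached
# state for a previous prompt can be reused when a new prompt extends it.
from typing import Sequence

def longest_token_prefix(a: Sequence[int], b: Sequence[int]) -> int:
    n = 0
    for x, y in zip(a, b):
        if x != y:
            break
        n += 1
    return n

assert longest_token_prefix([1, 2, 3, 4], [1, 2, 9]) == 2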
@@ -1071,14 +1069,14 @@ class Llama:
                         }
                     ],
                 }
-            if self.cache and False:
+            if self.cache:
                 if self.verbose:
                     print("Llama._create_completion: cache save", file=sys.stderr)
                 self.cache[prompt_tokens + completion_tokens] = self.save_state()
                 print("Llama._create_completion: cache saved", file=sys.stderr)
             return
 
-        if self.cache and False:
+        if self.cache:
             if self.verbose:
                 print("Llama._create_completion: cache save", file=sys.stderr)
             self.cache[prompt_tokens + completion_tokens] = self.save_state()
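Both hunks only take effect when a cache has actually been attached to the model; by default self.cache is None, so the restored branches are skipped. A minimal sketch of enabling it from user code, assuming the LlamaCache class and Llama.set_cache method that llama-cpp-python exposed around this commit (the model path is a placeholder):

# Sketch: attach an in-memory prompt cache so the `if self.cache:` branches
# above run. The model path is a placeholder.
from llama_cpp import Llama, LlamaCache

llm = Llama(model_path="./models/7B/ggml-model.bin")
llm.set_cache(LlamaCache())

# A second call sharing a prompt prefix can now restore saved state
# instead of re-evaluating the whole prompt from scratch.
print(llm("Q: Name the planets in the solar system. A:", max_tokens=32))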