Strip leading space when de-tokenizing.
This commit is contained in:
parent
c2d1deaa8a
commit
8ac59465b9
2 changed files with 21 additions and 9 deletions
|
@ -445,17 +445,17 @@ class Llama:
|
|||
"""
|
||||
assert self.ctx is not None
|
||||
output = b""
|
||||
buffer_size = 32
|
||||
buffer_size = 8
|
||||
buffer = (ctypes.c_char * buffer_size)()
|
||||
for token in tokens:
|
||||
if token == llama_cpp.llama_token_bos(self.ctx):
|
||||
continue
|
||||
n = llama_cpp.llama_token_to_str(
|
||||
self.ctx, llama_cpp.llama_token(token), buffer, buffer_size
|
||||
)
|
||||
assert n <= buffer_size
|
||||
output += bytes(buffer[:n])
|
||||
return output
|
||||
# NOTE: Llama1 models automatically added a space at the start of the prompt
|
||||
# this line removes a leading space if the first token is a beginning of sentence token
|
||||
return output[1:] if len(tokens) > 0 and tokens[0] == self.token_bos() else output
|
||||
|
||||
def set_cache(self, cache: Optional[BaseLlamaCache]):
|
||||
"""Set the cache.
|
||||
|
@ -886,7 +886,7 @@ class Llama:
|
|||
created: int = int(time.time())
|
||||
completion_tokens: List[int] = []
|
||||
# Add blank space to start of prompt to match OG llama tokenizer
|
||||
prompt_tokens: List[int] = self.tokenize(b" " + prompt.encode("utf-8"))
|
||||
prompt_tokens: List[int] = self.tokenize(prompt.encode("utf-8")) if prompt != "" else [self.token_bos()]
|
||||
text: bytes = b""
|
||||
returned_tokens: int = 0
|
||||
stop = (
|
||||
|
|
|
@ -1,20 +1,32 @@
|
|||
import pytest
|
||||
import llama_cpp
|
||||
|
||||
MODEL = "./vendor/llama.cpp/models/ggml-vocab-llama.gguf"
|
||||
|
||||
|
||||
def test_llama():
|
||||
llama = llama_cpp.Llama(model_path=MODEL, vocab_only=True)
|
||||
def test_llama_cpp_tokenization():
|
||||
llama = llama_cpp.Llama(model_path=MODEL, vocab_only=True, verbose=False)
|
||||
|
||||
assert llama
|
||||
assert llama.ctx is not None
|
||||
|
||||
text = b"Hello World"
|
||||
|
||||
assert llama.detokenize(llama.tokenize(text)) == text
|
||||
tokens = llama.tokenize(text)
|
||||
assert tokens[0] == llama.token_bos()
|
||||
assert tokens == [1, 15043, 2787]
|
||||
detokenized = llama.detokenize(tokens)
|
||||
assert detokenized == text
|
||||
|
||||
tokens = llama.tokenize(text, add_bos=False)
|
||||
assert tokens[0] != llama.token_bos()
|
||||
assert tokens == [15043, 2787]
|
||||
|
||||
detokenized = llama.detokenize(tokens)
|
||||
assert detokenized != text
|
||||
|
||||
|
||||
# @pytest.mark.skip(reason="need to update sample mocking")
|
||||
@pytest.mark.skip(reason="bug in tokenization where leading space is always inserted even if not after eos")
|
||||
def test_llama_patch(monkeypatch):
|
||||
llama = llama_cpp.Llama(model_path=MODEL, vocab_only=True)
|
||||
n_vocab = llama_cpp.llama_n_vocab(llama.ctx)
|
||||
|
|
Loading…
Reference in a new issue