# tests/test_llama.py — tests for the llama_cpp Python bindings.
import ctypes
import pytest
import llama_cpp
MODEL = "./vendor/llama.cpp/models/ggml-vocab-llama.gguf"
def test_llama_cpp_tokenization():
    """Tokenize/detokenize round trips against the vocab-only model."""
    model = llama_cpp.Llama(model_path=MODEL, vocab_only=True, verbose=False)

    assert model
    assert model._ctx.ctx is not None

    text = b"Hello World"

    # Default tokenization prepends BOS (token id 1).
    tokens = model.tokenize(text)
    assert tokens[0] == model.token_bos()
    assert tokens == [1, 15043, 2787]
    decoded = model.detokenize(tokens)
    assert decoded == text

    # Without BOS the round trip is not byte-identical to the input.
    tokens = model.tokenize(text, add_bos=False)
    assert tokens[0] != model.token_bos()
    assert tokens == [15043, 2787]
    decoded = model.detokenize(tokens)
    assert decoded != text

    # "</s>" is treated as literal text unless special=True is passed,
    # in which case it becomes the EOS token (id 2).
    text = b"Hello World</s>"
    tokens = model.tokenize(text)
    assert tokens[-1] != model.token_eos()
    assert tokens == [1, 15043, 2787, 829, 29879, 29958]

    tokens = model.tokenize(text, special=True)
    assert tokens[-1] == model.token_eos()
    assert tokens == [1, 10994, 2787, 2]
def test_llama_patch(monkeypatch):
    """Exercise create_completion / streaming with the C API mocked out."""
    n_ctx = 128
    llama = llama_cpp.Llama(model_path=MODEL, vocab_only=True, n_ctx=n_ctx)
    n_vocab = llama_cpp.llama_n_vocab(llama._model.model)
    assert n_vocab == 32000

    # Stub out decode/logits so no real inference happens.
    def fake_decode(*args, **kwargs):
        return 0

    def fake_get_logits(*args, **kwargs):
        return (llama_cpp.c_float * (n_vocab * n_ctx))()

    monkeypatch.setattr("llama_cpp.llama_cpp.llama_decode", fake_decode)
    monkeypatch.setattr("llama_cpp.llama_cpp.llama_get_logits", fake_get_logits)

    expected_text = " jumps over the lazy dog."
    expected_tokens = llama.tokenize(expected_text.encode("utf-8"), add_bos=False, special=True)
    eos = llama.token_eos()

    # Sampler replays expected_tokens once, then yields EOS forever.
    sampled = 0

    def fake_sample(*args, **kwargs):
        nonlocal sampled
        if sampled < len(expected_tokens):
            sampled += 1
            return expected_tokens[sampled - 1]
        else:
            return eos

    monkeypatch.setattr("llama_cpp.llama_cpp.llama_sample_token", fake_sample)

    def stream_text(pieces):
        # Concatenate the text of every streamed chunk.
        return "".join(p["choices"][0]["text"] for p in pieces)

    text = "The quick brown fox"

    ## Basic completion until eos
    sampled = 0  # reset
    out = llama.create_completion(text, max_tokens=20)
    assert out["choices"][0]["text"] == expected_text
    assert out["choices"][0]["finish_reason"] == "stop"

    ## Streaming completion until eos
    sampled = 0  # reset
    pieces = list(llama.create_completion(text, max_tokens=20, stream=True))
    assert stream_text(pieces) == expected_text
    assert pieces[-1]["choices"][0]["finish_reason"] == "stop"

    ## Basic completion until stop sequence
    sampled = 0  # reset
    out = llama.create_completion(text, max_tokens=20, stop=["lazy"])
    assert out["choices"][0]["text"] == " jumps over the "
    assert out["choices"][0]["finish_reason"] == "stop"

    ## Streaming completion until stop sequence
    sampled = 0  # reset
    pieces = list(llama.create_completion(text, max_tokens=20, stream=True, stop=["lazy"]))
    assert stream_text(pieces) == " jumps over the "
    assert pieces[-1]["choices"][0]["finish_reason"] == "stop"

    ## Basic completion until length
    sampled = 0  # reset
    out = llama.create_completion(text, max_tokens=2)
    assert out["choices"][0]["text"] == " jumps"
    assert out["choices"][0]["finish_reason"] == "length"

    ## Streaming completion until length
    sampled = 0  # reset
    pieces = list(llama.create_completion(text, max_tokens=2, stream=True))
    assert stream_text(pieces) == " jumps"
    assert pieces[-1]["choices"][0]["finish_reason"] == "length"
def test_llama_pickle():
    """A vocab-only Llama survives a pickle round trip and still tokenizes.

    Fixes: the TemporaryFile handle was never closed (resource leak) and the
    post-load context check used `llama.ctx`, which is inconsistent with the
    `llama._ctx.ctx` attribute path asserted elsewhere in this file.
    """
    import pickle
    import tempfile

    # Context manager guarantees the temp file is closed (and deleted).
    with tempfile.TemporaryFile() as fp:
        llama = llama_cpp.Llama(model_path=MODEL, vocab_only=True)
        pickle.dump(llama, fp)
        fp.seek(0)
        llama = pickle.load(fp)

    assert llama
    # Same attribute path as test_llama_cpp_tokenization uses.
    assert llama._ctx.ctx is not None

    text = b"Hello World"
    assert llama.detokenize(llama.tokenize(text)) == text
def test_utf8(monkeypatch):
    """Multibyte UTF-8 output: complete emoji decodes, a truncated one yields ''."""
    n_ctx = 512
    llama = llama_cpp.Llama(model_path=MODEL, vocab_only=True, n_ctx=n_ctx, logits_all=True)
    n_vocab = llama.n_vocab()

    # Stub out decode/logits so no real inference happens.
    def fake_decode(*args, **kwargs):
        return 0

    def fake_get_logits(*args, **kwargs):
        return (llama_cpp.c_float * (n_vocab * n_ctx))()

    monkeypatch.setattr("llama_cpp.llama_cpp.llama_decode", fake_decode)
    monkeypatch.setattr("llama_cpp.llama_cpp.llama_get_logits", fake_get_logits)

    expected_text = "😀"
    expected_tokens = llama.tokenize(expected_text.encode("utf-8"))
    eos = llama.token_eos()

    # Sampler replays expected_tokens once, then yields EOS forever.
    sampled = 0

    def fake_sample(*args, **kwargs):
        nonlocal sampled
        if sampled < len(expected_tokens):
            sampled += 1
            return expected_tokens[sampled - 1]
        else:
            return eos

    monkeypatch.setattr("llama_cpp.llama_cpp.llama_sample_token", fake_sample)

    ## Basic completion with a complete utf8 multibyte sequence
    sampled = 0  # reset
    out = llama.create_completion("", max_tokens=4)
    assert out["choices"][0]["text"] == expected_text

    ## Basic completion cut off mid-multibyte: no partial bytes leak out
    sampled = 0  # reset
    out = llama.create_completion("", max_tokens=1)
    assert out["choices"][0]["text"] == ""
def test_llama_server():
    """The FastAPI server lists the loaded model at /v1/models."""
    from fastapi.testclient import TestClient

    from llama_cpp.server.app import create_app, Settings

    settings = Settings(model=MODEL, vocab_only=True)
    client = TestClient(create_app(settings))

    response = client.get("/v1/models")
    assert response.json() == {
        "object": "list",
        "data": [
            {
                "id": MODEL,
                "object": "model",
                "owned_by": "me",
                "permissions": [],
            }
        ],
    }
def test_llama_cpp_version():
    """The package exposes a truthy __version__ string."""
    version = llama_cpp.__version__
    assert version