Refactor Llama class internals
parent bbffdaebaa
commit e214a58422
2 changed files with 641 additions and 312 deletions
File diff suppressed because it is too large
@@ -8,7 +8,7 @@ def test_llama_cpp_tokenization():
     llama = llama_cpp.Llama(model_path=MODEL, vocab_only=True, verbose=False)
 
     assert llama
-    assert llama.ctx is not None
+    assert llama._ctx.ctx is not None
 
     text = b"Hello World"
 
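The test now reaches the raw context pointer through llama._ctx.ctx instead of llama.ctx, which suggests the refactor moves the pointer behind an internal context wrapper. Below is a minimal sketch of that shape; the class name _LlamaContext and its constructor arguments are assumptions, since only the _ctx.ctx attribute path is confirmed by this diff:

import llama_cpp

class _LlamaContext:
    """Hypothetical internal wrapper owning the raw llama.cpp context pointer."""

    def __init__(self, *, model, params):
        # llama_new_context_with_model returns the raw context pointer that
        # the tests now reach as `llama._ctx.ctx`
        self.ctx = llama_cpp.llama_new_context_with_model(model.model, params)

    def close(self):
        # Free the native context exactly once
        if self.ctx is not None:
            llama_cpp.llama_free(self.ctx)
            self.ctx = None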
@@ -37,7 +37,7 @@ def test_llama_cpp_tokenization():
 
 def test_llama_patch(monkeypatch):
     llama = llama_cpp.Llama(model_path=MODEL, vocab_only=True)
-    n_vocab = llama_cpp.llama_n_vocab(llama.model)
+    n_vocab = llama_cpp.llama_n_vocab(llama._model.model)
 
     ## Set up mock function
     def mock_eval(*args, **kwargs):
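Similarly, llama_n_vocab is now passed the raw model pointer via llama._model.model, implying a parallel internal model wrapper. A hedged usage sketch under that assumption (nothing beyond the _model.model and _ctx.ctx attribute paths is confirmed by this diff):

llama = llama_cpp.Llama(model_path=MODEL, vocab_only=True)

# Low-level llama_cpp functions still take raw pointers; callers now pull
# them off the internal wrapper objects rather than the Llama instance itself.
n_vocab = llama_cpp.llama_n_vocab(llama._model.model)  # raw llama_model pointer
n_ctx = llama_cpp.llama_n_ctx(llama._ctx.ctx)          # raw llama_context pointer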