feat: Update llama.cpp

Andrei Betlen 2024-04-29 23:34:55 -04:00
parent df2b5b5d44
commit 97fb860eba
3 changed files with 23 additions and 2 deletions

llama_cpp/llama_cpp.py

@@ -284,6 +284,27 @@ LLAMA_VOCAB_TYPE_WPM = 3
"""BERT tokenizer based on WordPiece"""
+# // pre-tokenization types
+# enum llama_vocab_pre_type {
+# LLAMA_VOCAB_PRE_TYPE_DEFAULT = 0,
+# LLAMA_VOCAB_PRE_TYPE_LLAMA3 = 1,
+# LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_LLM = 2,
+# LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_CODER = 3,
+# LLAMA_VOCAB_PRE_TYPE_FALCON = 4,
+# LLAMA_VOCAB_PRE_TYPE_MPT = 5,
+# LLAMA_VOCAB_PRE_TYPE_STARCODER = 6,
+# LLAMA_VOCAB_PRE_TYPE_GPT2 = 7,
+# };
+LLAMA_VOCAB_PRE_TYPE_DEFAULT = 0
+LLAMA_VOCAB_PRE_TYPE_LLAMA3 = 1
+LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_LLM = 2
+LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_CODER = 3
+LLAMA_VOCAB_PRE_TYPE_FALCON = 4
+LLAMA_VOCAB_PRE_TYPE_MPT = 5
+LLAMA_VOCAB_PRE_TYPE_STARCODER = 6
+LLAMA_VOCAB_PRE_TYPE_GPT2 = 7
# // note: these values should be synchronized with ggml_rope
# // TODO: maybe move this enum to ggml.h (ggml_rope_type)
# enum llama_rope_type {

tests/test_llama.py

@@ -6,7 +6,7 @@ from scipy.special import log_softmax
import llama_cpp
-MODEL = "./vendor/llama.cpp/models/ggml-vocab-llama.gguf"
+MODEL = "./vendor/llama.cpp/models/ggml-vocab-llama-spm.gguf"
def test_llama_cpp_tokenization():

vendor/llama.cpp (vendored)

@@ -1 +1 @@
-Subproject commit 4dba7e8114d84241c842b986e008af8b88d1a019
+Subproject commit 8843a98c2ba97a25e93319a104f9ddfaf83ce4c4