From 327eedbfe16d950fc32d6e7a5573fd7f81cbbc74 Mon Sep 17 00:00:00 2001
From: Don Mahurin <@>
Date: Mon, 22 May 2023 23:56:25 -0700
Subject: [PATCH 1/4] fix "from_bytes() missing required argument 'byteorder'"

---
 examples/low_level_api/low_level_api_chat_cpp.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/low_level_api/low_level_api_chat_cpp.py b/examples/low_level_api/low_level_api_chat_cpp.py
index 8773cb1..0e7cbe9 100644
--- a/examples/low_level_api/low_level_api_chat_cpp.py
+++ b/examples/low_level_api/low_level_api_chat_cpp.py
@@ -493,7 +493,7 @@ n_keep = {self.params.n_keep}
             # Contains multi-byte UTF8
             for num, pattern in [(2, 192), (3, 224), (4, 240)]:
                 # Bitwise AND check
-                if pattern & int.from_bytes(cur_char) == pattern:
+                if pattern & int.from_bytes(cur_char, 'little') == pattern:
                     self.multibyte_fix = [cur_char] + ([None] * (num-1))
 
             # Stop incomplete bytes from passing

From d6a7adb17aa84b0be9a48e59a4d18295166bbfef Mon Sep 17 00:00:00 2001
From: Don Mahurin <@>
Date: Mon, 22 May 2023 23:54:57 -0700
Subject: [PATCH 2/4] fix "missing 1 required positional argument: 'min_keep'"

---
 examples/low_level_api/low_level_api_chat_cpp.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/examples/low_level_api/low_level_api_chat_cpp.py b/examples/low_level_api/low_level_api_chat_cpp.py
index 8773cb1..a0736fe 100644
--- a/examples/low_level_api/low_level_api_chat_cpp.py
+++ b/examples/low_level_api/low_level_api_chat_cpp.py
@@ -368,10 +368,10 @@ n_keep = {self.params.n_keep}
                     id = llama_cpp.llama_sample_token_mirostat_v2(self.ctx, candidates_p, llama_cpp.c_float(self.params.mirostat_tau), llama_cpp.c_float(self.params.mirostat_eta), llama_cpp.c_float(mirostat_mu))
                 else:
                     # Temperature sampling
-                    llama_cpp.llama_sample_top_k(self.ctx, candidates_p, top_k)
-                    llama_cpp.llama_sample_tail_free(self.ctx, candidates_p, llama_cpp.c_float(self.params.tfs_z))
-                    llama_cpp.llama_sample_typical(self.ctx, candidates_p, llama_cpp.c_float(self.params.typical_p))
-                    llama_cpp.llama_sample_top_p(self.ctx, candidates_p, llama_cpp.c_float(self.params.top_p))
+                    llama_cpp.llama_sample_top_k(self.ctx, candidates_p, top_k, min_keep=llama_cpp.c_size_t(1))
+                    llama_cpp.llama_sample_tail_free(self.ctx, candidates_p, llama_cpp.c_float(self.params.tfs_z), min_keep=llama_cpp.c_size_t(1))
+                    llama_cpp.llama_sample_typical(self.ctx, candidates_p, llama_cpp.c_float(self.params.typical_p), min_keep=llama_cpp.c_size_t(1))
+                    llama_cpp.llama_sample_top_p(self.ctx, candidates_p, llama_cpp.c_float(self.params.top_p), min_keep=llama_cpp.c_size_t(1))
                     llama_cpp.llama_sample_temperature(self.ctx, candidates_p, llama_cpp.c_float(self.params.temp))
                     id = llama_cpp.llama_sample_token(self.ctx, candidates_p)
             # print("`{}`".format(candidates_p.size))
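
Note on the two fixes above: on Python versions before 3.11,
int.from_bytes() has no default byteorder, so calling it with a single
argument raises the TypeError quoted in PATCH 1/4 (Python 3.11 later
made 'big' the default). When cur_char holds a single byte, the case
this UTF-8 lead-byte check targets, either byte order decodes to the
same integer, so 'little' is a safe choice:

    >>> int.from_bytes(b'\xc3', 'little') == int.from_bytes(b'\xc3', 'big')
    True

Likewise, min_keep is the minimum number of candidate tokens a
llama.cpp sampler must retain. Passing min_keep=1, the value
llama.cpp's own examples use, satisfies the now-required argument
while preserving the previous sampling behaviour.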
From 0fa2ec490396447afa2fc7da3dc5033759c888a6 Mon Sep 17 00:00:00 2001
From: Don Mahurin <@>
Date: Fri, 26 May 2023 06:35:15 -0700
Subject: [PATCH 3/4] low_level_api_chat_cpp.py: Fix missing antiprompt output
 in chat.

---
 examples/low_level_api/low_level_api_chat_cpp.py | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/examples/low_level_api/low_level_api_chat_cpp.py b/examples/low_level_api/low_level_api_chat_cpp.py
index 8773cb1..756609e 100644
--- a/examples/low_level_api/low_level_api_chat_cpp.py
+++ b/examples/low_level_api/low_level_api_chat_cpp.py
@@ -382,12 +382,15 @@ n_keep = {self.params.n_keep}
             # replace end of text token with newline token when in interactive mode
             if (id == llama_cpp.llama_token_eos() and self.params.interactive and not self.params.instruct):
                 id = self.llama_token_newline[0]
+                self.embd.append(id)
                 if (self.use_antiprompt()):
                     # tokenize and inject first reverse prompt
                     self.embd_inp += self.first_antiprompt[0]
-
-            # add it to the context
-            self.embd.append(id)
+                    for id in self.first_antiprompt[0]:
+                        self.embd.append(id)
+            else:
+                # add it to the context
+                self.embd.append(id)
 
             # echo this to console
             self.output_echo = True

From 6075e17cb6c74cc48369a65cc742dda607ebc43f Mon Sep 17 00:00:00 2001
From: Andrei Betlen <abetlen@gmail.com>
Date: Fri, 26 May 2023 17:21:51 -0400
Subject: [PATCH 4/4] Bump version

---
 pyproject.toml | 2 +-
 setup.py       | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/pyproject.toml b/pyproject.toml
index 9633ffc..895a644 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "llama_cpp_python"
-version = "0.1.54"
+version = "0.1.55"
 description = "Python bindings for the llama.cpp library"
 authors = ["Andrei Betlen <abetlen@gmail.com>"]
 license = "MIT"
diff --git a/setup.py b/setup.py
index bd7192f..2136d8d 100644
--- a/setup.py
+++ b/setup.py
@@ -10,7 +10,7 @@ setup(
     description="A Python wrapper for llama.cpp",
     long_description=long_description,
     long_description_content_type="text/markdown",
-    version="0.1.54",
+    version="0.1.55",
     author="Andrei Betlen",
     author_email="abetlen@gmail.com",
     license="MIT",
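
Note on PATCH 3/4: before that change, when an end-of-text token was
replaced by a newline in interactive mode, the first reverse prompt was
appended only to self.embd_inp (the pending input queue), never to
self.embd (the tokens that get evaluated and echoed), so the antiprompt
text never appeared in the chat output. A condensed sketch of the
patched control flow, with names as in low_level_api_chat_cpp.py and
the surrounding self.params lookups abbreviated:

    if id == llama_cpp.llama_token_eos() and interactive and not instruct:
        id = self.llama_token_newline[0]   # swap EOS for a newline token
        self.embd.append(id)               # echo the newline
        if self.use_antiprompt():
            self.embd_inp += self.first_antiprompt[0]  # queue it as input
            for id in self.first_antiprompt[0]:
                self.embd.append(id)       # and echo the reverse prompt
    else:
        self.embd.append(id)               # ordinary token: add to context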