From f1615f05e6032bf79d4dc0c683a518ed39aca55a Mon Sep 17 00:00:00 2001 From: Mug <> Date: Mon, 3 Apr 2023 22:54:46 +0200 Subject: [PATCH 01/10] Chat llama.cpp example implementation --- examples/low_level_api_chatllama_cpp.py | 235 ++++++++++++++++++++++++ 1 file changed, 235 insertions(+) create mode 100644 examples/low_level_api_chatllama_cpp.py diff --git a/examples/low_level_api_chatllama_cpp.py b/examples/low_level_api_chatllama_cpp.py new file mode 100644 index 0000000..a244867 --- /dev/null +++ b/examples/low_level_api_chatllama_cpp.py @@ -0,0 +1,235 @@ +""" +This is an example implementation of main.cpp from llama.cpp +Quirks: + * Its not exactly alike since this port is designed around programmatic I/O + * Input is always echoed if on, so it should be turned off when using "input()" + * The first antiprompt should be the userprompt like "\nUser:", + because its added when n_predict is reached (aka generation ended prematurely) + * n_predict can be set to -1 for unlimited length responses +""" +import llama_cpp + +def toIntArray(lst): + return [int(i) for i in lst] + +# A LLaMA interactive session +class LLaMAInteract: + def __init__(self, + primer: str="", + model: str="./models/30B/ggml-model-q4_0.bin", + n_ctx: int=1024, + seed: int=0, + n_threads: int=8, + antiprompt: list[str]=[], + input_echo: bool=True, + n_predict: int=20, + n_batch: int=8, + repeat_last_n: int=64, + top_k: int=50, + top_p: float=1., + temp: float=1.0, + repeat_penalty: float=1, + ) -> None: + # input args + self.n_threads = n_threads + self.input_echo = input_echo + self.n_predict = n_predict + self.n_batch = n_batch + self.repeat_last_n = repeat_last_n + self.top_k=top_k + self.top_p=top_p + self.temp=temp + self.repeat_penalty=repeat_penalty + self.n_ctx = n_ctx + self.seed = seed + + # runtime args + self.input_consumed = 0 + self.embd = [] + self.embd_inp = [] + self.n_past = 0 + self.first_antiprompt = [] + self.remaining_tokens = self.n_predict + self.output_echo = input_echo + + # model load + self.lparams = llama_cpp.llama_context_default_params() + self.lparams.n_ctx = self.n_ctx + self.lparams.seed = self.seed + self.ctx = llama_cpp.llama_init_from_file(model.encode("utf8"), self.lparams) + + # determine the required inference memory per token: + tmp = [0, 1, 2, 3] + llama_cpp.llama_eval(self.ctx, (llama_cpp.c_int * len(tmp))(*tmp), len(tmp), 0, self.n_threads) + + # determine newline token + self.llama_token_newline = (llama_cpp.llama_token * 1)() + llama_cpp.llama_tokenize(self.ctx, b"\n", self.llama_token_newline, len(self.llama_token_newline), False) + self.llama_token_newline = toIntArray(self.llama_token_newline) + + # primer feed + if (len(primer) > 0): + self.input(primer) + self.n_keep = len(self.embd_inp) + + # create internal context + self.n_ctx = int(llama_cpp.llama_n_ctx(self.ctx)) + self.last_n_tokens = [0]*self.n_ctx #TODO: deque doesnt support slices + + # determine antiprompt tokens + for i in antiprompt: + d_antiprompt = (llama_cpp.llama_token * (len(i) + 1))() + n_antiprompt = llama_cpp.llama_tokenize(self.ctx, i.encode("utf8"), d_antiprompt, len(d_antiprompt), False) + self.first_antiprompt.append(toIntArray(d_antiprompt[:n_antiprompt])) + + # if an antiprompt is present + def use_antiprompt(self): + return len(self.first_antiprompt) > 0 + + def generate(self): + while self.remaining_tokens > 0 or self.use_antiprompt(): + # predict + if len(self.embd) > 0: + # infinite text generation via context swapping + # if we run out of context: + # - take the n_keep first tokens from 
the original prompt (via n_past) + # - take half of the last (n_ctx - n_keep) tokens and recompute the logits in a batch + if (self.n_past + len(self.embd) > self.n_ctx): + n_left = self.n_past - self.n_keep + self.n_past = self.n_keep + + # insert n_left/2 tokens at the start of embd from last_n_tokens + _insert = self.last_n_tokens[ + -(int(n_left/2) - len(self.embd)):-len(self.embd) + ] + self.embd[:len(_insert)] = _insert + #TODO: Still untested + + if (llama_cpp.llama_eval( + self.ctx, (llama_cpp.llama_token * len(self.embd))(*self.embd), len(self.embd), self.n_past, self.n_threads + ) != 0): + raise Exception("Failed to llama_eval!") + + self.n_past += len(self.embd) + self.embd = [] + if len(self.embd_inp) <= self.input_consumed: + # out of user input, sample next token + _arr = self.last_n_tokens[-min(self.repeat_last_n, self.n_past):] + id = llama_cpp.llama_sample_top_p_top_k( + self.ctx, + (llama_cpp.llama_token * len(_arr))(*_arr), + len(_arr), + self.top_k, + self.top_p, + self.temp, + self.repeat_penalty, + ) + self.last_n_tokens.pop(0) + self.last_n_tokens.append(int(id)) + + # replace end of text token with newline token when in interactive mode + if (id == llama_cpp.llama_token_eos() and self.use_antiprompt()): + id = self.llama_token_newline[0] + # tokenize and inject first reverse prompt + self.embd_inp += self.first_antiprompt[0] + + # add it to the context + self.embd.append(int(id)) + + # echo this to console + self.output_echo = True + + # decrement remaining sampling budget + self.remaining_tokens -= 1 + else: + # output to console if input echo is on + self.output_echo = self.input_echo + + # some user input remains from prompt or interaction, forward it to processing + while len(self.embd_inp) > self.input_consumed: + self.embd.append(int(self.embd_inp[self.input_consumed])) + self.last_n_tokens.pop(0) + self.last_n_tokens.append(int(self.embd_inp[self.input_consumed])) + self.input_consumed += 1 + if len(self.embd) >= self.n_batch: + break + + # display tokens + if self.output_echo: + for id in self.embd: + yield id + + # if antiprompt is present, stop + if (self.use_antiprompt() and len(self.embd_inp) <= self.input_consumed): + for i in self.first_antiprompt: + if i == self.last_n_tokens[-len(i):]: + return + + # if end of generation + if len(self.embd) > 0 and self.embd[-1] == llama_cpp.llama_token_eos(): + break + + # respect n_predict even if antiprompt is present + if (self.use_antiprompt() and self.remaining_tokens <= 0 and self.n_predict != -1): + self.embd_inp += self.first_antiprompt[0] + break + + def past(self): + for id in self.last_n_tokens[-self.n_past:]: + yield llama_cpp.llama_token_to_str(self.ctx, id).decode("utf-8") + + def input(self, prompt: str): + embd_arr = (llama_cpp.llama_token * (len(prompt) + 1))() + n_of_tok = llama_cpp.llama_tokenize(self.ctx, prompt.encode("utf8"), embd_arr, len(embd_arr), True) + self.embd_inp += toIntArray(embd_arr[:n_of_tok]) + + def output(self): + self.remaining_tokens = self.n_predict + for id in self.generate(): + yield llama_cpp.llama_token_to_str(self.ctx, id).decode("utf-8") + +if __name__ == "__main__": + from datetime import datetime + + USER_NAME="User" + AI_NAME="ChatLLaMa" + + time_now = datetime.now() + prompt = f"""Text transcript of a never ending dialog, where {USER_NAME} interacts with an AI assistant named {AI_NAME}. +{AI_NAME} is helpful, kind, honest, friendly, good at writing and never fails to answer {USER_NAME}’s requests immediately and with details and precision. 
+There are no annotations like (30 seconds passed...) or (to himself), just what {USER_NAME} and {AI_NAME} say aloud to each other. +The dialog lasts for years, the entirety of it is shared below. It's 10000 pages long. +The transcript only includes text, it does not include markup like HTML and Markdown. + +{USER_NAME}: Hello, {AI_NAME}! +{AI_NAME}: Hello {USER_NAME}! How may I help you today? +{USER_NAME}: What time is it? +{AI_NAME}: It is {time_now.strftime("%H:%M")}. +{USER_NAME}: What year is it? +{AI_NAME}: We are in {time_now.strftime("%Y")}. +{USER_NAME}: What is a cat? +{AI_NAME}: A cat is a domestic species of small carnivorous mammal. It is the only domesticated species in the family Felidae. +{USER_NAME}: Name a color. +{AI_NAME}: Blue +{USER_NAME}:""" + + print("Loading model...") + ll = LLaMAInteract(prompt, + model="./models/30B/ggml-model-q4_0.bin", + n_ctx=2048, + antiprompt=[f"\n{USER_NAME}:"], + repeat_last_n=256, + n_predict=2048, + temp=0.7, top_p=0.5, top_k=40, repeat_penalty=1.17647 + ) + print("Loaded model!") + + for i in ll.output(): + print(i,end="",flush=True) + ll.input_echo = False + + inp = lambda x: f" {x}\n" + while True: + ll.input(inp(input(' '))) + for i in ll.output(): + print(i,end="",flush=True) \ No newline at end of file From 0b32bb3d43638b8cd606df0c83f89fdcede7ed1c Mon Sep 17 00:00:00 2001 From: Mug <> Date: Tue, 4 Apr 2023 11:48:48 +0200 Subject: [PATCH 02/10] Add instruction mode --- examples/low_level_api_chatllama_cpp.py | 101 +++++++++++++++--------- 1 file changed, 64 insertions(+), 37 deletions(-) diff --git a/examples/low_level_api_chatllama_cpp.py b/examples/low_level_api_chatllama_cpp.py index a244867..6462121 100644 --- a/examples/low_level_api_chatllama_cpp.py +++ b/examples/low_level_api_chatllama_cpp.py @@ -5,24 +5,26 @@ Quirks: * Input is always echoed if on, so it should be turned off when using "input()" * The first antiprompt should be the userprompt like "\nUser:", because its added when n_predict is reached (aka generation ended prematurely) - * n_predict can be set to -1 for unlimited length responses + * n_predict can be set to -1 for unlimited length responses (or just a really high value) + * It's always in interactive mode, generation ends either by reaching an antiprompt + or running out of n_predict. 
+ * Instruction mode adds its own antiprompt """ import llama_cpp -def toIntArray(lst): - return [int(i) for i in lst] - # A LLaMA interactive session class LLaMAInteract: def __init__(self, primer: str="", model: str="./models/30B/ggml-model-q4_0.bin", + instruct: bool=False, n_ctx: int=1024, seed: int=0, n_threads: int=8, antiprompt: list[str]=[], input_echo: bool=True, n_predict: int=20, + n_keep: int=0, n_batch: int=8, repeat_last_n: int=64, top_k: int=50, @@ -31,17 +33,17 @@ class LLaMAInteract: repeat_penalty: float=1, ) -> None: # input args + self.instruct = instruct self.n_threads = n_threads self.input_echo = input_echo self.n_predict = n_predict + self.n_keep = n_keep self.n_batch = n_batch self.repeat_last_n = repeat_last_n self.top_k=top_k self.top_p=top_p self.temp=temp self.repeat_penalty=repeat_penalty - self.n_ctx = n_ctx - self.seed = seed # runtime args self.input_consumed = 0 @@ -54,8 +56,8 @@ class LLaMAInteract: # model load self.lparams = llama_cpp.llama_context_default_params() - self.lparams.n_ctx = self.n_ctx - self.lparams.seed = self.seed + self.lparams.n_ctx = n_ctx + self.lparams.seed = seed self.ctx = llama_cpp.llama_init_from_file(model.encode("utf8"), self.lparams) # determine the required inference memory per token: @@ -63,29 +65,44 @@ class LLaMAInteract: llama_cpp.llama_eval(self.ctx, (llama_cpp.c_int * len(tmp))(*tmp), len(tmp), 0, self.n_threads) # determine newline token - self.llama_token_newline = (llama_cpp.llama_token * 1)() - llama_cpp.llama_tokenize(self.ctx, b"\n", self.llama_token_newline, len(self.llama_token_newline), False) - self.llama_token_newline = toIntArray(self.llama_token_newline) + self.llama_token_newline = self._tokenize("\n", False) + self.inp_prefix = self._tokenize("\n\n### Instruction:\n\n") + self.inp_suffix = self._tokenize("\n\n### Response:\n\n", False) + + # add instruction as antiprompt + if (self.instruct): + self.first_antiprompt.append(self.inp_prefix) # primer feed if (len(primer) > 0): - self.input(primer) - self.n_keep = len(self.embd_inp) + self.embd_inp += self._tokenize(primer) + + # break immediately if using instruct + self.init_break = self.instruct + + # number of tokens to keep when resetting context + if (self.n_keep < 0 or self.n_keep > len(self.embd_inp) or self.instruct): + self.n_keep = len(self.embd_inp) # create internal context - self.n_ctx = int(llama_cpp.llama_n_ctx(self.ctx)) + self.n_ctx = llama_cpp.llama_n_ctx(self.ctx) self.last_n_tokens = [0]*self.n_ctx #TODO: deque doesnt support slices # determine antiprompt tokens for i in antiprompt: - d_antiprompt = (llama_cpp.llama_token * (len(i) + 1))() - n_antiprompt = llama_cpp.llama_tokenize(self.ctx, i.encode("utf8"), d_antiprompt, len(d_antiprompt), False) - self.first_antiprompt.append(toIntArray(d_antiprompt[:n_antiprompt])) + self.first_antiprompt.append(self._tokenize(i, False)) + + # tokenize a prompt + def _tokenize(self, prompt, bos=True): + _arr = (llama_cpp.llama_token * (len(prompt) + 1))() + _n = llama_cpp.llama_tokenize(self.ctx, prompt.encode("utf8"), _arr, len(_arr), bos) + return _arr[:_n] # if an antiprompt is present def use_antiprompt(self): return len(self.first_antiprompt) > 0 + # generate tokens def generate(self): while self.remaining_tokens > 0 or self.use_antiprompt(): # predict @@ -125,16 +142,16 @@ class LLaMAInteract: self.repeat_penalty, ) self.last_n_tokens.pop(0) - self.last_n_tokens.append(int(id)) + self.last_n_tokens.append(id) # replace end of text token with newline token when in interactive mode - if (id == 
llama_cpp.llama_token_eos() and self.use_antiprompt()): + if (id == llama_cpp.llama_token_eos() and self.use_antiprompt() and not self.instruct): id = self.llama_token_newline[0] # tokenize and inject first reverse prompt self.embd_inp += self.first_antiprompt[0] # add it to the context - self.embd.append(int(id)) + self.embd.append(id) # echo this to console self.output_echo = True @@ -147,9 +164,9 @@ class LLaMAInteract: # some user input remains from prompt or interaction, forward it to processing while len(self.embd_inp) > self.input_consumed: - self.embd.append(int(self.embd_inp[self.input_consumed])) + self.embd.append(self.embd_inp[self.input_consumed]) self.last_n_tokens.pop(0) - self.last_n_tokens.append(int(self.embd_inp[self.input_consumed])) + self.last_n_tokens.append(self.embd_inp[self.input_consumed]) self.input_consumed += 1 if len(self.embd) >= self.n_batch: break @@ -159,11 +176,17 @@ class LLaMAInteract: for id in self.embd: yield id - # if antiprompt is present, stop - if (self.use_antiprompt() and len(self.embd_inp) <= self.input_consumed): - for i in self.first_antiprompt: - if i == self.last_n_tokens[-len(i):]: - return + if (len(self.embd_inp) <= self.input_consumed): + # if antiprompt is present, stop + if (self.use_antiprompt()): + for i in self.first_antiprompt: + if i == self.last_n_tokens[-len(i):]: + return + + # if we are using instruction mode, and we have processed the initial prompt + if (self.init_break): + self.init_break = False + break # if end of generation if len(self.embd) > 0 and self.embd[-1] == llama_cpp.llama_token_eos(): @@ -174,15 +197,20 @@ class LLaMAInteract: self.embd_inp += self.first_antiprompt[0] break + # return past text def past(self): for id in self.last_n_tokens[-self.n_past:]: yield llama_cpp.llama_token_to_str(self.ctx, id).decode("utf-8") + # write input def input(self, prompt: str): - embd_arr = (llama_cpp.llama_token * (len(prompt) + 1))() - n_of_tok = llama_cpp.llama_tokenize(self.ctx, prompt.encode("utf8"), embd_arr, len(embd_arr), True) - self.embd_inp += toIntArray(embd_arr[:n_of_tok]) + if (self.instruct): + self.embd_inp += self.inp_prefix + self.embd_inp += self._tokenize(prompt + "\n") + if (self.instruct): + self.embd_inp += self.inp_suffix + # write output def output(self): self.remaining_tokens = self.n_predict for id in self.generate(): @@ -193,7 +221,7 @@ if __name__ == "__main__": USER_NAME="User" AI_NAME="ChatLLaMa" - + time_now = datetime.now() prompt = f"""Text transcript of a never ending dialog, where {USER_NAME} interacts with an AI assistant named {AI_NAME}. {AI_NAME} is helpful, kind, honest, friendly, good at writing and never fails to answer {USER_NAME}’s requests immediately and with details and precision. 
@@ -214,7 +242,7 @@ The transcript only includes text, it does not include markup like HTML and Mark {USER_NAME}:""" print("Loading model...") - ll = LLaMAInteract(prompt, + m = LLaMAInteract(prompt, model="./models/30B/ggml-model-q4_0.bin", n_ctx=2048, antiprompt=[f"\n{USER_NAME}:"], @@ -224,12 +252,11 @@ The transcript only includes text, it does not include markup like HTML and Mark ) print("Loaded model!") - for i in ll.output(): + for i in m.output(): print(i,end="",flush=True) - ll.input_echo = False + m.input_echo = False - inp = lambda x: f" {x}\n" while True: - ll.input(inp(input(' '))) - for i in ll.output(): + m.input(" " + input('\n> ' if m.instruct else " ")) + for i in m.output(): print(i,end="",flush=True) \ No newline at end of file From da5a6a708924eeb48c845c88d98999d6bb5feff3 Mon Sep 17 00:00:00 2001 From: Mug <> Date: Tue, 4 Apr 2023 16:18:26 +0200 Subject: [PATCH 03/10] Added instruction mode, fixed infinite generation, and various other fixes --- examples/low_level_api_chatllama_cpp.py | 62 ++++++++++++++++++------- 1 file changed, 44 insertions(+), 18 deletions(-) diff --git a/examples/low_level_api_chatllama_cpp.py b/examples/low_level_api_chatllama_cpp.py index 6462121..357b381 100644 --- a/examples/low_level_api_chatllama_cpp.py +++ b/examples/low_level_api_chatllama_cpp.py @@ -8,7 +8,9 @@ Quirks: * n_predict can be set to -1 for unlimited length responses (or just a really high value) * It's always in interactive mode, generation ends either by reaching an antiprompt or running out of n_predict. - * Instruction mode adds its own antiprompt + * Instruction mode adds its own antiprompt. + You should also still be feeding the model with a "primer" prompt that + shows it the expected format. """ import llama_cpp @@ -31,6 +33,8 @@ class LLaMAInteract: top_p: float=1., temp: float=1.0, repeat_penalty: float=1, + instruct_inp_prefix: str="\n\n### Instruction:\n\n", + instruct_inp_suffix: str="\n\n### Response:\n\n", ) -> None: # input args self.instruct = instruct @@ -66,12 +70,12 @@ class LLaMAInteract: # determine newline token self.llama_token_newline = self._tokenize("\n", False) - self.inp_prefix = self._tokenize("\n\n### Instruction:\n\n") - self.inp_suffix = self._tokenize("\n\n### Response:\n\n", False) + self.inp_prefix = self._tokenize(instruct_inp_prefix) + self.inp_suffix = self._tokenize(instruct_inp_suffix, False) # add instruction as antiprompt if (self.instruct): - self.first_antiprompt.append(self.inp_prefix) + self.first_antiprompt.append(self.inp_prefix.strip()) # primer feed if (len(primer) > 0): @@ -117,10 +121,9 @@ class LLaMAInteract: # insert n_left/2 tokens at the start of embd from last_n_tokens _insert = self.last_n_tokens[ - -(int(n_left/2) - len(self.embd)):-len(self.embd) + self.n_ctx - int(n_left/2) - len(self.embd):-len(self.embd) ] - self.embd[:len(_insert)] = _insert - #TODO: Still untested + self.embd = _insert + self.embd if (llama_cpp.llama_eval( self.ctx, (llama_cpp.llama_token * len(self.embd))(*self.embd), len(self.embd), self.n_past, self.n_threads @@ -197,6 +200,12 @@ class LLaMAInteract: self.embd_inp += self.first_antiprompt[0] break + def __enter__(self): + return self + + def __exit__(self, type, value, tb): + llama_cpp.llama_free(self.ctx) + # return past text def past(self): for id in self.last_n_tokens[-self.n_past:]: @@ -206,7 +215,7 @@ class LLaMAInteract: def input(self, prompt: str): if (self.instruct): self.embd_inp += self.inp_prefix - self.embd_inp += self._tokenize(prompt + "\n") + self.embd_inp += 
self._tokenize(prompt) if (self.instruct): self.embd_inp += self.inp_suffix @@ -242,21 +251,38 @@ The transcript only includes text, it does not include markup like HTML and Mark {USER_NAME}:""" print("Loading model...") - m = LLaMAInteract(prompt, + with LLaMAInteract(prompt, model="./models/30B/ggml-model-q4_0.bin", n_ctx=2048, antiprompt=[f"\n{USER_NAME}:"], repeat_last_n=256, n_predict=2048, temp=0.7, top_p=0.5, top_k=40, repeat_penalty=1.17647 - ) - print("Loaded model!") + ) as m: + print("Loaded model!") - for i in m.output(): - print(i,end="",flush=True) - m.input_echo = False - - while True: - m.input(" " + input('\n> ' if m.instruct else " ")) for i in m.output(): - print(i,end="",flush=True) \ No newline at end of file + print(i,end="",flush=True) + m.input_echo = False + + def inp(): + out = "" + while (t := input()).endswith("\\"): + out += t[:-1] + "\n" + return out + t + "\n" + + while True: + if (m.instruct): + print('\n> ', end="") + m.input(inp()) + else: + print(f" ", end="") + m.input(f" {inp()}{AI_NAME}:") + print(f"{AI_NAME}: ",end="") + + try: + for i in m.output(): + print(i,end="",flush=True) + except KeyboardInterrupt: + print(f"\n{USER_NAME}:",end="") + m.input(f"\n{USER_NAME}:") From 9cde7973ccc2f823fd518b8dadb7c395175c6697 Mon Sep 17 00:00:00 2001 From: Mug <> Date: Tue, 4 Apr 2023 16:20:27 +0200 Subject: [PATCH 04/10] Fix stripping instruction prompt --- examples/low_level_api_chatllama_cpp.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/low_level_api_chatllama_cpp.py b/examples/low_level_api_chatllama_cpp.py index 357b381..f7540ee 100644 --- a/examples/low_level_api_chatllama_cpp.py +++ b/examples/low_level_api_chatllama_cpp.py @@ -75,7 +75,7 @@ class LLaMAInteract: # add instruction as antiprompt if (self.instruct): - self.first_antiprompt.append(self.inp_prefix.strip()) + self.first_antiprompt.append(self._tokenize(self.inp_prefix.strip())) # primer feed if (len(primer) > 0): From c862e8bac523e1bcf6b92e058741a81905ebab96 Mon Sep 17 00:00:00 2001 From: Mug <> Date: Tue, 4 Apr 2023 17:54:47 +0200 Subject: [PATCH 05/10] Fix repeating instructions and an antiprompt bug --- examples/low_level_api_chatllama_cpp.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/examples/low_level_api_chatllama_cpp.py b/examples/low_level_api_chatllama_cpp.py index f7540ee..594d15e 100644 --- a/examples/low_level_api_chatllama_cpp.py +++ b/examples/low_level_api_chatllama_cpp.py @@ -75,7 +75,7 @@ class LLaMAInteract: # add instruction as antiprompt if (self.instruct): - self.first_antiprompt.append(self._tokenize(self.inp_prefix.strip())) + self.first_antiprompt.append(self._tokenize(instruct_inp_prefix.strip(), False)) # primer feed if (len(primer) > 0): @@ -197,7 +197,8 @@ class LLaMAInteract: # respect n_predict even if antiprompt is present if (self.use_antiprompt() and self.remaining_tokens <= 0 and self.n_predict != -1): - self.embd_inp += self.first_antiprompt[0] + if not self.instruct: + self.embd_inp += self.first_antiprompt[0] break def __enter__(self): @@ -213,7 +214,7 @@ class LLaMAInteract: # write input def input(self, prompt: str): - if (self.instruct): + if (self.instruct and self.last_n_tokens[-len(self.inp_prefix):] != self.inp_prefix): self.embd_inp += self.inp_prefix self.embd_inp += self._tokenize(prompt) if (self.instruct): @@ -284,5 +285,6 @@ The transcript only includes text, it does not include markup like HTML and Mark for i in m.output(): print(i,end="",flush=True) except KeyboardInterrupt: - 
print(f"\n{USER_NAME}:",end="") - m.input(f"\n{USER_NAME}:") + if not m.instruct: + print(f"\n{USER_NAME}:",end="") + m.input(f"\n{USER_NAME}:") From 99ceecfccd3749291193d880047b238e2a18f2f8 Mon Sep 17 00:00:00 2001 From: Mug <> Date: Wed, 5 Apr 2023 14:28:02 +0200 Subject: [PATCH 06/10] Move to new examples directory --- examples/{ => low_level_api}/low_level_api_chatllama_cpp.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename examples/{ => low_level_api}/low_level_api_chatllama_cpp.py (100%) diff --git a/examples/low_level_api_chatllama_cpp.py b/examples/low_level_api/low_level_api_chatllama_cpp.py similarity index 100% rename from examples/low_level_api_chatllama_cpp.py rename to examples/low_level_api/low_level_api_chatllama_cpp.py From 283e59c5e9d1d44916b2349660b3eee3c34a4bb4 Mon Sep 17 00:00:00 2001 From: Mug <> Date: Wed, 5 Apr 2023 14:47:24 +0200 Subject: [PATCH 07/10] Fix bug in init_break not being set when exited via antiprompt and others. --- .../low_level_api/low_level_api_chatllama_cpp.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/examples/low_level_api/low_level_api_chatllama_cpp.py b/examples/low_level_api/low_level_api_chatllama_cpp.py index 594d15e..02adf3c 100644 --- a/examples/low_level_api/low_level_api_chatllama_cpp.py +++ b/examples/low_level_api/low_level_api_chatllama_cpp.py @@ -33,6 +33,7 @@ class LLaMAInteract: top_p: float=1., temp: float=1.0, repeat_penalty: float=1, + init_break: bool=True, instruct_inp_prefix: str="\n\n### Instruction:\n\n", instruct_inp_suffix: str="\n\n### Response:\n\n", ) -> None: @@ -48,6 +49,7 @@ class LLaMAInteract: self.top_p=top_p self.temp=temp self.repeat_penalty=repeat_penalty + self.init_break = init_break # runtime args self.input_consumed = 0 @@ -81,9 +83,6 @@ class LLaMAInteract: if (len(primer) > 0): self.embd_inp += self._tokenize(primer) - # break immediately if using instruct - self.init_break = self.instruct - # number of tokens to keep when resetting context if (self.n_keep < 0 or self.n_keep > len(self.embd_inp) or self.instruct): self.n_keep = len(self.embd_inp) @@ -182,13 +181,14 @@ class LLaMAInteract: if (len(self.embd_inp) <= self.input_consumed): # if antiprompt is present, stop if (self.use_antiprompt()): - for i in self.first_antiprompt: - if i == self.last_n_tokens[-len(i):]: - return + if True in [ + i == self.last_n_tokens[-len(i):] + for i in self.first_antiprompt + ]: + break # if we are using instruction mode, and we have processed the initial prompt if (self.init_break): - self.init_break = False break # if end of generation @@ -201,6 +201,8 @@ class LLaMAInteract: self.embd_inp += self.first_antiprompt[0] break + self.init_break = False + def __enter__(self): return self From 085cc92b1f1def4f13c39bdad4d00c87272a99a5 Mon Sep 17 00:00:00 2001 From: Mug <> Date: Thu, 6 Apr 2023 15:30:57 +0200 Subject: [PATCH 08/10] Better llama.cpp interoperability Has some too many newline issues so WIP --- examples/__init__.py | 0 examples/common.py | 135 +++++++ examples/low_level_api/__init__.py | 0 .../low_level_api_chatllama_cpp.py | 342 ++++++++++++------ 4 files changed, 357 insertions(+), 120 deletions(-) create mode 100644 examples/__init__.py create mode 100644 examples/common.py create mode 100644 examples/low_level_api/__init__.py diff --git a/examples/__init__.py b/examples/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/examples/common.py b/examples/common.py new file mode 100644 index 0000000..f80d995 --- /dev/null +++ 
b/examples/common.py @@ -0,0 +1,135 @@ +import os +import argparse + +from dataclasses import dataclass, field +from typing import List, Optional + +# Based on https://github.com/ggerganov/llama.cpp/blob/master/examples/common.cpp + + +@dataclass +class GptParams: + seed: int = -1 + n_threads: int = min(4, os.cpu_count() or 1) + n_predict: int = 128 + repeat_last_n: int = 64 + n_parts: int = -1 + n_ctx: int = 512 + n_batch: int = 8 + n_keep: int = 0 + + top_k: int = 40 + top_p: float = 0.95 + temp: float = 0.80 + repeat_penalty: float = 1.10 + + model: str = "./models/llama-7B/ggml-model.bin" + prompt: str = "" + input_prefix: str = " " + fix_prefix: str = "" + output_postfix: str = "" + input_echo: bool = True, + + antiprompt: List[str] = field(default_factory=list) + + memory_f16: bool = True + random_prompt: bool = False + use_color: bool = False + interactive: bool = False + + embedding: bool = False + interactive_start: bool = False + + instruct: bool = False + ignore_eos: bool = False + perplexity: bool = False + use_mlock: bool = False + mem_test: bool = False + verbose_prompt: bool = False + + # Default instructions for Alpaca + # switch to "Human" and "Assistant" for Vicuna. + instruct_inp_prefix: str="\n\n### Instruction:\n\n", + instruct_inp_suffix: str="\n\n### Response:\n\n", + + +def gpt_params_parse(argv = None, params: Optional[GptParams] = None): + if params is None: + params = GptParams() + + parser = argparse.ArgumentParser() + parser.add_argument("-h", "--help", action="store_true", help="show this help message and exit") + parser.add_argument("-s", "--seed", type=int, default=-1, help="",dest="seed") + parser.add_argument("-t", "--threads", type=int, default=1, help="",dest="n_threads") + parser.add_argument("-p", "--prompt", type=str, default="", help="",dest="prompt") + parser.add_argument("-f", "--file", type=str, default=None, help="") + parser.add_argument("-c", "--ctx_size", type=int, default=512, help="",dest="n_ctx") + parser.add_argument("--memory_f32", action="store_false", help="",dest="memory_f16") + parser.add_argument("--top_p", type=float, default=0.9, help="",dest="top_p") + parser.add_argument("--temp", type=float, default=1.0, help="",dest="temp") + parser.add_argument("--repeat_last_n", type=int, default=64, help="",dest="repeat_last_n") + parser.add_argument("--repeat_penalty", type=float, default=1.0, help="",dest="repeat_penalty") + parser.add_argument("-b", "--batch_size", type=int, default=8, help="",dest="n_batch") + parser.add_argument("--keep", type=int, default=0, help="",dest="n_keep") + parser.add_argument("-m", "--model", type=str, help="",dest="model") + parser.add_argument( + "-i", "--interactive", action="store_true", help="run in interactive mode", dest="interactive" + ) + parser.add_argument("--embedding", action="store_true", help="", dest="embedding") + parser.add_argument("--interactive-start", action="store_true", help="", dest="interactive_start") + parser.add_argument( + "--interactive-first", + action="store_true", + help="run in interactive mode and wait for input right away", + dest="interactive" + ) + parser.add_argument( + "-ins", + "--instruct", + action="store_true", + help="run in instruction mode (use with Alpaca or Vicuna models)", + dest="instruct" + ) + parser.add_argument( + "--color", + action="store_true", + help="colorise output to distinguish prompt and user input from generations", + dest="use_color" + ) + parser.add_argument("--mlock", action="store_true",dest="use_mlock") + parser.add_argument("--mtest", 
action="store_true",dest="mem_test") + parser.add_argument( + "-r", + "--reverse-prompt", + type=str, + action='append', + help="run in interactive mode and poll user input upon seeing PROMPT (can be\nspecified more than once for multiple prompts).", + dest="antiprompt" + ) + parser.add_argument("--perplexity", action="store_true", help="", dest="perplexity") + parser.add_argument("--ignore-eos", action="store_true", help="", dest="ignore_eos") + parser.add_argument("--n_parts", type=int, default=-1, help="", dest="n_parts") + parser.add_argument("--random-prompt", action="store_true", help="", dest="random_prompt") + parser.add_argument("--in-prefix", type=str, default=" ", help="", dest="input_prefix") + parser.add_argument("--fix-prefix", type=str, default=" ", help="", dest="fix_prefix") + parser.add_argument("--out-postfix", type=str, default="", help="", dest="output_postfix") + parser.add_argument("--input-noecho", action="store_false", help="", dest="input_echo") + args = parser.parse_args(argv) + return args + +def gpt_random_prompt(rng): + return [ + "So", + "Once upon a time", + "When", + "The", + "After", + "If", + "import", + "He", + "She", + "They", + ][rng % 10] + +if __name__ == "__main__": + print(GptParams(gpt_params_parse())) diff --git a/examples/low_level_api/__init__.py b/examples/low_level_api/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/examples/low_level_api/low_level_api_chatllama_cpp.py b/examples/low_level_api/low_level_api_chatllama_cpp.py index 02adf3c..5d2eadd 100644 --- a/examples/low_level_api/low_level_api_chatllama_cpp.py +++ b/examples/low_level_api/low_level_api_chatllama_cpp.py @@ -12,102 +12,182 @@ Quirks: You should also still be feeding the model with a "primer" prompt that shows it the expected format. 
""" +import sys +from time import time +from os import cpu_count + import llama_cpp +from common import GptParams, gpt_params_parse, gpt_random_prompt + +ANSI_COLOR_RESET = "\x1b[0m" +ANSI_COLOR_YELLOW = "\x1b[33m" +ANSI_BOLD = "\x1b[1m" +ANSI_COLOR_GREEN = "\x1b[32m" + +CONSOLE_COLOR_DEFAULT = ANSI_COLOR_RESET +CONSOLE_COLOR_PROMPT = ANSI_COLOR_YELLOW +CONSOLE_COLOR_USER_INPUT = ANSI_BOLD + ANSI_COLOR_GREEN # A LLaMA interactive session class LLaMAInteract: - def __init__(self, - primer: str="", - model: str="./models/30B/ggml-model-q4_0.bin", - instruct: bool=False, - n_ctx: int=1024, - seed: int=0, - n_threads: int=8, - antiprompt: list[str]=[], - input_echo: bool=True, - n_predict: int=20, - n_keep: int=0, - n_batch: int=8, - repeat_last_n: int=64, - top_k: int=50, - top_p: float=1., - temp: float=1.0, - repeat_penalty: float=1, - init_break: bool=True, - instruct_inp_prefix: str="\n\n### Instruction:\n\n", - instruct_inp_suffix: str="\n\n### Response:\n\n", - ) -> None: + def __init__(self, params: GptParams) -> None: # input args - self.instruct = instruct - self.n_threads = n_threads - self.input_echo = input_echo - self.n_predict = n_predict - self.n_keep = n_keep - self.n_batch = n_batch - self.repeat_last_n = repeat_last_n - self.top_k=top_k - self.top_p=top_p - self.temp=temp - self.repeat_penalty=repeat_penalty - self.init_break = init_break + self.params = params + + if (self.params.perplexity): + raise NotImplementedError("""************ +please use the 'perplexity' tool for perplexity calculations +************""") + + if (self.params.embedding): + raise NotImplementedError("""************ +please use the 'embedding' tool for embedding calculations +************""") + + if (self.params.n_ctx > 2048): + print(f"""warning: model does not support \ +context sizes greater than 2048 tokens ({self.params.n_ctx} \ +specified) expect poor results""", file=sys.stderr) + + if (self.params.seed <= 0): + self.params.seed = int(time()) + + print(f"seed = {self.params.seed}", file=sys.stderr) + + if (self.params.random_prompt): + self.params.prompt = gpt_random_prompt(self.params.seed) # runtime args self.input_consumed = 0 self.embd = [] - self.embd_inp = [] self.n_past = 0 self.first_antiprompt = [] - self.remaining_tokens = self.n_predict - self.output_echo = input_echo + self.remaining_tokens = self.params.n_predict + self.output_echo = self.params.input_echo # model load self.lparams = llama_cpp.llama_context_default_params() - self.lparams.n_ctx = n_ctx - self.lparams.seed = seed - self.ctx = llama_cpp.llama_init_from_file(model.encode("utf8"), self.lparams) + self.lparams.n_ctx = self.params.n_ctx + self.lparams.n_parts = self.params.n_parts + self.lparams.seed = self.params.seed + self.lparams.memory_f16 = self.params.memory_f16 + self.lparams.use_mlock = self.params.use_mlock + + self.ctx = llama_cpp.llama_init_from_file(self.params.model.encode("utf8"), self.lparams) + if (self.ctx == 0): + raise RuntimeError(f"error: failed to load model '{self.params.model}'") + + print(file=sys.stderr) + print(f"system_info: n_threads = {self.params.n_threads} / {cpu_count()} \ +| {llama_cpp.llama_print_system_info().decode('utf8')}", file=sys.stderr) # determine the required inference memory per token: - tmp = [0, 1, 2, 3] - llama_cpp.llama_eval(self.ctx, (llama_cpp.c_int * len(tmp))(*tmp), len(tmp), 0, self.n_threads) - - # determine newline token - self.llama_token_newline = self._tokenize("\n", False) - self.inp_prefix = self._tokenize(instruct_inp_prefix) - self.inp_suffix = 
self._tokenize(instruct_inp_suffix, False) - - # add instruction as antiprompt - if (self.instruct): - self.first_antiprompt.append(self._tokenize(instruct_inp_prefix.strip(), False)) - - # primer feed - if (len(primer) > 0): - self.embd_inp += self._tokenize(primer) - - # number of tokens to keep when resetting context - if (self.n_keep < 0 or self.n_keep > len(self.embd_inp) or self.instruct): - self.n_keep = len(self.embd_inp) + if (self.params.mem_test): + tmp = [0, 1, 2, 3] + llama_cpp.llama_eval(self.ctx, (llama_cpp.c_int * len(tmp))(*tmp), len(tmp), 0, self.n_threads) + llama_cpp.llama_print_timings(self.ctx) + self.exit() + return # create internal context self.n_ctx = llama_cpp.llama_n_ctx(self.ctx) - self.last_n_tokens = [0]*self.n_ctx #TODO: deque doesnt support slices + + # Add a space in front of the first character to match OG llama tokenizer behavior + self.params.prompt = " " + self.params.prompt + + # tokenize the prompt + self.embd_inp = self._tokenize(self.params.prompt) + + if (len(self.embd_inp) > self.params.n_ctx - 4): + raise RuntimeError(f"error: prompt is too long ({len(self.embd_inp)} tokens, max {self.params.n_ctx - 4})") + + # number of tokens to keep when resetting context + if (self.params.n_keep < 0 or self.params.n_keep > len(self.embd_inp) or self.params.instruct): + self.params.n_keep = len(self.embd_inp) + + self.inp_prefix = self._tokenize(self.params.instruct_inp_prefix) + self.inp_suffix = self._tokenize(self.params.instruct_inp_suffix, False) + + # in instruct mode, we inject a prefix and a suffix to each input by the user + if (self.params.instruct): + self.params.interactive_start = True + self.first_antiprompt.append(self._tokenize(self.params.instruct_inp_prefix.strip(), False)) + + # enable interactive mode if reverse prompt or interactive start is specified + if (len(self.params.antiprompt) != 0 or self.params.interactive_start): + self.params.interactive = True + + # determine newline token + self.llama_token_newline = self._tokenize("\n", False) + + if (self.params.verbose_prompt): + print(f""" +prompt: '{self.params.prompt}' +number of tokens in prompt = {len(self.embd_inp)}""", file=sys.stderr) + + for i in range(len(self.embd_inp)): + print(f"{self.embd_inp[i]} -> '{llama_cpp.llama_token_to_str(self.ctx, self.embd_inp[i])}'", file=sys.stderr) + + if (self.params.n_keep > 0): + print("static prompt based on n_keep: '") + for i in range(self.params.n_keep): + print(llama_cpp.llama_token_to_str(self.ctx, self.embd_inp[i]), file=sys.stderr) + print("'", file=sys.stderr) + print(file=sys.stderr) + + if (self.params.interactive): + print("interactive mode on.", file=sys.stderr) + + if (len(self.params.antiprompt) > 0): + for antiprompt in self.params.antiprompt: + print(f"Reverse prompt: '{antiprompt}'", file=sys.stderr) + + if len(self.params.input_prefix) > 0: + print(f"Input prefix: '{self.params.input_prefix}'", file=sys.stderr) + + print(f"""sampling: temp = {self.params.temp},\ +top_k = {self.params.top_k},\ +top_p = {self.params.top_p},\ +repeat_last_n = {self.params.repeat_last_n},\ +repeat_penalty = {self.params.repeat_penalty} + +generate: n_ctx = {self.n_ctx}, \ +n_batch = {self.params.n_batch}, \ +n_predict = {self.params.n_predict}, \ +n_keep = {self.params.n_keep} +""", file=sys.stderr) # determine antiprompt tokens - for i in antiprompt: + for i in self.params.antiprompt: self.first_antiprompt.append(self._tokenize(i, False)) + self.last_n_tokens = [0]*self.n_ctx #TODO: deque doesnt support slices + + if (params.interactive): + 
print("""== Running in interactive mode. == + - Press Ctrl+C to interject at any time. + - Press Return to return control to LLaMa. + - If you want to submit another line, end your input in '\\'. + +""", file=sys.stderr) + self.set_color(CONSOLE_COLOR_PROMPT) + # tokenize a prompt def _tokenize(self, prompt, bos=True): _arr = (llama_cpp.llama_token * (len(prompt) + 1))() _n = llama_cpp.llama_tokenize(self.ctx, prompt.encode("utf8"), _arr, len(_arr), bos) return _arr[:_n] - # if an antiprompt is present def use_antiprompt(self): return len(self.first_antiprompt) > 0 + def set_color(self, c): + if (self.params.use_color): + print(c) + # generate tokens def generate(self): - while self.remaining_tokens > 0 or self.use_antiprompt(): + while self.remaining_tokens > 0 or self.params.interactive: # predict if len(self.embd) > 0: # infinite text generation via context swapping @@ -115,8 +195,8 @@ class LLaMAInteract: # - take the n_keep first tokens from the original prompt (via n_past) # - take half of the last (n_ctx - n_keep) tokens and recompute the logits in a batch if (self.n_past + len(self.embd) > self.n_ctx): - n_left = self.n_past - self.n_keep - self.n_past = self.n_keep + n_left = self.n_past - self.params.n_keep + self.n_past = self.params.n_keep # insert n_left/2 tokens at the start of embd from last_n_tokens _insert = self.last_n_tokens[ @@ -125,7 +205,7 @@ class LLaMAInteract: self.embd = _insert + self.embd if (llama_cpp.llama_eval( - self.ctx, (llama_cpp.llama_token * len(self.embd))(*self.embd), len(self.embd), self.n_past, self.n_threads + self.ctx, (llama_cpp.llama_token * len(self.embd))(*self.embd), len(self.embd), self.n_past, self.params.n_threads ) != 0): raise Exception("Failed to llama_eval!") @@ -133,24 +213,28 @@ class LLaMAInteract: self.embd = [] if len(self.embd_inp) <= self.input_consumed: # out of user input, sample next token - _arr = self.last_n_tokens[-min(self.repeat_last_n, self.n_past):] + + #TODO: self.params.ignore_eos + + _arr = self.last_n_tokens[-min(self.params.repeat_last_n, self.n_past):] id = llama_cpp.llama_sample_top_p_top_k( self.ctx, (llama_cpp.llama_token * len(_arr))(*_arr), len(_arr), - self.top_k, - self.top_p, - self.temp, - self.repeat_penalty, + self.params.top_k, + self.params.top_p, + self.params.temp, + self.params.repeat_penalty, ) self.last_n_tokens.pop(0) self.last_n_tokens.append(id) # replace end of text token with newline token when in interactive mode - if (id == llama_cpp.llama_token_eos() and self.use_antiprompt() and not self.instruct): + if (id == llama_cpp.llama_token_eos() and self.params.interactive and not self.params.instruct): id = self.llama_token_newline[0] - # tokenize and inject first reverse prompt - self.embd_inp += self.first_antiprompt[0] + if (self.use_antiprompt()): + # tokenize and inject first reverse prompt + self.embd_inp += self.first_antiprompt[0] # add it to the context self.embd.append(id) @@ -162,7 +246,7 @@ class LLaMAInteract: self.remaining_tokens -= 1 else: # output to console if input echo is on - self.output_echo = self.input_echo + self.output_echo = self.params.input_echo # some user input remains from prompt or interaction, forward it to processing while len(self.embd_inp) > self.input_consumed: @@ -170,7 +254,7 @@ class LLaMAInteract: self.last_n_tokens.pop(0) self.last_n_tokens.append(self.embd_inp[self.input_consumed]) self.input_consumed += 1 - if len(self.embd) >= self.n_batch: + if len(self.embd) >= self.params.n_batch: break # display tokens @@ -178,7 +262,11 @@ class LLaMAInteract: 
for id in self.embd: yield id - if (len(self.embd_inp) <= self.input_consumed): + # reset color to default if we there is no pending user input + if (self.params.input_echo and len(self.embd_inp) == self.input_consumed): + self.set_color(CONSOLE_COLOR_DEFAULT) + + if (self.params.interactive and len(self.embd_inp) <= self.input_consumed): # if antiprompt is present, stop if (self.use_antiprompt()): if True in [ @@ -188,26 +276,36 @@ class LLaMAInteract: break # if we are using instruction mode, and we have processed the initial prompt - if (self.init_break): + if (self.n_past > 0 and self.params.interactive_start): break - # if end of generation + # end of text token if len(self.embd) > 0 and self.embd[-1] == llama_cpp.llama_token_eos(): + if (not self.params.instruct): + for i in " [end of text]\n": + yield i break # respect n_predict even if antiprompt is present - if (self.use_antiprompt() and self.remaining_tokens <= 0 and self.n_predict != -1): - if not self.instruct: + if (self.params.interactive and self.remaining_tokens <= 0 and self.params.n_predict != -1): + # If we arent in instruction mode, fix the current generation by appending the antiprompt. + # Makes it so if chat ends prematurely you dont append the AI's text etc. + if not self.params.instruct: self.embd_inp += self.first_antiprompt[0] + self.n_remain = self.params.n_predict break - self.init_break = False + self.params.interactive_start = False def __enter__(self): return self def __exit__(self, type, value, tb): + self.exit() + + def exit(self): llama_cpp.llama_free(self.ctx) + self.set_color(CONSOLE_COLOR_DEFAULT) # return past text def past(self): @@ -216,18 +314,51 @@ class LLaMAInteract: # write input def input(self, prompt: str): - if (self.instruct and self.last_n_tokens[-len(self.inp_prefix):] != self.inp_prefix): + if (self.params.instruct and self.last_n_tokens[-len(self.inp_prefix):] != self.inp_prefix): self.embd_inp += self.inp_prefix self.embd_inp += self._tokenize(prompt) - if (self.instruct): + if (self.params.instruct): self.embd_inp += self.inp_suffix # write output def output(self): - self.remaining_tokens = self.n_predict + self.remaining_tokens = self.params.n_predict for id in self.generate(): yield llama_cpp.llama_token_to_str(self.ctx, id).decode("utf-8") + # read user input + def read_input(self): + out = "" + while (t := input()).endswith("\\"): + out += t[:-1] + "\n" + return out + t + "\n" + + # interactive mode + def interact(self): + for i in self.output(): + print(i,end="",flush=True) + self.params.input_echo = False + + while self.params.interactive: + self.set_color(CONSOLE_COLOR_USER_INPUT) + if (self.params.instruct): + print('\n> ', end="") + self.input(self.read_input()) + else: + print(self.params.input_prefix, end="") + self.input(f"{self.params.input_prefix}{self.read_input()}{self.params.output_postfix}") + print(self.params.output_postfix,end="") + self.set_color(CONSOLE_COLOR_DEFAULT) + + try: + for i in self.output(): + print(i,end="",flush=True) + except KeyboardInterrupt: + self.set_color(CONSOLE_COLOR_DEFAULT) + if not self.params.instruct: + print(self.params.fix_prefix,end="") + self.input(self.params.fix_prefix) + if __name__ == "__main__": from datetime import datetime @@ -252,41 +383,12 @@ The transcript only includes text, it does not include markup like HTML and Mark {USER_NAME}: Name a color. 
{AI_NAME}: Blue {USER_NAME}:""" + args = gpt_params_parse() + params = GptParams(args) - print("Loading model...") - with LLaMAInteract(prompt, - model="./models/30B/ggml-model-q4_0.bin", - n_ctx=2048, - antiprompt=[f"\n{USER_NAME}:"], - repeat_last_n=256, - n_predict=2048, - temp=0.7, top_p=0.5, top_k=40, repeat_penalty=1.17647 - ) as m: - print("Loaded model!") + if (args.file): + with open(args.file) as f: + params.prompt = f.read() - for i in m.output(): - print(i,end="",flush=True) - m.input_echo = False - - def inp(): - out = "" - while (t := input()).endswith("\\"): - out += t[:-1] + "\n" - return out + t + "\n" - - while True: - if (m.instruct): - print('\n> ', end="") - m.input(inp()) - else: - print(f" ", end="") - m.input(f" {inp()}{AI_NAME}:") - print(f"{AI_NAME}: ",end="") - - try: - for i in m.output(): - print(i,end="",flush=True) - except KeyboardInterrupt: - if not m.instruct: - print(f"\n{USER_NAME}:",end="") - m.input(f"\n{USER_NAME}:") + with LLaMAInteract() as m: + m.interact() From 10c757111786966285eca6db88e037b7952764db Mon Sep 17 00:00:00 2001 From: Mug <> Date: Thu, 6 Apr 2023 15:33:22 +0200 Subject: [PATCH 09/10] Fixed too many newlines, now onto args. Still needs shipping work so you could do "python -m llama_cpp.examples." etc. --- examples/low_level_api/low_level_api_chatllama_cpp.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/low_level_api/low_level_api_chatllama_cpp.py b/examples/low_level_api/low_level_api_chatllama_cpp.py index 5d2eadd..6aecb6d 100644 --- a/examples/low_level_api/low_level_api_chatllama_cpp.py +++ b/examples/low_level_api/low_level_api_chatllama_cpp.py @@ -183,7 +183,7 @@ n_keep = {self.params.n_keep} def set_color(self, c): if (self.params.use_color): - print(c) + print(c, end="") # generate tokens def generate(self): From 16fc5b5d2334fe023b36d94a32706878cb9b2fe7 Mon Sep 17 00:00:00 2001 From: Mug <> Date: Fri, 7 Apr 2023 13:32:19 +0200 Subject: [PATCH 10/10] More interoperability to the original llama.cpp, and arguments now work --- examples/__init__.py | 0 examples/low_level_api/__init__.py | 0 examples/{ => low_level_api}/common.py | 79 +++++++++++-------- ...llama_cpp.py => low_level_api_chat_cpp.py} | 19 +++-- 4 files changed, 55 insertions(+), 43 deletions(-) delete mode 100644 examples/__init__.py delete mode 100644 examples/low_level_api/__init__.py rename examples/{ => low_level_api}/common.py (54%) rename examples/low_level_api/{low_level_api_chatllama_cpp.py => low_level_api_chat_cpp.py} (98%) diff --git a/examples/__init__.py b/examples/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/examples/low_level_api/__init__.py b/examples/low_level_api/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/examples/common.py b/examples/low_level_api/common.py similarity index 54% rename from examples/common.py rename to examples/low_level_api/common.py index f80d995..1758a2d 100644 --- a/examples/common.py +++ b/examples/low_level_api/common.py @@ -26,9 +26,6 @@ class GptParams: model: str = "./models/llama-7B/ggml-model.bin" prompt: str = "" input_prefix: str = " " - fix_prefix: str = "" - output_postfix: str = "" - input_echo: bool = True, antiprompt: List[str] = field(default_factory=list) @@ -47,41 +44,57 @@ class GptParams: mem_test: bool = False verbose_prompt: bool = False + file: str = None + + # If chat ended prematurely, append this to the conversation to fix it. + # Set to "\nUser:" etc. 
+ # This is an alternative to input_prefix which always adds it, so it potentially duplicates "User:"" + fix_prefix: str = " " + output_postfix: str = "" + input_echo: bool = True, + # Default instructions for Alpaca # switch to "Human" and "Assistant" for Vicuna. - instruct_inp_prefix: str="\n\n### Instruction:\n\n", - instruct_inp_suffix: str="\n\n### Response:\n\n", + # TODO: TBD how they are gonna handle this upstream + instruct_inp_prefix: str="\n\n### Instruction:\n\n" + instruct_inp_suffix: str="\n\n### Response:\n\n" def gpt_params_parse(argv = None, params: Optional[GptParams] = None): if params is None: params = GptParams() - parser = argparse.ArgumentParser() - parser.add_argument("-h", "--help", action="store_true", help="show this help message and exit") - parser.add_argument("-s", "--seed", type=int, default=-1, help="",dest="seed") - parser.add_argument("-t", "--threads", type=int, default=1, help="",dest="n_threads") - parser.add_argument("-p", "--prompt", type=str, default="", help="",dest="prompt") - parser.add_argument("-f", "--file", type=str, default=None, help="") - parser.add_argument("-c", "--ctx_size", type=int, default=512, help="",dest="n_ctx") - parser.add_argument("--memory_f32", action="store_false", help="",dest="memory_f16") - parser.add_argument("--top_p", type=float, default=0.9, help="",dest="top_p") - parser.add_argument("--temp", type=float, default=1.0, help="",dest="temp") - parser.add_argument("--repeat_last_n", type=int, default=64, help="",dest="repeat_last_n") - parser.add_argument("--repeat_penalty", type=float, default=1.0, help="",dest="repeat_penalty") - parser.add_argument("-b", "--batch_size", type=int, default=8, help="",dest="n_batch") - parser.add_argument("--keep", type=int, default=0, help="",dest="n_keep") - parser.add_argument("-m", "--model", type=str, help="",dest="model") + parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument("-s", "--seed", type=int, default=-1, help="RNG seed (use random seed for <= 0)",dest="seed") + parser.add_argument("-t", "--threads", type=int, default=min(4, os.cpu_count() or 1), help="number of threads to use during computation",dest="n_threads") + parser.add_argument("-p", "--prompt", type=str, default="", help="initial prompt",dest="prompt") + parser.add_argument("-f", "--file", type=str, default=None, help="file containing initial prompt to load",dest="file") + parser.add_argument("-c", "--ctx_size", type=int, default=512, help="size of the prompt context",dest="n_ctx") + parser.add_argument("--memory_f32", action="store_false", help="use f32 instead of f16 for memory key+value",dest="memory_f16") + parser.add_argument("--top_p", type=float, default=0.95, help="top-p samplin",dest="top_p") + parser.add_argument("--top_k", type=int, default=40, help="top-k sampling",dest="top_k") + parser.add_argument("--temp", type=float, default=0.80, help="temperature",dest="temp") + parser.add_argument("--n_predict", type=int, default=128, help="number of model parts",dest="n_predict") + parser.add_argument("--repeat_last_n", type=int, default=64, help="last n tokens to consider for penalize ",dest="repeat_last_n") + parser.add_argument("--repeat_penalty", type=float, default=1.10, help="penalize repeat sequence of tokens",dest="repeat_penalty") + parser.add_argument("-b", "--batch_size", type=int, default=8, help="batch size for prompt processing",dest="n_batch") + parser.add_argument("--keep", type=int, default=0, help="number of tokens to keep from the 
initial prompt",dest="n_keep") + parser.add_argument("-m", "--model", type=str, default="./models/llama-7B/ggml-model.bin", help="model path",dest="model") parser.add_argument( "-i", "--interactive", action="store_true", help="run in interactive mode", dest="interactive" ) parser.add_argument("--embedding", action="store_true", help="", dest="embedding") - parser.add_argument("--interactive-start", action="store_true", help="", dest="interactive_start") + parser.add_argument( + "--interactive-start", + action="store_true", + help="run in interactive mode", + dest="interactive" + ) parser.add_argument( "--interactive-first", action="store_true", help="run in interactive mode and wait for input right away", - dest="interactive" + dest="interactive_start" ) parser.add_argument( "-ins", @@ -96,24 +109,24 @@ def gpt_params_parse(argv = None, params: Optional[GptParams] = None): help="colorise output to distinguish prompt and user input from generations", dest="use_color" ) - parser.add_argument("--mlock", action="store_true",dest="use_mlock") - parser.add_argument("--mtest", action="store_true",dest="mem_test") + parser.add_argument("--mlock", action="store_true",help="force system to keep model in RAM rather than swapping or compressing",dest="use_mlock") + parser.add_argument("--mtest", action="store_true",help="compute maximum memory usage",dest="mem_test") parser.add_argument( "-r", "--reverse-prompt", type=str, action='append', - help="run in interactive mode and poll user input upon seeing PROMPT (can be\nspecified more than once for multiple prompts).", + help="poll user input upon seeing PROMPT (can be\nspecified more than once for multiple prompts).", dest="antiprompt" ) - parser.add_argument("--perplexity", action="store_true", help="", dest="perplexity") - parser.add_argument("--ignore-eos", action="store_true", help="", dest="ignore_eos") - parser.add_argument("--n_parts", type=int, default=-1, help="", dest="n_parts") - parser.add_argument("--random-prompt", action="store_true", help="", dest="random_prompt") - parser.add_argument("--in-prefix", type=str, default=" ", help="", dest="input_prefix") - parser.add_argument("--fix-prefix", type=str, default=" ", help="", dest="fix_prefix") - parser.add_argument("--out-postfix", type=str, default="", help="", dest="output_postfix") - parser.add_argument("--input-noecho", action="store_false", help="", dest="input_echo") + parser.add_argument("--perplexity", action="store_true", help="compute perplexity over the prompt", dest="perplexity") + parser.add_argument("--ignore-eos", action="store_true", help="ignore end of stream token and continue generating", dest="ignore_eos") + parser.add_argument("--n_parts", type=int, default=-1, help="number of model parts", dest="n_parts") + parser.add_argument("--random-prompt", action="store_true", help="start with a randomized prompt.", dest="random_prompt") + parser.add_argument("--in-prefix", type=str, default="", help="string to prefix user inputs with", dest="input_prefix") + parser.add_argument("--fix-prefix", type=str, default="", help="append to input when generated n_predict tokens", dest="fix_prefix") + parser.add_argument("--out-postfix", type=str, default="", help="append to input", dest="output_postfix") + parser.add_argument("--input-noecho", action="store_false", help="dont output the input", dest="input_echo") args = parser.parse_args(argv) return args diff --git a/examples/low_level_api/low_level_api_chatllama_cpp.py b/examples/low_level_api/low_level_api_chat_cpp.py similarity index 
98% rename from examples/low_level_api/low_level_api_chatllama_cpp.py rename to examples/low_level_api/low_level_api_chat_cpp.py index 6aecb6d..f4d77d5 100644 --- a/examples/low_level_api/low_level_api_chatllama_cpp.py +++ b/examples/low_level_api/low_level_api_chat_cpp.py @@ -6,8 +6,6 @@ Quirks: * The first antiprompt should be the userprompt like "\nUser:", because its added when n_predict is reached (aka generation ended prematurely) * n_predict can be set to -1 for unlimited length responses (or just a really high value) - * It's always in interactive mode, generation ends either by reaching an antiprompt - or running out of n_predict. * Instruction mode adds its own antiprompt. You should also still be feeding the model with a "primer" prompt that shows it the expected format. @@ -59,7 +57,6 @@ specified) expect poor results""", file=sys.stderr) # runtime args self.input_consumed = 0 - self.embd = [] self.n_past = 0 self.first_antiprompt = [] self.remaining_tokens = self.params.n_predict @@ -74,7 +71,7 @@ specified) expect poor results""", file=sys.stderr) self.lparams.use_mlock = self.params.use_mlock self.ctx = llama_cpp.llama_init_from_file(self.params.model.encode("utf8"), self.lparams) - if (self.ctx == 0): + if (not self.ctx): raise RuntimeError(f"error: failed to load model '{self.params.model}'") print(file=sys.stderr) @@ -95,7 +92,13 @@ specified) expect poor results""", file=sys.stderr) # Add a space in front of the first character to match OG llama tokenizer behavior self.params.prompt = " " + self.params.prompt + # Load prompt file + if (self.params.file): + with open(self.params.file) as f: + self.params.prompt = f.read() + # tokenize the prompt + self.embd = [] self.embd_inp = self._tokenize(self.params.prompt) if (len(self.embd_inp) > self.params.n_ctx - 4): @@ -384,11 +387,7 @@ The transcript only includes text, it does not include markup like HTML and Mark {AI_NAME}: Blue {USER_NAME}:""" args = gpt_params_parse() - params = GptParams(args) + params = GptParams(**vars(args)) - if (args.file): - with open(args.file) as f: - params.prompt = f.read() - - with LLaMAInteract() as m: + with LLaMAInteract(params) as m: m.interact()
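
The trickiest part of the example above is the context swap in generate(), added in PATCH 01 with a "Still untested" TODO and corrected in PATCH 03. Restated over plain Python lists, and purely as an illustration (swap_context is an invented name, not part of the patches), the corrected logic is:

def swap_context(n_past, n_keep, n_ctx, last_n_tokens, embd):
    # If the pending batch no longer fits in the context window, keep the
    # first n_keep prompt tokens (already evaluated, tracked via n_past)
    # and re-insert half of the last (n_ctx - n_keep) tokens in front of
    # the batch so the next llama_eval call recomputes them.
    if n_past + len(embd) <= n_ctx:
        return n_past, embd  # still fits, nothing to swap
    n_left = n_past - n_keep
    # last_n_tokens is a rolling window of length n_ctx whose tail matches
    # the tokens currently in embd; take the n_left//2 tokens before them.
    _insert = last_n_tokens[n_ctx - n_left // 2 - len(embd):-len(embd)]
    return n_keep, _insert + embd

The caller assigns the results back to self.n_past and self.embd, exactly as the in-class version does.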
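
The antiprompt check that PATCH 07 rewrites as "True in [...]" is simply asking whether the rolling token window currently ends with any reverse-prompt token sequence. An equivalent standalone sketch (ends_with_antiprompt is an invented helper name):

def ends_with_antiprompt(last_n_tokens, first_antiprompt):
    # first_antiprompt holds one token-id list per reverse prompt,
    # plus the tokenized instruction prefix when instruct mode is on
    return any(
        tokens == last_n_tokens[-len(tokens):]
        for tokens in first_antiprompt
        if len(tokens) > 0
    )

It would be called with the same lists the example builds via _tokenize(), e.g. ends_with_antiprompt(self.last_n_tokens, self.first_antiprompt).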
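
A recurring idiom throughout the patches is converting a Python list of token ids into a ctypes array before each call into the bindings, e.g. (llama_cpp.llama_token * len(_arr))(*_arr). A small helper makes the pattern explicit; the helper and the commented sampling call below are illustrative only, with placeholder variables rather than the example's defaults:

import llama_cpp

def to_c_tokens(tokens):
    # allocate a C array of llama_token and copy the Python ints into it
    return (llama_cpp.llama_token * len(tokens))(*tokens)

# mirroring the sampling call in generate():
# _arr = last_n_tokens[-repeat_last_n:]
# next_id = llama_cpp.llama_sample_top_p_top_k(
#     ctx, to_c_tokens(_arr), len(_arr),
#     top_k, top_p, temp, repeat_penalty)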