add templates to prompt command

Bruce MacDonald 2023-06-26 13:41:16 -04:00
parent 3ca8f72327
commit d34985b9df
3 changed files with 60 additions and 0 deletions

.gitignore | 1 +
@@ -5,3 +5,4 @@
 *.spec
 build
 dist
+__pycache__

@@ -5,6 +5,7 @@ import click
 from llama_cpp import Llama
 from flask import Flask, Response, stream_with_context, request
 from flask_cors import CORS
+from template import template

 app = Flask(__name__)
 CORS(app)  # enable CORS for all routes
@@ -124,6 +125,7 @@ def generate(model, prompt):
     if prompt == "":
         prompt = input("Prompt: ")
     output = ""
+    prompt = template(model, prompt)
     for generated in query(model, prompt):
         generated_json = json.loads(generated)
         text = generated_json["choices"][0]["text"]

template.py (new file) | 57 +

@@ -0,0 +1,57 @@
from difflib import SequenceMatcher

model_prompts = {
"alpaca": """Below is an instruction that describes a task. Write a response that appropriately completes the request.
### Instruction:
{prompt}
### Response:
""",
"oasst": "<|prompter|>{prompt}<|endoftext|><|assistant|>",
"vicuna": """A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.
USER: {prompt}
ASSISTANT:""",
"hermes": """### Instruction:
{prompt}
### Response:
""",
"gpt4": """### Instruction:
{prompt}
### Response:
""",
"qlora": """### Human: {prompt}
### Assistant:""",
"tulu": """<|user|>
{prompt}
<|assistant|>
(include newline)""",
"wizardlm-7b": """{prompt}
### Response:""",
"wizardlm-13b": """{prompt}
### Response:""",
"wizardlm-30b": """{prompt}
### Response:""",
}
def template(model, prompt):
    max_ratio = 0
    closest_key = ""
    model_name = model.lower()

    # Find the specialized prompt with the closest name match
    for key in model_prompts.keys():
        ratio = SequenceMatcher(None, model_name, key).ratio()
        if ratio > max_ratio:
            max_ratio = ratio
            closest_key = key

    # Format the prompt with the closest matching template, falling back
    # to the raw prompt if no key shares any characters with the model name
    p = model_prompts.get(closest_key)
    if p is None:
        return prompt
    return p.format(prompt=prompt)
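
For reference, a minimal usage sketch of the fuzzy matching above, assuming template.py is importable; the model names are hypothetical examples, not files shipped with this commit:

from template import template

# An exact key such as "wizardlm-13b" matches with ratio 1.0
print(template("WizardLM-13B", "What is the capital of France?"))

# A quantized filename still resolves to the closest key:
# SequenceMatcher(None, "alpaca-7b-q4_0", "alpaca").ratio() is 0.6,
# higher than for any other key, so the alpaca template is chosen
print(template("alpaca-7b-q4_0", "What is the capital of France?"))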