fix: adding missing args in create_completion for functionary chat handler (#1430)
parent 07966b9ba7
commit 903b28adf5
1 changed file with 7 additions and 7 deletions
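Each create_completion call site in functionary_v1_v2_chat_handler shown below previously passed only the stop tokens; it now also forwards the current prompt and the active grammar explicitly.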
@@ -2322,7 +2322,7 @@ def functionary_v1_v2_chat_handler(
 prompt = prompt
 stops = ["\n", END_ASSISTANT_TOKEN]

-completion = create_completion(stop=stops)
+completion = create_completion(prompt=prompt, stop=stops, grammar=grammar)
 completion_text = completion["choices"][0]["text"]
 completion_tokens += completion["usage"]["completion_tokens"]

@@ -2349,7 +2349,7 @@ def functionary_v1_v2_chat_handler(
     completion_text.split(START_FUNCTION_CALL_TOKEN)[-1][:-1].strip()
 )
 grammar = get_grammar(function_calls[-1])
-completion = create_completion(stop=END_FUNCTION_CALL_TOKEN)
+completion = create_completion(prompt=prompt, stop=END_FUNCTION_CALL_TOKEN, grammar=grammar)
 completion_tokens += completion["usage"]["completion_tokens"]
 function_bodies.append(completion["choices"][0]["text"].strip())
 # If the prompt involves a function call, just append generated parameters to function_bodies
@@ -2363,7 +2363,7 @@ def functionary_v1_v2_chat_handler(
 function_calls.append(function_call)
 grammar = get_grammar(function_call)
 stops = [STOP_TOKEN, FROM_TOKEN]
-completion = create_completion(stop=stops)
+completion = create_completion(prompt=prompt, stop=stops, grammar=grammar)
 completion_text = completion["choices"][0]["text"]
 completion_tokens += completion["usage"]["completion_tokens"]
 function_bodies.append(completion_text.strip())
@@ -2373,7 +2373,7 @@ def functionary_v1_v2_chat_handler(
 # Generate function name first
 grammar = None
 stops = CONTENT_TOKEN
-completion = create_completion(stop=stops)
+completion = create_completion(prompt=prompt, stop=stops, grammar=grammar)
 completion_text = completion["choices"][0]["text"]
 completion_tokens += completion["usage"]["completion_tokens"]
 function_name = completion_text.strip()
@@ -2386,7 +2386,7 @@ def functionary_v1_v2_chat_handler(
 grammar = get_grammar(function_call)
 # Generate content
 stops = [RECIPIENT_TOKEN, STOP_TOKEN]
-completion = create_completion(stop=stops)
+completion = create_completion(prompt=prompt, stop=stops, grammar=grammar)
 completion_text = completion["choices"][0]["text"]
 completion_tokens += completion["usage"]["completion_tokens"]
 if function_name == "all":
@@ -2413,7 +2413,7 @@ def functionary_v1_v2_chat_handler(
 # Check whether the model wants to generate another turn
 prompt += completion_text.strip()
 grammar = None
-completion = create_completion(stop=stops)
+completion = create_completion(prompt=prompt, stop=stops, grammar=grammar)
 completion_tokens += completion["usage"]["completion_tokens"]
 if "<|from|> assistant" in completion["choices"][0]["text"] or "<|from|>assistant" in completion["choices"][0]["text"]:
     prompt += "\n<|from|>assistant\n<|recipient|>"
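A minimal sketch of the call pattern being fixed, assuming the handler's local create_completion helper requires prompt, stop, and grammar; the stub and the values below are illustrative, not the library's actual implementation:

from typing import List, Optional

def create_completion(prompt: str, stop: List[str], grammar: Optional[object] = None) -> dict:
    # Stand-in for the handler's local helper, which forwards these arguments
    # to the underlying model call and returns a completion dict.
    return {"choices": [{"text": ""}], "usage": {"completion_tokens": 0}}

prompt = "<|from|>user\n<|recipient|>all\n<|content|>hi"
grammar = None
stops = ["\n", "<|stop|>"]

# Before this commit the call sites read create_completion(stop=stops), which omits
# the required prompt argument and would raise TypeError with a signature like the
# stub above; the hunks above now pass all three arguments, so each generation step
# uses the prompt and grammar built up so far.
completion = create_completion(prompt=prompt, stop=stops, grammar=grammar)
completion_tokens = completion["usage"]["completion_tokens"]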