fix: adding missing args in create_completion for functionary chat handler (#1430)

Author: Sarunas Kalade, 2024-05-08 07:21:27 +01:00 (committed by GitHub)
Parent: 07966b9ba7
Commit: 903b28adf5


@@ -2322,7 +2322,7 @@ def functionary_v1_v2_chat_handler(
prompt = prompt
stops = ["\n", END_ASSISTANT_TOKEN]
- completion = create_completion(stop=stops)
+ completion = create_completion(prompt=prompt, stop=stops, grammar=grammar)
completion_text = completion["choices"][0]["text"]
completion_tokens += completion["usage"]["completion_tokens"]
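
Every hunk in this commit is the same fix: the handler's local create_completion helper takes the prompt, stop tokens, and grammar explicitly, and these call sites were still using the old single-argument form. A minimal sketch of the assumed helper shape, with names and extra sampling parameters that are illustrative rather than the exact code in the repository:

# Sketch only: the kind of local helper the updated call sites assume.
# make_create_completion and its parameters are hypothetical names.
def make_create_completion(llama, temperature=0.0, max_tokens=None):
    def create_completion(prompt, stop, grammar):
        # Forward whatever prompt/grammar is live at the call site; the
        # handler rebinds both between calls, so they must be explicit
        # arguments rather than values captured once up front.
        return llama.create_completion(
            prompt=prompt,
            stop=stop,
            grammar=grammar,
            temperature=temperature,
            max_tokens=max_tokens,
            stream=False,
        )
    return create_completion

Passing prompt explicitly matters most in the multi-turn branch further down, where the prompt is extended with each generated chunk before the next call.
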
@@ -2349,7 +2349,7 @@ def functionary_v1_v2_chat_handler(
completion_text.split(START_FUNCTION_CALL_TOKEN)[-1][:-1].strip()
)
grammar = get_grammar(function_calls[-1])
- completion = create_completion(stop=END_FUNCTION_CALL_TOKEN)
+ completion = create_completion(prompt=prompt, stop=END_FUNCTION_CALL_TOKEN, grammar=grammar)
completion_tokens += completion["usage"]["completion_tokens"]
function_bodies.append(completion["choices"][0]["text"].strip())
# If the prompt involves a function call, just append generated parameters to function_bodies
@@ -2363,7 +2363,7 @@ def functionary_v1_v2_chat_handler(
function_calls.append(function_call)
grammar = get_grammar(function_call)
stops = [STOP_TOKEN, FROM_TOKEN]
- completion = create_completion(stop=stops)
+ completion = create_completion(prompt=prompt, stop=stops, grammar=grammar)
completion_text = completion["choices"][0]["text"]
completion_tokens += completion["usage"]["completion_tokens"]
function_bodies.append(completion_text.strip())
@@ -2373,7 +2373,7 @@ def functionary_v1_v2_chat_handler(
# Generate function name first
grammar = None
stops = CONTENT_TOKEN
- completion = create_completion(stop=stops)
+ completion = create_completion(prompt=prompt, stop=stops, grammar=grammar)
completion_text = completion["choices"][0]["text"]
completion_tokens += completion["usage"]["completion_tokens"]
function_name = completion_text.strip()
@@ -2386,7 +2386,7 @@ def functionary_v1_v2_chat_handler(
grammar = get_grammar(function_call)
# Generate content
stops = [RECIPIENT_TOKEN, STOP_TOKEN]
- completion = create_completion(stop=stops)
+ completion = create_completion(prompt=prompt, stop=stops, grammar=grammar)
completion_text = completion["choices"][0]["text"]
completion_tokens += completion["usage"]["completion_tokens"]
if function_name == "all":
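
The two hunks above cover the v2 path where the model first names a recipient function and then produces its arguments. A condensed sketch of that control flow; create_completion and get_grammar stand in for the handler's local helpers, and the real handler also extends the prompt between the two steps:

# Condensed sketch of the name-then-arguments flow; the helpers passed in
# are stand-ins for the handler's local create_completion and get_grammar.
def generate_turn(create_completion, get_grammar, prompt,
                  CONTENT_TOKEN, RECIPIENT_TOKEN, STOP_TOKEN):
    # Step 1: generate the recipient/function name unconstrained, stopping
    # at the content marker.
    name_out = create_completion(prompt=prompt, stop=CONTENT_TOKEN, grammar=None)
    function_name = name_out["choices"][0]["text"].strip()

    # Step 2: "all" means plain assistant content (no grammar); otherwise
    # constrain the arguments with a grammar built for the named function.
    grammar = None if function_name == "all" else get_grammar(function_name)
    body_out = create_completion(
        prompt=prompt, stop=[RECIPIENT_TOKEN, STOP_TOKEN], grammar=grammar
    )
    return function_name, body_out["choices"][0]["text"]
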
@@ -2413,7 +2413,7 @@ def functionary_v1_v2_chat_handler(
# Check whether the model wants to generate another turn
prompt += completion_text.strip()
grammar = None
- completion = create_completion(stop=stops)
+ completion = create_completion(prompt=prompt, stop=stops, grammar=grammar)
completion_tokens += completion["usage"]["completion_tokens"]
if "<|from|> assistant" in completion["choices"][0]["text"] or "<|from|>assistant" in completion["choices"][0]["text"]:
prompt += "\n<|from|>assistant\n<|recipient|>"
@@ -3564,4 +3564,4 @@ def chatml_function_calling(
},
}
- raise ValueError("Automatic streaming tool choice is not supported")
+ raise ValueError("Automatic streaming tool choice is not supported")
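
For reference, the code paths touched here run when tools are supplied through the high-level chat API with a functionary chat format, roughly as below; the model path and tool schema are placeholders, and the library documentation should be checked for the recommended tokenizer setup for functionary models:

from llama_cpp import Llama

# Placeholder model path; a functionary-v2 GGUF model is assumed.
llm = Llama(
    model_path="./functionary-small-v2.2.q4_0.gguf",
    chat_format="functionary-v2",
    n_ctx=4096,
)

response = llm.create_chat_completion(
    messages=[{"role": "user", "content": "What's the weather in Paris?"}],
    tools=[{
        "type": "function",
        "function": {
            "name": "get_weather",  # hypothetical tool, for illustration only
            "description": "Get the current weather for a city",
            "parameters": {
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
            },
        },
    }],
    tool_choice="auto",
)
print(response["choices"][0]["message"])
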