omit prompt and generation settings from final response

Michael Yang 2024-05-03 16:11:49 -07:00
parent 52663284cf
commit 44869c59d6


@@ -1186,8 +1186,6 @@ struct llama_server_context
     {"model", params.model_alias},
     {"tokens_predicted", slot.n_decoded},
     {"tokens_evaluated", slot.n_prompt_tokens},
-    {"generation_settings", get_formated_generation(slot)},
-    {"prompt", slot.prompt},
     {"truncated", slot.truncated},
     {"stopped_eos", slot.stopped_eos},
     {"stopped_word", slot.stopped_word},