Merge pull request #4143 from ollama/mxyng/final-response
omit prompt and generate settings from final response
commit aed545872d
1 changed file with 0 additions and 2 deletions

llm/ext_server/server.cpp (vendored): 2 deletions
@@ -1186,8 +1186,6 @@ struct llama_server_context
             {"model", params.model_alias},
             {"tokens_predicted", slot.n_decoded},
             {"tokens_evaluated", slot.n_prompt_tokens},
-            {"generation_settings", get_formated_generation(slot)},
-            {"prompt", slot.prompt},
             {"truncated", slot.truncated},
             {"stopped_eos", slot.stopped_eos},
             {"stopped_word", slot.stopped_word},
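For context, the lines above are entries of the JSON object the server emits as its final response; the change drops the "prompt" and "generation_settings" entries from it. Below is a minimal sketch (not the vendored server code) of what that final object looks like after the change, assuming nlohmann::json, which llama.cpp's server uses; the sample values stand in for the real slot fields and are made up for illustration.

#include <iostream>
#include <nlohmann/json.hpp>

using json = nlohmann::json;

int main() {
    // Final-response object with the fields kept by this commit.
    // The real server fills these from params.model_alias and the slot state.
    json res = json{
        {"model", "llama3"},        // params.model_alias (sample value)
        {"tokens_predicted", 128},  // slot.n_decoded (sample value)
        {"tokens_evaluated", 42},   // slot.n_prompt_tokens (sample value)
        // "generation_settings" and "prompt" are no longer included here,
        // so the final response omits the full prompt text and sampler config.
        {"truncated", false},       // slot.truncated
        {"stopped_eos", true},      // slot.stopped_eos
        {"stopped_word", false}     // slot.stopped_word
    };
    std::cout << res.dump(2) << std::endl;
    return 0;
}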