From 978b6daf9313a11367d0a9393226379173fdb688 Mon Sep 17 00:00:00 2001
From: Lucas Doyle
Date: Sat, 29 Apr 2023 14:37:36 -0700
Subject: [PATCH] llama_cpp server: add some more information to fields for completions

---
 llama_cpp/server/app.py | 70 ++++++++++++++++++++++++++++++++++-------
 1 file changed, 59 insertions(+), 11 deletions(-)

diff --git a/llama_cpp/server/app.py b/llama_cpp/server/app.py
index e1045af..e168485 100644
--- a/llama_cpp/server/app.py
+++ b/llama_cpp/server/app.py
@@ -71,22 +71,70 @@ model_field = Field(
 )
 
 class CreateCompletionRequest(BaseModel):
-    prompt: Union[str, List[str]]
-    suffix: Optional[str] = Field(None)
-    max_tokens: int = 16
-    temperature: float = 0.8
-    top_p: float = 0.95
-    echo: bool = False
-    stop: Optional[List[str]] = []
-    stream: bool = False
-    logprobs: Optional[int] = Field(None)
+    prompt: Union[str, List[str]] = Field(
+        default="",
+        description="The prompt to generate completions for."
+    )
+    suffix: Optional[str] = Field(
+        default=None,
+        description="A suffix to append to the generated text. If None, no suffix is appended. Useful for chatbots."
+    )
+    max_tokens: int = Field(
+        default=16,
+        ge=1,
+        le=2048,
+        description="The maximum number of tokens to generate."
+    )
+    temperature: float = Field(
+        default=0.8,
+        ge=0.0,
+        le=2.0,
+        description="Adjust the randomness of the generated text.\n\n" +
+        "Temperature is a hyperparameter that controls the randomness of the generated text. It affects the probability distribution of the model's output tokens. A higher temperature (e.g., 1.5) makes the output more random and creative, while a lower temperature (e.g., 0.5) makes the output more focused, deterministic, and conservative. The default value is 0.8, which provides a balance between randomness and determinism. At the extreme, a temperature of 0 will always pick the most likely next token, leading to identical outputs in each run."
+    )
+    top_p: float = Field(
+        default=0.95,
+        ge=0.0,
+        le=1.0,
+        description="Limit the next token selection to a subset of tokens with a cumulative probability above a threshold P.\n\n" +
+        "Top-p sampling, also known as nucleus sampling, is another text generation method that selects the next token from a subset of tokens that together have a cumulative probability of at least p. This method provides a balance between diversity and quality by considering both the probabilities of tokens and the number of tokens to sample from. A higher value for top_p (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text."
+    )
+    echo: bool = Field(
+        default=False,
+        description="Whether to echo the prompt in the generated text. Useful for chatbots."
+    )
+    stop: Optional[List[str]] = Field(
+        default=None,
+        description="A list of tokens at which to stop generation. If None, no stop tokens are used."
+    )
+    stream: bool = Field(
+        default=False,
+        description="Whether to stream the results as they are generated. Useful for chatbots."
+    )
+    logprobs: Optional[int] = Field(
+        default=None,
+        ge=0,
+        description="The number of logprobs to generate. If None, no logprobs are generated."
+    )
+
+    # ignored, but marked as required for the sake of compatibility with openai's api
     model: str = model_field
 
     # llama.cpp specific parameters
-    top_k: int = 40
-    repeat_penalty: float = 1.1
+    top_k: int = Field(
+        default=40,
+        ge=0,
+        description="Limit the next token selection to the K most probable tokens.\n\n" +
+        "Top-k sampling is a text generation method that selects the next token only from the top k most likely tokens predicted by the model. It helps reduce the risk of generating low-probability or nonsensical tokens, but it may also limit the diversity of the output. A higher value for top_k (e.g., 100) will consider more tokens and lead to more diverse text, while a lower value (e.g., 10) will focus on the most probable tokens and generate more conservative text."
+    )
+    repeat_penalty: float = Field(
+        default=1.1,
+        ge=0.0,
+        description="A penalty applied to each token that is already generated. This helps prevent the model from repeating itself.\n\n" +
+        "Repeat penalty is a hyperparameter used to penalize the repetition of token sequences during text generation. It helps prevent the model from generating repetitive or monotonous text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient."
+    )
 
     class Config:
         schema_extra = {
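
Beyond documentation, the `ge`/`le` bounds added in this patch give the server request validation for free: Pydantic rejects out-of-range values before they ever reach llama.cpp, and FastAPI surfaces the failure as a 422 response. Below is a minimal sketch of that behavior. `CompletionRequestSketch` is a hypothetical, trimmed-down stand-in for `CreateCompletionRequest` (only the bounds mirror the patch), and Pydantic v1 is assumed, matching the `class Config`/`schema_extra` style above.

```python
# Hypothetical sketch, not part of the patch: demonstrates the ge/le
# validation that the Field(...) bounds above provide (Pydantic v1 assumed).
from pydantic import BaseModel, Field, ValidationError


class CompletionRequestSketch(BaseModel):
    # Trimmed-down stand-in for CreateCompletionRequest; bounds mirror the patch.
    max_tokens: int = Field(default=16, ge=1, le=2048)
    temperature: float = Field(default=0.8, ge=0.0, le=2.0)
    top_p: float = Field(default=0.95, ge=0.0, le=1.0)


ok = CompletionRequestSketch(temperature=0.2)
print(ok.dict())  # unspecified fields take their defaults: max_tokens=16, top_p=0.95

try:
    CompletionRequestSketch(temperature=3.5)  # violates le=2.0
except ValidationError as err:
    print(err)  # FastAPI would surface this as an HTTP 422 response
```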
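
For readers unfamiliar with the sampling parameters these descriptions document, the sketch below illustrates on a toy distribution how `temperature`, `top_k`, `top_p`, and `repeat_penalty` shape next-token selection. This is illustrative only, not llama.cpp's sampler (the real one is implemented in C++ and differs in detail); every name in it is invented for the example.

```python
# Illustrative-only sketch of temperature / top-k / top-p / repeat-penalty
# sampling; assumes nothing about llama.cpp's actual implementation.
import math
import random


def sample_next(logits, prev_tokens=(), temperature=0.8, top_k=40,
                top_p=0.95, repeat_penalty=1.1):
    """Pick one token id from `logits` (one float per vocabulary entry)."""
    logits = list(logits)

    # Repeat penalty: make already-emitted tokens less likely. The classic
    # scheme divides positive logits and multiplies negative ones.
    for t in set(prev_tokens):
        logits[t] = logits[t] / repeat_penalty if logits[t] > 0 else logits[t] * repeat_penalty

    # Temperature: values below 1.0 sharpen the distribution, above 1.0 flatten it.
    scaled = [l / max(temperature, 1e-6) for l in logits]

    # Softmax, shifted by the max for numerical stability.
    m = max(scaled)
    exps = [math.exp(l - m) for l in scaled]
    total = sum(exps)
    probs = [e / total for e in exps]

    # Top-k: only the k most probable token ids survive.
    ranked = sorted(range(len(probs)), key=probs.__getitem__, reverse=True)[:top_k]

    # Top-p (nucleus): keep the smallest prefix of those whose cumulative
    # mass reaches top_p.
    kept, mass = [], 0.0
    for i in ranked:
        kept.append(i)
        mass += probs[i]
        if mass >= top_p:
            break

    # Renormalize over the survivors and draw one token id.
    norm = sum(probs[i] for i in kept)
    return random.choices(kept, weights=[probs[i] / norm for i in kept])[0]


# Toy 5-token vocabulary; token 0 was generated earlier, so it is penalized.
print(sample_next([2.0, 1.5, 0.3, -1.0, -2.5], prev_tokens=[0],
                  temperature=0.8, top_k=3, top_p=0.9))
```

As the `temperature` description notes, driving temperature toward 0 makes the scaled logits so peaked that this degenerates into greedy decoding, always returning the most likely token.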