diff --git a/llama_cpp/server/app.py b/llama_cpp/server/app.py
index e168485..ec5dbd3 100644
--- a/llama_cpp/server/app.py
+++ b/llama_cpp/server/app.py
@@ -70,6 +70,55 @@ model_field = Field(
     description="The model to use for generating completions."
 )

+max_tokens_field = Field(
+    default=16,
+    ge=1,
+    le=2048,
+    description="The maximum number of tokens to generate."
+)
+
+temperature_field = Field(
+    default=0.8,
+    ge=0.0,
+    le=2.0,
+    description="Adjust the randomness of the generated text.\n\n" +
+    "Temperature is a hyperparameter that controls the randomness of the generated text. It affects the probability distribution of the model's output tokens. A higher temperature (e.g., 1.5) makes the output more random and creative, while a lower temperature (e.g., 0.5) makes the output more focused, deterministic, and conservative. The default value is 0.8, which provides a balance between randomness and determinism. At the extreme, a temperature of 0 will always pick the most likely next token, leading to identical outputs in each run."
+)
+
+top_p_field = Field(
+    default=0.95,
+    ge=0.0,
+    le=1.0,
+    description="Limit the next token selection to a subset of tokens with a cumulative probability above a threshold P.\n\n" +
+    "Top-p sampling, also known as nucleus sampling, is another text generation method that selects the next token from a subset of tokens that together have a cumulative probability of at least p. This method provides a balance between diversity and quality by considering both the probabilities of tokens and the number of tokens to sample from. A higher value for top_p (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text."
+)
+
+stop_field = Field(
+    default=None,
+    description="A list of tokens at which to stop generation. If None, no stop tokens are used."
+)
+
+stream_field = Field(
+    default=False,
+    description="Whether to stream the results as they are generated. Useful for chatbots."
+)
+
+top_k_field = Field(
+    default=40,
+    ge=0,
+    description="Limit the next token selection to the K most probable tokens.\n\n" +
+    "Top-k sampling is a text generation method that selects the next token only from the top k most likely tokens predicted by the model. It helps reduce the risk of generating low-probability or nonsensical tokens, but it may also limit the diversity of the output. A higher value for top_k (e.g., 100) will consider more tokens and lead to more diverse text, while a lower value (e.g., 10) will focus on the most probable tokens and generate more conservative text."
+)
+
+repeat_penalty_field = Field(
+    default=1.0,
+    ge=0.0,
+    description="A penalty applied to each token that is already generated. This helps prevent the model from repeating itself.\n\n" +
+    "Repeat penalty is a hyperparameter used to penalize the repetition of token sequences during text generation. It helps prevent the model from generating repetitive or monotonous text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient."
+)
+
+
+
 class CreateCompletionRequest(BaseModel):
     prompt: Union[str, List[str]] = Field(
         default="",
@@ -79,62 +128,27 @@ class CreateCompletionRequest(BaseModel):
         default=None,
         description="A suffix to append to the generated text. If None, no suffix is appended. Useful for chatbots."
     )
-    max_tokens: int = Field(
-        default=16,
-        ge=1,
-        le=2048,
-        description="The maximum number of tokens to generate."
-    )
-    temperature: float = Field(
-        default=0.8,
-        ge=0.0,
-        le=2.0,
-        description="Adjust the randomness of the generated text.\n\n" +
-        "Temperature is a hyperparameter that controls the randomness of the generated text. It affects the probability distribution of the model's output tokens. A higher temperature (e.g., 1.5) makes the output more random and creative, while a lower temperature (e.g., 0.5) makes the output more focused, deterministic, and conservative. The default value is 0.8, which provides a balance between randomness and determinism. At the extreme, a temperature of 0 will always pick the most likely next token, leading to identical outputs in each run."
-    )
-    top_p: float = Field(
-        default=0.95,
-        ge=0.0,
-        le=1.0,
-        description="Limit the next token selection to a subset of tokens with a cumulative probability above a threshold P.\n\n" +
-        "Top-p sampling, also known as nucleus sampling, is another text generation method that selects the next token from a subset of tokens that together have a cumulative probability of at least p. This method provides a balance between diversity and quality by considering both the probabilities of tokens and the number of tokens to sample from. A higher value for top_p (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text."
-    )
+    max_tokens: int = max_tokens_field
+    temperature: float = temperature_field
+    top_p: float = top_p_field
     echo: bool = Field(
         default=False,
         description="Whether to echo the prompt in the generated text. Useful for chatbots."
     )
-    stop: Optional[List[str]] = Field(
-        default=None,
-        description="A list of tokens at which to stop generation. If None, no stop tokens are used."
-    )
-    stream: bool = Field(
-        default=False,
-        description="Whether to stream the results as they are generated. Useful for chatbots."
-    )
+    stop: Optional[List[str]] = stop_field
+    stream: bool = stream_field
     logprobs: Optional[int] = Field(
         default=None,
         ge=0,
         description="The number of logprobs to generate. If None, no logprobs are generated."
     )

-
-
     # ignored, but marked as required for the sake of compatibility with openai's api
     model: str = model_field

     # llama.cpp specific parameters
-    top_k: int = Field(
-        default=40,
-        ge=0,
-        description="Limit the next token selection to the K most probable tokens.\n\n" +
-        "Top-k sampling is a text generation method that selects the next token only from the top k most likely tokens predicted by the model. It helps reduce the risk of generating low-probability or nonsensical tokens, but it may also limit the diversity of the output. A higher value for top_k (e.g., 100) will consider more tokens and lead to more diverse text, while a lower value (e.g., 10) will focus on the most probable tokens and generate more conservative text."
-    )
-    repeat_penalty: float = Field(
-        default=1.0,
-        ge=0.0,
-        description="A penalty applied to each token that is already generated. This helps prevent the model from repeating itself.\n\n" +
-        "Repeat penalty is a hyperparameter used to penalize the repetition of token sequences during text generation. It helps prevent the model from generating repetitive or monotonous text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient."
-    )
+    top_k: int = top_k_field
+    repeat_penalty: float = repeat_penalty_field

     class Config:
         schema_extra = {
@@ -199,26 +213,29 @@ def create_embedding(


 class ChatCompletionRequestMessage(BaseModel):
-    role: Union[Literal["system"], Literal["user"], Literal["assistant"]]
-    content: str
-    user: Optional[str] = None
+    role: Union[Literal["system"], Literal["user"], Literal["assistant"]] = Field(
+        default="user", description="The role of the message."
+    )
+    content: str = Field(default="", description="The content of the message.")


 class CreateChatCompletionRequest(BaseModel):
-    model: Optional[str]
-    messages: List[ChatCompletionRequestMessage]
-    temperature: float = 0.8
-    top_p: float = 0.95
-    stream: bool = False
-    stop: Optional[List[str]] = []
-    max_tokens: int = 128
+    messages: List[ChatCompletionRequestMessage] = Field(
+        default=[],
+        description="A list of messages to generate completions for."
+    )
+    max_tokens: int = max_tokens_field
+    temperature: float = temperature_field
+    top_p: float = top_p_field
+    stop: Optional[List[str]] = stop_field
+    stream: bool = stream_field

     # ignored, but marked as required for the sake of compatibility with openai's api
     model: str = model_field

     # llama.cpp specific parameters
-    top_k: int = 40,
-    repeat_penalty: float = 1.1
+    top_k: int = top_k_field
+    repeat_penalty: float = repeat_penalty_field

     class Config:
         schema_extra = {
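
For reference, here is a minimal sketch (not part of the patch) of the shared-field pattern this diff introduces, assuming Pydantic v1 as used by the FastAPI server; the ExampleCompletionRequest model below is hypothetical and only illustrates that one module-level Field definition can be reused by several request models while keeping its default, bounds, and description:

from pydantic import BaseModel, Field

# Module-level field definition, shaped like max_tokens_field in the diff above.
max_tokens_field = Field(
    default=16,
    ge=1,
    le=2048,
    description="The maximum number of tokens to generate."
)

# Hypothetical request model reusing the shared field (illustration only).
class ExampleCompletionRequest(BaseModel):
    prompt: str = Field(default="", description="The prompt to generate completions for.")
    max_tokens: int = max_tokens_field  # picks up the shared default, ge/le bounds, and description

print(ExampleCompletionRequest(prompt="Hello").max_tokens)                 # 16, from the shared default
print(ExampleCompletionRequest(prompt="Hello", max_tokens=32).max_tokens)  # 32, allowed by ge=1/le=2048
# ExampleCompletionRequest(prompt="Hello", max_tokens=0) would raise a ValidationError (violates ge=1).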