llama_cpp server: define fields for chat completions
Slight refactor for common fields shared between completion and chat completion
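Concretely, each sampling parameter gets a single module-level pydantic Field(...) definition, and both request models point their attributes at it. A minimal sketch of the pattern, assuming Pydantic v1 semantics and using only names that appear in the diff below:

from pydantic import BaseModel, Field

# One shared definition per parameter, declared once at module level.
max_tokens_field = Field(
    default=16,
    ge=1,
    le=2048,
    description="The maximum number of tokens to generate."
)


class CreateCompletionRequest(BaseModel):
    # Reuse the shared definition instead of repeating default/bounds/description.
    max_tokens: int = max_tokens_field


class CreateChatCompletionRequest(BaseModel):
    max_tokens: int = max_tokens_field


# Both models now share the same default and validation bounds:
print(CreateCompletionRequest().max_tokens)                   # 16
print(CreateChatCompletionRequest(max_tokens=32).max_tokens)  # 32

This keeps the defaults, bounds, and OpenAPI descriptions in one place, so the completion and chat completion endpoints cannot drift apart.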
This commit is contained in:
parent 978b6daf93
commit 8dcbf65a45
1 changed file with 71 additions and 54 deletions
@@ -70,6 +70,55 @@ model_field = Field(
     description="The model to use for generating completions."
 )
 
+max_tokens_field = Field(
+    default=16,
+    ge=1,
+    le=2048,
+    description="The maximum number of tokens to generate."
+)
+
+temperature_field = Field(
+    default=0.8,
+    ge=0.0,
+    le=2.0,
+    description="Adjust the randomness of the generated text.\n\n" +
+    "Temperature is a hyperparameter that controls the randomness of the generated text. It affects the probability distribution of the model's output tokens. A higher temperature (e.g., 1.5) makes the output more random and creative, while a lower temperature (e.g., 0.5) makes the output more focused, deterministic, and conservative. The default value is 0.8, which provides a balance between randomness and determinism. At the extreme, a temperature of 0 will always pick the most likely next token, leading to identical outputs in each run."
+)
+
+top_p_field = Field(
+    default=0.95,
+    ge=0.0,
+    le=1.0,
+    description="Limit the next token selection to a subset of tokens with a cumulative probability above a threshold P.\n\n" +
+    "Top-p sampling, also known as nucleus sampling, is another text generation method that selects the next token from a subset of tokens that together have a cumulative probability of at least p. This method provides a balance between diversity and quality by considering both the probabilities of tokens and the number of tokens to sample from. A higher value for top_p (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text."
+)
+
+stop_field = Field(
+    default=None,
+    description="A list of tokens at which to stop generation. If None, no stop tokens are used."
+)
+
+stream_field = Field(
+    default=False,
+    description="Whether to stream the results as they are generated. Useful for chatbots."
+)
+
+top_k_field = Field(
+    default=40,
+    ge=0,
+    description="Limit the next token selection to the K most probable tokens.\n\n" +
+    "Top-k sampling is a text generation method that selects the next token only from the top k most likely tokens predicted by the model. It helps reduce the risk of generating low-probability or nonsensical tokens, but it may also limit the diversity of the output. A higher value for top_k (e.g., 100) will consider more tokens and lead to more diverse text, while a lower value (e.g., 10) will focus on the most probable tokens and generate more conservative text."
+)
+
+repeat_penalty_field = Field(
+    default=1.0,
+    ge=0.0,
+    description="A penalty applied to each token that is already generated. This helps prevent the model from repeating itself.\n\n" +
+    "Repeat penalty is a hyperparameter used to penalize the repetition of token sequences during text generation. It helps prevent the model from generating repetitive or monotonous text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient."
+)
+
 
 class CreateCompletionRequest(BaseModel):
     prompt: Union[str, List[str]] = Field(
         default="",
@@ -79,62 +128,27 @@ class CreateCompletionRequest(BaseModel):
         default=None,
         description="A suffix to append to the generated text. If None, no suffix is appended. Useful for chatbots."
     )
-    max_tokens: int = Field(
-        default=16,
-        ge=1,
-        le=2048,
-        description="The maximum number of tokens to generate."
-    )
-    temperature: float = Field(
-        default=0.8,
-        ge=0.0,
-        le=2.0,
-        description="Adjust the randomness of the generated text.\n\n" +
-        "Temperature is a hyperparameter that controls the randomness of the generated text. It affects the probability distribution of the model's output tokens. A higher temperature (e.g., 1.5) makes the output more random and creative, while a lower temperature (e.g., 0.5) makes the output more focused, deterministic, and conservative. The default value is 0.8, which provides a balance between randomness and determinism. At the extreme, a temperature of 0 will always pick the most likely next token, leading to identical outputs in each run."
-    )
-    top_p: float = Field(
-        default=0.95,
-        ge=0.0,
-        le=1.0,
-        description="Limit the next token selection to a subset of tokens with a cumulative probability above a threshold P.\n\n" +
-        "Top-p sampling, also known as nucleus sampling, is another text generation method that selects the next token from a subset of tokens that together have a cumulative probability of at least p. This method provides a balance between diversity and quality by considering both the probabilities of tokens and the number of tokens to sample from. A higher value for top_p (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text."
-    )
+    max_tokens: int = max_tokens_field
+    temperature: float = temperature_field
+    top_p: float = top_p_field
     echo: bool = Field(
         default=False,
         description="Whether to echo the prompt in the generated text. Useful for chatbots."
     )
-    stop: Optional[List[str]] = Field(
-        default=None,
-        description="A list of tokens at which to stop generation. If None, no stop tokens are used."
-    )
-    stream: bool = Field(
-        default=False,
-        description="Whether to stream the results as they are generated. Useful for chatbots."
-    )
+    stop: Optional[List[str]] = stop_field
+    stream: bool = stream_field
     logprobs: Optional[int] = Field(
        default=None,
        ge=0,
        description="The number of logprobs to generate. If None, no logprobs are generated."
     )
 
     # ignored, but marked as required for the sake of compatibility with openai's api
     model: str = model_field
 
     # llama.cpp specific parameters
-    top_k: int = Field(
-        default=40,
-        ge=0,
-        description="Limit the next token selection to the K most probable tokens.\n\n" +
-        "Top-k sampling is a text generation method that selects the next token only from the top k most likely tokens predicted by the model. It helps reduce the risk of generating low-probability or nonsensical tokens, but it may also limit the diversity of the output. A higher value for top_k (e.g., 100) will consider more tokens and lead to more diverse text, while a lower value (e.g., 10) will focus on the most probable tokens and generate more conservative text."
-    )
-    repeat_penalty: float = Field(
-        default=1.0,
-        ge=0.0,
-        description="A penalty applied to each token that is already generated. This helps prevent the model from repeating itself.\n\n" +
-        "Repeat penalty is a hyperparameter used to penalize the repetition of token sequences during text generation. It helps prevent the model from generating repetitive or monotonous text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient."
-    )
+    top_k: int = top_k_field
+    repeat_penalty: float = repeat_penalty_field
 
     class Config:
         schema_extra = {
@@ -199,26 +213,29 @@ def create_embedding(
 
 
 class ChatCompletionRequestMessage(BaseModel):
-    role: Union[Literal["system"], Literal["user"], Literal["assistant"]]
-    content: str
-    user: Optional[str] = None
+    role: Union[Literal["system"], Literal["user"], Literal["assistant"]] = Field(
+        default=Literal["user"], description="The role of the message."
+    )
+    content: str = Field(default="", description="The content of the message.")
 
 
 class CreateChatCompletionRequest(BaseModel):
-    model: Optional[str]
-    messages: List[ChatCompletionRequestMessage]
-    temperature: float = 0.8
-    top_p: float = 0.95
-    stream: bool = False
-    stop: Optional[List[str]] = []
-    max_tokens: int = 128
+    messages: List[ChatCompletionRequestMessage] = Field(
+        default=[],
+        description="A list of messages to generate completions for."
+    )
+    max_tokens: int = max_tokens_field
+    temperature: float = temperature_field
+    top_p: float = top_p_field
+    stop: Optional[List[str]] = stop_field
+    stream: bool = stream_field
 
     # ignored, but marked as required for the sake of compatibility with openai's api
     model: str = model_field
 
     # llama.cpp specific parameters
-    top_k: int = 40,
-    repeat_penalty: float = 1.1
+    top_k: int = top_k_field
+    repeat_penalty: float = repeat_penalty_field
 
     class Config:
         schema_extra = {