fix(server): minor type fixes

commit d11ccc3036
parent c1325dcdfb
Author: Andrei Betlen
Date:   2024-03-23 17:14:15 -04:00

2 changed files with 4 additions and 4 deletions


@@ -493,7 +493,7 @@ async def tokenize(
 ) -> TokenizeInputResponse:
     tokens = llama_proxy(body.model).tokenize(body.input.encode("utf-8"), special=True)
-    return {"tokens": tokens}
+    return TokenizeInputResponse(tokens=tokens)
 @router.post(
@@ -508,7 +508,7 @@ async def count_query_tokens(
 ) -> TokenizeInputCountResponse:
     tokens = llama_proxy(body.model).tokenize(body.input.encode("utf-8"), special=True)
-    return {"count": len(tokens)}
+    return TokenizeInputCountResponse(count=len(tokens))
 @router.post(
@@ -523,4 +523,4 @@ async def detokenize(
 ) -> DetokenizeInputResponse:
     text = llama_proxy(body.model).detokenize(body.tokens).decode("utf-8")
-    return {"text": text}
+    return DetokenizeInputResponse(text=text)


@@ -268,7 +268,7 @@ class ModelList(TypedDict):
 class TokenizeInputRequest(BaseModel):
     model: Optional[str] = model_field
-    input: Optional[str] = Field(description="The input to tokenize.")
+    input: str = Field(description="The input to tokenize.")
     model_config = {
         "json_schema_extra": {"examples": [{"input": "How many tokens in this query?"}]}