from __future__ import annotations

import os
import json

from threading import Lock
from functools import partial
from typing import Iterator, List, Optional, Union, Dict

import llama_cpp

import anyio
from anyio.streams.memory import MemoryObjectSendStream
from starlette.concurrency import run_in_threadpool, iterate_in_threadpool
from fastapi import Depends, FastAPI, APIRouter, Request, HTTPException, status, Body
from fastapi.middleware import Middleware
from fastapi.middleware.cors import CORSMiddleware
from fastapi.security import HTTPBearer
from sse_starlette.sse import EventSourceResponse
from starlette_context.plugins import RequestIdPlugin  # type: ignore
from starlette_context.middleware import RawContextMiddleware

from llama_cpp.server.model import (
    LlamaProxy,
)
from llama_cpp.server.settings import (
    ConfigFileSettings,
    Settings,
    ModelSettings,
    ServerSettings,
)
from llama_cpp.server.types import (
    CreateCompletionRequest,
    CreateEmbeddingRequest,
    CreateChatCompletionRequest,
    ModelList,
    TokenizeInputRequest,
    TokenizeInputResponse,
    TokenizeInputCountResponse,
    DetokenizeInputRequest,
    DetokenizeInputResponse,
)
from llama_cpp.server.errors import RouteErrorHandler


router = APIRouter(route_class=RouteErrorHandler)


_server_settings: Optional[ServerSettings] = None


def set_server_settings(server_settings: ServerSettings):
    global _server_settings
    _server_settings = server_settings


def get_server_settings():
    yield _server_settings


_llama_proxy: Optional[LlamaProxy] = None

llama_outer_lock = Lock()
llama_inner_lock = Lock()


def set_llama_proxy(model_settings: List[ModelSettings]):
    global _llama_proxy
    _llama_proxy = LlamaProxy(models=model_settings)


def get_llama_proxy():
    # NOTE: This double lock allows the currently streaming llama model to
    # check if any other requests are pending in the same thread and cancel
    # the stream if so.
    llama_outer_lock.acquire()
    release_outer_lock = True
    try:
        llama_inner_lock.acquire()
        try:
            llama_outer_lock.release()
            release_outer_lock = False
            yield _llama_proxy
        finally:
            llama_inner_lock.release()
    finally:
        if release_outer_lock:
            llama_outer_lock.release()
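
# How the double lock is used elsewhere in this module: a streaming request
# holds the inner lock while it generates, and a newly arrived request blocks
# on the outer lock. get_event_publisher below checks llama_outer_lock.locked()
# to see that another request is waiting and, when interrupt_requests is
# enabled, ends the current stream early. A minimal sketch of driving this
# dependency by hand outside FastAPI (the model alias "my-model" is an
# assumption for illustration):
#
#     proxy_gen = get_llama_proxy()
#     proxy = next(proxy_gen)        # acquires the locks and yields the proxy
#     try:
#         llama = proxy("my-model")  # look up a model by alias
#     finally:
#         proxy_gen.close()          # runs the finally blocks, releasing locks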


_ping_message_factory = None


def set_ping_message_factory(factory):
    global _ping_message_factory
    _ping_message_factory = factory


def create_app(
    settings: Settings | None = None,
    server_settings: ServerSettings | None = None,
    model_settings: List[ModelSettings] | None = None,
):
    config_file = os.environ.get("CONFIG_FILE", None)
    if config_file is not None:
        if not os.path.exists(config_file):
            raise ValueError(f"Config file {config_file} not found!")
        with open(config_file, "rb") as f:
            # Check if yaml file
            if config_file.endswith(".yaml") or config_file.endswith(".yml"):
                import yaml

                config_file_settings = ConfigFileSettings.model_validate_json(
                    json.dumps(yaml.safe_load(f))
                )
            else:
                config_file_settings = ConfigFileSettings.model_validate_json(f.read())
            server_settings = ServerSettings.model_validate(config_file_settings)
            model_settings = config_file_settings.models

    if server_settings is None and model_settings is None:
        if settings is None:
            settings = Settings()
        server_settings = ServerSettings.model_validate(settings)
        model_settings = [ModelSettings.model_validate(settings)]

    assert (
        server_settings is not None and model_settings is not None
    ), "server_settings and model_settings must be provided together"

    set_server_settings(server_settings)
    middleware = [Middleware(RawContextMiddleware, plugins=(RequestIdPlugin(),))]
    app = FastAPI(
        middleware=middleware,
        title="🦙 llama.cpp Python API",
        version=llama_cpp.__version__,
    )
    app.add_middleware(
        CORSMiddleware,
        allow_origins=["*"],
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )
    app.include_router(router)

    assert model_settings is not None
    set_llama_proxy(model_settings=model_settings)

    if server_settings.disable_ping_events:
        set_ping_message_factory(lambda: bytes())

    return app
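
# Example: constructing and serving the app (a minimal sketch; the model path,
# host, and port below are assumptions for illustration):
#
#     import uvicorn
#     from llama_cpp.server.app import create_app
#     from llama_cpp.server.settings import Settings
#
#     app = create_app(settings=Settings(model="./models/model.gguf"))
#     uvicorn.run(app, host="0.0.0.0", port=8000)
#
# Alternatively, point the CONFIG_FILE environment variable at a JSON or YAML
# file that matches ConfigFileSettings and call create_app() with no arguments.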


async def get_event_publisher(
    request: Request,
    inner_send_chan: MemoryObjectSendStream,
    iterator: Iterator,
):
    async with inner_send_chan:
        try:
            async for chunk in iterate_in_threadpool(iterator):
                await inner_send_chan.send(dict(data=json.dumps(chunk)))
                if await request.is_disconnected():
                    raise anyio.get_cancelled_exc_class()()
                if (
                    next(get_server_settings()).interrupt_requests
                    and llama_outer_lock.locked()
                ):
                    await inner_send_chan.send(dict(data="[DONE]"))
                    raise anyio.get_cancelled_exc_class()()
            await inner_send_chan.send(dict(data="[DONE]"))
        except anyio.get_cancelled_exc_class() as e:
            print("disconnected")
            with anyio.move_on_after(1, shield=True):
                print(f"Disconnected from client (via refresh/close) {request.client}")
                raise e
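
# Example: consuming the SSE stream that get_event_publisher feeds (a minimal
# sketch using the third-party requests package; URL and payload are
# assumptions for illustration):
#
#     import json
#     import requests
#
#     with requests.post(
#         "http://localhost:8000/v1/completions",
#         json={"prompt": "Hello", "stream": True},
#         stream=True,
#     ) as resp:
#         for line in resp.iter_lines():
#             if line.startswith(b"data: ") and line != b"data: [DONE]":
#                 chunk = json.loads(line[len(b"data: "):])
#                 print(chunk["choices"][0]["text"], end="", flush=True)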


def _logit_bias_tokens_to_input_ids(
    llama: llama_cpp.Llama,
    logit_bias: Dict[str, float],
) -> Dict[str, float]:
    to_bias: Dict[str, float] = {}
    for token, score in logit_bias.items():
        token = token.encode("utf-8")
        for input_id in llama.tokenize(token, add_bos=False, special=True):
            to_bias[str(input_id)] = score
    return to_bias
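
# Illustration (hypothetical values): a request carrying
# logit_bias={"Hello": -1.0} with logit_bias_type="tokens" is converted into a
# mapping keyed by token id, e.g. {"15043": -1.0}, before being passed to the
# underlying llama_cpp.Llama call; the actual ids depend on the loaded model's
# vocabulary.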


# Setup Bearer authentication scheme
bearer_scheme = HTTPBearer(auto_error=False)


async def authenticate(
    settings: Settings = Depends(get_server_settings),
    authorization: Optional[str] = Depends(bearer_scheme),
):
    # Skip API key check if it's not set in settings
    if settings.api_key is None:
        return True

    # check bearer credentials against the api_key
    if authorization and authorization.credentials == settings.api_key:
        # api key is valid
        return authorization.credentials

    # raise http error 401
    raise HTTPException(
        status_code=status.HTTP_401_UNAUTHORIZED,
        detail="Invalid API key",
    )
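
# Example: calling an endpoint when api_key is set in the server settings (a
# minimal sketch; the key value and host are assumptions for illustration):
#
#     curl http://localhost:8000/v1/models \
#          -H "Authorization: Bearer sk-my-secret-key"
#
# Requests without a matching bearer token receive a 401 "Invalid API key".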


openai_v1_tag = "OpenAI V1"


@router.post(
    "/v1/completions",
    summary="Completion",
    dependencies=[Depends(authenticate)],
    response_model=Union[
        llama_cpp.CreateCompletionResponse,
        str,
    ],
    responses={
        "200": {
            "description": "Successful Response",
            "content": {
                "application/json": {
                    "schema": {
                        "anyOf": [
                            {"$ref": "#/components/schemas/CreateCompletionResponse"}
                        ],
                        "title": "Completion response, when stream=False",
                    }
                },
                "text/event-stream": {
                    "schema": {
                        "type": "string",
                        "title": "Server Side Streaming response, when stream=True. "
                        + "See SSE format: https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format",  # noqa: E501
                        "example": """data: {... see CreateCompletionResponse ...} \\n\\n data: ... \\n\\n ... data: [DONE]""",
                    }
                },
            },
        }
    },
    tags=[openai_v1_tag],
)
@router.post(
    "/v1/engines/copilot-codex/completions",
    include_in_schema=False,
    dependencies=[Depends(authenticate)],
    tags=[openai_v1_tag],
)
async def create_completion(
    request: Request,
    body: CreateCompletionRequest,
    llama_proxy: LlamaProxy = Depends(get_llama_proxy),
) -> llama_cpp.Completion:
    if isinstance(body.prompt, list):
        assert len(body.prompt) <= 1
        body.prompt = body.prompt[0] if len(body.prompt) > 0 else ""

    llama = llama_proxy(
        body.model
        if request.url.path != "/v1/engines/copilot-codex/completions"
        else "copilot-codex"
    )

    exclude = {
        "n",
        "best_of",
        "logit_bias_type",
        "user",
    }
    kwargs = body.model_dump(exclude=exclude)

    if body.logit_bias is not None:
        kwargs["logit_bias"] = (
            _logit_bias_tokens_to_input_ids(llama, body.logit_bias)
            if body.logit_bias_type == "tokens"
            else body.logit_bias
        )

    if body.grammar is not None:
        kwargs["grammar"] = llama_cpp.LlamaGrammar.from_string(body.grammar)

    iterator_or_completion: Union[
        llama_cpp.CreateCompletionResponse,
        Iterator[llama_cpp.CreateCompletionStreamResponse],
    ] = await run_in_threadpool(llama, **kwargs)

    if isinstance(iterator_or_completion, Iterator):
        # EAFP: It's easier to ask for forgiveness than permission
        first_response = await run_in_threadpool(next, iterator_or_completion)

        # If no exception was raised from first_response, we can assume that
        # the iterator is valid and we can use it to stream the response.
        def iterator() -> Iterator[llama_cpp.CreateCompletionStreamResponse]:
            yield first_response
            yield from iterator_or_completion

        send_chan, recv_chan = anyio.create_memory_object_stream(10)
        return EventSourceResponse(
            recv_chan,
            data_sender_callable=partial(  # type: ignore
                get_event_publisher,
                request=request,
                inner_send_chan=send_chan,
                iterator=iterator(),
            ),
            sep="\n",
            ping_message_factory=_ping_message_factory,
        )
    else:
        return iterator_or_completion
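
# Example: calling this endpoint with the OpenAI Python client (a minimal
# sketch; the base URL, API key, and model alias are assumptions for
# illustration):
#
#     from openai import OpenAI
#
#     client = OpenAI(base_url="http://localhost:8000/v1", api_key="sk-anything")
#     completion = client.completions.create(
#         model="my-model",
#         prompt="The capital of France is",
#         max_tokens=16,
#     )
#     print(completion.choices[0].text)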


@router.post(
    "/v1/embeddings",
    summary="Embedding",
    dependencies=[Depends(authenticate)],
    tags=[openai_v1_tag],
)
async def create_embedding(
    request: CreateEmbeddingRequest,
    llama_proxy: LlamaProxy = Depends(get_llama_proxy),
):
    return await run_in_threadpool(
        llama_proxy(request.model).create_embedding,
        **request.model_dump(exclude={"user"}),
    )
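
# Example request body for /v1/embeddings (illustrative; the model alias is an
# assumption): {"model": "my-model", "input": "Hello world"}. The response
# follows the OpenAI embeddings format, with one vector per input item.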


@router.post(
    "/v1/chat/completions",
    summary="Chat",
    dependencies=[Depends(authenticate)],
    response_model=Union[llama_cpp.ChatCompletion, str],
    responses={
        "200": {
            "description": "Successful Response",
            "content": {
                "application/json": {
                    "schema": {
                        "anyOf": [
                            {
                                "$ref": "#/components/schemas/CreateChatCompletionResponse"
                            }
                        ],
                        "title": "Completion response, when stream=False",
                    }
                },
                "text/event-stream": {
                    "schema": {
                        "type": "string",
                        "title": "Server Side Streaming response, when stream=True. "
                        + "See SSE format: https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format",  # noqa: E501
                        "example": """data: {... see CreateChatCompletionResponse ...} \\n\\n data: ... \\n\\n ... data: [DONE]""",
                    }
                },
            },
        }
    },
    tags=[openai_v1_tag],
)
async def create_chat_completion(
    request: Request,
    body: CreateChatCompletionRequest = Body(
        openapi_examples={
            "normal": {
                "summary": "Chat Completion",
                "value": {
                    "model": "gpt-3.5-turbo",
                    "messages": [
                        {"role": "system", "content": "You are a helpful assistant."},
                        {"role": "user", "content": "What is the capital of France?"},
                    ],
                },
            },
            "json_mode": {
                "summary": "JSON Mode",
                "value": {
                    "model": "gpt-3.5-turbo",
                    "messages": [
                        {"role": "system", "content": "You are a helpful assistant."},
                        {"role": "user", "content": "Who won the world series in 2020"},
                    ],
                    "response_format": {"type": "json_object"},
                },
            },
            "tool_calling": {
                "summary": "Tool Calling",
                "value": {
                    "model": "gpt-3.5-turbo",
                    "messages": [
                        {"role": "system", "content": "You are a helpful assistant."},
                        {"role": "user", "content": "Extract Jason is 30 years old."},
                    ],
                    "tools": [
                        {
                            "type": "function",
                            "function": {
                                "name": "User",
                                "description": "User record",
                                "parameters": {
                                    "type": "object",
                                    "properties": {
                                        "name": {"type": "string"},
                                        "age": {"type": "number"},
                                    },
                                    "required": ["name", "age"],
                                },
                            },
                        }
                    ],
                    "tool_choice": {
                        "type": "function",
                        "function": {
                            "name": "User",
                        },
                    },
                },
            },
            "logprobs": {
                "summary": "Logprobs",
                "value": {
                    "model": "gpt-3.5-turbo",
                    "messages": [
                        {"role": "system", "content": "You are a helpful assistant."},
                        {"role": "user", "content": "What is the capital of France?"},
                    ],
                    "logprobs": True,
                    "top_logprobs": 10,
                },
            },
        }
    ),
    llama_proxy: LlamaProxy = Depends(get_llama_proxy),
) -> llama_cpp.ChatCompletion:
    exclude = {
        "n",
        "logit_bias_type",
        "user",
    }
    kwargs = body.model_dump(exclude=exclude)
    llama = llama_proxy(body.model)
    if body.logit_bias is not None:
        kwargs["logit_bias"] = (
            _logit_bias_tokens_to_input_ids(llama, body.logit_bias)
            if body.logit_bias_type == "tokens"
            else body.logit_bias
        )

    if body.grammar is not None:
        kwargs["grammar"] = llama_cpp.LlamaGrammar.from_string(body.grammar)

    iterator_or_completion: Union[
        llama_cpp.ChatCompletion, Iterator[llama_cpp.ChatCompletionChunk]
    ] = await run_in_threadpool(llama.create_chat_completion, **kwargs)

    if isinstance(iterator_or_completion, Iterator):
        # EAFP: It's easier to ask for forgiveness than permission
        first_response = await run_in_threadpool(next, iterator_or_completion)

        # If no exception was raised from first_response, we can assume that
        # the iterator is valid and we can use it to stream the response.
        def iterator() -> Iterator[llama_cpp.ChatCompletionChunk]:
            yield first_response
            yield from iterator_or_completion

        send_chan, recv_chan = anyio.create_memory_object_stream(10)
        return EventSourceResponse(
            recv_chan,
            data_sender_callable=partial(  # type: ignore
                get_event_publisher,
                request=request,
                inner_send_chan=send_chan,
                iterator=iterator(),
            ),
            sep="\n",
            ping_message_factory=_ping_message_factory,
        )
    else:
        return iterator_or_completion
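
# Example: streaming a chat completion with the OpenAI Python client (a minimal
# sketch; the base URL, API key, and model alias are assumptions for
# illustration):
#
#     from openai import OpenAI
#
#     client = OpenAI(base_url="http://localhost:8000/v1", api_key="sk-anything")
#     stream = client.chat.completions.create(
#         model="my-model",
#         messages=[{"role": "user", "content": "What is the capital of France?"}],
#         stream=True,
#     )
#     for chunk in stream:
#         print(chunk.choices[0].delta.content or "", end="", flush=True)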


@router.get(
    "/v1/models",
    summary="Models",
    dependencies=[Depends(authenticate)],
    tags=[openai_v1_tag],
)
async def get_models(
    llama_proxy: LlamaProxy = Depends(get_llama_proxy),
) -> ModelList:
    return {
        "object": "list",
        "data": [
            {
                "id": model_alias,
                "object": "model",
                "owned_by": "me",
                "permissions": [],
            }
            for model_alias in llama_proxy
        ],
    }
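
# Example response shape (the alias "my-model" is illustrative):
#     {"object": "list",
#      "data": [{"id": "my-model", "object": "model",
#                "owned_by": "me", "permissions": []}]}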


extras_tag = "Extras"


@router.post(
    "/extras/tokenize",
    summary="Tokenize",
    dependencies=[Depends(authenticate)],
    tags=[extras_tag],
)
async def tokenize(
    body: TokenizeInputRequest,
    llama_proxy: LlamaProxy = Depends(get_llama_proxy),
) -> TokenizeInputResponse:
    tokens = llama_proxy(body.model).tokenize(body.input.encode("utf-8"), special=True)

    return TokenizeInputResponse(tokens=tokens)
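
# Example: POST /extras/tokenize with {"model": "my-model", "input": "Hello"}
# returns {"tokens": [...]}; the ids depend on the loaded model, and the alias
# here is an assumption for illustration.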


@router.post(
    "/extras/tokenize/count",
    summary="Tokenize Count",
    dependencies=[Depends(authenticate)],
    tags=[extras_tag],
)
async def count_query_tokens(
    body: TokenizeInputRequest,
    llama_proxy: LlamaProxy = Depends(get_llama_proxy),
) -> TokenizeInputCountResponse:
    tokens = llama_proxy(body.model).tokenize(body.input.encode("utf-8"), special=True)

    return TokenizeInputCountResponse(count=len(tokens))


@router.post(
    "/extras/detokenize",
    summary="Detokenize",
    dependencies=[Depends(authenticate)],
    tags=[extras_tag],
)
async def detokenize(
    body: DetokenizeInputRequest,
    llama_proxy: LlamaProxy = Depends(get_llama_proxy),
) -> DetokenizeInputResponse:
    text = llama_proxy(body.model).detokenize(body.tokens).decode("utf-8")

    return DetokenizeInputResponse(text=text)
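
# Example round trip (the model alias is an assumption for illustration):
# POST /extras/tokenize with {"model": "my-model", "input": "Hello"} yields
# {"tokens": [...]}; feeding those tokens to /extras/detokenize as
# {"model": "my-model", "tokens": [...]} typically recovers {"text": "Hello"}.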