"""Example FastAPI server for llama.cpp."""

import json
from typing import List, Optional, Iterator

import llama_cpp

from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel, BaseSettings, Field, create_model_from_typeddict
from sse_starlette.sse import EventSourceResponse


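# Settings is populated from the environment by pydantic's BaseSettings, so the
# model path can be supplied as an environment variable, e.g.
# `MODEL=./models/7B/ggml-model.bin` (the path here is only illustrative).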
class Settings(BaseSettings):
    model: str


app = FastAPI(
    title="🦙 llama.cpp Python API",
    version="0.0.1",
)
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
settings = Settings()
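# The parameter choices below are example tuning values: f16_kv keeps the KV
# cache in 16-bit floats, use_mlock pins the model in RAM, embedding=True is
# needed for the /v1/embeddings endpoint, and n_threads/n_batch should be
# sized to the host machine.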
llama = llama_cpp.Llama(
    settings.model,
    f16_kv=True,
    use_mlock=True,
    embedding=True,
    n_threads=6,
    n_batch=2048,
)


class CreateCompletionRequest(BaseModel):
    prompt: str
    suffix: Optional[str] = Field(None)
    max_tokens: int = 16
    temperature: float = 0.8
    top_p: float = 0.95
    logprobs: Optional[int] = Field(None)
    echo: bool = False
    stop: List[str] = []
    repeat_penalty: float = 1.1
    top_k: int = 40
    stream: bool = False

    class Config:
        schema_extra = {
            "example": {
                "prompt": "\n\n### Instructions:\nWhat is the capital of France?\n\n### Response:\n",
                "stop": ["\n", "###"],
            }
        }


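# create_model_from_typeddict turns the llama_cpp Completion TypedDict into a
# pydantic model so FastAPI can use it as the response schema.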
CreateCompletionResponse = create_model_from_typeddict(llama_cpp.Completion)


@app.post(
    "/v1/completions",
    response_model=CreateCompletionResponse,
)
def create_completion(request: CreateCompletionRequest):
    if request.stream:
        # Stream chunks back as server-sent events when stream=True.
        chunks: Iterator[llama_cpp.CompletionChunk] = llama(**request.dict())  # type: ignore
        return EventSourceResponse(dict(data=json.dumps(chunk)) for chunk in chunks)
    return llama(**request.dict())


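# Example request against the completions endpoint (illustrative; assumes the
# default uvicorn host and port):
#
#   curl -X POST http://localhost:8000/v1/completions \
#     -H "Content-Type: application/json" \
#     -d '{"prompt": "What is the capital of France?", "stop": ["\n"]}'

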
class CreateEmbeddingRequest(BaseModel):
    model: Optional[str]
    input: str
    user: Optional[str]

    class Config:
        schema_extra = {
            "example": {
                "input": "The food was delicious and the waiter...",
            }
        }


CreateEmbeddingResponse = create_model_from_typeddict(llama_cpp.Embedding)


@app.post(
    "/v1/embeddings",
    response_model=CreateEmbeddingResponse,
)
def create_embedding(request: CreateEmbeddingRequest):
    return llama.create_embedding(request.input)
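

# How to serve the app (a sketch; the module name, model path, and the use of
# uvicorn are assumptions, not part of the original example):
#
#   MODEL=./models/7B/ggml-model.bin uvicorn fastapi_server:app --reload
#
# It can also be launched programmatically:
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="127.0.0.1", port=8000)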