"""Example FastAPI server for llama.cpp.
|
|
|
|
To run this example:
|
|
|
|
```bash
|
|
pip install fastapi uvicorn sse-starlette
|
|
export MODEL=../models/7B/...
|
|
```
|
|
|
|
Then run:
|
|
```
|
|
uvicorn llama_cpp.server.app:app --reload
|
|
```
|
|
|
|
or
|
|
|
|
```
|
|
python3 -m llama_cpp.server
|
|
```
|
|
|
|
Then visit http://localhost:8000/docs to see the interactive API docs.
|
|
|
|
"""
import os
import argparse

import uvicorn

from llama_cpp.server.app import create_app, Settings

if __name__ == "__main__":
    # Build a CLI flag for every pydantic Settings field so any server
    # option can be overridden from the command line.
    parser = argparse.ArgumentParser()
    for name, field in Settings.__fields__.items():
        field_type = field.type_
        # BUG FIX: argparse's `type=bool` is a trap — bool("False") is True,
        # so *any* value passed to a boolean flag would enable it.  Parse
        # common truthy strings explicitly for bool-typed settings instead.
        if field_type is bool:
            field_type = lambda value: str(value).lower() in ("1", "true", "yes", "on")
        parser.add_argument(
            f"--{name}",
            dest=name,
            type=field_type,
            default=field.default,
            help=field.field_info.description,
        )

    # Parsed flags map 1:1 onto Settings fields; pydantic validates them.
    args = parser.parse_args()
    settings = Settings(**vars(args))
    app = create_app(settings=settings)

    # HOST/PORT environment variables override the bind address
    # (defaults: localhost:8000).
    uvicorn.run(
        app, host=os.getenv("HOST", "localhost"), port=int(os.getenv("PORT", 8000))
    )
|