restructure proto.py

Split request handling from model management: load(), unload(), generate(), and models() now hold the core logic, with a threading.Lock serializing access to the shared llms dict, while thin *_route_handler functions validate the incoming request and shape the HTTP response. A client usage sketch follows the diff below.
This commit is contained in:
parent
23c645388c
commit
2e99e7d5cb
1 changed file with 57 additions and 37 deletions
proto.py
@@ -1,5 +1,6 @@
 import json
 import os
+import threading
 from llama_cpp import Llama
 from flask import Flask, Response, stream_with_context, request
 from flask_cors import CORS
@@ -9,57 +10,31 @@ CORS(app)  # enable CORS for all routes
 
 # llms tracks which models are loaded
 llms = {}
+lock = threading.Lock()
 
 
-@app.route("/load", methods=["POST"])
-def load():
-    data = request.get_json()
-    model = data.get("model")
-
-    if not model:
-        return Response("Model is required", status=400)
-    if not os.path.exists(f"./models/{model}.bin"):
-        return {"error": "The model does not exist."}, 400
-
-    if model not in llms:
-        llms[model] = Llama(model_path=f"./models/{model}.bin")
-
-    return Response(status=204)
+def load(model):
+    with lock:
+        if not os.path.exists(f"./models/{model}.bin"):
+            return {"error": "The model does not exist."}
+        if model not in llms:
+            llms[model] = Llama(model_path=f"./models/{model}.bin")
+    return None
 
 
-@app.route("/unload", methods=["POST"])
-def unload():
-    data = request.get_json()
-    model = data.get("model")
-
-    if not model:
-        return Response("Model is required", status=400)
-    if not os.path.exists(f"./models/{model}.bin"):
-        return {"error": "The model does not exist."}, 400
-
-    llms.pop(model, None)
-
-    return Response(status=204)
+def unload(model):
+    with lock:
+        if not os.path.exists(f"./models/{model}.bin"):
+            return {"error": "The model does not exist."}
+        llms.pop(model, None)
+    return None
 
 
-@app.route("/generate", methods=["POST"])
-def generate():
-    data = request.get_json()
-    model = data.get("model")
-    prompt = data.get("prompt")
-
-    if not model:
-        return Response("Model is required", status=400)
-    if not prompt:
-        return Response("Prompt is required", status=400)
-    if not os.path.exists(f"./models/{model}.bin"):
-        return {"error": "The model does not exist."}, 400
-
-    if model not in llms:
-        # auto load
-        llms[model] = Llama(model_path=f"./models/{model}.bin")
-
-    def stream_response():
+def generate(model, prompt):
+    error = load(model)
+    if error is not None:
+        return error
+
     stream = llms[model](
         str(prompt),  # TODO: optimize prompt based on model
         max_tokens=4096,
@@ -70,16 +45,61 @@ def generate():
     for output in stream:
         yield json.dumps(output)
 
-    return Response(
-        stream_with_context(stream_response()), mimetype="text/event-stream"
-    )
 
-@app.route("/models", methods=["GET"])
 def models():
     all_files = os.listdir("./models")
-    bin_files = [file.replace(".bin", "") for file in all_files if file.endswith(".bin")]
+    bin_files = [
+        file.replace(".bin", "") for file in all_files if file.endswith(".bin")
+    ]
     return bin_files
 
 
+@app.route("/load", methods=["POST"])
+def load_route_handler():
+    data = request.get_json()
+    model = data.get("model")
+    if not model:
+        return Response("Model is required", status=400)
+    error = load(model)
+    if error is not None:
+        return error
+    return Response(status=204)
+
+
+@app.route("/unload", methods=["POST"])
+def unload_route_handler():
+    data = request.get_json()
+    model = data.get("model")
+    if not model:
+        return Response("Model is required", status=400)
+    error = unload(model)
+    if error is not None:
+        return error
+    return Response(status=204)
+
+
+@app.route("/generate", methods=["POST"])
+def generate_route_handler():
+    data = request.get_json()
+    model = data.get("model")
+    prompt = data.get("prompt")
+    if not model:
+        return Response("Model is required", status=400)
+    if not prompt:
+        return Response("Prompt is required", status=400)
+    if not os.path.exists(f"./models/{model}.bin"):
+        return {"error": "The model does not exist."}, 400
+    return Response(
+        stream_with_context(generate(model, prompt)), mimetype="text/event-stream"
+    )
+
+
+@app.route("/models", methods=["GET"])
+def models_route_handler():
+    bin_files = models()
+    return Response(json.dumps(bin_files), mimetype="application/json")
 
 
 if __name__ == "__main__":
-    app.run(debug=True, threaded=True, port=5001)
+    app.run()
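
For reference, a minimal client sketch against the restructured endpoints. This is illustrative only and not part of the commit: it assumes the server was started with the default app.run() above (so http://127.0.0.1:5000), that the requests package is installed, and that a model file ./models/7B.bin exists ("7B" is a placeholder name).

# client.py -- hypothetical client for the API above (not part of this commit)
import requests

BASE = "http://127.0.0.1:5000"  # Flask default port; assumption, see note above
MODEL = "7B"  # placeholder: use any name returned by GET /models

# List available models; the route returns a JSON array of .bin basenames.
print(requests.get(f"{BASE}/models").json())

# Pre-load the model; the server answers 204 No Content on success.
requests.post(f"{BASE}/load", json={"model": MODEL}).raise_for_status()

# Stream a completion; generate() auto-loads the model via load() if needed
# and yields one json.dumps()-encoded llama.cpp chunk after another.
with requests.post(
    f"{BASE}/generate",
    json={"model": MODEL, "prompt": "Why is the sky blue?"},
    stream=True,
) as resp:
    resp.raise_for_status()
    for chunk in resp.iter_content(chunk_size=None, decode_unicode=True):
        print(chunk, end="", flush=True)

# Drop the model from memory when done.
requests.post(f"{BASE}/unload", json={"model": MODEL}).raise_for_status()

Note that although the response is sent with mimetype="text/event-stream", the body is a plain concatenation of JSON objects rather than framed server-sent events, so a robust client would need to split the stream on JSON object boundaries itself.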