take full path to the model in the api

Bruce MacDonald 2023-06-26 18:08:16 -04:00
parent df5fdd6647
commit 41419f7577


@@ -17,16 +17,16 @@ lock = threading.Lock()
 def load(model):
     with lock:
-        if not os.path.exists(f"./models/{model}.bin"):
+        if not os.path.exists(f"{model}"):
             return {"error": "The model does not exist."}
         if model not in llms:
-            llms[model] = Llama(model_path=f"./models/{model}.bin")
+            llms[model] = Llama(model_path=f"{model}")
         return None
 
 
 def unload(model):
     with lock:
-        if not os.path.exists(f"./models/{model}.bin"):
+        if not os.path.exists(f"{model}"):
             return {"error": "The model does not exist."}
         llms.pop(model, None)
         return None
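For context, a minimal self-contained sketch of the load helper after this change, assuming llama-cpp-python is installed and the llms/lock globals visible in the diff; the model argument is now a full filesystem path rather than a name resolved under ./models/. (The f"{model}" wrapper in the diff is equivalent to model itself, so the sketch drops it.)

import os
import threading

from llama_cpp import Llama  # assumes llama-cpp-python is installed

llms = {}
lock = threading.Lock()


def load(model):
    # `model` is now the full path to a .bin file, e.g. "/models/llama-7b.bin"
    # (hypothetical path), instead of a bare name that was previously resolved
    # to f"./models/{model}.bin" by the server.
    with lock:
        if not os.path.exists(model):
            return {"error": "The model does not exist."}
        if model not in llms:
            llms[model] = Llama(model_path=model)
        return None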
@@ -89,7 +89,7 @@ def generate_route_handler():
         return Response("Model is required", status=400)
     if not prompt:
         return Response("Prompt is required", status=400)
-    if not os.path.exists(f"./models/{model}.bin"):
+    if not os.path.exists(f"{model}"):
         return {"error": "The model does not exist."}, 400
     return Response(
         stream_with_context(query(model, prompt)), mimetype="text/event-stream"
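A hedged client-side example of the new behavior: assuming the Flask app exposes generate_route_handler() at a /generate endpoint on port 5000 (both are assumptions; only the handler name appears in the diff), a request after this commit passes the full model path instead of a model name.

import requests

# Endpoint path and port are assumptions; the diff only shows that the
# handler streams a text/event-stream response.
resp = requests.post(
    "http://localhost:5000/generate",
    json={
        "model": "/models/llama-7b.bin",  # full path, per this commit
        "prompt": "Why is the sky blue?",
    },
    stream=True,
)
for line in resp.iter_lines():
    if line:
        print(line.decode())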