from llama_cpp import Llama

from langchain.llms.base import LLM
from typing import Optional, List, Mapping, Any


class LlamaLLM(LLM):
    """LangChain-compatible LLM that runs a local llama.cpp model.

    Wraps a ``llama_cpp.Llama`` instance so it can be used anywhere
    LangChain expects an ``LLM``.  Both fields are pydantic model fields
    (``LLM`` subclasses pydantic's ``BaseModel``), so they must be passed
    through ``super().__init__`` rather than assigned directly.
    """

    # Filesystem path to the GGML/GGUF model weights.
    model_path: str
    # The underlying llama.cpp model handle, built in __init__.
    llm: Llama

    @property
    def _llm_type(self) -> str:
        """Identifier LangChain uses when serializing / logging this LLM."""
        return "llama-cpp-python"

    def __init__(self, model_path: str, **kwargs: Any):
        """Load the model at *model_path* and initialize the pydantic fields.

        Extra keyword arguments are forwarded to the LangChain ``LLM``
        base initializer (e.g. callbacks, verbosity).
        """
        # Build the Llama handle first, then let the pydantic initializer
        # populate both declared fields.  (A bare ``model_path = model_path``
        # local assignment here would be a no-op and has been removed.)
        llm = Llama(model_path=model_path)
        super().__init__(model_path=model_path, llm=llm, **kwargs)

    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
        """Run a completion for *prompt*, halting at any of the *stop* strings.

        Returns only the generated text of the first choice from the
        OpenAI-style response dict that llama.cpp produces.
        """
        # Llama rejects stop=None, so normalize to an empty list.
        response = self.llm(prompt, stop=stop or [])
        return response["choices"][0]["text"]

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Parameters LangChain uses to identify/cache this model instance."""
        return {"model_path": self.model_path}


llm = LlamaLLM(model_path="models/...")

print(llm("Question: What is the capital of France? Answer: ", stop=["Question:", "\n"]))