From 868033220375e5e93569f38ef779288545cf0d35 Mon Sep 17 00:00:00 2001 From: Andrei Betlen Date: Thu, 23 Mar 2023 23:12:42 -0400 Subject: [PATCH] Update examples --- ...c.py => high_level_api_basic_inference.py} | 4 +-- examples/langchain_custom_llm.py | 26 +++++++++++++++++-- 2 files changed, 26 insertions(+), 4 deletions(-) rename examples/{basic.py => high_level_api_basic_inference.py} (82%) diff --git a/examples/basic.py b/examples/high_level_api_basic_inference.py similarity index 82% rename from examples/basic.py rename to examples/high_level_api_basic_inference.py index c676e55..f6f36d2 100644 --- a/examples/basic.py +++ b/examples/high_level_api_basic_inference.py @@ -4,11 +4,11 @@ import argparse from llama_cpp import Llama parser = argparse.ArgumentParser() -parser.add_argument("-m", "--model", type=str, default="../models/...") +parser.add_argument("-m", "--model", type=str, default="./models/...") args = parser.parse_args() llm = Llama(model_path=args.model) -output = llm("Question: What are the names of the planets in the solar system? Answer: ", max_tokens=48, stop=["Q:", "\n"], echo=False) +output = llm("Question: What are the names of the planets in the solar system? Answer: ", max_tokens=48, stop=["Q:", "\n"], echo=True) print(json.dumps(output, indent=2)) \ No newline at end of file diff --git a/examples/langchain_custom_llm.py b/examples/langchain_custom_llm.py index b170d5c..5d4806d 100644 --- a/examples/langchain_custom_llm.py +++ b/examples/langchain_custom_llm.py @@ -1,3 +1,5 @@ +import argparse + from llama_cpp import Llama from langchain.llms.base import LLM @@ -24,6 +26,26 @@ class LlamaLLM(LLM): def _identifying_params(self) -> Mapping[str, Any]: return {"model_path": self.model_path} -llm = LlamaLLM(model_path="models/...") +parser = argparse.ArgumentParser() +parser.add_argument("-m", "--model", type=str, default="./models/...") +args = parser.parse_args() -print(llm("Question: What is the capital of France? 
Answer: ", stop=["Question:", "\n"])) \ No newline at end of file +# Load the model +llm = LlamaLLM(model_path=args.model) + +# Basic Q&A +answer = llm("Question: What is the capital of France? Answer: ", stop=["Question:", "\n"]) +print(f"Answer: {answer.strip()}") + +# Using in a chain +from langchain.prompts import PromptTemplate +from langchain.chains import LLMChain + +prompt = PromptTemplate( + input_variables=["product"], + template="\n\n### Instruction:\nWrite a good name for a company that makes {product}\n\n### Response:\n", +) +chain = LLMChain(llm=llm, prompt=prompt) + +# Run the chain only specifying the input variable. +print(chain.run("colorful socks")) \ No newline at end of file