Update and Fix example models (#6065)
* Update example models
* Remove unused README.md
Parent: 1a83581a8e
Commit: 0be8baad2b
19 changed files with 32 additions and 24 deletions
@@ -35,7 +35,7 @@ func main() {
 	ctx := context.Background()
 	req := &api.ChatRequest{
-		Model:    "llama3",
+		Model:    "llama3.1",
 		Messages: messages,
 	}
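The Go hunks in this commit only change the model name handed to the client; the requests themselves are untouched. As a quick sanity check that the renamed model is actually present locally, a small sketch like the one below lists installed models. It is not part of the examples: it assumes the default Ollama endpoint at http://localhost:11434 and relies only on the documented `/api/tags` endpoint.

```python
import requests

# List locally installed models via the Ollama REST API (GET /api/tags).
# Assumes an Ollama server is running on the default port 11434.
resp = requests.get("http://localhost:11434/api/tags", timeout=10)
resp.raise_for_status()

names = [m["name"] for m in resp.json().get("models", [])]
print("installed models:", names)

# The updated examples expect llama3.1 to be present; warn if it is not.
if not any(n.startswith("llama3.1") for n in names):
    print("llama3.1 not found locally; run `ollama pull llama3.1` first.")
```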
@@ -16,7 +16,7 @@ func main() {
 	// By default, GenerateRequest is streaming.
 	req := &api.GenerateRequest{
-		Model:  "gemma",
+		Model:  "gemma2",
 		Prompt: "how many planets are there?",
 	}
@@ -15,7 +15,7 @@ func main() {
 	}

 	req := &api.GenerateRequest{
-		Model:  "gemma",
+		Model:  "gemma2",
 		Prompt: "how many planets are there?",

 		// set streaming to false
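This hunk comes from the non-streaming Go generate example (note the `// set streaming to false` comment). For readers more familiar with the HTTP API than the Go client, a rough Python equivalent of the same call is sketched below; it assumes a local Ollama server and uses the documented `/api/generate` endpoint with `stream` disabled, so a single JSON object comes back instead of a stream of chunks.

```python
import requests

# One-shot (non-streaming) generation against the local Ollama server.
payload = {
    "model": "gemma2",
    "prompt": "how many planets are there?",
    "stream": False,  # return one JSON response instead of newline-delimited chunks
}
resp = requests.post("http://localhost:11434/api/generate", json=payload, timeout=120)
resp.raise_for_status()

body = resp.json()
print(body["response"])  # the generated text lives in the "response" field
```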
@@ -4,6 +4,14 @@ This example provides an interface for asking questions to a PDF document.

 ## Setup

+1. Ensure you have the `llama3.1` model installed:
+
+```
+ollama pull llama3.1
+```
+
+2. Install the Python Requirements.
+
 ```
 pip install -r requirements.txt
 ```
@@ -51,7 +51,7 @@ while True:
        template=template,
    )

-   llm = Ollama(model="llama3:8b", callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]))
+   llm = Ollama(model="llama3.1", callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]))
    qa_chain = RetrievalQA.from_chain_type(
        llm,
        retriever=vectorstore.as_retriever(),
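The RetrievalQA hunk above leaves the rest of the chain intact and only swaps the model tag. In isolation, the `Ollama` LLM wrapper with a streaming stdout callback can be exercised with a minimal sketch like the following; it assumes the legacy `langchain`/`langchain_community` Ollama wrapper used by these examples, a locally pulled `llama3.1`, and an illustrative prompt of our own.

```python
from langchain_community.llms import Ollama
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler

# Same construction as in the example: tokens are streamed to stdout as they arrive.
llm = Ollama(
    model="llama3.1",
    callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
)

# invoke() returns the full completion once streaming finishes.
answer = llm.invoke("In one sentence, what is retrieval-augmented generation?")
```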
@@ -4,10 +4,10 @@ This example summarizes the website, [https://ollama.com/blog/run-llama2-uncenso

 ## Running the Example

-1. Ensure you have the `llama2` model installed:
+1. Ensure you have the `llama3.1` model installed:

 ```bash
-ollama pull llama2
+ollama pull llama3.1
 ```

 2. Install the Python Requirements.
@@ -5,8 +5,8 @@ from langchain.chains.summarize import load_summarize_chain
 loader = WebBaseLoader("https://ollama.com/blog/run-llama2-uncensored-locally")
 docs = loader.load()

-llm = Ollama(model="llama3")
+llm = Ollama(model="llama3.1")
 chain = load_summarize_chain(llm, chain_type="stuff")

 result = chain.invoke(docs)
 print(result)
@@ -4,10 +4,10 @@ This example is a basic "hello world" of using LangChain with Ollama.

 ## Running the Example

-1. Ensure you have the `llama3` model installed:
+1. Ensure you have the `llama3.1` model installed:

 ```bash
-ollama pull llama3
+ollama pull llama3.1
 ```

 2. Install the Python Requirements.
@@ -1,6 +1,6 @@
 from langchain.llms import Ollama

 input = input("What is your question?")
-llm = Ollama(model="llama3")
+llm = Ollama(model="llama3.1")
 res = llm.predict(input)
 print (res)
@@ -1,4 +1,4 @@
-FROM llama3
+FROM llama3.1
 PARAMETER temperature 1
 SYSTEM """
 You are Mario from super mario bros, acting as an assistant.
@@ -2,12 +2,12 @@

 # Example character: Mario

-This example shows how to create a basic character using Llama3 as the base model.
+This example shows how to create a basic character using Llama3.1 as the base model.

 To run this example:

 1. Download the Modelfile
-2. `ollama pull llama3` to get the base model used in the model file.
+2. `ollama pull llama3.1` to get the base model used in the model file.
 3. `ollama create NAME -f ./Modelfile`
 4. `ollama run NAME`
@@ -18,7 +18,7 @@ Ask it some questions like "Who are you?" or "Is Peach in trouble again?"
 What the model file looks like:

 ```
-FROM llama3
+FROM llama3.1
 PARAMETER temperature 1
 SYSTEM """
 You are Mario from Super Mario Bros, acting as an assistant.
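Once the Modelfile above has been built (for instance `ollama create mario -f ./Modelfile`, where `mario` stands in for the README's NAME placeholder), the character can also be queried programmatically. A rough sketch, assuming the model was created under that hypothetical name and the server is running locally:

```python
import requests

# Ask the custom character model a question via the chat endpoint.
# "mario" is a placeholder: use whatever NAME was passed to `ollama create`.
payload = {
    "model": "mario",
    "messages": [{"role": "user", "content": "Who are you?"}],
    "stream": False,
}
resp = requests.post("http://localhost:11434/api/chat", json=payload, timeout=120)
resp.raise_for_status()

# With streaming disabled, the reply is a single JSON object whose
# "message" field holds the assistant's answer.
print(resp.json()["message"]["content"])
```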
@@ -4,7 +4,7 @@ imageName = input("Enter the name of the image: ")
 client = docker.from_env()
 s = requests.Session()
 output=""
-with s.post('http://localhost:11434/api/generate', json={'model': 'dockerit', 'prompt': inputDescription}, stream=True) as r:
+with s.post('http://localhost:11434/api/generate', json={'model': 'mattw/dockerit', 'prompt': inputDescription}, stream=True) as r:
    for line in r.iter_lines():
        if line:
            j = json.loads(line)
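The script above consumes the streaming form of `/api/generate`, where each line of the response body is a standalone JSON object. A stripped-down sketch of that pattern (independent of the dockerit model; the model name and prompt here are placeholders) shows how the chunks are typically accumulated and when the loop ends.

```python
import json
import requests

# Streamed generation: the server sends one JSON object per line until "done" is true.
payload = {"model": "llama3.1", "prompt": "Write a haiku about containers."}

output = ""
with requests.Session() as s:
    with s.post("http://localhost:11434/api/generate", json=payload, stream=True) as r:
        r.raise_for_status()
        for line in r.iter_lines():
            if not line:
                continue
            chunk = json.loads(line)
            output += chunk.get("response", "")  # partial text carried by this chunk
            if chunk.get("done"):
                break

print(output)
```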
@@ -2,7 +2,7 @@ import requests
 import json
 import random

-model = "llama3"
+model = "llama3.1"
 template = {
     "firstName": "",
     "lastName": "",
@@ -12,7 +12,7 @@ countries = [
     "France",
 ]
 country = random.choice(countries)
-model = "llama3"
+model = "llama3.1"

 prompt = f"generate one realistically believable sample data set of a persons first name, last name, address in {country}, and phone number. Do not use common names. Respond using JSON. Key names should have no backslashes, values should use plain ascii with no special characters."

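Both scripts in the JSON data-generator example ask for JSON in the prompt text itself. The Ollama API also offers a `format: "json"` option that constrains the output to valid JSON; whether this particular example enables it is not visible in the diff, so the sketch below is only an illustration of how the prompt above could be combined with that option.

```python
import json
import requests

model = "llama3.1"
prompt = "Generate one realistic sample person (first name, last name, city) as JSON."

# "format": "json" asks the server to constrain the output to valid JSON.
payload = {"model": model, "prompt": prompt, "format": "json", "stream": False}
resp = requests.post("http://localhost:11434/api/generate", json=payload, timeout=120)
resp.raise_for_status()

# The "response" field is a JSON string, so it still needs a json.loads().
person = json.loads(resp.json()["response"])
print(person)
```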
@@ -6,10 +6,10 @@ There are two python scripts in this example. `randomaddresses.py` generates ran

 ## Running the Example

-1. Ensure you have the `llama3` model installed:
+1. Ensure you have the `llama3.1` model installed:

 ```bash
-ollama pull llama3
+ollama pull llama3.1
 ```

 2. Install the Python Requirements.
@@ -2,7 +2,7 @@ import json
 import requests

 # NOTE: ollama must be running for this to work, start the ollama app or run `ollama serve`
-model = "llama3" # TODO: update this for whatever model you wish to use
+model = "llama3.1" # TODO: update this for whatever model you wish to use


 def chat(messages):
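The simplechat script builds up a `messages` list and sends it to the chat endpoint. A compact sketch of that round trip, with streaming enabled as in the example, is below; the exact helper structure of the real script may differ, and this relies only on the documented `/api/chat` request and response shapes.

```python
import json
import requests

model = "llama3.1"

def chat(messages):
    """Send the conversation so far and stream back the assistant's reply."""
    r = requests.post(
        "http://localhost:11434/api/chat",
        json={"model": model, "messages": messages, "stream": True},
        stream=True,
    )
    r.raise_for_status()
    content = ""
    for line in r.iter_lines():
        if not line:
            continue
        chunk = json.loads(line)
        if chunk.get("done"):
            break
        content += chunk["message"]["content"]
    return {"role": "assistant", "content": content}

messages = [{"role": "user", "content": "Why is the sky blue?"}]
reply = chat(messages)
print(reply["content"])
messages.append(reply)  # keep the history so follow-up questions have context
```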
@@ -4,10 +4,10 @@ The **chat** endpoint is one of two ways to generate text from an LLM with Ollam

 ## Running the Example

-1. Ensure you have the `llama3` model installed:
+1. Ensure you have the `llama3.1` model installed:

 ```bash
-ollama pull llama3
+ollama pull llama3.1
 ```

 2. Install the Python Requirements.
@@ -1,6 +1,6 @@
 import * as readline from "readline";

-const model = "llama3";
+const model = "llama3.1";
 type Message = {
   role: "assistant" | "user" | "system";
   content: string;