update to llama3.1 elsewhere in repo (#6032)
This commit is contained in:
parent 2c01610616
commit 0e4d653687
6 changed files with 7 additions and 7 deletions
@@ -138,7 +138,7 @@ SetupAppRunningError=Another Ollama installer is running.%n%nPlease cancel or fi


 ;FinishedHeadingLabel=Run your first model
-;FinishedLabel=%nRun this command in a PowerShell or cmd terminal.%n%n%n ollama run llama3
+;FinishedLabel=%nRun this command in a PowerShell or cmd terminal.%n%n%n ollama run llama3.1
 ;ClickFinish=%n

 [Registry]
@@ -4,5 +4,5 @@ write-host "Welcome to Ollama!"
 write-host ""
 write-host "Run your first model:"
 write-host ""
-write-host "`tollama run llama3"
+write-host "`tollama run llama3.1"
 write-host ""
@@ -63,7 +63,7 @@ docker run -d --device /dev/kfd --device /dev/dri -v ollama:/root/.ollama -p 114
 Now you can run a model:

 ```
-docker exec -it ollama ollama run llama3
+docker exec -it ollama ollama run llama3.1
 ```

 ### Try different models
@@ -227,7 +227,7 @@ curl http://localhost:11434/api/chat -d '{"model": "mistral"}'

 To preload a model using the CLI, use the command:
 ```shell
-ollama run llama3 ""
+ollama run llama3.1 ""
 ```

 ## How do I keep a model loaded in memory or make it unload immediately?
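Not part of this diff, but relevant to the FAQ heading in the context above: the same preload request can also control how long a model stays resident. A minimal TypeScript sketch, assuming Ollama's documented `keep_alive` request parameter (`0` unloads the model immediately; a negative value keeps it loaded indefinitely):

```typescript
// Sketch under the assumption above: preload llama3.1 and pin it in memory.
// Sending keep_alive: 0 instead would unload the model right after the request.
const res = await fetch("http://localhost:11434/api/generate", {
  method: "POST",
  body: JSON.stringify({ model: "llama3.1", keep_alive: -1 }),
});
console.log(res.status); // expect 200 once the model is (re)loaded
```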
@@ -15,7 +15,7 @@ import { Ollama } from "@langchain/community/llms/ollama";

 const ollama = new Ollama({
   baseUrl: "http://localhost:11434",
-  model: "llama3",
+  model: "llama3.1",
 });

 const answer = await ollama.invoke(`why is the sky blue?`);
@@ -23,7 +23,7 @@ const answer = await ollama.invoke(`why is the sky blue?`);
 console.log(answer);
 ```

-That will get us the same thing as if we ran `ollama run llama3 "why is the sky blue"` in the terminal. But we want to load a document from the web to ask a question against. **Cheerio** is a great library for ingesting a webpage, and **LangChain** uses it in their **CheerioWebBaseLoader**. So let's install **Cheerio** and build that part of the app.
+That will get us the same thing as if we ran `ollama run llama3.1 "why is the sky blue"` in the terminal. But we want to load a document from the web to ask a question against. **Cheerio** is a great library for ingesting a webpage, and **LangChain** uses it in their **CheerioWebBaseLoader**. So let's install **Cheerio** and build that part of the app.

 ```bash
 npm install cheerio
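The tutorial prose above promises a `CheerioWebBaseLoader` step, but the diff context ends before it. A minimal sketch of what that step might look like; the URL is a placeholder, and the import path is assumed from the LangChain.js layout of this era (newer releases moved it into `@langchain/community`):

```typescript
// Assumed import path; verify against your installed LangChain.js version.
import { CheerioWebBaseLoader } from "langchain/document_loaders/web/cheerio";

// Ingest a web page into LangChain documents so llama3.1 can answer
// questions against its contents. The URL below is a placeholder.
const loader = new CheerioWebBaseLoader("https://example.com/some-article");
const docs = await loader.load();

console.log(docs[0].pageContent.slice(0, 200)); // preview the loaded text
```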
@@ -19,7 +19,7 @@ export default function () {
   const [step, setStep] = useState<Step>(Step.WELCOME)
   const [commandCopied, setCommandCopied] = useState<boolean>(false)

-  const command = 'ollama run llama3'
+  const command = 'ollama run llama3.1'

   return (
     <div className='drag'>