flatten examples
parent 38f0c54c64
commit 2e339c2bab
5 changed files with 0 additions and 47 deletions
@@ -1,15 +0,0 @@
# Python

This is a simple example of calling the Ollama API from a Python app.

First, download a model:

```
curl -L https://huggingface.co/TheBloke/orca_mini_3B-GGML/resolve/main/orca-mini-3b.ggmlv3.q4_1.bin -o orca.bin
```

Then run it using the example script. You'll need to have Ollama running on your machine.

```
python3 main.py orca.bin
```
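The README assumes an Ollama server is already listening on its default address. As a quick, hypothetical sanity check (not part of this commit), you could confirm that something is reachable on localhost:11434, the same host and port the example script connects to:

```
import socket
import sys

# Attempt a TCP connection to the address main.py uses; bail out early if
# nothing is listening there, since the example's POST request would fail.
try:
    socket.create_connection(("localhost", 11434), timeout=2).close()
    print("Ollama appears to be reachable on localhost:11434")
except OSError:
    print("Could not reach localhost:11434 - is Ollama running?")
    sys.exit(1)
```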
@@ -1,32 +0,0 @@
import http.client
import json
import os
import sys

if len(sys.argv) < 2:
    print("Usage: python main.py <model file>")
    sys.exit(1)

conn = http.client.HTTPConnection('localhost', 11434)

headers = { 'Content-Type': 'application/json' }

# generate text from the model
conn.request("POST", "/api/generate", json.dumps({
    'model': os.path.join(os.getcwd(), sys.argv[1]),
    'prompt': 'write me a short story',
    'stream': True
}), headers)

response = conn.getresponse()

def parse_generate(data):
    for event in data.decode('utf-8').split("\n"):
        if not event:
            continue
        yield event

if response.status == 200:
    for chunk in response:
        for event in parse_generate(chunk):
            print(json.loads(event)['response'], end="", flush=True)
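As a side note, the same request can be sketched with the third-party requests library, which makes the line-by-line handling of the streamed JSON events a little more explicit. This is only an illustrative alternative under the same assumptions as main.py (local server on port 11434, model passed as a file path); it is not part of the commit:

```
import json
import os
import sys

import requests  # third-party dependency, not used by the original example

if len(sys.argv) < 2:
    print("Usage: python main.py <model file>")
    sys.exit(1)

# Mirror main.py: stream tokens from /api/generate and print each event's 'response' field.
resp = requests.post(
    "http://localhost:11434/api/generate",
    json={
        "model": os.path.join(os.getcwd(), sys.argv[1]),
        "prompt": "write me a short story",
        "stream": True,
    },
    stream=True,
)

if resp.status_code == 200:
    for line in resp.iter_lines():
        if line:
            print(json.loads(line)["response"], end="", flush=True)
```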