add demo video
parent e863066144
commit 155c1640f1
2 changed files with 10 additions and 6 deletions
@@ -1,6 +1,8 @@
-# privateGPT with Llama 2 Uncensored
+# PrivateGPT with Llama 2 uncensored
 
-> Note: this example is a simplified version of [PrivateGPT](https://github.com/imartinez/privateGPT) that works with Llama 2 Uncensored.
+https://github.com/jmorganca/ollama/assets/3325447/20cf8ec6-ff25-42c6-bdd8-9be594e3ce1b
+
+> Note: this example is a simplified version of [PrivateGPT](https://github.com/imartinez/privateGPT) that works with Llama 2 Uncensored. All credit for PrivateGPT goes to Iván Martínez who is the creator of it.
 
 ### Setup
 
@@ -23,7 +25,7 @@ Pull the model you'd like to use:
 ollama pull llama2-uncensored
 ```
 
-### Getting WeWork's latest quarterly report
+### Getting WeWork's latest quarterly earnings report (10-Q)
 
 ```
 mkdir source_documents
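The hunk above covers the README's setup steps: pulling the model and creating `source_documents` to hold WeWork's 10-Q. For readers scripting that step, here is a minimal Python sketch; the report URL below is a placeholder for illustration, not the link from the example's README:

```python
import os
import urllib.request

# Placeholder URL only; substitute the actual 10-Q link from the README.
REPORT_URL = "https://example.com/wework-10q.pdf"

os.makedirs("source_documents", exist_ok=True)  # equivalent of `mkdir source_documents`
urllib.request.urlretrieve(REPORT_URL, "source_documents/wework-10q.pdf")
```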
@@ -3,12 +3,15 @@ from langchain.chains import RetrievalQA
 from langchain.embeddings import HuggingFaceEmbeddings
 from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
 from langchain.vectorstores import Chroma
-from langchain.llms import GPT4All, Ollama
+from langchain.llms import Ollama
 import os
 import argparse
 import time
 
 model = os.environ.get("MODEL", "llama2-uncensored")
+# For embeddings model, the example uses a sentence-transformers model
+# https://www.sbert.net/docs/pretrained_models.html
+# "The all-mpnet-base-v2 model provides the best quality, while all-MiniLM-L6-v2 is 5 times faster and still offers good quality."
 embeddings_model_name = os.environ.get("EMBEDDINGS_MODEL_NAME", "all-MiniLM-L6-v2")
 persist_directory = os.environ.get("PERSIST_DIRECTORY", "db")
 target_source_chunks = int(os.environ.get('TARGET_SOURCE_CHUNKS',4))
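This hunk drops the unused GPT4All backend from the imports and documents the sentence-transformers embedding model. As a rough sketch (not part of the commit) of how these module-level settings are typically wired together with the legacy `langchain` API the script uses:

```python
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import Chroma
from langchain.llms import Ollama

# Embed documents with sentence-transformers and generate answers with the
# Ollama-served model, mirroring the environment-variable defaults above.
embeddings = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
db = Chroma(persist_directory="db", embedding_function=embeddings)
llm = Ollama(model="llama2-uncensored")
```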
@@ -44,7 +47,6 @@ def main():
         # Print the result
         print("\n\n> Question:")
         print(query)
-        print(f"\n> Answer (took {round(end - start, 2)} s.):")
         print(answer)
 
         # Print the relevant sources used for the answer
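The deleted line removes the timing readout from the answer header. For context, here is a sketch of the query loop those prints sit in, assuming the standard `RetrievalQA` wiring from upstream PrivateGPT and the `llm`, `db`, and `target_source_chunks` names from the sketch above:

```python
from langchain.chains import RetrievalQA

qa = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type="stuff",
    retriever=db.as_retriever(search_kwargs={"k": target_source_chunks}),
    return_source_documents=True,
)
while True:
    query = input("\nEnter a query: ")
    if query == "exit":
        break
    res = qa(query)  # run retrieval plus generation
    answer, docs = res["result"], res["source_documents"]
    # ...the prints shown in the hunk above then report `query` and `answer`.
```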