Create ingest.py
ingest.py (ADDED)
from langchain.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import SentenceTransformerEmbeddings
from langchain.vectorstores import Chroma
import os
from constants import CHROMA_SETTINGS

persist_directory = "db"

def main():
    # Collect pages from every PDF found under the docs/ directory
    documents = []
    for root, dirs, files in os.walk("docs"):
        for file in files:
            if file.endswith(".pdf"):
                print(file)
                loader = PyPDFLoader(os.path.join(root, file))
                documents.extend(loader.load())

    print("splitting into chunks")
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100)
    texts = text_splitter.split_documents(documents)

    # create embeddings here
    print("Loading sentence transformers model")
    embeddings = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")

    # create vector store here
    print("Creating embeddings. May take some minutes...")
    db = Chroma.from_documents(texts, embeddings, persist_directory=persist_directory, client_settings=CHROMA_SETTINGS)
    db.persist()
    db = None  # release the handle so the store is flushed to disk

    print("Ingestion complete! You can now run privateGPT.py to query your documents")

if __name__ == "__main__":
    main()
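The script imports CHROMA_SETTINGS from a constants module that is not included in this commit. A minimal sketch of what such a file often looks like in privateGPT-style projects, assuming the older chromadb Settings API; the names and values below are assumptions, not part of this commit:

# constants.py (assumed, not part of this commit)
from chromadb.config import Settings

CHROMA_SETTINGS = Settings(
    chroma_db_impl="duckdb+parquet",   # assumed on-disk backend
    persist_directory="db",            # should match persist_directory in ingest.py
    anonymized_telemetry=False
)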
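The final message points to privateGPT.py, which is not shown here. As an illustration only, the persisted store could be reopened for retrieval with the same embedding model, roughly like this (the query text and k value are placeholders):

from langchain.embeddings import SentenceTransformerEmbeddings
from langchain.vectorstores import Chroma
from constants import CHROMA_SETTINGS

# Reopen the persisted index with the same embedding model used at ingest time
embeddings = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
db = Chroma(persist_directory="db", embedding_function=embeddings, client_settings=CHROMA_SETTINGS)
retriever = db.as_retriever(search_kwargs={"k": 4})

for doc in retriever.get_relevant_documents("your question here"):
    print(doc.metadata.get("source"), doc.page_content[:200])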