# Document loading is discussed in detail in this section
# Create embeddings of your documents to get ready for semantic search.
import os

import pinecone
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma, Pinecone

# SECURITY FIX: the original code shipped real API keys as in-source
# fallbacks. Secrets must only ever come from the environment — fail fast
# with a KeyError if they are missing rather than silently using a leaked
# credential. (The previously committed keys should be revoked/rotated.)
OPENAI_API_KEY = os.environ['OPENAI_API_KEY']
PINECONE_API_KEY = os.environ['PINECONE_API_KEY']
# The Pinecone environment/region is not a secret; a default is acceptable.
PINECONE_API_ENV = os.environ.get('PINECONE_API_ENV', 'northamerica-northeast1-gcp')

# Embedding model client used to vectorize documents and queries.
embeddings = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY)
# Document loading is discussed in detail in this section
# Run a semantic similarity search against the vector store and preview
# the best match. NOTE(review): `docsearch` is assumed to be the Pinecone
# (or Chroma) index built earlier in the tutorial — confirm it is defined
# before this point.
question = "What are examples of good data science teams ?"
top_matches = docsearch.similarity_search(question)
# Show only the first 450 characters of the highest-ranked document.
preview = top_matches[0].page_content[:450]
print(preview)
# Query those docs to get your answer back.
from langchain.chains.question_answering import load_qa_chain
from langchain.llms import OpenAI

# temperature=0 -> deterministic, repeatable completions from the LLM.
llm = OpenAI(openai_api_key=OPENAI_API_KEY, temperature=0)

# "stuff" chain type: concatenates every retrieved document into a single
# prompt (fine for small result sets, can overflow context for large ones).
chain = load_qa_chain(llm=llm, chain_type="stuff")

query = "what is the collect stage of data maturity ?"
# NOTE(review): `docsearch` must already exist (built from the documents
# earlier in the tutorial) — confirm before running this section.
docs = docsearch.similarity_search(query)
answer = chain.run(input_documents=docs, question=query)

print(f"Question:{query}")
print(f"Answer:{answer}")