Chandranshu Jain
committed on
Update app.py
app.py
CHANGED
@@ -11,10 +11,9 @@ from langchain.chains.question_answering import load_qa_chain
 from langchain.prompts import PromptTemplate
 from langchain_community.document_loaders import PyPDFLoader
 from langchain_chroma import Chroma
-from langchain_community.vectorstores import Chroma
 from langchain_community.embeddings import HuggingFaceEmbeddings
 # Load model directly
-from transformers import AutoModelForCausalLM
+#from transformers import AutoModelForCausalLM
 
 
 access_token = os.getenv("HUGGINGFACE_API_KEY")
@@ -67,7 +66,7 @@ def text_splitter(text):
 
 #GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")
 #COHERE_API_KEY = os.getenv("COHERE_API_KEY")
-
+HUGGINGFACE_API_KEY = os.getenv("HUGGINGFACE_API_KEY")
 
 def get_conversational_chain(retriever):
     prompt_template = """
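The commit drops the duplicate Chroma import in favor of langchain_chroma, comments out the unused transformers import, and reads the Hugging Face token into HUGGINGFACE_API_KEY. As a rough sketch only (not the Space's actual app.py), the retained imports could be wired together as below; the PDF path and embedding model name are placeholder assumptions.

import os

from langchain_chroma import Chroma
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.embeddings import HuggingFaceEmbeddings

# Read the Hugging Face token the same way the diff does (public embedding
# models may not need it).
HUGGINGFACE_API_KEY = os.getenv("HUGGINGFACE_API_KEY")

# Placeholder PDF path -- assumption, not taken from the Space.
docs = PyPDFLoader("example.pdf").load()

# Placeholder embedding model -- assumption, not taken from the Space.
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")

# Index the pages in the langchain_chroma Chroma store and expose a retriever,
# the object that get_conversational_chain(retriever) in the diff expects.
vector_store = Chroma.from_documents(documents=docs, embedding=embeddings)
retriever = vector_store.as_retriever()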