Adding Azure AI Search as the vector store

2025-05-16 22:01:05 -05:00
parent 226b51a6a1
commit 3beb160c22
18 changed files with 2751 additions and 96 deletions
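Only two of the 18 changed files appear in this excerpt. The visible change swaps the retriever in app/rag_chain.py from the local vectordb.vector_store module to a new vectordb.azure_search module. That module itself is not shown here; the sketch below is a minimal guess at what it exposes, assuming LangChain's AzureSearch wrapper and Ollama embeddings (the embedding model, index name, and environment variables are placeholders, not taken from the commit).

# Hypothetical sketch of vectordb/azure_search.py; names and config are assumptions.
import os

from langchain_community.embeddings import OllamaEmbeddings
from langchain_community.vectorstores.azuresearch import AzureSearch

_store = AzureSearch(
    azure_search_endpoint=os.environ["AZURE_SEARCH_ENDPOINT"],  # assumed env var
    azure_search_key=os.environ["AZURE_SEARCH_KEY"],            # assumed env var
    index_name="rag-index",                                     # assumed index name
    embedding_function=OllamaEmbeddings(model="nomic-embed-text").embed_query,
)

def retrieve(query, k):
    # Return a list of LangChain Document objects, which is how the updated
    # get_rag_response consumes the results (doc.page_content).
    return _store.similarity_search(query, k=k)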

app/rag_chain.py

@@ -1,5 +1,5 @@
 from llm.ollama import load_llm
-from vectordb.vector_store import retrieve
+from vectordb.azure_search import retrieve
 from langchain.prompts import PromptTemplate
 from langchain_core.output_parsers import StrOutputParser
@@ -16,24 +16,29 @@ prompt = PromptTemplate(
     input_variables=["question", "documents"],
 )
 
-def get_rag_response(query):
-    print("⌄⌄⌄⌄ Retrieving ⌄⌄⌄⌄")
-    retrieved_docs, metadata = retrieve(query, 10)
-    print("Query Found %d documents." % len(retrieved_docs[0]))
-    for meta in metadata[0]:
-        print("Metadata: ", meta)
-    print("⌃⌃⌃⌃ Retrieving ⌃⌃⌃⌃")
-    print("⌄⌄⌄⌄ Augmented Prompt ⌄⌄⌄⌄")
-    llm = load_llm()
-    # Create a chain combining the prompt template and LLM
-    rag_chain = prompt | llm | StrOutputParser()
-    context = " ".join(retrieved_docs[0]) if retrieved_docs else "No relevant documents found."
-    print("⌃⌃⌃⌃ Augmented Prompt ⌃⌃⌃⌃")
-    print("⌄⌄⌄⌄ Generation ⌄⌄⌄⌄")
-    response = rag_chain.invoke({"question": query, "context": context});
-    print(response)
-    print("⌃⌃⌃⌃ Generation ⌃⌃⌃⌃")
-    return response
+def get_rag_response(query):
+    print("⌄⌄⌄⌄ Retrieving ⌄⌄⌄⌄")
+    retrieved_docs = retrieve(query, 10)
+    print("Query found %d documents." % len(retrieved_docs))
+    print("⌃⌃⌃⌃ Retrieving ⌃⌃⌃⌃")
+    print("⌄⌄⌄⌄ Augmented Prompt ⌄⌄⌄⌄")
+    llm = load_llm()
+    # Create a chain combining the prompt template and LLM
+    rag_chain = prompt | llm | StrOutputParser()
+    # Azure Search retrieve returns Document objects; join their page_content
+    # into a single context string.
+    context = (
+        " ".join(doc.page_content for doc in retrieved_docs)
+        if retrieved_docs
+        else "No relevant documents found."
+    )
+    print("⌃⌃⌃⌃ Augmented Prompt ⌃⌃⌃⌃")
+    print("⌄⌄⌄⌄ Generation ⌄⌄⌄⌄")
+    response = rag_chain.invoke({"question": query, "context": context})
+    print(response)
+    print("⌃⌃⌃⌃ Generation ⌃⌃⌃⌃")
+    return response
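The rewritten get_rag_response treats retrieve's result as a flat list of LangChain Document objects (hence doc.page_content) instead of the old parallel lists of texts and metadata. Retrieval only returns results once the index is populated, which presumably happens in one of the changed files not shown in this excerpt. A self-contained ingestion sketch under the same assumptions as above:

# Hypothetical ingestion sketch; the commit's actual indexing code is in
# files not shown here.
import os

from langchain_community.embeddings import OllamaEmbeddings
from langchain_community.vectorstores.azuresearch import AzureSearch
from langchain_core.documents import Document

store = AzureSearch(
    azure_search_endpoint=os.environ["AZURE_SEARCH_ENDPOINT"],
    azure_search_key=os.environ["AZURE_SEARCH_KEY"],
    index_name="rag-index",  # assumed index name, matching the sketch above
    embedding_function=OllamaEmbeddings(model="nomic-embed-text").embed_query,
)

# add_documents embeds each Document and uploads it to the Azure AI Search index.
store.add_documents(
    documents=[
        Document(
            page_content="Azure AI Search supports vector and hybrid queries.",
            metadata={"source": "example.md"},  # placeholder document
        )
    ]
)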

Streamlit UI (file path not shown)

@@ -4,6 +4,6 @@ from app.rag_chain import get_rag_response
 st.title("RAG System")
 query = st.text_input("Ask a question:")
 if query:
-  response = get_rag_response(query)
-  st.write("### Response:")
-  st.write(response)
+    response = get_rag_response(query)
+    st.write("### Response:")
+    st.write(response)
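A quick way to exercise the updated chain without the Streamlit front end (the question string is a placeholder; this assumes Ollama is running and the index is populated):

# Hypothetical smoke test for the updated RAG chain.
from app.rag_chain import get_rag_response

answer = get_rag_response("What do the indexed documents say about Azure AI Search?")
print(answer)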