initial commit

2025-05-01 12:21:47 -05:00
parent 2b9c4289e7
commit 226b51a6a1
18 changed files with 13479 additions and 0 deletions

0
app/__init__.py Normal file

39
app/rag_chain.py Normal file

@@ -0,0 +1,39 @@
from llm.ollama import load_llm
from vectordb.vector_store import retrieve
from langchain.prompts import PromptTemplate
from langchain_core.output_parsers import StrOutputParser

# Define the prompt template for the LLM
prompt = PromptTemplate(
    template="""You are an assistant for question-answering tasks.
Use the following context to answer the question.
If you don't know the answer, just say that you don't know.
Use three sentences maximum and keep the answer concise:
Question: {question}
Context: {context}
Answer:
""",
    input_variables=["question", "context"],
)

def get_rag_response(query):
    # Retrieve the top matching documents and their metadata for the query
    print("⌄⌄⌄⌄ Retrieving ⌄⌄⌄⌄")
    retrieved_docs, metadata = retrieve(query, 10)
    print("Query found %d documents." % len(retrieved_docs[0]))
    for meta in metadata[0]:
        print("Metadata: ", meta)
    print("⌃⌃⌃⌃ Retrieving ⌃⌃⌃⌃")

    # Build the augmented prompt: join the retrieved documents into one context string
    print("⌄⌄⌄⌄ Augmented Prompt ⌄⌄⌄⌄")
    llm = load_llm()
    # Create a chain combining the prompt template and LLM
    rag_chain = prompt | llm | StrOutputParser()
    context = " ".join(retrieved_docs[0]) if retrieved_docs else "No relevant documents found."
    print("⌃⌃⌃⌃ Augmented Prompt ⌃⌃⌃⌃")

    # Generate the answer from the augmented prompt
    print("⌄⌄⌄⌄ Generation ⌄⌄⌄⌄")
    response = rag_chain.invoke({"question": query, "context": context})
    print(response)
    print("⌃⌃⌃⌃ Generation ⌃⌃⌃⌃")
    return response
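
The two local modules imported at the top of this file, llm/ollama.py (load_llm) and vectordb/vector_store.py (retrieve), belong to the same commit but are not shown in this excerpt. The sketch below is a hypothetical reconstruction of their interfaces, inferred only from the call sites above; the langchain-ollama dependency, the default model name, and the placeholder retrieval results are assumptions, not the actual implementation in this commit.

# Hypothetical sketch of load_llm() and retrieve(), inferred from how
# app/rag_chain.py calls them. Assumes the langchain-ollama package is
# installed and a local Ollama server is running.
from langchain_ollama import OllamaLLM

def load_llm(model_name: str = "llama3"):
    """Return a LangChain-compatible LLM backed by a local Ollama model."""
    return OllamaLLM(model=model_name)

def retrieve(query: str, top_k: int):
    """Return (documents, metadata) for the query.

    get_rag_response() indexes both values with [0], so each is assumed to be
    a list of per-query result lists: documents[0] holds the text chunks and
    metadata[0] the matching metadata dicts for this single query.
    """
    # Placeholder: a real implementation would embed the query and search a
    # vector store (Milvus, Chroma, FAISS, ...) for the top_k nearest chunks.
    documents = [["chunk one ...", "chunk two ..."][:top_k]]
    metadata = [[{"source": "doc1.pdf"}, {"source": "doc2.pdf"}][:top_k]]
    return documents, metadata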

9
app/streamlit_app.py Normal file

@@ -0,0 +1,9 @@
import streamlit as st

from app.rag_chain import get_rag_response

st.title("RAG System")

query = st.text_input("Ask a question:")
if query:
    # Run the RAG pipeline and display the generated answer
    response = get_rag_response(query)
    st.write("### Response:")
    st.write(response)
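
To try the UI, the app would typically be launched from the repository root so the app package import resolves; a likely invocation (assuming Streamlit is installed and the Ollama server plus the vector store are available) is:

streamlit run app/streamlit_app.py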