-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathapp.py
More file actions
133 lines (112 loc) · 4.71 KB
/
app.py
File metadata and controls
133 lines (112 loc) · 4.71 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
import streamlit as st
import os
import asyncio
from langchain.prompts import PromptTemplate
from langchain.memory import ConversationBufferMemory
from langchain_chroma import Chroma
from langchain_community.embeddings.ollama import OllamaEmbeddings
from langchain_community.llms import Ollama
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain_ollama import OllamaLLM
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains import RetrievalQA
# Make sure the working directories exist before anything else touches them.
for directory in ('pdfFiles', 'vectorDB'):
    os.makedirs(directory, exist_ok=True)

# The running transcript lives in session state so it survives Streamlit reruns.
if 'chat_history' not in st.session_state:
    st.session_state.chat_history = []
# Conversation memory feeds the {history} slot of the prompt; keyed on the
# user's question so only that input is recorded.
if 'memory' not in st.session_state:
    st.session_state.memory = ConversationBufferMemory(
        memory_key="history",
        return_messages=True,
        input_key="question",
    )

# System prompt that restricts answers to retrieved textbook content.
if 'template' not in st.session_state:
    st.session_state.template = """You are a programming assistant that answers questions strictly based on the content provided from textbook PDF.
If you the related topics is in the textbook then use the content and give the answer using your knowledge.Otherwise If the information requested is not found in the textbook, respond with: "I'm sorry, I can only provide answers based on the textbook."
Context (from textbook only): {context}
User History: {history}
User: {question}
Assistant (based on textbook content only):"""

# Bind the template to the three variables the QA chain will supply.
if 'prompt' not in st.session_state:
    st.session_state.prompt = PromptTemplate(
        input_variables=["history", "context", "question"],
        template=st.session_state.template,
    )
# Path to the textbook PDF.
# NOTE(review): this constant is never referenced below — ingestion appears to
# happen in a separate script; confirm with that script before removing.
fixed_pdf_path = 'book.pdf'

# Load the persisted Chroma vector store exactly once per session.
if 'vectorstore' not in st.session_state:
    # BUG FIX: the 'vectorDB' directory is created unconditionally at startup
    # (os.makedirs(..., exist_ok=True)), so a bare os.path.exists() check was
    # always true and the "please ingest" error branch was unreachable even
    # when the store was empty. Require the directory to actually hold data.
    if os.path.isdir('vectorDB') and os.listdir('vectorDB'):
        st.session_state.vectorstore = Chroma(
            persist_directory='vectorDB',
            embedding_function=OllamaEmbeddings(model="llama3.1"),
        )
    else:
        st.error("Please run the PDF ingestion script to populate the vectorDB directory.")

# Expose a retriever over the store; MMR search keeps the 5 returned chunks
# diverse rather than near-duplicates.
if 'vectorstore' in st.session_state:
    st.session_state.retriever = st.session_state.vectorstore.as_retriever(
        search_type="mmr",
        search_kwargs={"k": 5},
    )
# Construct the streaming Ollama LLM once per session; token stream is echoed
# to stdout by the callback handler.
if "llm" not in st.session_state:
    st.session_state["llm"] = OllamaLLM(
        base_url="http://localhost:11434",
        model="qwen2.5-coder:7b",
        verbose=True,
        callbacks=[StreamingStdOutCallbackHandler()],
    )

# Wire LLM + retriever + prompt + memory into a "stuff"-type RetrievalQA chain
# (all retrieved chunks are stuffed into a single prompt).
if "qa_chain" not in st.session_state:
    inner_chain_kwargs = {
        "verbose": True,
        "prompt": st.session_state.prompt,
        "memory": st.session_state.memory,
    }
    st.session_state["qa_chain"] = RetrievalQA.from_chain_type(
        llm=st.session_state.llm,
        chain_type='stuff',
        retriever=st.session_state.retriever,
        verbose=True,
        chain_type_kwargs=inner_chain_kwargs,
    )
# Page chrome: dark theme injected as raw CSS (requires unsafe_allow_html).
_PAGE_CSS = """
<style>
body {background-color: #1a1a1a; color: white;}
.st-chat-message {border-radius: 10px; padding: 10px; margin: 5px 0;}
.st-chat-message-user {background-color: #4a4a4a; color: white;}
.st-chat-message-assistant {background-color: #2a2a2a; color: white;}
</style>
"""

st.set_page_config(page_title="CodeBuddy", layout="centered", page_icon="🔧")
st.markdown(_PAGE_CSS, unsafe_allow_html=True)
st.title("🔧 CodeBuddy: Your Programming Chatbot")
# Model picker.
# NOTE(review): the chosen value is only displayed — the chain's LLM is fixed
# to "qwen2.5-coder:7b" at session start. Confirm whether this radio is meant
# to re-create the LLM when changed.
model_options = [
    "qwen2.5-coder:7b",
    "llama3.1",
    "llava:latest",
    "codellama:7b",
    "deepseek-r1:8b",
]
selected_model = st.radio("Choose your AI Model:", model_options, index=0, horizontal=True)
st.write(f"### Selected Model: {selected_model}")

# Replay only the five most recent turns to keep the page short.
for turn in st.session_state.chat_history[-5:]:
    with st.chat_message(turn["role"]):
        st.markdown(turn["message"])
async def get_response(user_input):
    """Record the user's turn, run the QA chain, and render both messages.

    Declared ``async`` only because the entry point drives it with
    ``asyncio.run()``; the chain invocation itself is synchronous.
    """
    # Persist and echo the user's message.
    st.session_state.chat_history.append({"role": "user", "message": user_input})
    with st.chat_message("user"):
        st.markdown(user_input)

    # Run the chain and render the answer under a spinner.
    with st.chat_message("assistant"):
        with st.spinner("CodeBuddy is thinking..."):
            # RetrievalQA expects its input under the 'query' key.
            result = st.session_state.qa_chain.invoke({"query": user_input})
            answer = result["result"]
            st.markdown(answer)
    st.session_state.chat_history.append({"role": "assistant", "message": answer})
# Main input hook: each submitted question triggers one QA round-trip.
question = st.chat_input("Ask a question about the textbook:")
if question:
    asyncio.run(get_response(question))