ak0601 committed on
Commit
b689891
·
verified ·
1 Parent(s): 595b5e1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +190 -79
app.py CHANGED
@@ -1,91 +1,202 @@
1
- from fastapi import FastAPI, Request, Form, UploadFile, File
2
- from fastapi.templating import Jinja2Templates
3
- from fastapi.responses import HTMLResponse, RedirectResponse
4
- from fastapi.staticfiles import StaticFiles
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
  from dotenv import load_dotenv
6
- import os, io
7
  from PIL import Image
8
- import markdown
9
  import google.generativeai as genai
 
 
10
 
11
# Read the Google API key from the environment / .env file and
# configure the Gemini client before the app starts serving.
load_dotenv()
API_KEY = os.getenv("GOOGLE_API_KEY")
genai.configure(api_key=API_KEY)

app = FastAPI()
templates = Jinja2Templates(directory="templates")
app.mount("/static", StaticFiles(directory="static"), name="static")

model = genai.GenerativeModel('gemini-2.0-flash')

# Module-level state: a lazily-created Gemini chat session and the
# message list rendered by the template.
chat = None
chat_history = []
25
-
26
@app.get("/", response_class=HTMLResponse)
async def root(request: Request):
    """Render the chat page with the accumulated conversation history."""
    context = {"request": request, "chat_history": chat_history}
    return templates.TemplateResponse("index.html", context)
32
-
33
@app.post("/", response_class=HTMLResponse)
async def handle_input(
    request: Request,
    user_input: str = Form(...),
    image: UploadFile = File(None)
):
    """Handle one chat submission (text plus optional image).

    Appends the user turn and the model turn to the global history,
    then redirects back to "/" (Post-Redirect-Get).
    """
    global chat, chat_history

    # Lazily start the Gemini chat session on first use.
    if chat is None:
        chat = model.start_chat(history=[])

    parts = [user_input] if user_input else []

    # Text shown for this turn in the UI.
    user_message = user_input

    if image and image.content_type.startswith("image/"):
        data = await image.read()
        try:
            parts.append(Image.open(io.BytesIO(data)))
            user_message += " [Image uploaded]"  # Indicate image in chat history
        except Exception as e:
            chat_history.append({
                "role": "model",
                "content": markdown.markdown(f"**Error loading image:** {e}"),
            })
            return RedirectResponse("/", status_code=303)

    # Store user message for display.
    chat_history.append({"role": "user", "content": user_message})

    try:
        # Send the assembled parts to the Gemini model.
        resp = chat.send_message(parts)
        chat_history.append({"role": "model", "content": resp.text})
    except Exception as e:
        chat_history.append({
            "role": "model",
            "content": markdown.markdown(f"**Error:** {e}"),
        })

    # Post-Redirect-Get keeps a browser refresh from re-submitting the form.
    return RedirectResponse("/", status_code=303)
84
-
85
# Clear chat history and start fresh.
@app.post("/new")
async def new_chat():
    """Drop the current session and history, then redirect home."""
    global chat, chat_history
    chat = None
    del chat_history[:]
    return RedirectResponse("/", status_code=303)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # from fastapi import FastAPI, Request, Form, UploadFile, File
2
+ # from fastapi.templating import Jinja2Templates
3
+ # from fastapi.responses import HTMLResponse, RedirectResponse
4
+ # from fastapi.staticfiles import StaticFiles
5
+ # from dotenv import load_dotenv
6
+ # import os, io
7
+ # from PIL import Image
8
+ # import markdown
9
+ # import google.generativeai as genai
10
+
11
+ # # Load environment variable
12
+ # load_dotenv()
13
+ # API_KEY = os.getenv("GOOGLE_API_KEY")
14
+ # genai.configure(api_key=API_KEY)
15
+
16
+ # app = FastAPI()
17
+ # templates = Jinja2Templates(directory="templates")
18
+ # app.mount("/static", StaticFiles(directory="static"), name="static")
19
+
20
+ # model = genai.GenerativeModel('gemini-2.0-flash')
21
+
22
+ # # Create a global chat session
23
+ # chat = None
24
+ # chat_history = []
25
+
26
+ # @app.get("/", response_class=HTMLResponse)
27
+ # async def root(request: Request):
28
+ # return templates.TemplateResponse("index.html", {
29
+ # "request": request,
30
+ # "chat_history": chat_history,
31
+ # })
32
+
33
+ # @app.post("/", response_class=HTMLResponse)
34
+ # async def handle_input(
35
+ # request: Request,
36
+ # user_input: str = Form(...),
37
+ # image: UploadFile = File(None)
38
+ # ):
39
+ # global chat, chat_history
40
+
41
+ # # Initialize chat session if needed
42
+ # if chat is None:
43
+ # chat = model.start_chat(history=[])
44
+
45
+ # parts = []
46
+ # if user_input:
47
+ # parts.append(user_input)
48
+
49
+ # # For display in the UI
50
+ # user_message = user_input
51
+
52
+ # if image and image.content_type.startswith("image/"):
53
+ # data = await image.read()
54
+ # try:
55
+ # img = Image.open(io.BytesIO(data))
56
+ # parts.append(img)
57
+ # user_message += " [Image uploaded]" # Indicate image in chat history
58
+ # except Exception as e:
59
+ # chat_history.append({
60
+ # "role": "model",
61
+ # "content": markdown.markdown(f"**Error loading image:** {e}")
62
+ # })
63
+ # return RedirectResponse("/", status_code=303)
64
+
65
+ # # Store user message for display
66
+ # chat_history.append({"role": "user", "content": user_message})
67
+
68
+ # try:
69
+ # # Send message to Gemini model
70
+ # resp = chat.send_message(parts)
71
+ # # Add model response to history
72
+ # raw = resp.text
73
+ # chat_history.append({"role": "model", "content": raw})
74
+
75
+ # except Exception as e:
76
+ # err = f"**Error:** {e}"
77
+ # chat_history.append({
78
+ # "role": "model",
79
+ # "content": markdown.markdown(err)
80
+ # })
81
+
82
+ # # Post-Redirect-Get
83
+ # return RedirectResponse("/", status_code=303)
84
+
85
+ # # Clear chat history and start fresh
86
+ # @app.post("/new")
87
+ # async def new_chat():
88
+ # global chat, chat_history
89
+ # chat = None
90
+ # chat_history.clear()
91
+ # return RedirectResponse("/", status_code=303)
92
+
93
+
94
import os
import io
import streamlit as st
from dotenv import load_dotenv
from PIL import Image
import google.generativeai as genai
from langgraph.graph import StateGraph, END
# BUG FIX: `typing` has no `TypingDict` — importing it raises ImportError.
from typing import TypedDict, List, Union

# Load the Google API key from the environment / .env file.
load_dotenv()
API_KEY = os.getenv("GOOGLE_API_KEY")
# BUG FIX: the google-generativeai package exposes `configure`, not `config`.
genai.configure(api_key=API_KEY)

model = genai.GenerativeModel("gemini-2.0-flash")
108
+
109
class ChatState(TypedDict):
    """State dictionary passed between the LangGraph nodes."""

    user_input: str                   # text typed by the user this turn
    image: Union[Image.Image, None]   # optional uploaded image
    raw_response: str                 # raw model output (or error text)
    final_response: str               # cleaned-up text shown in the UI
    chat_history: List[dict]          # running conversation for the UI
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
115
 
116
+
117
def input_node(state: ChatState) -> ChatState:
    """Entry node of the graph: forwards the state unchanged."""
    return state
119
+
120
def processing_node(state: ChatState) -> ChatState:
    """Send the user's text (and optional image) to Gemini.

    On success the reply text is stored in ``state["raw_response"]``;
    on failure the error message is stored there instead, so the
    downstream node always has something to display.
    """
    parts = [state["user_input"]]
    if state["image"]:
        parts.append(state["image"])

    try:
        # NOTE(review): a fresh chat is started every turn, so earlier
        # turns are not replayed to the model — confirm this is intended.
        chat = model.start_chat(history=[])
        resp = chat.send_message(parts)
        # BUG FIX: the successful response was never written back to the
        # state, leaving raw_response empty on every successful call.
        state["raw_response"] = resp.text
    except Exception as e:
        state["raw_response"] = f"Error: {e}"

    return state
133
+
134
+
135
def checking_node(state: ChatState) -> ChatState:
    """Strip boilerplate lead-ins from the model reply and record history."""
    raw = state["raw_response"]

    if "Sure!" in raw or "The image shows" in raw or raw.startswith("I can see"):
        lines = raw.split("\n")
        # BUG FIX: the trigger above matches replies starting with
        # "I can see", but the filter never removed those lines; drop
        # all three boilerplate patterns consistently.
        filtered_lines = [
            line for line in lines
            if not line.startswith(("Sure!", "I can see"))
            and "The image shows" not in line
        ]
        state["final_response"] = "\n".join(filtered_lines).strip()
    else:
        state["final_response"] = raw

    # Append both turns to the Streamlit session history for rendering.
    # NOTE(review): this writes st.session_state.chat_history directly
    # rather than state["chat_history"] — they alias the same list only
    # because the caller passes the session list in; confirm.
    st.session_state.chat_history.append({"role": "user", "content": state["user_input"]})
    st.session_state.chat_history.append({"role": "model", "content": state["final_response"]})

    return state
149
+
150
# Wire the three nodes into a linear LangGraph pipeline:
#   input -> processing -> checking -> END
builder = StateGraph(ChatState)
for node_name, node_fn in (
    ("input", input_node),
    ("processing", processing_node),
    ("checking", checking_node),
):
    builder.add_node(node_name, node_fn)

builder.set_entry_point("input")
builder.add_edge("input", "processing")
builder.add_edge("processing", "checking")
builder.add_edge("checking", END)

graph = builder.compile()
162
+
163
st.set_page_config(page_title="Math Chatbot", layout="centered")
# BUG FIX: Streamlit has no st.chatbot(); use st.title() for the page heading.
st.title("Math Chatbot")

# BUG FIX: chat_history must exist in session state before it is read
# (first run would raise AttributeError), and the attribute is
# st.session_state — the original read st.session_session.
if "chat_history" not in st.session_state:
    st.session_state.chat_history = []

# Re-render the running conversation on every rerun.
for msg in st.session_state.chat_history:
    with st.chat_message(msg["role"]):
        st.markdown(msg["content"])

with st.sidebar:
    st.header("Options")
    if st.button("New Chat"):
        # BUG FIX: the original assigned to the misspelled key
        # "chat_historyb", so the visible history was never cleared.
        st.session_state.chat_history = []
        st.rerun()


with st.form("chat_form", clear_on_submit=True):
    user_input = st.text_input("Your message:", placeholder="Ask your math problem here")
    uploaded_file = st.file_uploader("Upload an image", type=["jpg", "png", "jpeg"])
    submitted = st.form_submit_button("Send")

if submitted:
    image = None
    if uploaded_file:
        try:
            image = Image.open(io.BytesIO(uploaded_file.read()))
        except Exception as e:
            st.error(f"Error loading image: {e}")
            st.stop()

    # Seed the graph with this turn's input; nodes fill in the responses.
    input_state = {
        "user_input": user_input,
        "image": image,
        "raw_response": "",
        "final_response": "",
        "chat_history": st.session_state.chat_history,
    }

    output = graph.invoke(input_state)

    with st.chat_message("model"):
        st.markdown(output["final_response"])