shanusherly committed on
Commit
b10bbfb
·
verified ·
1 Parent(s): 75d9e55

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +37 -82
app.py CHANGED
@@ -1,35 +1,25 @@
1
  import os
2
- import re
3
- import requests
4
- import json
5
  import gradio as gr
6
-
7
- # Google Gemini imports
8
  import google.generativeai as genai
9
 
10
- # LangChain imports
11
  from langchain_core.prompts import PromptTemplate
12
- from langchain_classic.chains import LLMChain
13
  from langchain_classic.memory import ConversationBufferMemory
14
 
15
- # -----------------------------
16
- # READ API KEYS FROM ENVIRONMENT
17
- # -----------------------------
18
  GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY")
19
  ELEVENLABS_API_KEY = os.environ.get("ELEVENLABS_API_KEY")
20
-
21
- # ElevenLabs Voice ID
22
  ELEVENLABS_VOICE_ID = "21m00Tcm4TlvDq8ikWAM"
23
 
24
  # Configure Gemini
25
  genai.configure(api_key=GEMINI_API_KEY)
26
 
27
- print("βœ… API keys loaded safely from environment!")
28
-
29
- # -----------------------------
30
- # PROMPT + MEMORY
31
- # -----------------------------
32
- template = """You are a helpful assistant to answer user queries.
33
  {chat_history}
34
  User: {user_message}
35
  Chatbot:"""
@@ -41,12 +31,10 @@ prompt = PromptTemplate(
41
 
42
  memory = ConversationBufferMemory(memory_key="chat_history")
43
 
44
- print("βœ… Prompt template created!")
45
-
46
- # -----------------------------
47
- # CUSTOM GEMINI WRAPPER
48
- # -----------------------------
49
- gemini_model = genai.GenerativeModel('gemini-2.5-flash')
50
 
51
  class GeminiLLM:
52
  def __init__(self, model):
@@ -56,17 +44,15 @@ class GeminiLLM:
56
  def predict(self, user_message):
57
  full_prompt = "You are a helpful assistant.\n"
58
  for msg in self.memory_history:
59
- full_prompt += f"{msg}\n"
60
  full_prompt += f"User: {user_message}\nChatbot:"
61
 
62
  response = self.model.generate_content(full_prompt)
63
  answer = response.text
64
 
65
- # Add to memory
66
  self.memory_history.append(f"User: {user_message}")
67
  self.memory_history.append(f"Chatbot: {answer}")
68
 
69
- # Keep last 20 messages
70
  if len(self.memory_history) > 20:
71
  self.memory_history = self.memory_history[-20:]
72
 
@@ -74,76 +60,47 @@ class GeminiLLM:
74
 
75
  llm_chain = GeminiLLM(gemini_model)
76
 
77
- print("βœ… Gemini wrapper initialized!")
78
-
79
- # -----------------------------
80
- # ELEVENLABS AUDIO FUNCTION
81
- # -----------------------------
82
  def generate_audio_elevenlabs(text):
83
  from elevenlabs.client import ElevenLabs
84
  from elevenlabs import save
85
 
86
  try:
87
  client = ElevenLabs(api_key=ELEVENLABS_API_KEY)
88
-
89
  audio = client.generate(
90
  text=text,
91
  voice=ELEVENLABS_VOICE_ID,
92
  model="eleven_monolingual_v1"
93
  )
94
 
95
- output_path = f"/tmp/output_{abs(hash(text)) % 100000}.mp3"
96
  save(audio, output_path)
97
-
98
- return {
99
- "type": "SUCCESS",
100
- "response": output_path
101
- }
102
 
103
  except Exception as e:
104
- return {
105
- "type": "ERROR",
106
- "response": str(e)
107
- }
108
-
109
- print("βœ… ElevenLabs audio function ready!")
110
-
111
- # -----------------------------
112
- # TEXT RESPONSE
113
- # -----------------------------
114
- def get_text_response(user_message):
115
- try:
116
- return llm_chain.predict(user_message=user_message)
117
- except Exception as e:
118
- return f"Error: {str(e)}"
119
-
120
- # -----------------------------
121
- # COMBINED RESPONSE
122
- # -----------------------------
123
- def get_text_response_and_audio_response(user_message):
124
- text_response = get_text_response(user_message)
125
- audio_reply = generate_audio_elevenlabs(text_response)
126
-
127
- return {
128
- "text": text_response,
129
- "audio_path": audio_reply.get("response", "")
130
- }
131
-
132
- # -----------------------------
133
- # MAIN CHATBOT RESPONSE HANDLER
134
- # -----------------------------
135
  def chat_bot_response(message, history):
136
- try:
137
- result = get_text_response_and_audio_response(message)
138
- return result["text"]
139
- except Exception as e:
140
- return f"Error: {str(e)}"
141
 
142
- print("βœ… Chatbot handler ready!")
143
-
144
- # -----------------------------
145
- # UI (UNCHANGED)
146
- # -----------------------------
147
  demo = gr.ChatInterface(
148
  fn=chat_bot_response,
149
  title="πŸ€– Gemini + ElevenLabs Chatbot",
@@ -158,7 +115,5 @@ demo = gr.ChatInterface(
158
  theme=gr.themes.Soft()
159
  )
160
 
161
- print("βœ… Gradio interface created!")
162
-
163
  if __name__ == "__main__":
164
  demo.launch(debug=True, share=True)
 
1
  import os
 
 
 
2
  import gradio as gr
3
+ import requests
 
4
  import google.generativeai as genai
5
 
 
6
  from langchain_core.prompts import PromptTemplate
 
7
  from langchain_classic.memory import ConversationBufferMemory
8
 
9
+ # ------------------------------------
10
+ # Load API keys from environment
11
+ # ------------------------------------
12
  GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY")
13
  ELEVENLABS_API_KEY = os.environ.get("ELEVENLABS_API_KEY")
 
 
14
  ELEVENLABS_VOICE_ID = "21m00Tcm4TlvDq8ikWAM"
15
 
16
  # Configure Gemini
17
  genai.configure(api_key=GEMINI_API_KEY)
18
 
19
+ # ------------------------------------
20
+ # Prompt + Memory
21
+ # ------------------------------------
22
+ template = """You are a helpful assistant.
 
 
23
  {chat_history}
24
  User: {user_message}
25
  Chatbot:"""
 
31
 
32
  memory = ConversationBufferMemory(memory_key="chat_history")
33
 
34
+ # ------------------------------------
35
+ # Gemini Wrapper
36
+ # ------------------------------------
37
+ gemini_model = genai.GenerativeModel("gemini-2.5-flash")
 
 
38
 
39
  class GeminiLLM:
40
  def __init__(self, model):
 
44
  def predict(self, user_message):
45
  full_prompt = "You are a helpful assistant.\n"
46
  for msg in self.memory_history:
47
+ full_prompt += msg + "\n"
48
  full_prompt += f"User: {user_message}\nChatbot:"
49
 
50
  response = self.model.generate_content(full_prompt)
51
  answer = response.text
52
 
 
53
  self.memory_history.append(f"User: {user_message}")
54
  self.memory_history.append(f"Chatbot: {answer}")
55
 
 
56
  if len(self.memory_history) > 20:
57
  self.memory_history = self.memory_history[-20:]
58
 
 
60
 
61
  llm_chain = GeminiLLM(gemini_model)
62
 
63
+ # ------------------------------------
64
+ # ElevenLabs Audio (Hugging Face friendly)
65
+ # ------------------------------------
 
 
def generate_audio_elevenlabs(text):
    """Synthesize *text* to speech via ElevenLabs and save it as an MP3.

    Returns the path of the saved file on success, or "" on any failure
    (missing/invalid API key, network error, quota, ...). Callers treat an
    empty string as "no audio available", so errors degrade to text-only
    instead of crashing the chat.
    """
    # Imported lazily (matching the original) so the app still starts when
    # the elevenlabs package is absent; only audio generation is affected.
    from elevenlabs.client import ElevenLabs
    from elevenlabs import save
    import hashlib

    try:
        client = ElevenLabs(api_key=ELEVENLABS_API_KEY)
        audio = client.generate(
            text=text,
            voice=ELEVENLABS_VOICE_ID,
            model="eleven_monolingual_v1"
        )

        # Name the file from a stable digest of the text instead of hash():
        # str hashing is salted per process (PYTHONHASHSEED), so the old
        # scheme produced a different path for identical text on every run.
        # md5 is used as a cache key here, not as a security boundary.
        digest = hashlib.md5(text.encode("utf-8")).hexdigest()[:10]
        output_path = f"/tmp/audio_{digest}.mp3"
        save(audio, output_path)
        return output_path

    except Exception as e:
        # Best-effort: log and return the empty-string sentinel rather than
        # raising, so the text reply is still delivered to the user.
        print("Audio error:", e)
        return ""
85
+
86
+ # ------------------------------------
87
+ # Combined response
88
+ # ------------------------------------
89
+ def get_response_and_audio(message):
90
+ text = llm_chain.predict(message)
91
+ audio_path = generate_audio_elevenlabs(text)
92
+ return text, audio_path
93
+
94
+ # ------------------------------------
95
+ # Gradio ChatHandler (UI unchanged)
96
+ # ------------------------------------
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
97
  def chat_bot_response(message, history):
98
+ text, audio_path = get_response_and_audio(message)
99
+ return text
 
 
 
100
 
101
+ # ------------------------------------
102
+ # UI (same as your original)
103
+ # ------------------------------------
 
 
104
  demo = gr.ChatInterface(
105
  fn=chat_bot_response,
106
  title="πŸ€– Gemini + ElevenLabs Chatbot",
 
115
  theme=gr.themes.Soft()
116
  )
117
 
 
 
118
  if __name__ == "__main__":
119
  demo.launch(debug=True, share=True)