Arif committed on
Commit
24b4795
Β·
1 Parent(s): 0d96540

Updated app.py to version 7

Browse files
Files changed (1) hide show
  1. app.py +59 -40
app.py CHANGED
@@ -1,6 +1,6 @@
1
  import streamlit as st
2
  import pandas as pd
3
- from huggingface_hub import InferenceClient
4
 
5
  # Page configuration
6
  st.set_page_config(
@@ -11,23 +11,50 @@ st.set_page_config(
11
  )
12
 
13
  st.title("πŸ“Š LLM Data Analyzer")
14
- st.write("*Analyze data and chat with AI powered by Hugging Face Inference API*")
15
 
16
- # Initialize HF Inference Client
17
- @st.cache_resource
18
- def get_hf_client():
19
- """Get Hugging Face Inference Client"""
20
- try:
21
- return InferenceClient()
22
- except Exception as e:
23
- st.error(f"Error initializing HF client: {e}")
24
- return None
25
 
26
- client = get_hf_client()
 
27
 
28
- if client is None:
29
- st.error("Failed to initialize Hugging Face client")
30
- st.stop()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
31
 
32
  # Create tabs
33
  tab1, tab2, tab3 = st.tabs(["πŸ“€ Upload & Analyze", "πŸ’¬ Chat", "πŸ“Š About"])
@@ -81,9 +108,8 @@ with tab1:
81
 
82
  if question:
83
  with st.spinner("πŸ€” AI is analyzing your data..."):
84
- try:
85
- data_summary = df.describe().to_string()
86
- prompt = f"""You are a data analyst expert. You have the following data summary:
87
 
88
  {data_summary}
89
 
@@ -92,18 +118,14 @@ Column names: {', '.join(df.columns.tolist())}
92
  User's question: {question}
93
 
94
  Please provide a clear, concise analysis based on the data summary."""
95
-
96
- # Use Hugging Face Inference API
97
- response = client.text_generation(
98
- prompt,
99
- max_new_tokens=300,
100
- temperature=0.7,
101
- )
102
-
103
  st.success("βœ… Analysis Complete")
104
  st.write(response)
105
- except Exception as e:
106
- st.error(f"Error analyzing data: {e}")
107
 
108
  except Exception as e:
109
  st.error(f"Error reading file: {e}")
@@ -124,7 +146,7 @@ with tab2:
124
  with st.chat_message(message["role"]):
125
  st.markdown(message["content"])
126
 
127
- # Chat input - MUST be outside tabs, so we use text_input instead
128
  user_input = st.text_input(
129
  "Type your message:",
130
  placeholder="Ask me anything...",
@@ -137,15 +159,12 @@ with tab2:
137
 
138
  # Generate AI response
139
  with st.spinner("⏳ Generating response..."):
140
- try:
141
- prompt = f"User: {user_input}\n\nAssistant:"
142
-
143
- response = client.text_generation(
144
- prompt,
145
- max_new_tokens=300,
146
- temperature=0.7,
147
- )
148
-
149
  assistant_message = response.strip()
150
 
151
  # Add assistant message to history
@@ -156,8 +175,6 @@ with tab2:
156
 
157
  # Rerun to display the new messages
158
  st.rerun()
159
- except Exception as e:
160
- st.error(f"Error generating response: {e}")
161
 
162
  # ============================================================================
163
  # TAB 3: About
@@ -174,6 +191,7 @@ with tab3:
174
 
175
  - **Framework:** Streamlit
176
  - **AI Engine:** Hugging Face Inference API
 
177
  - **Hosting:** Hugging Face Spaces (Free Tier)
178
  - **Language:** Python
179
 
@@ -194,6 +212,7 @@ with tab3:
194
 
195
  - [Hugging Face](https://huggingface.co/) - AI models and hosting
196
  - [Streamlit](https://streamlit.io/) - Web framework
 
197
 
198
  ### πŸ“– Quick Tips
199
 
 
1
  import streamlit as st
2
  import pandas as pd
3
+ import requests
4
 
5
  # Page configuration
6
  st.set_page_config(
 
11
  )
12
 
13
  st.title("πŸ“Š LLM Data Analyzer")
14
+ st.write("*Analyze data and chat with AI powered by Hugging Face*")
15
 
16
+ # Get HF token from environment
17
+ import os
18
+ HF_TOKEN = os.getenv("HF_TOKEN")
 
 
 
 
 
 
19
 
20
+ if not HF_TOKEN:
21
+ st.warning("⚠️ HF_TOKEN environment variable not set. Features may be limited.")
22
 
23
# Helper for querying the Hugging Face Inference API.
def call_hf_api(prompt):
    """Send *prompt* to the Hugging Face Inference API and return generated text.

    Queries the hosted Mistral-7B-Instruct-v0.1 model. On any failure
    (non-200 status, timeout, connection or JSON error) a string starting
    with "Error" is returned instead of raising, because the callers in
    this file dispatch on that ``startswith("Error")`` prefix.
    """
    try:
        headers = {"Content-Type": "application/json"}
        # Only attach Authorization when a token is configured; sending an
        # empty "Bearer" value (as the previous version did) is rejected
        # by the API instead of being treated as anonymous access.
        if HF_TOKEN:
            headers["Authorization"] = f"Bearer {HF_TOKEN}"

        payload = {
            "inputs": prompt,
            "parameters": {
                "max_new_tokens": 300,
                "temperature": 0.7,
                # Return only the completion; without this the API echoes
                # the prompt back at the start of "generated_text".
                "return_full_text": False,
            },
        }

        # NOTE(review): this is the classic serverless api-inference
        # endpoint, not the newer router endpoint the original comment
        # claimed — confirm which one the deployment should target.
        response = requests.post(
            "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.1",
            headers=headers,
            json=payload,
            timeout=30,
        )

        if response.status_code == 200:
            result = response.json()
            # Text-generation responses arrive as a one-element list of
            # {"generated_text": ...}; fall back to str() for anything else.
            if isinstance(result, list) and result:
                return result[0].get("generated_text", "")
            return str(result)
        return f"Error: {response.status_code} - {response.text}"

    except Exception as e:  # network/timeout/JSON errors surface as "Error" strings
        return f"Error: {str(e)}"
58
 
59
  # Create tabs
60
  tab1, tab2, tab3 = st.tabs(["πŸ“€ Upload & Analyze", "πŸ’¬ Chat", "πŸ“Š About"])
 
108
 
109
  if question:
110
  with st.spinner("πŸ€” AI is analyzing your data..."):
111
+ data_summary = df.describe().to_string()
112
+ prompt = f"""You are a data analyst expert. You have the following data summary:
 
113
 
114
  {data_summary}
115
 
 
118
  User's question: {question}
119
 
120
  Please provide a clear, concise analysis based on the data summary."""
121
+
122
+ response = call_hf_api(prompt)
123
+
124
+ if response.startswith("Error"):
125
+ st.error(response)
126
+ else:
 
 
127
  st.success("βœ… Analysis Complete")
128
  st.write(response)
 
 
129
 
130
  except Exception as e:
131
  st.error(f"Error reading file: {e}")
 
146
  with st.chat_message(message["role"]):
147
  st.markdown(message["content"])
148
 
149
+ # Chat input
150
  user_input = st.text_input(
151
  "Type your message:",
152
  placeholder="Ask me anything...",
 
159
 
160
  # Generate AI response
161
  with st.spinner("⏳ Generating response..."):
162
+ prompt = f"User: {user_input}\n\nAssistant:"
163
+ response = call_hf_api(prompt)
164
+
165
+ if response.startswith("Error"):
166
+ st.error(response)
167
+ else:
 
 
 
168
  assistant_message = response.strip()
169
 
170
  # Add assistant message to history
 
175
 
176
  # Rerun to display the new messages
177
  st.rerun()
 
 
178
 
179
  # ============================================================================
180
  # TAB 3: About
 
191
 
192
  - **Framework:** Streamlit
193
  - **AI Engine:** Hugging Face Inference API
194
+ - **Model:** Mistral 7B Instruct
195
  - **Hosting:** Hugging Face Spaces (Free Tier)
196
  - **Language:** Python
197
 
 
212
 
213
  - [Hugging Face](https://huggingface.co/) - AI models and hosting
214
  - [Streamlit](https://streamlit.io/) - Web framework
215
+ - [Mistral AI](https://mistral.ai/) - 7B Language Model
216
 
217
  ### πŸ“– Quick Tips
218