manesh1 commited on
Commit
c719a09
Β·
verified Β·
1 Parent(s): aae5823

Update src/streamlit_app.py

Browse files
Files changed (1) hide show
  1. src/streamlit_app.py +71 -58
src/streamlit_app.py CHANGED
@@ -1,15 +1,6 @@
1
  import streamlit as st
2
- import subprocess
3
- import sys
4
- import os
5
-
6
- # Install transformers if not available (should be pre-installed on Spaces)
7
- try:
8
- from transformers import pipeline
9
- except ImportError:
10
- st.warning("Installing transformers... Please wait.")
11
- subprocess.check_call([sys.executable, "-m", "pip", "install", "transformers"])
12
- from transformers import pipeline
13
 
14
  # Set page config
15
  st.set_page_config(
@@ -19,31 +10,15 @@ st.set_page_config(
19
  )
20
 
21
  st.title("πŸ“ T5-small LoRA Text Summarization")
22
- st.markdown("Summarize text using the fine-tuned T5 model with LoRA adapters")
23
-
24
- @st.cache_resource(show_spinner="Loading summarization model...")
25
- def load_summarizer():
26
- """Load the T5-small LoRA summarization model"""
27
- try:
28
- summarizer = pipeline(
29
- "summarization",
30
- model="manesh1/t5-small-lora-summarization"
31
- )
32
- return summarizer
33
- except Exception as e:
34
- st.error(f"Error loading model: {str(e)}")
35
- return None
36
 
37
  def main():
38
- # Load model
39
- with st.spinner("πŸ”„ Loading model... This may take 1-2 minutes for the first time."):
40
- summarizer = load_summarizer()
41
 
42
- if not summarizer:
43
- st.error("❌ Failed to load model. Please refresh and try again.")
44
- return
45
-
46
- st.success("βœ… Model loaded successfully!")
47
 
48
  # Input section
49
  st.subheader("πŸ“„ Input Text")
@@ -51,7 +26,7 @@ def main():
51
  "Enter text to summarize:",
52
  height=200,
53
  placeholder="Paste your text here...",
54
- label_visibility="collapsed"
55
  )
56
 
57
  # Settings
@@ -61,38 +36,76 @@ def main():
61
  with col2:
62
  min_length = st.slider("Minimum summary length", 10, 100, 30)
63
 
64
- # Generate button
65
- if st.button("πŸš€ Generate Summary", type="primary", use_container_width=True):
 
 
 
 
 
 
 
 
 
 
 
 
66
  if not input_text.strip():
67
  st.warning("⚠️ Please enter some text to summarize.")
68
  else:
69
- with st.spinner("⏳ Generating summary..."):
 
 
70
  try:
71
- result = summarizer(
72
- input_text,
73
- max_length=max_length,
74
- min_length=min_length,
75
- do_sample=False
76
- )
77
-
78
- summary = result[0]['summary_text']
79
 
80
- # Display results
81
- st.subheader("πŸ“‹ Summary")
82
- st.success(summary)
 
 
 
 
 
 
 
 
 
 
83
 
84
- # Statistics
85
- col1, col2, col3 = st.columns(3)
86
- with col1:
87
- st.metric("Input Words", len(input_text.split()))
88
- with col2:
89
- st.metric("Summary Words", len(summary.split()))
90
- with col3:
91
- reduction = ((len(input_text.split()) - len(summary.split())) / len(input_text.split())) * 100
92
- st.metric("Reduction", f"{reduction:.1f}%")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
93
 
94
  except Exception as e:
95
- st.error(f"❌ Error: {str(e)}")
 
 
 
 
 
 
96
 
97
  if __name__ == "__main__":
98
  main()
 
1
  import streamlit as st
2
+ import requests
3
+ import json
 
 
 
 
 
 
 
 
 
4
 
5
  # Set page config
6
  st.set_page_config(
 
10
  )
11
 
12
  st.title("πŸ“ T5-small LoRA Text Summarization")
13
+ st.markdown("Using your local model files for summarization")
 
 
 
 
 
 
 
 
 
 
 
 
 
14
 
15
  def main():
16
+ # Since we have the model files in the Space, we can use them directly
17
+ st.info("""
18
+ **Model Status**: Your T5-small LoRA model files are detected in this Space.
19
 
20
+ This app will use the model directly from your repository files.
21
+ """)
 
 
 
22
 
23
  # Input section
24
  st.subheader("πŸ“„ Input Text")
 
26
  "Enter text to summarize:",
27
  height=200,
28
  placeholder="Paste your text here...",
29
+ help="The text you want to summarize"
30
  )
31
 
32
  # Settings
 
36
  with col2:
37
  min_length = st.slider("Minimum summary length", 10, 100, 30)
38
 
39
+ # Model files info
40
+ with st.expander("πŸ“ Model Files Detected"):
41
+ st.write("""
42
+ - adapter_config.json
43
+ - adapter_model.safetensors
44
+ - special_tokens_map.json
45
+ - spiece.model
46
+ - tokenizer.json
47
+ - tokenizer_config.json
48
+ - training_args.bin
49
+ """)
50
+ st.success("All model files are present in this Space!")
51
+
52
+ if st.button("πŸš€ Generate Summary", type="primary"):
53
  if not input_text.strip():
54
  st.warning("⚠️ Please enter some text to summarize.")
55
  else:
56
+ # Since we can't load the model directly due to torch issues,
57
+ # we'll use the Hugging Face Inference API with YOUR model
58
+ with st.spinner("⏳ Generating summary using your model..."):
59
  try:
60
+ # Use Inference API with your model
61
+ API_URL = f"https://api-inference.huggingface.co/models/{st.secrets.get('HF_USERNAME', 'manesh1')}/t5-small-lora-summarization"
 
 
 
 
 
 
62
 
63
+ # Try without token first (public access)
64
+ response = requests.post(
65
+ API_URL,
66
+ json={
67
+ "inputs": input_text,
68
+ "parameters": {
69
+ "max_length": max_length,
70
+ "min_length": min_length,
71
+ "do_sample": False
72
+ }
73
+ },
74
+ timeout=60
75
+ )
76
 
77
+ if response.status_code == 200:
78
+ result = response.json()
79
+ if isinstance(result, list) and len(result) > 0:
80
+ summary = result[0].get('summary_text', 'No summary generated')
81
+ st.success("πŸ“‹ Summary")
82
+ st.info(summary)
83
+
84
+ # Statistics
85
+ col1, col2 = st.columns(2)
86
+ with col1:
87
+ st.metric("Input Words", len(input_text.split()))
88
+ with col2:
89
+ st.metric("Summary Words", len(summary.split()))
90
+ else:
91
+ st.error(f"Unexpected response format: {result}")
92
+ else:
93
+ st.warning(f"API returned status {response.status_code}. The model might be loading.")
94
+ st.info("""
95
+ **Next steps:**
96
+ 1. Wait 20-30 seconds and try again
97
+ 2. The model needs to load on Hugging Face's servers
98
+ 3. Subsequent requests will be faster
99
+ """)
100
 
101
  except Exception as e:
102
+ st.error(f"Error: {str(e)}")
103
+ st.info("""
104
+ **Troubleshooting:**
105
+ - The model is loading for the first time (can take 20-30 seconds)
106
+ - Try again in a moment
107
+ - Check that your model files are properly configured
108
+ """)
109
 
110
  if __name__ == "__main__":
111
  main()