manesh1 committed on
Commit
afbc73e
·
verified ·
1 Parent(s): c719a09

Update src/streamlit_app.py

Browse files
Files changed (1) hide show
  1. src/streamlit_app.py +9 -25
src/streamlit_app.py CHANGED
@@ -13,12 +13,8 @@ st.title("πŸ“ T5-small LoRA Text Summarization")
13
  st.markdown("Using your local model files for summarization")
14
 
15
  def main():
16
- # Since we have the model files in the Space, we can use them directly
17
- st.info("""
18
- **Model Status**: Your T5-small LoRA model files are detected in this Space.
19
-
20
- This app will use the model directly from your repository files.
21
- """)
22
 
23
  # Input section
24
  st.subheader("πŸ“„ Input Text")
@@ -47,20 +43,16 @@ def main():
47
  - tokenizer_config.json
48
  - training_args.bin
49
  """)
50
- st.success("All model files are present in this Space!")
51
 
52
  if st.button("πŸš€ Generate Summary", type="primary"):
53
  if not input_text.strip():
54
  st.warning("⚠️ Please enter some text to summarize.")
55
  else:
56
- # Since we can't load the model directly due to torch issues,
57
- # we'll use the Hugging Face Inference API with YOUR model
58
  with st.spinner("⏳ Generating summary using your model..."):
59
  try:
60
- # Use Inference API with your model
61
- API_URL = f"https://api-inference.huggingface.co/models/{st.secrets.get('HF_USERNAME', 'manesh1')}/t5-small-lora-summarization"
62
 
63
- # Try without token first (public access)
64
  response = requests.post(
65
  API_URL,
66
  json={
@@ -89,23 +81,15 @@ def main():
89
  st.metric("Summary Words", len(summary.split()))
90
  else:
91
  st.error(f"Unexpected response format: {result}")
 
 
 
 
92
  else:
93
- st.warning(f"API returned status {response.status_code}. The model might be loading.")
94
- st.info("""
95
- **Next steps:**
96
- 1. Wait 20-30 seconds and try again
97
- 2. The model needs to load on Hugging Face's servers
98
- 3. Subsequent requests will be faster
99
- """)
100
 
101
  except Exception as e:
102
  st.error(f"Error: {str(e)}")
103
- st.info("""
104
- **Troubleshooting:**
105
- - The model is loading for the first time (can take 20-30 seconds)
106
- - Try again in a moment
107
- - Check that your model files are properly configured
108
- """)
109
 
110
  if __name__ == "__main__":
111
  main()
 
13
  st.markdown("Using your local model files for summarization")
14
 
15
  def main():
16
+ # Display model files info
17
+ st.success("βœ… All model files are detected in this Space!")
 
 
 
 
18
 
19
  # Input section
20
  st.subheader("πŸ“„ Input Text")
 
43
  - tokenizer_config.json
44
  - training_args.bin
45
  """)
 
46
 
47
  if st.button("πŸš€ Generate Summary", type="primary"):
48
  if not input_text.strip():
49
  st.warning("⚠️ Please enter some text to summarize.")
50
  else:
 
 
51
  with st.spinner("⏳ Generating summary using your model..."):
52
  try:
53
+ # Use Inference API directly without secrets
54
+ API_URL = "https://api-inference.huggingface.co/models/manesh1/t5-small-lora-summarization"
55
 
 
56
  response = requests.post(
57
  API_URL,
58
  json={
 
81
  st.metric("Summary Words", len(summary.split()))
82
  else:
83
  st.error(f"Unexpected response format: {result}")
84
+ elif response.status_code == 503:
85
+ # Model is loading
86
+ st.warning("πŸ”„ Model is loading... Please wait 20-30 seconds and try again.")
87
+ st.info("This is normal for the first request. The model needs to load on Hugging Face's servers.")
88
  else:
89
+ st.error(f"API Error: Status {response.status_code} - {response.text}")
 
 
 
 
 
 
90
 
91
  except Exception as e:
92
  st.error(f"Error: {str(e)}")
 
 
 
 
 
 
93
 
94
  if __name__ == "__main__":
95
  main()