manesh1 committed on
Commit 610e330 · verified · 1 Parent(s): 35a9f3c

Update src/streamlit_app.py

Files changed (1)
  1. src/streamlit_app.py +120 -75
src/streamlit_app.py CHANGED
@@ -1,6 +1,5 @@
  import streamlit as st
- import requests
- import json

  # Set page config
  st.set_page_config(
@@ -10,86 +9,132 @@ st.set_page_config(
  )

  st.title("📝 T5-small LoRA Text Summarization")
- st.markdown("Using your local model files for summarization")

  def main():
-     # Display model files info
-     st.success("✅ All model files are detected in this Space!")

-     # Input section
-     st.subheader("📄 Input Text")
-     input_text = st.text_area(
-         "Enter text to summarize:",
-         height=200,
-         placeholder="Paste your text here...",
-         help="The text you want to summarize"
-     )

-     # Settings
-     col1, col2 = st.columns(2)
-     with col1:
-         max_length = st.slider("Maximum summary length", 50, 300, 150)
-     with col2:
-         min_length = st.slider("Minimum summary length", 10, 100, 30)

-     # Model files info
-     with st.expander("📁 Model Files Detected"):
-         st.write("""
- adapter_config.json
- adapter_model.safetensors
- special_tokens_map.json
- spiece.model
- tokenizer.json
- tokenizer_config.json
- training_args.bin
  """)

-     if st.button("🚀 Generate Summary", type="primary"):
-         if not input_text.strip():
-             st.warning("⚠️ Please enter some text to summarize.")
-         else:
-             with st.spinner("⏳ Generating summary using your model..."):
-                 try:
-                     # NEW API ENDPOINT
-                     API_URL = "https://router.huggingface.co/hf-inference/models/manesh1/t5-small-lora-summarization"
-
-                     response = requests.post(
-                         API_URL,
-                         json={
-                             "inputs": input_text,
-                             "parameters": {
-                                 "max_length": max_length,
-                                 "min_length": min_length,
-                                 "do_sample": False
-                             }
-                         },
-                         timeout=60
-                     )
-
-                     if response.status_code == 200:
-                         result = response.json()
-                         if isinstance(result, list) and len(result) > 0:
-                             summary = result[0].get('summary_text', 'No summary generated')
-                             st.success("📋 Summary")
-                             st.info(summary)
-
-                             # Statistics
-                             col1, col2 = st.columns(2)
-                             with col1:
-                                 st.metric("Input Words", len(input_text.split()))
-                             with col2:
-                                 st.metric("Summary Words", len(summary.split()))
-                         else:
-                             st.error(f"Unexpected response format: {result}")
-                     elif response.status_code == 503:
-                         # Model is loading
-                         st.warning("🔄 Model is loading... Please wait 20-30 seconds and try again.")
-                         st.info("This is normal for the first request. The model needs to load on Hugging Face's servers.")
-                     else:
-                         st.error(f"API Error: Status {response.status_code} - {response.text}")
-
-                 except Exception as e:
-                     st.error(f"Error: {str(e)}")

  if __name__ == "__main__":
      main()
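A note on the removed code path above: the old version called the hosted Inference API and, on a 503 response, asked the user to wait 20-30 seconds and press the button again. If that pattern is ever needed again, the warm-up wait can be automated. A minimal sketch, assuming the same endpoint and that 503 always means the model is still loading; the retry count and delay below are illustrative, not from the original app:

```python
import time

import requests

# Endpoint taken from the removed version of the app.
API_URL = "https://router.huggingface.co/hf-inference/models/manesh1/t5-small-lora-summarization"

def summarize_with_retry(text, max_length=150, min_length=30, retries=5, delay=20):
    """POST to the endpoint, waiting out 503 'model is loading' responses."""
    payload = {
        "inputs": text,
        "parameters": {
            "max_length": max_length,
            "min_length": min_length,
            "do_sample": False,
        },
    }
    for _ in range(retries):
        response = requests.post(API_URL, json=payload, timeout=60)
        if response.status_code == 503:
            # Assumed semantics: the model is still loading server-side.
            time.sleep(delay)
            continue
        response.raise_for_status()
        return response.json()[0]["summary_text"]
    raise TimeoutError("Model did not become available after retrying.")
```

The rewritten file, shown next, drops this network path entirely and instead documents how to use the model elsewhere.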
 
  import streamlit as st
+ import os

  # Set page config
  st.set_page_config(
@@ -10,86 +9,132 @@ st.set_page_config(
  )

  st.title("📝 T5-small LoRA Text Summarization")
+ st.markdown("Your model is successfully deployed with all required files!")

  def main():
+     # Display success message
+     st.success("🎉 **Deployment Successful!**")

+     # Show all model files
+     st.subheader("📁 Model Files in this Space")
+     files = [
+         "adapter_config.json", "adapter_model.safetensors",
+         "special_tokens_map.json", "spiece.model",
+         "tokenizer.json", "tokenizer_config.json", "training_args.bin"
+     ]
+
+     for file in files:
+         if os.path.exists(file):
+             st.write(f"✅ `{file}`")
+         else:
+             st.write(f"❌ `{file}`")
+
+     st.info(f"**Status:** {sum(1 for f in files if os.path.exists(f))}/{len(files)} files present")
+
+     # How to use the model
+     st.subheader("🚀 How to Use Your Model")

+     tab1, tab2, tab3 = st.tabs(["Python Code", "API Usage", "Direct Loading"])

+     with tab1:
+         st.markdown("""
+ **Using your model in Python:**
+ ```python
+ from transformers import pipeline
+
+ # Load your model
+ summarizer = pipeline(
+     "summarization",
+     model="manesh1/t5-small-lora-summarization"
+ )
+
+ # Generate summary
+ text = \"\"\"Your long text here...\"\"\"
+ summary = summarizer(text, max_length=150, min_length=30)[0]['summary_text']
+ print(summary)
+ ```
  """)

+     with tab2:
+         st.markdown("""
+ **Using the Hugging Face Inference API:**
+ ```python
+ import requests
+
+ API_URL = "https://api-inference.huggingface.co/models/manesh1/t5-small-lora-summarization"
+ headers = {"Authorization": "Bearer YOUR_HF_TOKEN"}
+
+ def query(payload):
+     response = requests.post(API_URL, headers=headers, json=payload)
+     return response.json()
+
+ output = query({
+     "inputs": "Your text to summarize...",
+     "parameters": {"max_length": 150, "min_length": 30}
+ })
+ ```
+ """)
+
+     with tab3:
+         st.markdown("""
+ **Loading directly from files:**
+ ```python
+ from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
+
+ # Load from local files
+ tokenizer = AutoTokenizer.from_pretrained(".")
+ model = AutoModelForSeq2SeqLM.from_pretrained(".")
+
+ # Your inference code here
+ ```
+ """)
+
+     # Demo section with simple text processing
+     st.subheader("🧪 Text Processing Demo")
+
+     demo_text = st.text_area(
+         "Enter text to see basic processing:",
+         height=150,
+         placeholder="While the full model isn't loaded in this Space, you can see basic text processing here..."
+     )
+
+     if demo_text:
+         col1, col2, col3 = st.columns(3)
+         with col1:
+             st.metric("Word Count", len(demo_text.split()))
+         with col2:
+             st.metric("Character Count", len(demo_text))
+         with col3:
+             st.metric("Paragraphs", len([p for p in demo_text.split('\n') if p.strip()]))
+
+         # Show a preview
+         words = demo_text.split()
+         if len(words) > 30:
+             preview = " ".join(words[:30]) + "..."
+             st.write("**First 30 words:**", preview)
+
+     # Next steps
+     st.subheader("🔧 Next Steps")
+     st.markdown("""
+ Your model is ready to use! Here's what you can do:
+
+ 1. **Use the model in your code** with the examples above
+ 2. **Share the model link** with others: `https://huggingface.co/manesh1/t5-small-lora-summarization`
+ 3. **Test the model** in a Python environment with PyTorch installed
+ 4. **Create a separate inference Space** using Gradio or another framework
+
+ **Current limitation**: This Streamlit Space can't load PyTorch, but your model files are correctly deployed and can be used elsewhere!
+ """)
+
+     # Quick test section
+     st.subheader("🔍 Model Information")
+     st.markdown("""
+ - **Model**: T5-small with LoRA adapters
+ - **Task**: Text summarization
+ - **Repository**: [manesh1/t5-small-lora-summarization](https://huggingface.co/manesh1/t5-small-lora-summarization)
+ - **Files**: Complete set of model files
+ - **Status**: Ready for inference in compatible environments
+ """)

  if __name__ == "__main__":
      main()
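One caveat on the "Direct Loading" tab in the new file: the listed files are LoRA adapter weights (`adapter_config.json`, `adapter_model.safetensors`) rather than full model weights, so `AutoModelForSeq2SeqLM.from_pretrained(".")` depends on the transformers/PEFT integration resolving the base model behind the scenes. A minimal sketch of the explicit PEFT route, assuming the adapter was trained on top of the stock t5-small base; the base model name and generation settings here are assumptions, not taken from the commit:

```python
from peft import PeftModel
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# Load the base model the adapter was presumably trained on (assumed:
# t5-small), then apply the LoRA adapter from the repository on top of it.
base = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
model = PeftModel.from_pretrained(base, "manesh1/t5-small-lora-summarization")
tokenizer = AutoTokenizer.from_pretrained("manesh1/t5-small-lora-summarization")

# T5 checkpoints expect a task prefix for summarization.
text = "summarize: " + "Your long text here..."
inputs = tokenizer(text, return_tensors="pt", truncation=True)
summary_ids = model.generate(**inputs, max_length=150, min_length=30)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```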