manesh1 committed
Commit 48e3e54 · verified · 1 Parent(s): 610e330

Update src/streamlit_app.py

Files changed (1):
  src/streamlit_app.py +123 -115
src/streamlit_app.py CHANGED
@@ -1,140 +1,148 @@
  import streamlit as st
- import os

- # Set page config
  st.set_page_config(
      page_title="T5-small LoRA Summarization",
      page_icon="📝",
      layout="wide"
  )

- st.title("📝 T5-small LoRA Text Summarization")
- st.markdown("Your model is successfully deployed with all required files!")

- def main():
-     # Display success message
-     st.success("🎉 **Deployment Successful!**")
-
-     # Show all model files
-     st.subheader("📁 Model Files in this Space")
-     files = [
-         "adapter_config.json", "adapter_model.safetensors",
-         "special_tokens_map.json", "spiece.model",
-         "tokenizer.json", "tokenizer_config.json", "training_args.bin"
-     ]
-
-     for file in files:
-         if os.path.exists(file):
-             st.write(f"✅ `{file}`")
-         else:
-             st.write(f"❌ `{file}`")
-
-     st.info(f"**Status:** {sum(1 for f in files if os.path.exists(f))}/{len(files)} files present")

-     # How to use the model
-     st.subheader("🚀 How to Use Your Model")

-     tab1, tab2, tab3 = st.tabs(["Python Code", "API Usage", "Direct Loading"])

-     with tab1:
-         st.markdown("""
-         **Using your model in Python:**
-         ```python
-         from transformers import pipeline
-
-         # Load your model
-         summarizer = pipeline(
-             "summarization",
-             model="manesh1/t5-small-lora-summarization"
-         )
-
-         # Generate summary
-         text = \"\"\"Your long text here...\"\"\"
-         summary = summarizer(text, max_length=150, min_length=30)[0]['summary_text']
-         print(summary)
-         ```
-         """)

-     with tab2:
-         st.markdown("""
-         **Using Hugging Face Inference API:**
-         ```python
-         import requests
-
-         API_URL = "https://api-inference.huggingface.co/models/manesh1/t5-small-lora-summarization"
-         headers = {"Authorization": "Bearer YOUR_HF_TOKEN"}
-
-         def query(payload):
-             response = requests.post(API_URL, headers=headers, json=payload)
-             return response.json()
-
-         output = query({
-             "inputs": "Your text to summarize...",
-             "parameters": {"max_length": 150, "min_length": 30}
-         })
-         ```
-         """)

-     with tab3:
-         st.markdown("""
-         **Loading directly from files:**
-         ```python
-         from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
-
-         # Load from local files
-         tokenizer = AutoTokenizer.from_pretrained(".")
-         model = AutoModelForSeq2SeqLM.from_pretrained(".")
-
-         # Your inference code here
-         ```
-         """)

-     # Demo section with simple text processing
-     st.subheader("🧪 Text Processing Demo")

-     demo_text = st.text_area(
-         "Enter text to see basic processing:",
-         height=150,
-         placeholder="While the full model isn't loaded in this Space, you can see basic text processing here..."
      )

-     if demo_text:
-         col1, col2, col3 = st.columns(3)
-         with col1:
-             st.metric("Word Count", len(demo_text.split()))
-         with col2:
-             st.metric("Character Count", len(demo_text))
-         with col3:
-             st.metric("Paragraphs", len([p for p in demo_text.split('\n') if p.strip()]))
-
-         # Show a preview
-         words = demo_text.split()
-         if len(words) > 30:
-             preview = " ".join(words[:30]) + "..."
-             st.write("**First 30 words:**", preview)

-     # Next steps
-     st.subheader("🔧 Next Steps")
      st.markdown("""
-     Your model is ready to be used! Here's what you can do:
-
-     1. **Use the model in your code** with the examples above
-     2. **Share the model link** with others: `https://huggingface.co/manesh1/t5-small-lora-summarization`
-     3. **Test the model** in a Python environment with PyTorch installed
-     4. **Create a separate inference Space** using Gradio or another framework
-
-     **Current limitation**: This Streamlit Space can't load PyTorch, but your model files are correctly deployed and can be used elsewhere!
      """)

-     # Quick test section
-     st.subheader("🔍 Model Information")
-     st.markdown(f"""
-     - **Model**: T5-small with LoRA adapters
-     - **Task**: Text summarization
-     - **Repository**: [manesh1/t5-small-lora-summarization](https://huggingface.co/manesh1/t5-small-lora-summarization)
-     - **Files**: Complete set of model files
-     - **Status**: Ready for inference in compatible environments
-     """)

- if __name__ == "__main__":
-     main()

  import streamlit as st

  st.set_page_config(
      page_title="T5-small LoRA Summarization",
      page_icon="📝",
      layout="wide"
  )

+ # Custom CSS
+ st.markdown("""
+ <style>
+ .model-card {
+     background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
+     padding: 2rem;
+     border-radius: 10px;
+     color: white;
+     margin-bottom: 2rem;
+ }
+ .feature-box {
+     background-color: #f8f9fa;
+     padding: 1rem;
+     border-radius: 8px;
+     border-left: 4px solid #667eea;
+     margin: 0.5rem 0;
+ }
+ </style>
+ """, unsafe_allow_html=True)
+
+ # Header
+ st.markdown("""
+ <div class="model-card">
+     <h1>📝 T5-small LoRA Summarization Model</h1>
+     <p>Fine-tuned for efficient text summarization using LoRA adapters</p>
+ </div>
+ """, unsafe_allow_html=True)
+
+ # Model Information
+ col1, col2 = st.columns([2, 1])

+ with col1:
+     st.header("🚀 Model Overview")
+     st.markdown("""
+     This model is a **T5-small** architecture fine-tuned with **LoRA (Low-Rank Adaptation)**
+     specifically for text summarization tasks. The model maintains the efficiency of T5-small
+     while being optimized for summarization through parameter-efficient fine-tuning.
+     """)
+
+     st.markdown("""
+     <div class="feature-box">
+         <h4>📁 Model Files Status</h4>
+         <ul>
+             <li>✅ adapter_config.json</li>
+             <li>✅ adapter_model.safetensors</li>
+             <li>✅ tokenizer files</li>
+             <li>✅ configuration files</li>
+         </ul>
+         <p><strong>All model files are properly deployed!</strong></p>
+     </div>
+     """, unsafe_allow_html=True)
+
+ with col2:
+     st.header("🔧 Quick Use")
+     st.code("""
+ from transformers import pipeline
+
+ summarizer = pipeline(
+     "summarization",
+     model="manesh1/t5-small-lora-summarization"
+ )
+ """, language="python")
+
+ # Usage Examples
+ st.header("💡 Usage Examples")
+ tab1, tab2, tab3 = st.tabs(["Basic Usage", "Advanced", "API"])
+
+ with tab1:
+     st.markdown("""
+     ```python
+     from transformers import pipeline
+
+     # Load model
+     summarizer = pipeline(
+         "summarization",
+         model="manesh1/t5-small-lora-summarization"
+     )
+
+     # Summarize text
+     text = \"\"\"Long document text here...\"\"\"
+     summary = summarizer(
+         text,
+         max_length=150,
+         min_length=30,
+         do_sample=False
+     )[0]['summary_text']
+
+     print(summary)
+     ```
+     """)
+
+ with tab2:
+     st.markdown("""
+     ```python
+     from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
+
+     # Load specific components
+     tokenizer = AutoTokenizer.from_pretrained(
+         "manesh1/t5-small-lora-summarization"
+     )
+     model = AutoModelForSeq2SeqLM.from_pretrained(
+         "manesh1/t5-small-lora-summarization"
      )
+
+     # Custom inference (define the input text first)
+     text = "Your long document text here..."
+     inputs = tokenizer("summarize: " + text, return_tensors="pt")
+     outputs = model.generate(**inputs, max_length=150, min_length=30)
+     summary = tokenizer.decode(outputs[0], skip_special_tokens=True)
+     ```
+     """)
+
+ with tab3:
      st.markdown("""
+     ```python
+     import requests
+
+     API_URL = "https://api-inference.huggingface.co/models/manesh1/t5-small-lora-summarization"
+     headers = {"Authorization": "Bearer YOUR_TOKEN"}
+
+     def query(payload):
+         response = requests.post(API_URL, headers=headers, json=payload)
+         return response.json()
+
+     output = query({
+         "inputs": "Your text here...",
+         "parameters": {"max_length": 150, "min_length": 30}
+     })
+     ```
      """)

+ # Final notes
+ st.header("📝 Notes")
+ st.info("""
+ **About this Space**: This Streamlit interface demonstrates your deployed model.
+ While the model files are fully available in this Space, running inference requires
+ a PyTorch environment. Use the code examples above in your local environment or
+ in a Space with PyTorch support to test the model's summarization capabilities.
+ """)
+
+ st.success("**Your model is ready to use!** Share this link: https://huggingface.co/manesh1/t5-small-lora-summarization")
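
The examples in the diff load the repo straight through `pipeline` / `AutoModelForSeq2SeqLM`. Since the file list shows only LoRA adapter weights (`adapter_config.json`, `adapter_model.safetensors`) rather than full model weights, resolving the repo that way relies on the `peft` package being installed. Below is a minimal sketch of the explicit two-step load with `peft`; it assumes the adapter was trained on the stock `t5-small` base, which should be confirmed against `base_model_name_or_path` in `adapter_config.json`.

```python
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from peft import PeftModel

# Load the base model first, then attach the LoRA adapter on top of it.
# "t5-small" is an assumption; check base_model_name_or_path in adapter_config.json.
base = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
model = PeftModel.from_pretrained(base, "manesh1/t5-small-lora-summarization")
tokenizer = AutoTokenizer.from_pretrained("manesh1/t5-small-lora-summarization")

# T5 expects a task prefix for summarization.
text = "Your long text here..."
inputs = tokenizer("summarize: " + text, return_tensors="pt")
outputs = model.generate(**inputs, max_length=150, min_length=30)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))

# Optional: fold the adapter into the base weights for plain transformers inference.
# model = model.merge_and_unload()
```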
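Since the Notes section calls out that this Streamlit Space lacks a PyTorch environment, a minimal dependency sketch for running the examples locally might look like the following (package names only, unpinned; `peft` and `sentencepiece` are assumptions based on the adapter files and `spiece.model` in the repo):

```
# requirements.txt (sketch)
streamlit
torch
transformers
peft
sentencepiece  # for the T5 tokenizer's spiece.model
requests       # only for the Inference API example
```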