Spaces: Running

Ezi Ozoani committed · Commit 516fb17 · 1 Parent(s): f6b562f

app inference complete

Browse files:
- app.py +33 -78
- requirements.txt +1 -1
app.py CHANGED

````diff
@@ -1,10 +1,8 @@
 import streamlit as st
 from pathlib import Path
 import base64
-
-
-from PIL import Image
-
+from transformers import pipeline, set_seed
+from huggingface_hub.inference_api import InferenceApi
 
 
 # Initial page config
````
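The newly imported `InferenceApi` is not exercised in any of the added hunks below, so it presumably anticipates calling the hosted Inference API rather than running the model in-process. A minimal sketch of how that client is typically used; the `repo_id` choice and the token handling are assumptions, not part of this commit:

```python
from huggingface_hub.inference_api import InferenceApi

# Hypothetical token; in a Space this would normally come from secrets
API_TOKEN = "hf_..."

# Client for the hosted text-generation endpoint of distilgpt2 (assumed repo_id)
inference = InferenceApi(repo_id="distilgpt2", token=API_TOKEN)

# The client is callable; for text generation it returns a list of dicts
# with a 'generated_text' key, mirroring the local pipeline's output
result = inference(inputs="Hello, I'm a language model")
print(result)
```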
````diff
@@ -31,9 +29,13 @@ def img_to_bytes(img_path):
 
 # sidebar
 
-
-
-
+def load_model():
+    generator = pipeline('text-generation', model='distilgpt2')
+    set_seed(48)
+    text = st.text_input('Provide an initial text prompt')
+
+    if text != '' :
+        out = generator(text, max_length=30, num_return_sequences=1)
 
 def cs_sidebar():
 
````
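As committed, `load_model()` rebuilds the pipeline, reloading the model weights, on every Streamlit rerun, i.e. every time the user types. A hedged sketch of the usual fix, caching the pipeline across reruns; `load_generator` is a hypothetical name and the decorator depends on the Streamlit version:

```python
import streamlit as st
from transformers import pipeline, set_seed

@st.cache_resource  # assumption: Streamlit >= 1.18; older releases used st.cache
def load_generator():
    # Built once per process, then reused across reruns
    return pipeline('text-generation', model='distilgpt2')

generator = load_generator()
set_seed(48)
text = st.text_input('Provide an initial text prompt')
if text:
    out = generator(text, max_length=30, num_return_sequences=1)
    st.write(out)
```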
````diff
@@ -167,56 +169,6 @@ OpenAI states in the GPT-2 [model card](https://github.com/openai/gpt-2/blob/mas
 
 ''')
 
-# How to Get Started
-
-col1.subheader('How to Get Started')
-with col1.expander(""):
-    st.markdown('''
-
-
-
-*Be sure to read the sections on in-scope and out-of-scope uses and limitations of the model for further information on how to use the model.*
-
-Using DistilGPT2 is similar to using GPT-2. DistilGPT2 can be used directly with a pipeline for text generation. Since the generation relies on some randomness, we set a seed for reproducibility:
-
-```python
->>> from transformers import pipeline, set_seed
->>> generator = pipeline('text-generation', model='distilgpt2')
->>> set_seed(42)
->>> generator("Hello, I'm a language model", max_length=20, num_return_sequences=5)
-Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.
-[{'generated_text': "Hello, I'm a language model, I'm a language model. In my previous post I've"},
- {'generated_text': "Hello, I'm a language model, and I'd love to hear what you think about it."},
- {'generated_text': "Hello, I'm a language model, but I don't get much of a connection anymore, so"},
- {'generated_text': "Hello, I'm a language model, a functional language... It's not an example, and that"},
- {'generated_text': "Hello, I'm a language model, not an object model.\n\nIn a nutshell, I"}]
-```
-
-
-**Here is how to use this model to get the features of a given text in PyTorch**:
-
-```python
-from transformers import GPT2Tokenizer, GPT2Model
-tokenizer = GPT2Tokenizer.from_pretrained('distilgpt2')
-model = GPT2Model.from_pretrained('distilgpt2')
-text = "Replace me by any text you'd like."
-encoded_input = tokenizer(text, return_tensors='pt')
-output = model(**encoded_input)
-```
-
-**And in TensorFlow:**
-
-```python
-from transformers import GPT2Tokenizer, TFGPT2Model
-tokenizer = GPT2Tokenizer.from_pretrained('distilgpt2')
-model = TFGPT2Model.from_pretrained('distilgpt2')
-text = "Replace me by any text you'd like."
-encoded_input = tokenizer(text, return_tensors='tf')
-output = model(encoded_input)
-```
-
-''')
-
 
 # Training Data
 
````
````diff
@@ -274,17 +226,16 @@ GPT-2 reaches a perplexity on the test set of 16.3 compared to 21.1 for DistilGP
 
 ''')
 
-
-
-
-
-
-
-''')
+
+################################
+## Column 2: right most column
+################################
+
+
 
 # How to Get Started
 
-with col2.
+with col2.subheader('How to Get Started'):
 col2.markdown('''
 *Be sure to read the sections on in-scope and out-of-scope uses and limitations of the model for further information on how to use the model.*
 ''')
````
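The added `with col2.subheader('How to Get Started'):` treats a plain heading as a `with` block, and the truncated `with col2.` it replaces suggests this line was still being worked out. In Streamlit, the `with` syntax is meant for container elements such as `st.expander` or `st.container`. A sketch of the presumably intended layout (hypothetical, not what the commit does):

```python
import streamlit as st

col1, col2 = st.columns(2)  # assumption: mirrors the app's two-column layout

col2.subheader('How to Get Started')
with col2.expander('Details'):  # an expander is a real container, so `with` is safe
    st.markdown('''
*Be sure to read the sections on in-scope and out-of-scope uses and limitations of the model for further information on how to use the model.*
''')
```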
````diff
@@ -331,23 +282,27 @@ output = model(encoded_input)
 
 ''')
 
-# Visuals
 
+# Try App
 
+col2.header('Try out DistilGP2')
+#print load_model()
+with col2.subheader(''):
+    generator = pipeline('text-generation', model='distilgpt2')
+    set_seed(48)
+    text = st.text_input('Text Generation: Provide an initial text prompt')
+    if text != '' :
+        out = generator(text, max_length=30, num_return_sequences=1)
+        col2.write(out)
+
+
 
+# Contact Section
 
-
-
-
-
-# Placeholders, help, and options
-
-col2.subheader('Placeholders, help, and anything else')
-#pipeline = load_model()
-
-col2.code('''
-
-''')
+with col2.header('Further Contact'):
+    url = "https://huggingface.co/spaces/Ezi/ModelCardsAnalysis/discussions"
+    col2.markdown("Further contact, input and/or questions are welcomed 🤗 [here](%s)" % url)
+
 
 
 
````
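In the new Try App block, `col2.write(out)` renders the raw pipeline output, which, per the usage examples removed earlier in this commit, is a list of dicts of the form `[{'generated_text': ...}]`. A small self-contained sketch that surfaces just the generated string; the column layout call is an assumption:

```python
import streamlit as st
from transformers import pipeline, set_seed

col1, col2 = st.columns(2)  # assumption: mirrors the app's two-column layout

generator = pipeline('text-generation', model='distilgpt2')
set_seed(48)

text = col2.text_input('Text Generation: Provide an initial text prompt')
if text:
    out = generator(text, max_length=30, num_return_sequences=1)
    # out looks like [{'generated_text': '...'}]; show only the string
    col2.write(out[0]['generated_text'])
```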
requirements.txt CHANGED

````diff
@@ -1,3 +1,3 @@
 transformers
 torch
-transformers-interpret
+transformers-interpret
````
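The `transformers-interpret` line is removed and re-added with identical text, which usually indicates a whitespace or end-of-line change. Worth noting: app.py now imports `huggingface_hub`, which is not listed here; it is pulled in transitively as a dependency of `transformers`, though declaring it explicitly would make the environment more reproducible (a suggestion, not part of the commit):

```
transformers
torch
transformers-interpret
huggingface_hub
```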