Update app.py
app.py CHANGED

@@ -200,15 +200,12 @@ def get_all_entities(text):
 
 def get_and_compare_entities(article_content,summary_output):
 
-    print(article_content)
-    print(summary_output)
     all_entities_per_sentence = get_all_entities_per_sentence(article_content)
     entities_article = list(itertools.chain.from_iterable(all_entities_per_sentence))
-
-
+
     all_entities_per_sentence = get_all_entities_per_sentence(summary_output)
     entities_summary = list(itertools.chain.from_iterable(all_entities_per_sentence))
-
+
     matched_entities = []
     unmatched_entities = []
     for entity in entities_summary:
@@ -262,23 +259,10 @@ def highlight_entities(article_content,summary_output):
     for entity in unmatched_entities:
         summary_output = summary_output.replace(entity, markdown_start_red + entity + markdown_end)
     soup = BeautifulSoup(summary_output, features="html.parser")
-    print(soup)
-    print(matched_entities)
-    print(unmatched_entities)
-    return HTML_WRAPPER.format(soup)
 
+    return HTML_WRAPPER.format(soup)
 
 
-def render_svg(svg_file):
-    with open(svg_file, "r") as f:
-        lines = f.readlines()
-        svg = "".join(lines)
-
-    # """Renders the given svg string."""
-    b64 = base64.b64encode(svg.encode("utf-8")).decode("utf-8")
-    html = r'<img src="data:image/svg+xml;base64,%s"/>' % b64
-    return html
-
 def clean_text(text,doc=False,plain_text=False,url=False):
     """Return clean text from the various input sources"""
 
@@ -470,7 +454,7 @@ if summarize:
 
     with st.spinner("Calculating and matching entities, this takes a few seconds..."):
 
-        entity_match_html = highlight_entities(
+        entity_match_html = highlight_entities(text_to_summarize,summarized_text)
         st.subheader("Summarized text with matched entities in Green and mismatched entities in Red relative to the Original Text")
         st.markdown("####")
 
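Note on the first hunk: get_all_entities_per_sentence returns one list of entities per sentence, and list(itertools.chain.from_iterable(...)) flattens those per-sentence lists into a single flat list for the article and for the summary. The hunk cuts off at "for entity in entities_summary:", so the comparison itself is not shown in this commit; the snippet below is a minimal sketch of such a loop, assuming a plain membership check against entities_article (the app may well use looser matching). The example data is made up.

import itertools

# Example per-sentence entity lists, as returned by get_all_entities_per_sentence
all_entities_per_sentence = [["Brussels", "ML6"], ["Belgium"]]
entities_article = list(itertools.chain.from_iterable(all_entities_per_sentence))
# -> ["Brussels", "ML6", "Belgium"]

entities_summary = ["ML6", "Amsterdam"]

matched_entities = []
unmatched_entities = []
for entity in entities_summary:
    # Hypothetical check: a summary entity counts as matched if the same
    # surface form also occurs among the article's entities.
    if entity in entities_article:
        matched_entities.append(entity)
    else:
        unmatched_entities.append(entity)

# matched_entities   -> ["ML6"]
# unmatched_entities -> ["Amsterdam"]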
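Note on the last hunk: this is the functional fix of the commit. The entity_match_html line (cut off on the old side of the view) now explicitly passes text_to_summarize and summarized_text to highlight_entities. A sketch of how that Streamlit block plausibly reads after the change; the rendering call at the end is an assumption, as it sits outside this hunk, and highlight_entities, text_to_summarize and summarized_text are defined elsewhere in app.py.

import streamlit as st

with st.spinner("Calculating and matching entities, this takes a few seconds..."):
    # Confirmed by the diff: compare the source text against its summary.
    entity_match_html = highlight_entities(text_to_summarize, summarized_text)
    st.subheader("Summarized text with matched entities in Green and mismatched "
                 "entities in Red relative to the Original Text")
    st.markdown("####")
    # Assumed rendering step, not shown in this diff.
    st.markdown(entity_match_html, unsafe_allow_html=True)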