import gradio as gr
import re
import matplotlib.pyplot as plt
import numpy as np
from transformers import pipeline
# ---------------------------
# 1. AI Translator Model
# ---------------------------
# Chinese -> English translation pipeline.
# NOTE: the model name below is an assumption; the original file uses `translator`
# without defining it, so any zh->en translation model can be plugged in here.
translator = pipeline("translation", model="Helsinki-NLP/opus-mt-zh-en")

def translate_ai(cn_text):
    """Translate a Chinese email into English with the AI pipeline."""
    try:
        return translator(cn_text)[0]["translation_text"]
    except Exception:
        return "Translation model error."
# ---------------------------
# 2. Politeness Marker Extraction
# ---------------------------
POLITENESS_FEATURES = {
    "modals": ["could", "would", "may", "might"],
    "softener": ["please", "kindly"],
    "hedging": ["possibly", "perhaps", "a bit", "a little"],
    "formal": ["professor", "sir", "madam"],
    "passive": ["is requested", "may cause", "is appreciated"],
    "appreciation": ["thank", "appreciate", "grateful"],
}
def extract_politeness_scores(en_text):
    """Count politeness markers in the English text, capped at 5 per feature."""
    text = en_text.lower()
    scores = {}
    for feat, markers in POLITENESS_FEATURES.items():
        score = sum(text.count(m) for m in markers)
        scores[feat] = min(score, 5)  # cap so the radar chart stays on a 0-5 scale
    return scores
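# Illustrative call (counts follow directly from POLITENESS_FEATURES above):
#   extract_politeness_scores("Could you possibly review this? Thank you, Professor.")
#   -> {"modals": 1, "softener": 0, "hedging": 1, "formal": 1, "passive": 0, "appreciation": 1}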
# ---------------------------
# 3. TIRA Classification
# ---------------------------
def classify_tira(cn_text, en_text):
    """Classify the translation shift; priority: Redirect > Amplify > Attenuate > Retain."""
    cn = cn_text.lower()
    en = en_text.lower()
    cn_amplify = ["能否", "是否方便", "请您帮忙", "抽空", "劳驾", "麻烦帮我"]
    cn_attenuate = ["尽快", "必须", "需要您", "立即"]
    cn_redirect = ["给您添麻烦", "不好意思打扰", "不胜感激"]
    en_amplify = ["please", "could you", "possibly", "would you"]
    en_attenuate = ["directly", "must", "asap"]
    en_redirect = ["inconvenience", "may cause"]
    if any(p in cn for p in cn_redirect) or any(p in en for p in en_redirect):
        return "Redirect(转向)"
    if any(p in cn for p in cn_amplify) or any(p in en for p in en_amplify):
        return "Amplify(放大)"
    if any(p in cn for p in cn_attenuate) or any(p in en for p in en_attenuate):
        return "Attenuate(削弱)"
    return "Retain(保留)"
# ---------------------------
# 4. Radar Chart (AI + Human)
# ---------------------------
def draw_dual_radar(ai_scores, human_scores=None):
    """Plot the AI politeness profile on a radar chart, overlaying the human one if given."""
    labels = list(ai_scores.keys())
    angles = np.linspace(0, 2 * np.pi, len(labels), endpoint=False).tolist()
    # Close the polygon by repeating the first value and angle.
    ai_vals = list(ai_scores.values()) + [list(ai_scores.values())[0]]
    angles_plot = angles + [angles[0]]
    fig, ax = plt.subplots(figsize=(6, 6), subplot_kw=dict(polar=True))
    ax.fill(angles_plot, ai_vals, color="#1B3B6F80", alpha=0.4, label="AI Translation")
    ax.plot(angles_plot, ai_vals, color="#1B3B6F", linewidth=2)
    if human_scores:
        hum_vals = list(human_scores.values()) + [list(human_scores.values())[0]]
        ax.fill(angles_plot, hum_vals, color="#D9534F50", alpha=0.3, label="Human Translation")
        ax.plot(angles_plot, hum_vals, color="#D9534F", linewidth=2)
    ax.set_xticks(angles)
    ax.set_xticklabels([l.capitalize() for l in labels])
    ax.set_yticks(range(1, 6))
    ax.set_ylim(0, 5)
    ax.set_title("AI vs Human Politeness Radar", fontsize=15, color="#1B3B6F")
    ax.legend(loc="upper right", bbox_to_anchor=(1.3, 1.1))
    return fig
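# Quick standalone check of the chart (hypothetical scores, outside the Gradio app):
#   fig = draw_dual_radar(
#       {"modals": 2, "softener": 1, "hedging": 0, "formal": 1, "passive": 0, "appreciation": 3}
#   )
#   fig.savefig("radar.png")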
# ---------------------------
# 5. Generate TIRA Comparison Table (AI vs Human)
# ---------------------------
def generate_tira_table(ai_tira, human_tira=None):
    """Build the Markdown comparison table for TIRA types."""
    if not human_tira:
        return f"""
| 比较项 | AI |
|--------|------------------|
| **TIRA 类型** | {ai_tira} |
"""
    # Describe the direction of the politeness shift shown by the AI translation.
    if ai_tira == "Amplify(放大)":
        shift = "↑ 放大 / 更远"
    elif ai_tira == "Attenuate(削弱)":
        shift = "↓ 更直接"
    elif ai_tira == "Retain(保留)":
        shift = "↔ 基本一致"
    else:
        shift = "→ 转向"
    return f"""
| 比较项 | AI 译文 | 人工译文 |
|--------|--------------|----------------|
| **TIRA 类型** | {ai_tira} | {human_tira} |
| **偏移方向** | {shift} | |
"""
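# Illustrative output: generate_tira_table("Amplify(放大)", "Retain(保留)") returns a
# three-column Markdown table whose 偏移方向 row reads "↑ 放大 / 更远".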
# ---------------------------
# 6. Main Process
# ---------------------------
def full_process(cn_text, human_text):
    """Run translation, politeness scoring, TIRA classification, and chart generation."""
    en_ai = translate_ai(cn_text)
    ai_scores = extract_politeness_scores(en_ai)
    ai_tira = classify_tira(cn_text, en_ai)
    human_scores = None
    human_tira = None
    if human_text and human_text.strip() != "":
        human_scores = extract_politeness_scores(human_text)
        human_tira = classify_tira(cn_text, human_text)
    table = generate_tira_table(ai_tira, human_tira)
    fig = draw_dual_radar(ai_scores, human_scores)
    return en_ai, ai_scores, ai_tira, table, fig
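# Programmatic usage (the same call Gradio makes on button click; empty string
# skips the human-translation comparison):
#   en, scores, tira, table_md, fig = full_process("麻烦帮我确认会议时间。", "")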
# ---------------------------
# 7. UI
# ---------------------------
css = """
h1 {text-align:center; color:#1B3B6F;}
label {font-weight:bold;}
"""
with gr.Blocks(css=css, title="TIRA AI vs Human Comparison System") as demo:
    gr.Markdown("""
# **📘 TIRA AI vs Human Translation Comparison System**
### *TIRA Table + Dual Radar Chart + Politeness Analysis*
---
输入中文→自动生成:
- AI 翻译
- AI vs Human 礼貌雷达叠加
- AI vs Human TIRA 类型对照表

非常适合学术展示与论文附录材料。
---
""")
    cn_input = gr.Textbox(lines=4, label="✉ 输入中文邮件内容")
    human_input = gr.Textbox(lines=3, label="📝 可选:输入人工英文译文")
    btn = gr.Button("🔍 运行分析", variant="primary")
    en_out = gr.Textbox(label="🌐 AI 翻译")
    ai_scores_out = gr.JSON(label="🔬 AI 礼貌特征")
    ai_tira_out = gr.Textbox(label="📌 AI 的 TIRA 类型")
    table_out = gr.Markdown(label="📊 TIRA 对照表")
    radar_out = gr.Plot(label="📉 AI vs Human 双雷达图叠加")
    btn.click(
        fn=full_process,
        inputs=[cn_input, human_input],
        outputs=[en_out, ai_scores_out, ai_tira_out, table_out, radar_out],
    )

demo.launch()