Spaces:
Sleeping
Sleeping
File size: 4,071 Bytes
171f2ef 9602bb7 de239b9 ce41809 de239b9 ce41809 de239b9 171f2ef 9602bb7 21916d9 fdbc2bf ce41809 de239b9 ce41809 12932a7 ce41809 12932a7 ce41809 de239b9 ce41809 de239b9 ce41809 de239b9 ce41809 171f2ef 9602bb7 171f2ef 9602bb7 de239b9 0d2c9df fdbc2bf de239b9 fdbc2bf 0d2c9df 171f2ef de239b9 12932a7 de239b9 9602bb7 171f2ef 12932a7 171f2ef de239b9 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 |
import gradio as gr
from comp import generate_response
import re
# --- Constants ---
# System prompt (in Chinese) for the workflow-extraction agent. It instructs
# the model to read the chat history and answer in two labelled sections:
# "**意图**" (a concise statement of the user's intent) and "**步骤**"
# (a numbered list of actionable steps).
# NOTE(review): parse_workflow_response must recognise these exact section
# labels — keep the labels here and the parser's regexes in sync.
WORKFLOW_SYSTEM_PROMPT = """你是一位分析对话和提取用户工作流的专家。
根据提供的聊天记录,识别用户的核心目标或意图。
然后,将对话分解为一系列可执行的步骤,以实现该目标。
输出应分为两部分,并明确分隔:
**意图**: [用户目标的简洁描述]
**步骤**:
[步骤的编号列表]
"""
# --- Helper Functions ---
def parse_workflow_response(response):
    """Split a workflow-agent reply into an ``(intent, steps)`` pair of strings.

    WORKFLOW_SYSTEM_PROMPT asks the model to label its two sections with the
    Chinese headers ``**意图**`` and ``**步骤**``, but the previous regexes
    only matched the English ``**Intent**`` / ``**Steps**``, so parsing almost
    always fell through to the placeholder text. Both label languages are now
    accepted, as well as both ASCII ``:`` and full-width ``:`` colons.

    The intent is captured up to the end of its line; the steps capture is
    DOTALL and runs to the end of the response. When a section is missing its
    slot is filled with a Chinese "could not identify" placeholder.
    """
    # Either label language, either colon style; intent stops at end-of-line.
    intent_match = re.search(
        r"\*\*(?:意图|Intent)\*\*[::]\s*(.*)", response, re.IGNORECASE
    )
    # DOTALL so the numbered step list may span multiple lines.
    steps_match = re.search(
        r"\*\*(?:步骤|Steps)\*\*[::]\s*(.*)", response, re.DOTALL | re.IGNORECASE
    )
    intent = intent_match.group(1).strip() if intent_match else "未能识别意图。"
    steps = steps_match.group(1).strip() if steps_match else "未能识别步骤。"
    return intent, steps
# --- Gradio UI ---
# Two-column layout: a streaming chat panel on the left, and read-only
# "intent" / "steps" boxes on the right that are refreshed after each turn
# by a second model call using WORKFLOW_SYSTEM_PROMPT.
with gr.Blocks() as demo:
    gr.Markdown("# Ling 灵动工作台")
    gr.Markdown("这是一个对 Zero GPU 使用 Ring-mini-2.0 模型能力的验证项目。它会和用户聊天,并实时提取其中潜在有用的工作流。在合适的时机,它会告知用户,并提醒这些工作流未来可以被复用。")
    with gr.Row():
        # Left column: the chat transcript plus the message input and send button.
        with gr.Column(scale=2):
            gr.Markdown("## 聊天")
            chat_chatbot = gr.Chatbot(label="聊天", bubble_full_width=False)
            with gr.Row():
                chat_msg = gr.Textbox(
                    label="请输入你的消息",
                    scale=4,
                )
                send_btn = gr.Button("发送", scale=1)
        # Right column: read-only outputs of the workflow-extraction agent.
        with gr.Column(scale=1):
            gr.Markdown("## 工作流提取")
            intent_textbox = gr.Textbox(label="任务意图", interactive=False)
            steps_textbox = gr.Textbox(
                label="提取步骤", interactive=False, lines=15
            )
    # One clear button resets the input box, the transcript, and both
    # workflow panels at once.
    chat_clear = gr.ClearButton([chat_msg, chat_chatbot, intent_textbox, steps_textbox], value="清除")

    def user(user_message, history):
        """Append the submitted message as a new [user, None] turn and clear the input box."""
        return "", history + [[user_message, None]]

    def bot(history):
        """Stream the assistant's reply into the last history turn.

        Yields the updated history after each chunk so Gradio re-renders the
        transcript incrementally. When the model emits a <think>...</think>
        section, the thinking text is shown in a fenced code block under a
        "Thinking..." header, followed by the visible reply.
        NOTE(review): assumes generate_response yields cumulative partial
        responses (each yield replaces, not appends to, the turn) — confirm
        against comp.generate_response.
        """
        user_message = history[-1][0]
        history[-1][1] = ""
        # Main chat model call (uses default system prompt)
        for response in generate_response(user_message, history[:-1]):
            if "</think>" in response:
                # Split off the chain-of-thought and render it separately.
                parts = response.split("</think>", 1)
                thinking_text = parts[0].replace("<think>", "")
                body_text = parts[1]
                md_output = f"**Thinking...**\n```\n{thinking_text}\n```\n\n{body_text}"
                history[-1][1] = md_output
            else:
                history[-1][1] = response
            yield history

    def update_workflow(history):
        """Run the workflow-extraction agent over the chat and return (intent, steps).

        Returns empty strings when there is no history or the last turn has
        no user message. Only the final yielded response is kept — the
        intermediate streaming chunks are discarded before parsing.
        """
        if not history or not history[-1][0]:
            return "", ""
        # The last user message is the main prompt for the workflow agent
        user_message = history[-1][0]
        # The rest of the conversation is the history
        chat_history_for_workflow = history[:-1]
        # Call the model with the workflow system prompt
        full_response = ""
        for response in generate_response(
            user_message,
            chat_history_for_workflow,
            system_prompt=WORKFLOW_SYSTEM_PROMPT
        ):
            full_response = response
        intent, steps = parse_workflow_response(full_response)
        return intent, steps

    # Handler for pressing Enter in the textbox:
    # echo the user turn, stream the bot reply, then refresh the workflow panel.
    ( chat_msg.submit(user, [chat_msg, chat_chatbot], [chat_msg, chat_chatbot], queue=False)
        .then(bot, chat_chatbot, chat_chatbot)
        .then(update_workflow, chat_chatbot, [intent_textbox, steps_textbox])
    )
    # Handler for clicking the Send button (same chain as Enter).
    ( send_btn.click(user, [chat_msg, chat_chatbot], [chat_msg, chat_chatbot], queue=False)
        .then(bot, chat_chatbot, chat_chatbot)
        .then(update_workflow, chat_chatbot, [intent_textbox, steps_textbox])
    )

if __name__ == "__main__":
    demo.launch(share=True)