# NOTE: the "Spaces: Sleeping" header lines were Hugging Face Spaces page
# residue from extraction, not source code; removed.
import gradio as gr
from comp import generate_response
import re
# --- Constants ---
# System prompt for the secondary "workflow extraction" agent. It instructs
# the model to summarize the chat into a stated goal plus numbered steps,
# using the Chinese headings "**意图**" (intent) and "**步骤**" (steps) —
# parse_workflow_response() must recognize these exact labels.
WORKFLOW_SYSTEM_PROMPT = """你是一位分析对话和提取用户工作流的专家。
根据提供的聊天记录,识别用户的核心目标或意图。
然后,将对话分解为一系列可执行的步骤,以实现该目标。
输出应分为两部分,并明确分隔:
**意图**: [用户目标的简洁描述]
**步骤**:
[步骤的编号列表]
"""
# --- Helper Functions ---
def parse_workflow_response(response):
    """Split a workflow-agent reply into ``(intent, steps)`` strings.

    Bug fix: the original regexes matched only the English labels
    ``**Intent**:`` / ``**Steps**:``, while WORKFLOW_SYSTEM_PROMPT asks the
    model for the Chinese headings ``**意图**:`` / ``**步骤**:`` — so parsing
    always failed. Both label sets (and both ASCII ``:`` and full-width
    ``:`` colons) are now accepted.

    Args:
        response: Full text produced by the workflow-extraction model.

    Returns:
        Tuple ``(intent, steps)``. When a part is missing, the corresponding
        Chinese fallback message is returned instead.
    """
    # Intent is a single line: no DOTALL, so (.*) stops at the newline.
    intent_match = re.search(
        r"\*\*(?:意图|Intent)\*\*[::]\s*(.*)", response, re.IGNORECASE
    )
    # Steps span multiple lines: DOTALL lets (.*) capture the whole tail.
    steps_match = re.search(
        r"\*\*(?:步骤|Steps)\*\*[::]\s*(.*)", response, re.DOTALL | re.IGNORECASE
    )
    intent = intent_match.group(1).strip() if intent_match else "未能识别意图。"
    steps = steps_match.group(1).strip() if steps_match else "未能识别步骤。"
    return intent, steps
# --- Gradio UI ---
# Two-pane layout: a streaming chat on the left, and a side panel on the
# right that shows the intent/steps extracted from the latest user turn.
# NOTE(review): Chatbot here uses the legacy list-of-[user, bot] pairs
# history format, and `bubble_full_width` is deprecated/removed in newer
# Gradio releases — confirm the pinned gradio version supports both.
with gr.Blocks() as demo:
    gr.Markdown("# Ling 灵动工作台")
    gr.Markdown("这是一个对 Zero GPU 使用 Ring-mini-2.0 模型能力的验证项目。它会和用户聊天,并实时提取其中潜在有用的工作流。在合适的时机,它会告知用户,并提醒这些工作流未来可以被复用。")
    with gr.Row():
        with gr.Column(scale=2):
            gr.Markdown("## 聊天")
            chat_chatbot = gr.Chatbot(label="聊天", bubble_full_width=False)
            with gr.Row():
                chat_msg = gr.Textbox(
                    label="请输入你的消息",
                    scale=4,
                )
                send_btn = gr.Button("发送", scale=1)
        with gr.Column(scale=1):
            gr.Markdown("## 工作流提取")
            intent_textbox = gr.Textbox(label="任务意图", interactive=False)
            steps_textbox = gr.Textbox(
                label="提取步骤", interactive=False, lines=15
            )
    # Clears the input box, the chat transcript, and both workflow panels.
    chat_clear = gr.ClearButton([chat_msg, chat_chatbot, intent_textbox, steps_textbox], value="清除")

    def user(user_message, history):
        """Append the user's message as a new turn (reply pending) and
        return "" to clear the input textbox."""
        return "", history + [[user_message, None]]

    def bot(history):
        """Stream the model's reply for the last user turn into history.

        Generator handler: yields the updated history after every streamed
        chunk so the Chatbot refreshes live. When a chunk contains a
        ``</think>`` marker, the reasoning segment is rendered in a fenced
        code block above the visible answer.
        """
        user_message = history[-1][0]
        history[-1][1] = ""
        # Main chat model call (uses default system prompt)
        # NOTE(review): generate_response is assumed to yield progressively
        # longer full responses, not deltas — confirm against comp.py.
        for response in generate_response(user_message, history[:-1]):
            if "</think>" in response:
                parts = response.split("</think>", 1)
                thinking_text = parts[0].replace("<think>", "")
                body_text = parts[1]
                md_output = f"**Thinking...**\n```\n{thinking_text}\n```\n\n{body_text}"
                history[-1][1] = md_output
            else:
                history[-1][1] = response
            yield history

    def update_workflow(history):
        """Run workflow extraction on the latest user turn.

        Returns the ``(intent, steps)`` strings for the side-panel
        textboxes; empty strings when there is no user message yet.
        """
        if not history or not history[-1][0]:
            return "", ""
        # The last user message is the main prompt for the workflow agent
        user_message = history[-1][0]
        # The rest of the conversation is the history
        chat_history_for_workflow = history[:-1]
        # Call the model with the workflow system prompt
        full_response = ""
        # The stream is consumed fully; only the final chunk is parsed.
        for response in generate_response(
            user_message,
            chat_history_for_workflow,
            system_prompt=WORKFLOW_SYSTEM_PROMPT
        ):
            full_response = response
        intent, steps = parse_workflow_response(full_response)
        return intent, steps

    # Handler for pressing Enter in the textbox
    # Chain: echo the user turn immediately (unqueued), then stream the bot
    # reply, then refresh the workflow panel from the updated transcript.
    ( chat_msg.submit(user, [chat_msg, chat_chatbot], [chat_msg, chat_chatbot], queue=False)
      .then(bot, chat_chatbot, chat_chatbot)
      .then(update_workflow, chat_chatbot, [intent_textbox, steps_textbox])
    )
    # Handler for clicking the Send button
    ( send_btn.click(user, [chat_msg, chat_chatbot], [chat_msg, chat_chatbot], queue=False)
      .then(bot, chat_chatbot, chat_chatbot)
      .then(update_workflow, chat_chatbot, [intent_textbox, steps_textbox])
    )
if __name__ == "__main__":
    # share=True additionally requests a public *.gradio.live tunnel URL
    # besides the local server.
    demo.launch(share=True)