botbottingbot commited on
Commit
cc41e10
·
verified ·
1 Parent(s): 411eb79

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +134 -11
app.py CHANGED
@@ -1,8 +1,13 @@
1
  import json
2
  from functools import partial
 
3
  import gradio as gr
4
  from transformers import pipeline
5
 
 
 
 
 
6
  with open("modules.json", "r", encoding="utf-8") as f:
7
  MODULES = json.load(f)["modules"]
8
 
@@ -10,14 +15,46 @@ GENERATORS = [m for m in MODULES if m["type"] == "generator"]
10
  CHECKERS = {m["id"]: m for m in MODULES if m["type"] == "checker"}
11
  GEN_BY_ID = {m["id"]: m for m in GENERATORS}
12
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
13
  llm = pipeline("text-generation", model="gpt2", max_new_tokens=512)
14
 
15
- def call_llm(prompt):
16
- o = llm(prompt, max_new_tokens=512, do_sample=False)[0]["generated_text"]
17
- return o[len(prompt):].strip() if o.startswith(prompt) else o
 
 
 
 
18
 
19
- def generator_prompt(mid, *inputs):
20
- m = GEN_BY_ID[mid]
 
 
 
 
 
 
 
 
 
 
21
  keys = list(m["input_placeholders"].keys())
22
  vals = {k: inputs[i] if i < len(inputs) else "" for i, k in enumerate(keys)}
23
  secs = m["output_sections"]
@@ -38,11 +75,15 @@ def generator_prompt(mid, *inputs):
38
  p.append("")
39
  return "\n".join(p)
40
 
41
- def checker_prompt(cid, *vals):
42
- c = CHECKERS[cid]
 
 
43
  secs = c["output_sections"]
 
44
  if len(vals) < 2:
45
- orig, draft = "", vals[0] if vals else ""
 
46
  else:
47
  orig = "\n\n".join(vals[:-1])
48
  draft = vals[-1]
@@ -65,16 +106,96 @@ def checker_prompt(cid, *vals):
65
  p.append("")
66
  return "\n".join(p)
67
 
68
- def run_generator(mid, *inputs):
 
69
  return call_llm(generator_prompt(mid, *inputs))
70
 
71
- def run_checker(cid, *inputs):
 
72
  return call_llm(checker_prompt(cid, *inputs))
73
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
74
  def build_ui():
75
  with gr.Blocks(title="Modular Intelligence") as demo:
76
- gr.Markdown("# Modular Intelligence Demo")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
77
 
 
 
 
 
 
 
78
  for m in GENERATORS:
79
  with gr.Tab(m["label"]):
80
  gr.Markdown(m["description"])
@@ -100,8 +221,10 @@ def build_ui():
100
  inputs=inputs + [out],
101
  outputs=chk
102
  )
 
103
  return demo
104
 
 
105
  if __name__ == "__main__":
106
  app = build_ui()
107
  app.launch()
 
1
  import json
2
  from functools import partial
3
+
4
  import gradio as gr
5
  from transformers import pipeline
6
 
7
+
8
+ # -----------------------------
9
+ # Load module repository
10
+ # -----------------------------
11
  with open("modules.json", "r", encoding="utf-8") as f:
12
  MODULES = json.load(f)["modules"]
13
 
 
15
  CHECKERS = {m["id"]: m for m in MODULES if m["type"] == "checker"}
16
  GEN_BY_ID = {m["id"]: m for m in GENERATORS}
17
 
18
+ # Friendly names for classifier
19
+ MODULE_LABELS = {
20
+ "Analysis Note": "analysis_note_v1",
21
+ "Document Explainer": "document_explainer_v1",
22
+ "Strategy Memo": "strategy_memo_v1",
23
+ "Message / Post Reply": "message_reply_v1",
24
+ "Profile / Application": "profile_application_v1",
25
+ "System / Architecture Blueprint": "system_blueprint_v1",
26
+ "Modular Brainstorm": "modular_brainstorm_v1",
27
+ }
28
+
29
+ LABEL_LIST = list(MODULE_LABELS.keys())
30
+
31
+
32
+ # -----------------------------
33
+ # Models
34
+ # -----------------------------
35
+ # 1) Generator engine (same as before)
36
  llm = pipeline("text-generation", model="gpt2", max_new_tokens=512)
37
 
38
+ # 2) Task classifier (zero-shot)
39
+ # You can swap this to another classification model later.
40
+ classifier = pipeline(
41
+ "zero-shot-classification",
42
+ model="facebook/bart-large-mnli"
43
+ )
44
+
45
 
46
+ # -----------------------------
47
+ # Low-level helpers
48
+ # -----------------------------
49
def call_llm(prompt: str) -> str:
    """Run the generator pipeline and return only the newly generated text.

    If the model echoes the prompt at the start of its output (gpt2's
    text-generation pipeline does by default), the echoed prefix is cut
    off and surrounding whitespace stripped; otherwise the raw output is
    returned unchanged.
    """
    generated = llm(prompt, max_new_tokens=512, do_sample=False)[0]["generated_text"]
    if generated.startswith(prompt):
        return generated[len(prompt):].strip()
    return generated
53
+
54
+
55
+ def generator_prompt(module_id: str, *inputs: str) -> str:
56
+ """Build a structured prompt for a generator module."""
57
+ m = GEN_BY_ID[module_id]
58
  keys = list(m["input_placeholders"].keys())
59
  vals = {k: inputs[i] if i < len(inputs) else "" for i, k in enumerate(keys)}
60
  secs = m["output_sections"]
 
75
  p.append("")
76
  return "\n".join(p)
77
 
78
+
79
+ def checker_prompt(checker_id: str, *vals: str) -> str:
80
+ """Build a prompt for a checker module."""
81
+ c = CHECKERS[checker_id]
82
  secs = c["output_sections"]
83
+
84
  if len(vals) < 2:
85
+ orig = ""
86
+ draft = vals[0] if vals else ""
87
  else:
88
  orig = "\n\n".join(vals[:-1])
89
  draft = vals[-1]
 
106
  p.append("")
107
  return "\n".join(p)
108
 
109
+
110
def run_generator(mid: str, *inputs: str) -> str:
    """Build the prompt for generator module *mid* and run it through the LLM."""
    prompt = generator_prompt(mid, *inputs)
    return call_llm(prompt)
112
 
113
+
114
def run_checker(cid: str, *inputs: str) -> str:
    """Build the prompt for checker module *cid* and run it through the LLM."""
    prompt = checker_prompt(cid, *inputs)
    return call_llm(prompt)
116
 
117
+
118
+ # -----------------------------
119
+ # Task classifier
120
+ # -----------------------------
121
def classify_task(task_text: str) -> tuple[str, str, str]:
    """Classify a free-form task description into one of the Modular Intelligence modules.

    Args:
        task_text: Free-form description of what the user wants to do.

    Returns:
        A ``(predicted_label, module_id, scores_text)`` tuple where
        ``predicted_label`` is the human-friendly label (e.g. "Strategy Memo"),
        ``module_id`` is the internal id (e.g. "strategy_memo_v1"), and
        ``scores_text`` is a per-label confidence table, one
        ``"label: score"`` line per candidate label.
        Blank input yields ``("No input", "", "")``.
    """
    if not task_text.strip():
        return "No input", "", ""

    res = classifier(
        task_text,
        candidate_labels=LABEL_LIST,
        multi_label=False,
    )
    # The zero-shot pipeline returns labels sorted by score, descending,
    # so the first label is the prediction.
    predicted_label = res["labels"][0]
    # Safe lookup: candidate_labels came from MODULE_LABELS' keys.
    module_id = MODULE_LABELS[predicted_label]

    # Human-readable score table (join over a generator instead of a
    # manual append loop).
    scores_text = "\n".join(
        f"{label}: {score:.2f}"
        for label, score in zip(res["labels"], res["scores"])
    )

    return predicted_label, module_id, scores_text
149
+
150
+
151
+ # -----------------------------
152
+ # UI
153
+ # -----------------------------
154
  def build_ui():
155
  with gr.Blocks(title="Modular Intelligence") as demo:
156
+ gr.Markdown(
157
+ "# Modular Intelligence Demo\n"
158
+ "Choose a module directly, or use the **Auto-route** tab to classify your task."
159
+ )
160
+
161
+ # ---- Auto-route tab (task classifier) ----
162
+ with gr.Tab("Auto-route (Task Classifier)"):
163
+ gr.Markdown(
164
+ "Paste any task or question. The classifier will suggest "
165
+ "which Modular Intelligence module to use."
166
+ )
167
+ task_box = gr.Textbox(
168
+ label="Describe what you want to do",
169
+ placeholder="E.g. 'I want a structured memo on options for exiting a JV under time pressure'...",
170
+ lines=6
171
+ )
172
+ predicted_label_box = gr.Textbox(
173
+ label="Predicted module (human-readable)",
174
+ interactive=False
175
+ )
176
+ predicted_id_box = gr.Textbox(
177
+ label="Internal module id",
178
+ interactive=False
179
+ )
180
+ scores_box = gr.Textbox(
181
+ label="Classifier scores",
182
+ interactive=False,
183
+ lines=10
184
+ )
185
+
186
+ classify_button = gr.Button("Classify task")
187
+ classify_button.click(
188
+ fn=classify_task,
189
+ inputs=[task_box],
190
+ outputs=[predicted_label_box, predicted_id_box, scores_box]
191
+ )
192
 
193
+ gr.Markdown(
194
+ "You can then go to the corresponding module tab below and "
195
+ "fill in its inputs using this guidance."
196
+ )
197
+
198
+ # ---- One tab per generator module ----
199
  for m in GENERATORS:
200
  with gr.Tab(m["label"]):
201
  gr.Markdown(m["description"])
 
221
  inputs=inputs + [out],
222
  outputs=chk
223
  )
224
+
225
  return demo
226
 
227
+
228
  if __name__ == "__main__":
229
  app = build_ui()
230
  app.launch()