botbottingbot committed
Commit c1cb680 · verified · 1 Parent(s): fb0b460

Update app.py

Files changed (1)
  1. app.py +128 -145
app.py CHANGED
@@ -1,71 +1,74 @@
  import json
  from functools import partial
-
  import gradio as gr
- from transformers import pipeline
-

- # -----------------------------
- # Load module repository
- # -----------------------------
  with open("modules.json", "r", encoding="utf-8") as f:
      MODULES = json.load(f)["modules"]

- GENERATORS = [m for m in MODULES if m["type"] == "generator"]
- CHECKERS = {m["id"]: m for m in MODULES if m["type"] == "checker"}
  GEN_BY_ID = {m["id"]: m for m in GENERATORS}
-
- # Friendly names for classifier
- MODULE_LABELS = {
-     "Analysis Note": "analysis_note_v1",
-     "Document Explainer": "document_explainer_v1",
-     "Strategy Memo": "strategy_memo_v1",
-     "Message / Post Reply": "message_reply_v1",
-     "Profile / Application": "profile_application_v1",
-     "System / Architecture Blueprint": "system_blueprint_v1",
-     "Modular Brainstorm": "modular_brainstorm_v1",
- }
-
- LABEL_LIST = list(MODULE_LABELS.keys())
-
-
- # -----------------------------
- # Models
- # -----------------------------
- # 1) Generator engine (same as before)
- llm = pipeline("text-generation", model="gpt2", max_new_tokens=512)
-
- # 2) Task classifier (zero-shot)
- # You can swap this to another classification model later.
- classifier = pipeline(
-     "zero-shot-classification",
-     model="facebook/bart-large-mnli"
- )
-
-
- # -----------------------------
- # Low-level helpers
- # -----------------------------
  def call_llm(prompt: str) -> str:
-     """Call the generator LLM and strip prompt prefix if echoed."""
-     out = llm(prompt, max_new_tokens=512, do_sample=False)[0]["generated_text"]
-     return out[len(prompt):].strip() if out.startswith(prompt) else out


- def generator_prompt(module_id: str, *inputs: str) -> str:
-     """Build a structured prompt for a generator module."""
      m = GEN_BY_ID[module_id]
      keys = list(m["input_placeholders"].keys())
      vals = {k: inputs[i] if i < len(inputs) else "" for i, k in enumerate(keys)}
      secs = m["output_sections"]

      p = []
-     p.append(f"MODULE: {m['label']}")
-     p.append("INPUT:")
      for k, v in vals.items():
          p.append(f"{k.upper()}: {v}")
      p.append("")
-     p.append("OUTPUT SECTIONS:")
      for s in secs:
          p.append(f"- {s}")
      p.append("")
@@ -76,27 +79,28 @@ def generator_prompt(module_id: str, *inputs: str) -> str:
      return "\n".join(p)


- def checker_prompt(checker_id: str, *vals: str) -> str:
-     """Build a prompt for a checker module."""
      c = CHECKERS[checker_id]
      secs = c["output_sections"]

      if len(vals) < 2:
-         orig = ""
          draft = vals[0] if vals else ""
      else:
-         orig = "\n\n".join(vals[:-1])
          draft = vals[-1]

      p = []
-     p.append(f"CHECKER: {c['label']}")
      p.append("ORIGINAL TASK:")
-     p.append(orig)
      p.append("")
-     p.append("DRAFT:")
      p.append(draft)
      p.append("")
-     p.append("RESPOND WITH:")
      for s in secs:
          p.append(f"- {s}")
      p.append("")
@@ -107,120 +111,99 @@ def checker_prompt(checker_id: str, *vals: str) -> str:
      return "\n".join(p)


- def run_generator(mid: str, *inputs: str) -> str:
-     return call_llm(generator_prompt(mid, *inputs))


- def run_checker(cid: str, *inputs: str) -> str:
-     return call_llm(checker_prompt(cid, *inputs))


- # -----------------------------
- # Task classifier
- # -----------------------------
- def classify_task(task_text: str):
-     """
-     Classify a free-form task description into one of the Modular Intelligence modules.

-     Returns:
-         predicted_label: human-friendly label (e.g. "Strategy Memo")
-         module_id: internal id (e.g. "strategy_memo_v1")
-         scores_text: formatted confidence scores per label
-     """
-     if not task_text.strip():
-         return "No input", "", ""

-     res = classifier(
-         task_text,
-         candidate_labels=LABEL_LIST,
-         multi_label=False
-     )
-     # Zero-shot pipeline returns labels sorted by score descending
-     predicted_label = res["labels"][0]
-     module_id = MODULE_LABELS[predicted_label]

-     # Build a small score table
-     lines = []
-     for label, score in zip(res["labels"], res["scores"]):
-         lines.append(f"{label}: {score:.2f}")
-     scores_text = "\n".join(lines)

-     return predicted_label, module_id, scores_text


- # -----------------------------
  # UI
- # -----------------------------
  def build_ui():
      with gr.Blocks(title="Modular Intelligence") as demo:
-         gr.Markdown(
-             "# Modular Intelligence Demo\n"
-             "Choose a module directly, or use the **Auto-route** tab to classify your task."
-         )
-
-         # ---- Auto-route tab (task classifier) ----
-         with gr.Tab("Auto-route (Task Classifier)"):
-             gr.Markdown(
-                 "Paste any task or question. The classifier will suggest "
-                 "which Modular Intelligence module to use."
-             )
-             task_box = gr.Textbox(
-                 label="Describe what you want to do",
-                 placeholder="E.g. 'I want a structured memo on options for exiting a JV under time pressure'...",
-                 lines=6
-             )
-             predicted_label_box = gr.Textbox(
-                 label="Predicted module (human-readable)",
-                 interactive=False
-             )
-             predicted_id_box = gr.Textbox(
-                 label="Internal module id",
-                 interactive=False
-             )
-             scores_box = gr.Textbox(
-                 label="Classifier scores",
-                 interactive=False,
-                 lines=10
-             )
-
-             classify_button = gr.Button("Classify task")
-             classify_button.click(
-                 fn=classify_task,
                  inputs=[task_box],
-                 outputs=[predicted_label_box, predicted_id_box, scores_box]
-             )
-
-             gr.Markdown(
-                 "You can then go to the corresponding module tab below and "
-                 "fill in its inputs using this guidance."
              )

-         # ---- One tab per generator module ----
          for m in GENERATORS:
              with gr.Tab(m["label"]):
-                 gr.Markdown(m["description"])
                  inputs = []
-                 for k, ph in m["input_placeholders"].items():
-                     t = gr.Textbox(label=k, placeholder=ph, lines=4)
                      inputs.append(t)

-                 out = gr.Textbox(label="Output", lines=16)
-                 gr.Button("Run").click(
                      fn=partial(run_generator, m["id"]),
                      inputs=inputs,
-                     outputs=out
                  )

-                 if m.get("has_checker"):
-                     cid = m.get("checker_id")
-                     if cid in CHECKERS:
-                         gr.Markdown("### Checker")
-                         chk = gr.Textbox(label="Checker Output", lines=14)
-                         gr.Button("Check").click(
-                             fn=partial(run_checker, cid),
-                             inputs=inputs + [out],
-                             outputs=chk
-                         )

      return demo
 
  import json
  from functools import partial
  import gradio as gr
+ from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer

+ # -------------------------------------------------------------
+ # Load Modules
+ # -------------------------------------------------------------
  with open("modules.json", "r", encoding="utf-8") as f:
      MODULES = json.load(f)["modules"]

+ GENERATORS = [m for m in MODULES if m.get("type") == "generator"]
+ CHECKERS = {m["id"]: m for m in MODULES if m.get("type") == "checker"}
  GEN_BY_ID = {m["id"]: m for m in GENERATORS}
+ LABEL_TO_ID = {m["label"]: m["id"] for m in GENERATORS}
+ LABEL_LIST = list(LABEL_TO_ID.keys())
+
+ # -------------------------------------------------------------
+ # Load Model (base LLM)
+ # Swappable engine: GPT-2 / Llama / Mistral etc.
+ # -------------------------------------------------------------
+ tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
+ model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
+ llm = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=300)
+
+ # -------------------------------------------------------------
+ # Automatic Router Components
+ # -------------------------------------------------------------
+ from router.rules import rule_router
+ from router.zero_shot import classify_task  # zero-shot classifier
+
+ # -------------------------------------------------------------
+ # Domain Adapters (LoRA)
+ # -------------------------------------------------------------
+ from domain_heads.loader import load_adapter  # load domain-specific adapter
+
+ # -------------------------------------------------------------
+ # Reasoning Scaffolds
+ # -------------------------------------------------------------
+ from reasoning_scaffolds.cot import apply_cot
+ from reasoning_scaffolds.critique_loop import critique_and_refine
+
+
+ # -------------------------------------------------------------
+ # Helper: LLM call
+ # -------------------------------------------------------------
  def call_llm(prompt: str) -> str:
+     out = llm(prompt, do_sample=False)[0]["generated_text"]
+     if out.startswith(prompt):
+         out = out[len(prompt):]
+     return out.strip()


+ # -------------------------------------------------------------
+ # Build prompts
+ # -------------------------------------------------------------
+ def build_generator_prompt(module_id: str, *inputs: str) -> str:
      m = GEN_BY_ID[module_id]
      keys = list(m["input_placeholders"].keys())
      vals = {k: inputs[i] if i < len(inputs) else "" for i, k in enumerate(keys)}
      secs = m["output_sections"]

      p = []
+     p.append("You are a structured reasoning module.")
+     p.append(f"MODULE: {m['label']} (id={module_id})")
+     p.append("")
+     p.append("INPUTS:")
      for k, v in vals.items():
          p.append(f"{k.upper()}: {v}")
      p.append("")
+     p.append("You must respond using these sections:")
      for s in secs:
          p.append(f"- {s}")
      p.append("")

      return "\n".join(p)


+ def build_checker_prompt(checker_id: str, *vals: str) -> str:
      c = CHECKERS[checker_id]
      secs = c["output_sections"]

      if len(vals) < 2:
+         original_task = ""
          draft = vals[0] if vals else ""
      else:
+         original_task = "\n\n".join(vals[:-1])
          draft = vals[-1]

      p = []
+     p.append("You are a strict reviewer.")
+     p.append(f"CHECKER: {c['label']} (id={checker_id})")
+     p.append("")
      p.append("ORIGINAL TASK:")
+     p.append(original_task)
      p.append("")
+     p.append("DRAFT OUTPUT:")
      p.append(draft)
      p.append("")
+     p.append("You must respond using:")
      for s in secs:
          p.append(f"- {s}")
      p.append("")

      return "\n".join(p)


+ # -------------------------------------------------------------
+ # Generator & Checker Execution
+ # -------------------------------------------------------------
+ def run_generator(module_id: str, *inputs: str) -> str:
+     m = GEN_BY_ID[module_id]
+
+     # Load domain adapter if defined
+     if m.get("domain"):
+         load_adapter(model, m["domain"])

+     # Build prompt
+     prompt = build_generator_prompt(module_id, *inputs)

+     # Apply reasoning scaffolds (CoT + critique loop)
+     prompt = apply_cot(prompt)
+     draft = call_llm(prompt)
+     final = critique_and_refine(draft)

+     return final


+ def run_checker(checker_id: str, *inputs: str) -> str:
+     prompt = build_checker_prompt(checker_id, *inputs)
+     prompt = apply_cot(prompt)
+     reviewed = call_llm(prompt)
+     return reviewed


+ # -------------------------------------------------------------
+ # Hybrid Router (rules + zero-shot)
+ # -------------------------------------------------------------
+ def hybrid_route(task_text: str):
+     if not task_text or not task_text.strip():
+         return "No task provided", "", ""

+     # 1. Rule-based (deterministic)
+     rule_choice = rule_router(task_text)
+     if rule_choice:
+         return GEN_BY_ID[rule_choice]["label"], rule_choice, "Rule-based match"

+     # 2. Zero-shot fallback
+     predicted_label, module_id, scores = classify_task(task_text)
+     return predicted_label, module_id, scores

+
+ # -------------------------------------------------------------
  # UI
+ # -------------------------------------------------------------
  def build_ui():
      with gr.Blocks(title="Modular Intelligence") as demo:
+         gr.Markdown("# Modular Intelligence\nUpgraded architecture with routing, adapters, and reasoning layers.")
+
+         # -------------------- Auto-Route Tab --------------------
+         with gr.Tab("Auto-Route"):
+             task_box = gr.Textbox(label="Describe your task", lines=6)
+             module_name = gr.Textbox(label="Suggested Module", interactive=False)
+             module_id = gr.Textbox(label="Module ID", interactive=False)
+             scores = gr.Textbox(label="Routing Details", lines=12, interactive=False)
+
+             classify_btn = gr.Button("Classify Task")
+             classify_btn.click(
+                 fn=hybrid_route,
                  inputs=[task_box],
+                 outputs=[module_name, module_id, scores],
              )

+         # -------------------- Module Tabs ------------------------
          for m in GENERATORS:
              with gr.Tab(m["label"]):
+                 gr.Markdown(f"**Module ID:** `{m['id']}` | **Domain:** `{m.get('domain', 'general')}`")
+
                  inputs = []
+                 for key, placeholder in m["input_placeholders"].items():
+                     t = gr.Textbox(label=key, placeholder=placeholder, lines=4)
                      inputs.append(t)

+                 output_box = gr.Textbox(label="Module Output", lines=20)
+                 gr.Button("Run Module").click(
                      fn=partial(run_generator, m["id"]),
                      inputs=inputs,
+                     outputs=output_box,
                  )

+                 checker_id = m.get("checker_id")
+                 if checker_id and checker_id in CHECKERS:
+                     checker_output = gr.Textbox(label="Checker Output", lines=15)
+                     gr.Button("Run Checker").click(
+                         fn=partial(run_checker, checker_id),
+                         inputs=inputs + [output_box],
+                         outputs=checker_output,
+                     )
+                 else:
+                     gr.Markdown("_No checker available for this module._")

      return demo
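
For reference, the structure app.py expects from modules.json can be read off the code above: a top-level "modules" list whose generator entries carry id, type, label, input_placeholders, and output_sections, plus the optional domain and checker_id fields used by run_generator and the checker tabs. A minimal sketch of one generator/checker pair as Python would see it after json.load; the checker id, domain value, and all placeholder text below are hypothetical examples, not taken from the repository.

# Sketch only: the shape app.py reads from modules.json, with example values.
# "strategy_memo_v1" / "Strategy Memo" appear in the code above; the checker id,
# domain value, and placeholder strings are hypothetical.
EXAMPLE_MODULES = {
    "modules": [
        {
            "id": "strategy_memo_v1",
            "type": "generator",
            "label": "Strategy Memo",
            "domain": "strategy",                       # optional: run_generator() loads an adapter for it
            "checker_id": "strategy_memo_checker_v1",   # optional: enables the "Run Checker" button
            "input_placeholders": {
                "situation": "Describe the situation...",
                "constraints": "List the key constraints...",
            },
            "output_sections": ["Summary", "Options", "Recommendation"],
        },
        {
            "id": "strategy_memo_checker_v1",
            "type": "checker",
            "label": "Strategy Memo Checker",
            "output_sections": ["Issues Found", "Suggested Fixes"],
        },
    ]
}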