Update app.py
app.py CHANGED

@@ -220,9 +220,9 @@ def generate_image_google(
     seed: Optional[int],
 ) -> List[Image.Image]:
     """
-    This assumes
-
-
+    This assumes a Google / Nano-Banana image-capable model that returns
+    inline image bytes in the response. If your model behaves differently,
+    tweak this function.
     """
     genai = _configure_google(api_key)
     model = genai.GenerativeModel(google_image_model)
@@ -239,20 +239,28 @@ def generate_image_google(
         generation_config=generation_config or None,
     )

-    # Extract images from candidates
-
-
+    # Extract images from candidates, but safely
+    candidates = getattr(resp, "candidates", []) or []
+    for cand in candidates:
+        content = getattr(cand, "content", None)
+        if not content:
+            continue
+        parts = getattr(content, "parts", []) or []
+        for part in parts:
             inline = getattr(part, "inline_data", None)
             if inline and getattr(inline, "data", None):
-
-
-
+                try:
+                    raw = base64.b64decode(inline.data)
+                    img = Image.open(io.BytesIO(raw)).convert("RGB")
+                    images.append(img)
+                except Exception:
+                    continue

     return images


 # -----------------------
-# Core callback
+# Core callback with provider fallback
 # -----------------------

 def run_generation(
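
The extraction added above never assumes the response shape: each hop through candidates, content, parts, and inline_data goes through getattr with a default, and every decode is wrapped so one bad part cannot sink the whole batch. A minimal, runnable sketch of the same pattern against stubbed objects (extract_inline_images and the SimpleNamespace fixtures are illustrative stand-ins, not names from app.py or the real google.generativeai response types):

import base64
import io
from types import SimpleNamespace
from typing import List

from PIL import Image


def extract_inline_images(resp) -> List[Image.Image]:
    """Walk candidates -> content -> parts and decode any inline image bytes."""
    images: List[Image.Image] = []
    for cand in getattr(resp, "candidates", []) or []:
        content = getattr(cand, "content", None)
        if not content:
            continue
        for part in getattr(content, "parts", []) or []:
            inline = getattr(part, "inline_data", None)
            if inline and getattr(inline, "data", None):
                try:
                    raw = base64.b64decode(inline.data)
                    images.append(Image.open(io.BytesIO(raw)).convert("RGB"))
                except Exception:
                    continue  # skip undecodable parts instead of failing the batch
    return images


# One valid base64-encoded PNG part and one junk part.
buf = io.BytesIO()
Image.new("RGB", (4, 4), "red").save(buf, format="PNG")
good = SimpleNamespace(inline_data=SimpleNamespace(data=base64.b64encode(buf.getvalue())))
bad = SimpleNamespace(inline_data=SimpleNamespace(data=b"not-an-image"))
resp = SimpleNamespace(candidates=[SimpleNamespace(content=SimpleNamespace(parts=[good, bad]))])
print(len(extract_inline_images(resp)))  # 1: the junk part is skipped, not fatal
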
@@ -291,7 +299,7 @@ def run_generation(
         full_prompt += f". Avoid: {negative_prompt.strip()}"

     debug_lines.append(f"Task: {task_type}")
-    debug_lines.append(f"Provider: {provider}")
+    debug_lines.append(f"Provider selected: {provider}")
     debug_lines.append(f"Preset: {preset}, Style: {style}")
     debug_lines.append(f"OpenAI size: {size}, quality: {quality}")
     debug_lines.append(f"Google image model: {google_image_model}")
@@ -322,16 +330,17 @@ def run_generation(

         # IMAGE
         if task_type == "Image":
-
-
-                return "Missing OpenAI API key.", [], "OpenAI key not provided."
+            primary = provider
+            secondary = "OpenAI" if provider.startswith("Google") else "Google"

-
+            # Helper to attempt OpenAI
+            def try_openai() -> Tuple[List[Image.Image], str]:
+                if not openai_key.strip():
+                    raise ValueError("OpenAI key missing for OpenAI image generation.")
                 image_model = "gpt-image-1"
                 if "Palantir" in preset:
                     image_model = "dall-e-3"
-
-            images = generate_image_openai(
+                imgs = generate_image_openai(
                     api_key=openai_key.strip(),
                     model=image_model,
                     prompt=full_prompt,
@@ -340,39 +349,68 @@ def run_generation(
                     n_images=n_images,
                     seed=seed_val,
                 )
-
-
+                return imgs, image_model
+
+            # Helper to attempt Google
+            def try_google() -> List[Image.Image]:
                 if not google_key.strip():
-
-
+                    raise ValueError("Google key missing for Google image generation.")
+                model_id = google_image_model.strip() or "nano-banana-pro"
+                return generate_image_google(
                     api_key=google_key.strip(),
-                    google_image_model=
+                    google_image_model=model_id,
                     prompt=full_prompt,
                     n_images=n_images,
                     seed=seed_val,
                 )

+            # Try primary provider first, then fallback
+            image_model_used = None
+            try:
+                if primary == "OpenAI":
+                    images, image_model_used = try_openai()
+                else: # Google primary
+                    images = try_google()
+            except Exception as e_primary:
+                debug_lines.append(f"Primary provider {primary} error: {e_primary}")
+                # Fallback if possible
+                try:
+                    if secondary == "OpenAI":
+                        images, image_model_used = try_openai()
+                    else:
+                        images = try_google()
+                    debug_lines.append(f"Fallback provider {secondary} succeeded.")
+                except Exception as e_secondary:
+                    debug_lines.append(f"Fallback provider {secondary} error: {e_secondary}")
+                    raise RuntimeError(
+                        f"Both providers failed. Primary: {e_primary} | Secondary: {e_secondary}"
+                    )
+
+            if image_model_used:
+                debug_lines.append(f"OpenAI image model used: {image_model_used}")
+
         if not text_output and task_type == "Image":
             text_output = (
-                "Image(s) generated. Use Text
+                "Image(s) generated. Use Text or Infographic Spec mode to "
                 "generate captions, copy, or layout specs."
             )

-        if
-            debug_lines.append("No images returned from provider.")
+        if task_type == "Image" and not images:
+            debug_lines.append("No images returned from any provider.")

         return text_output, images, "\n".join(debug_lines)

     except Exception as e:
         debug_lines.append(f"Exception: {e}")
-
+        # Surface the error clearly in text output as well
+        return f"Error during generation: {e}", [], "\n".join(debug_lines)


 # -----------------------
 # UI
 # -----------------------

-with gr.Blocks() as demo:
+with gr.Blocks() as demo:
     gr.Markdown(
         """
 # 🧬 ZEN Omni Studio — Text • Images • Infographics
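
The try_openai / try_google helpers plus the nested try/except above boil down to a primary-then-secondary provider fallback. A self-contained sketch of that control flow, with placeholder provider callables standing in for the app's real generate_image_openai / generate_image_google calls (generate_with_fallback and the two fake providers are illustrative, not code from app.py):

from typing import Callable, Dict, List, Tuple


def generate_with_fallback(
    primary: str,
    providers: Dict[str, Callable[[], List[object]]],
    debug_lines: List[str],
) -> Tuple[List[object], str]:
    """Try the primary provider; on any exception, try the remaining one."""
    secondary = next(name for name in providers if name != primary)
    try:
        return providers[primary](), primary
    except Exception as e_primary:
        debug_lines.append(f"Primary provider {primary} error: {e_primary}")
        try:
            images = providers[secondary]()
            debug_lines.append(f"Fallback provider {secondary} succeeded.")
            return images, secondary
        except Exception as e_secondary:
            raise RuntimeError(
                f"Both providers failed. Primary: {e_primary} | Secondary: {e_secondary}"
            ) from e_secondary


def failing_google() -> List[object]:
    raise ValueError("Google key missing for Google image generation.")


def working_openai() -> List[object]:
    return ["fake-image"]


log: List[str] = []
images, used = generate_with_fallback(
    "Google", {"Google": failing_google, "OpenAI": working_openai}, log
)
print(used)  # OpenAI
print(log)   # primary error recorded, then fallback success

In the diff itself, the RuntimeError raised after both attempts fail is then caught by run_generation's outer except, which turns it into the "Error during generation" return value alongside the accumulated debug lines.
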
@@ -407,8 +445,8 @@ Multi-provider creator for the ZEN ecosystem:
         label="Task Type",
     )
     provider = gr.Radio(
-        ["Google (Nano-Banana / Gemini)"
-        value="
+        ["OpenAI", "Google (Nano-Banana / Gemini)"],
+        value="OpenAI",
         label="Primary Provider",
     )

@@ -514,7 +552,7 @@ Multi-provider creator for the ZEN ecosystem:
     gr.Markdown("### 🧾 Debug / Logs")
     debug_output = gr.Textbox(
         label="Debug Info",
-        lines=
+        lines=12,
     )

     generate_btn.click(
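
On the UI side, the Radio now lists OpenAI first and defaults to it, and the debug Textbox gets 12 lines. A minimal standalone sketch of how those components wire into the click handler, with a placeholder callback in place of the real run_generation (fake_run_generation and the reduced input/output lists are illustrative only):

import gradio as gr


def fake_run_generation(provider: str):
    """Placeholder for run_generation: returns text output plus a debug log."""
    debug = f"Provider selected: {provider}"
    return f"Generated with {provider}", debug


with gr.Blocks() as demo:
    provider = gr.Radio(
        ["OpenAI", "Google (Nano-Banana / Gemini)"],
        value="OpenAI",
        label="Primary Provider",
    )
    generate_btn = gr.Button("Generate")
    text_output = gr.Textbox(label="Text Output")
    debug_output = gr.Textbox(label="Debug Info", lines=12)

    generate_btn.click(
        fake_run_generation,
        inputs=[provider],
        outputs=[text_output, debug_output],
    )

if __name__ == "__main__":
    demo.launch()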