ZENLLC committed on
Commit
bb1ef18
·
verified ·
1 Parent(s): 83fecbd

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +17 -14
app.py CHANGED
@@ -170,9 +170,14 @@ def generate_image_openai(
170
  "model": model,
171
  "prompt": prompt,
172
  "size": size_param,
173
- "quality": quality,
174
  "n": n_images,
175
  }
 
 
 
 
 
 
176
  if seed is not None:
177
  kwargs["seed"] = seed
178
 
@@ -220,9 +225,9 @@ def generate_image_google(
220
  seed: Optional[int],
221
  ) -> List[Image.Image]:
222
  """
223
- This assumes a Google / Nano-Banana image-capable model that returns
224
- inline image bytes in the response. If your model behaves differently,
225
- tweak this function.
226
  """
227
  genai = _configure_google(api_key)
228
  model = genai.GenerativeModel(google_image_model)
@@ -239,7 +244,6 @@ def generate_image_google(
239
  generation_config=generation_config or None,
240
  )
241
 
242
- # Extract images from candidates, but safely
243
  candidates = getattr(resp, "candidates", []) or []
244
  for cand in candidates:
245
  content = getattr(cand, "content", None)
@@ -355,7 +359,7 @@ def run_generation(
355
  def try_google() -> List[Image.Image]:
356
  if not google_key.strip():
357
  raise ValueError("Google key missing for Google image generation.")
358
- model_id = google_image_model.strip() or "nano-banana-pro"
359
  return generate_image_google(
360
  api_key=google_key.strip(),
361
  google_image_model=model_id,
@@ -364,8 +368,8 @@ def run_generation(
364
  seed=seed_val,
365
  )
366
 
367
- # Try primary provider first, then fallback
368
  image_model_used = None
 
369
  try:
370
  if primary == "OpenAI":
371
  images, image_model_used = try_openai()
@@ -402,7 +406,6 @@ def run_generation(
402
 
403
  except Exception as e:
404
  debug_lines.append(f"Exception: {e}")
405
- # Surface the error clearly in text output as well
406
  return f"Error during generation: {e}", [], "\n".join(debug_lines)
407
 
408
 
@@ -499,9 +502,9 @@ Multi-provider creator for the ZEN ecosystem:
499
  label="Aspect Ratio / Size",
500
  )
501
  quality = gr.Dropdown(
502
- ["standard", "hd"],
503
- value="hd",
504
- label="Quality",
505
  )
506
  n_images = gr.Slider(
507
  minimum=1,
@@ -526,9 +529,9 @@ Multi-provider creator for the ZEN ecosystem:
526
 
527
  gr.Markdown("### 🧪 Google Image / Text Model Hints")
528
  google_image_model = gr.Textbox(
529
- label="Google Image Model (default: nano-banana-pro)",
530
- value="nano-banana-pro",
531
- placeholder="e.g. nano-banana-pro or your exact model id",
532
  )
533
  google_text_model_hint = gr.Textbox(
534
  label="Google Text Model Hint",
 
170
  "model": model,
171
  "prompt": prompt,
172
  "size": size_param,
 
173
  "n": n_images,
174
  }
175
+
176
+ # Align with API error: supported values are low, medium, high, auto
177
+ allowed_qualities = {"low", "medium", "high", "auto"}
178
+ if quality in allowed_qualities:
179
+ kwargs["quality"] = quality
180
+
181
  if seed is not None:
182
  kwargs["seed"] = seed
183
 
 
225
  seed: Optional[int],
226
  ) -> List[Image.Image]:
227
  """
228
+ This assumes a Google / Gemini image-capable model that returns
229
+ inline image bytes in the response. If your Nano-Banana model behaves
230
+ differently, tweak this function.
231
  """
232
  genai = _configure_google(api_key)
233
  model = genai.GenerativeModel(google_image_model)
 
244
  generation_config=generation_config or None,
245
  )
246
 
 
247
  candidates = getattr(resp, "candidates", []) or []
248
  for cand in candidates:
249
  content = getattr(cand, "content", None)
 
359
  def try_google() -> List[Image.Image]:
360
  if not google_key.strip():
361
  raise ValueError("Google key missing for Google image generation.")
362
+ model_id = google_image_model.strip() or "gemini-1.5-flash"
363
  return generate_image_google(
364
  api_key=google_key.strip(),
365
  google_image_model=model_id,
 
368
  seed=seed_val,
369
  )
370
 
 
371
  image_model_used = None
372
+
373
  try:
374
  if primary == "OpenAI":
375
  images, image_model_used = try_openai()
 
406
 
407
  except Exception as e:
408
  debug_lines.append(f"Exception: {e}")
 
409
  return f"Error during generation: {e}", [], "\n".join(debug_lines)
410
 
411
 
 
502
  label="Aspect Ratio / Size",
503
  )
504
  quality = gr.Dropdown(
505
+ ["auto", "low", "medium", "high"],
506
+ value="high",
507
+ label="Quality (OpenAI)",
508
  )
509
  n_images = gr.Slider(
510
  minimum=1,
 
529
 
530
  gr.Markdown("### 🧪 Google Image / Text Model Hints")
531
  google_image_model = gr.Textbox(
532
+ label="Google Image Model (default: gemini-1.5-flash)",
533
+ value="gemini-1.5-flash",
534
+ placeholder="e.g. your Nano-Banana model id or another image-capable model",
535
  )
536
  google_text_model_hint = gr.Textbox(
537
  label="Google Text Model Hint",