Update app.py
Browse files
app.py
CHANGED
|
@@ -111,7 +111,6 @@ def generate(input=DEFAULT_INPUT, filter_input="", negative_input=DEFAULT_NEGATI
|
|
| 111 |
|
| 112 |
parameters = {
|
| 113 |
"prompt": input,
|
| 114 |
-
"negative_prompt": filter_input + negative_input,
|
| 115 |
"height": height,
|
| 116 |
"width": width,
|
| 117 |
"num_inference_steps": steps,
|
|
@@ -120,6 +119,10 @@ def generate(input=DEFAULT_INPUT, filter_input="", negative_input=DEFAULT_NEGATI
|
|
| 120 |
"generator": torch.Generator().manual_seed(seed),
|
| 121 |
"output_type":"pil",
|
| 122 |
}
|
|
|
|
|
|
|
|
|
|
|
|
|
| 123 |
|
| 124 |
images = repo(**parameters).images
|
| 125 |
image_paths = [save_image(img, seed) for img in images]
|
|
|
|
| 111 |
|
| 112 |
parameters = {
|
| 113 |
"prompt": input,
|
|
|
|
| 114 |
"height": height,
|
| 115 |
"width": width,
|
| 116 |
"num_inference_steps": steps,
|
|
|
|
| 119 |
"generator": torch.Generator().manual_seed(seed),
|
| 120 |
"output_type":"pil",
|
| 121 |
}
|
| 122 |
+
|
| 123 |
+
if model != "Large":
|
| 124 |
+
parameters["negative_prompt"] = filter_input + negative_input
|
| 125 |
+
|
| 126 |
|
| 127 |
images = repo(**parameters).images
|
| 128 |
image_paths = [save_image(img, seed) for img in images]
|