Fabrice-TIERCELIN committed on
Commit 210fca9 · verified · 1 Parent(s): 1ada92d

Upload 9 files

Files changed (5)
  1. README.md +4 -6
  2. app.py +547 -245
  3. optimization.py +85 -12
  4. optimization_utils.py +28 -17
  5. requirements.txt +11 -5
README.md CHANGED
@@ -1,14 +1,12 @@
  ---
- title: FLUX.1 Kontext
- emoji:
- colorFrom: green
+ title: Wan 2 2 First Last Frame
+ emoji: 💻
+ colorFrom: purple
  colorTo: gray
  sdk: gradio
  sdk_version: 5.29.1
  app_file: app.py
- pinned: true
- license: mit
- short_description: 'Kontext image editing on FLUX[dev] '
+ pinned: false
  ---

  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py CHANGED
@@ -1,305 +1,607 @@
1
- # PyTorch 2.8 (temporary hack)
2
  import os
 
3
  os.system('pip install --upgrade --pre --extra-index-url https://download.pytorch.org/whl/nightly/cu126 "torch<2.9" spaces')
4
 
5
- # Actual demo code
6
  import gradio as gr
 
 
 
 
 
 
7
  import numpy as np
8
- import spaces
9
- import torch
10
  import random
11
- from datetime import datetime
 
 
12
 
13
- from PIL import Image
14
- import tempfile
15
- import shutil
16
- from pathlib import Path
17
 
18
- from diffusers import FluxKontextPipeline
19
- from diffusers.utils import load_image
20
 
21
- from optimization import optimize_pipeline_
 
 
 
 
22
 
23
  MAX_SEED = np.iinfo(np.int32).max
24
 
25
- pipe = FluxKontextPipeline.from_pretrained("yuvraj108c/FLUX.1-Kontext-dev", torch_dtype=torch.bfloat16).to("cuda")
26
- optimize_pipeline_(pipe, image=Image.new("RGB", (512, 512)), prompt='prompt')
 
 
 
 
27
 
28
  input_image_debug_value = [None]
 
29
  prompt_debug_value = [None]
30
- def save_on_path(img: Image, filename: str, format_: str = None) -> Path:
31
  """
32
- Save `img` in a unique temporary folder under the given `filename`
33
- and return its absolute path.
 
 
 
 
34
  """
35
- # 1) unique temporary folder
36
- tmp_dir = Path(tempfile.mkdtemp(prefix="pil_tmp_"))
37
-
38
- # 2) full path of the future file
39
- file_path = tmp_dir / filename
40
-
41
- # 3) save
42
- img.save(file_path, format=format_ or img.format)
43
 
44
- return file_path
45
 
46
- @spaces.GPU(duration=40)
47
- def infer(
48
- input_image,
 
 
 
 
 
 
49
  prompt,
50
- seed = 42,
51
- randomize_seed = False,
52
- guidance_scale = 2.5,
53
- steps = 28,
54
- width = -1,
55
- height = -1,
 
56
  progress=gr.Progress(track_tqdm=True)
57
  ):
58
- """
59
- Perform image editing using the FLUX.1 Kontext pipeline.
 
60
 
61
- This function takes an input image and a text prompt to generate a modified version
62
- of the image based on the provided instructions. It uses the FLUX.1 Kontext model
63
- for contextual image editing tasks.
64
 
65
- Args:
66
- input_image (PIL.Image.Image): The input image to be edited. Will be converted
67
- to RGB format if not already in that format.
68
- prompt (str): Text description of the desired edit to apply to the image.
69
- Examples: "Remove glasses", "Add a hat", "Change background to beach".
70
- seed (int, optional): Random seed for reproducible generation. Defaults to 42.
71
- Must be between 0 and MAX_SEED (2^31 - 1).
72
- randomize_seed (bool, optional): If True, generates a random seed instead of
73
- using the provided seed value. Defaults to False.
74
- guidance_scale (float, optional): Controls how closely the model follows the
75
- prompt. Higher values mean stronger adherence to the prompt but may reduce
76
- image quality. Range: 1.0-10.0. Defaults to 2.5.
77
- steps (int, optional): Controls how many steps to run the diffusion model for.
78
- Range: 1-30. Defaults to 28.
79
- progress (gr.Progress, optional): Gradio progress tracker for monitoring
80
- generation progress. Defaults to gr.Progress(track_tqdm=True).
81
 
82
- Returns:
83
- tuple: A 3-tuple containing:
84
- - PIL.Image.Image: The generated/edited image
85
- - int: The seed value used for generation (useful when randomize_seed=True)
86
- - gr.update: Gradio update object to make the reuse button visible
87
 
88
- Example:
89
- >>> edited_image, used_seed, button_update = infer(
90
- ... input_image=my_image,
91
- ... prompt="Add sunglasses",
92
- ... seed=123,
93
- ... randomize_seed=False,
94
- ... guidance_scale=2.5
95
- ... )
96
- """
97
- if randomize_seed:
98
- seed = random.randint(0, MAX_SEED)
99
 
100
- if input_image:
101
- input_image = input_image.convert("RGB")
102
- image = pipe(
103
- image=input_image,
104
- prompt=prompt,
105
- guidance_scale=guidance_scale,
106
- width = input_image.size[0] if width == -1 else width,
107
- height = input_image.size[1] if height == -1 else height,
108
- num_inference_steps=steps,
109
- generator=torch.Generator().manual_seed(seed),
110
- ).images[0]
111
- else:
112
- image = pipe(
113
- prompt=prompt,
114
- guidance_scale=guidance_scale,
115
- num_inference_steps=steps,
116
- generator=torch.Generator().manual_seed(seed),
117
- ).images[0]
118
 
119
- image_filename = datetime.now().strftime("%Y-%m-%d_%H-%M-%S.%f") + '.webp'
120
- path = save_on_path(image, image_filename, format_="WEBP")
121
- return path, seed, gr.update(visible=True)
122
-
123
- def infer_example(input_image, prompt):
124
- if input_image_debug_value[0] is not None or prompt_debug_value[0] is not None:
125
- input_image=input_image_debug_value[0]
126
- prompt=prompt_debug_value[0]
127
- #input_image_debug_value[0]=prompt_debug_value[0]=None
128
- seed = random.randint(0, MAX_SEED)
129
- image, seed, _ = infer(input_image, prompt, seed, True)
130
- return image, seed
131
-
132
- css="""
133
- #col-container {
134
- margin: 0 auto;
135
- max-width: 960px;
136
  }
137
  """
138
 
139
- with gr.Blocks(css=css) as demo:
140
-
141
- with gr.Column(elem_id="col-container"):
142
- gr.Markdown(f"""# FLUX.1 Kontext [dev]
143
- Image editing and manipulation model guidance-distilled from FLUX.1 Kontext [pro], [[blog]](https://bfl.ai/announcements/flux-1-kontext-dev) [[model]](https://huggingface.co/black-forest-labs/FLUX.1-Kontext-dev)
144
- """)
145
- with gr.Row():
146
- with gr.Column():
147
- input_image = gr.Image(label="Upload the image for editing", type="pil")
148
  with gr.Row():
149
- prompt = gr.Text(
150
- label="Prompt",
151
- show_label=False,
152
- max_lines=1,
153
- placeholder="Enter your prompt for editing (e.g., 'Remove glasses', 'Add a hat')",
154
- container=False,
155
- )
156
- run_button = gr.Button(value="🚀 Edit", variant = "primary", scale=0)
 
 
157
  with gr.Accordion("Advanced Settings", open=False):
158
-
159
- seed = gr.Slider(
160
- label="Seed",
161
- minimum=0,
162
- maximum=MAX_SEED,
163
- step=1,
164
- value=0,
165
- )
166
-
167
- randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
168
-
169
- guidance_scale = gr.Slider(
170
- label="Guidance Scale",
171
- minimum=1,
172
- maximum=10,
173
- step=0.1,
174
- value=2.5,
175
- )
176
-
177
- steps = gr.Slider(
178
- label="Steps",
179
- minimum=1,
180
- maximum=30,
181
- value=30,
182
- step=1
183
- )
184
-
185
- width = gr.Slider(
186
- label="Output width",
187
- info="-1 = original width",
188
- minimum=-1,
189
- maximum=1024,
190
- value=-1,
191
- step=1
192
- )
193
-
194
- height = gr.Slider(
195
- label="Output height",
196
- info="-1 = original height",
197
- minimum=-1,
198
- maximum=1024,
199
- value=-1,
200
- step=1
201
- )
202
-
203
- with gr.Column():
204
- result = gr.Image(label="Result", show_label=False, interactive=False)
205
- reuse_button = gr.Button("Reuse this image", visible=False)
206
-
207
-
 
 
 
 
 
 
 
208
 
209
  with gr.Row(visible=False):
 
 
 
 
 
 
210
  gr.Examples(
211
- examples=[
212
- ["monster.png", "Make this monster ride a skateboard on the beach"],
213
- ["monster.png", "Make this monster on a car"],
214
- ["monster.png", "Make this monster surprised"],
215
- ["monster.png", "Make this monster sleeping"],
216
- ["monster.png", "Put this monster in a forest"],
217
- ["monster.png", "Put this monster in the desert"],
218
- ["monster.png", "Make this monster in CGI"],
219
- ["monster.png", "Make this monster with 1 eye"],
220
- ["monster.png", "Make this monster with 2 eyes"],
221
- ["monster.png", "Make this monster orange"],
222
- ["monster.png", "Make this monster yellow"],
223
- ["monster.png", "Make this monster purple"],
224
- ["monster.png", "Make this monster brown"],
225
- ["monster.png", "Make this monster white"],
226
- ["monster.png", "Make this monster black"],
227
- ["monster.png", "Make this monster grey"],
228
- ["monster.png", "Make this monster pink"],
229
- ["monster.png", "Make this monster smaller"],
230
- ["monster.png", "Make this monster thicker"],
231
- ["monster.png", "Make this monster sad"],
232
- ["monster.png", "Really make this monster ride a skateboard on the beach"],
233
- ["monster.png", "Really make this monster on a car"],
234
- ["monster.png", "Really make this monster surprised"],
235
- ["monster.png", "Really make this monster sleeping"],
236
- ["monster.png", "Really put this monster in a forest"],
237
- ["monster.png", "Really put this monster in the desert"],
238
- ["monster.png", "Really make this monster in CGI"],
239
- ["monster.png", "Really make this monster with 1 eye"],
240
- ["monster.png", "Really make this monster with 2 eyes"],
241
- ["monster.png", "Really make this monster orange"],
242
- ["monster.png", "Really make this monster yellow"],
243
- ["monster.png", "Really make this monster purple"],
244
- ["monster.png", "Really make this monster brown"],
245
- ["monster.png", "Really make this monster white"],
246
- ["monster.png", "Really make this monster black"],
247
- ["monster.png", "Really make this monster grey"],
248
- ["monster.png", "Really make this monster pink"],
249
- ["monster.png", "Really make this monster smaller"],
250
- ["monster.png", "Really make this monster thicker"],
251
- ["monster.png", "Really make this monster sad"]
252
- ],
253
- examples_per_page=1,
254
- inputs=[input_image, prompt],
255
- outputs=[result, seed],
256
- fn=infer_example,
257
  run_on_click=True,
258
  cache_examples=True,
259
- cache_mode='lazy'
260
  )
261
- prompt_debug=gr.Textbox(label="Prompt Debug")
262
- input_image_debug=gr.Image(type="pil", label="Image Debug")
263
-
264
  gr.Examples(
265
  label = "Examples from demo",
266
- examples=[
267
- ["flowers.png", "turn the flowers into sunflowers"],
268
- ["monster.png", "make this monster ride a skateboard on the beach"],
269
- ["cat.png", "make this cat happy"]
270
  ],
271
- inputs=[input_image, prompt],
272
- outputs=[result, seed],
273
- fn=infer_example
 
274
  )
275
 
276
- def handle_field_debug_change(input_image_debug_data, prompt_debug_data):
 
 
 
 
 
277
  input_image_debug_value[0] = input_image_debug_data
 
278
  prompt_debug_value[0] = prompt_debug_data
 
 
279
  return []
280
 
281
  input_image_debug.upload(
282
  fn=handle_field_debug_change,
283
- inputs=[input_image_debug, prompt_debug],
 
 
 
 
 
 
284
  outputs=[]
285
  )
286
 
287
  prompt_debug.change(
288
  fn=handle_field_debug_change,
289
- inputs=[input_image_debug, prompt_debug],
290
  outputs=[]
291
  )
292
-
293
- gr.on(
294
- triggers=[run_button.click, prompt.submit],
295
- fn = infer,
296
- inputs = [input_image, prompt, seed, randomize_seed, guidance_scale, steps, width, height],
297
- outputs = [result, seed, reuse_button]
298
  )
299
- reuse_button.click(
300
- fn = lambda image: image,
301
- inputs = [result],
302
- outputs = [input_image]
 
303
  )
304
 
305
- demo.launch(mcp_server=True)
 
 
 
1
  import os
2
+ # PyTorch 2.8 (temporary hack)
3
  os.system('pip install --upgrade --pre --extra-index-url https://download.pytorch.org/whl/nightly/cu126 "torch<2.9" spaces')
4
 
5
+ # --- 1. Model Download and Setup (Diffusers Backend) ---
6
+ try:
7
+ import spaces
8
+ except:
9
+ class spaces():
10
+ def GPU(*args, **kwargs):
11
+ def decorator(function):
12
+ return lambda *dummy_args, **dummy_kwargs: function(*dummy_args, **dummy_kwargs)
13
+ return decorator
14
+
15
+ import torch
16
+ from diffusers import FlowMatchEulerDiscreteScheduler
17
+ from diffusers.pipelines.wan.pipeline_wan_i2v import WanImageToVideoPipeline
18
+ from diffusers.models.transformers.transformer_wan import WanTransformer3DModel
19
+ from diffusers.utils.export_utils import export_to_video
20
  import gradio as gr
21
+ import imageio_ffmpeg
22
+ import tempfile
23
+ import shutil
24
+ import subprocess
25
+ import time
26
+ from datetime import datetime
27
  import numpy as np
28
+ from PIL import Image
 
29
  import random
30
+ import math
31
+ import gc
32
+ from gradio_client import Client, handle_file # Import for API call
33
 
34
+ # Import the optimization function from the separate file
35
+ from optimization import optimize_pipeline_
 
 
36
 
37
+ # --- Constants and Model Loading ---
38
+ MODEL_ID = "Wan-AI/Wan2.2-I2V-A14B-Diffusers"
39
 
40
+ # --- NEW: Flexible Dimension Constants ---
41
+ MAX_DIMENSION = 832
42
+ MIN_DIMENSION = 480
43
+ DIMENSION_MULTIPLE = 16
44
+ SQUARE_SIZE = 480
45
 
46
  MAX_SEED = np.iinfo(np.int32).max
47
 
48
+ FIXED_FPS = 24
49
+ MIN_FRAMES_MODEL = 8
50
+ MAX_FRAMES_MODEL = 81
51
+
52
+ MIN_DURATION = round(MIN_FRAMES_MODEL/FIXED_FPS, 1)
53
+ MAX_DURATION = round(MAX_FRAMES_MODEL/FIXED_FPS, 1)
54
 
55
  input_image_debug_value = [None]
56
+ end_image_debug_value = [None]
57
  prompt_debug_value = [None]
58
+ total_second_length_debug_value = [None]
59
+ allocation_time_debug_value = [None]
60
+
61
+ default_negative_prompt = "Vibrant colors, overexposure, static, blurred details, subtitles, error, style, artwork, painting, image, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, mutilated, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still image, cluttered background, three legs, many people in the background, walking backwards, overexposure, jumpcut, crossfader, "
62
+
63
+ print("Loading transformer...")
64
+
65
+ transformer = WanTransformer3DModel.from_pretrained('cbensimon/Wan2.2-I2V-A14B-bf16-Diffusers',
66
+ subfolder='transformer',
67
+ torch_dtype=torch.bfloat16,
68
+ device_map='cuda',
69
+ )
70
+
71
+ print("Loadingtransformer 2...")
72
+
73
+ transformer_2 = WanTransformer3DModel.from_pretrained('cbensimon/Wan2.2-I2V-A14B-bf16-Diffusers',
74
+ subfolder='transformer_2',
75
+ torch_dtype=torch.bfloat16,
76
+ device_map='cuda',
77
+ )
78
+
79
+ print("Loading models into memory. This may take a few minutes...")
80
+
81
+ pipe = WanImageToVideoPipeline.from_pretrained(
82
+ MODEL_ID,
83
+ transformer = transformer,
84
+ transformer_2 = transformer_2,
85
+ torch_dtype=torch.bfloat16,
86
+ )
87
+ print("Loading scheduler...")
88
+ pipe.scheduler = FlowMatchEulerDiscreteScheduler.from_config(pipe.scheduler.config, shift=8.0)
89
+ pipe.to('cuda')
90
+
91
+ print("Clean cache...")
92
+ for i in range(3):
93
+ gc.collect()
94
+ torch.cuda.synchronize()
95
+ torch.cuda.empty_cache()
96
+
97
+ print("Optimizing pipeline...")
98
+
99
+ optimize_pipeline_(pipe,
100
+ image=Image.new('RGB', (MAX_DIMENSION, MIN_DIMENSION)),
101
+ prompt='prompt',
102
+ height=MIN_DIMENSION,
103
+ width=MAX_DIMENSION,
104
+ num_frames=MAX_FRAMES_MODEL,
105
+ )
106
+ print("All models loaded and optimized. Gradio app is ready.")
107
+
108
+ # 20250508 pftq: for saving prompt to mp4 metadata comments
109
+ def set_mp4_comments_imageio_ffmpeg(input_file, comments):
110
+ try:
111
+ # Get the path to the bundled FFmpeg binary from imageio-ffmpeg
112
+ ffmpeg_path = imageio_ffmpeg.get_ffmpeg_exe()
113
+
114
+ # Check if input file exists
115
+ if not os.path.exists(input_file):
116
+ #print(f"Error: Input file {input_file} does not exist")
117
+ return False
118
+
119
+ # Create a temporary file path
120
+ temp_file = tempfile.NamedTemporaryFile(suffix='.mp4', delete=False).name
121
+
122
+ # FFmpeg command using the bundled binary
123
+ command = [
124
+ ffmpeg_path, # Use imageio-ffmpeg's FFmpeg
125
+ '-i', input_file, # input file
126
+ '-metadata', f'comment={comments}', # set comment metadata
127
+ '-c:v', 'copy', # copy video stream without re-encoding
128
+ '-c:a', 'copy', # copy audio stream without re-encoding
129
+ '-y', # overwrite output file if it exists
130
+ temp_file # temporary output file
131
+ ]
132
+
133
+ # Run the FFmpeg command
134
+ result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
135
+
136
+ if result.returncode == 0:
137
+ # Replace the original file with the modified one
138
+ shutil.move(temp_file, input_file)
139
+ #print(f"Successfully added comments to {input_file}")
140
+ return True
141
+ else:
142
+ # Clean up temp file if FFmpeg fails
143
+ if os.path.exists(temp_file):
144
+ os.remove(temp_file)
145
+ #print(f"Error: FFmpeg failed with message:\n{result.stderr}")
146
+ return False
147
+
148
+ except Exception as e:
149
+ # Clean up temp file in case of other errors
150
+ if 'temp_file' in locals() and os.path.exists(temp_file):
151
+ os.remove(temp_file)
152
+ print(f"Error saving prompt to video metadata, ffmpeg may be required: "+str(e))
153
+ return False
154
+
155
+
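As a quick sanity check for the metadata helper above, the comment it writes can be read back with the same bundled binary. This is an illustrative sketch, not part of the commit; it only relies on the documented `imageio_ffmpeg.get_ffmpeg_exe()` call and on `ffmpeg -i <file>` printing container metadata to stderr.

```python
# Illustrative only: inspect the comment written by set_mp4_comments_imageio_ffmpeg.
# `ffmpeg -i <file>` with no output exits non-zero but still reports the container
# metadata (including 'comment') on stderr.
import subprocess
import imageio_ffmpeg

def read_mp4_metadata(path: str) -> str:
    ffmpeg_path = imageio_ffmpeg.get_ffmpeg_exe()
    result = subprocess.run([ffmpeg_path, '-i', path], capture_output=True, text=True)
    return result.stderr

# Example with a hypothetical output file name:
# print(read_mp4_metadata('wan_2025-01-01_00-00-00.000000.mp4'))
```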
156
+ # --- 2. Image Processing and Application Logic ---
157
+ def generate_end_frame(start_img, gen_prompt, progress=gr.Progress(track_tqdm=True)):
158
+ """Calls an external Gradio API to generate an image."""
159
+ if start_img is None:
160
+ raise gr.Error("Please provide a Start Frame first.")
161
+
162
+ hf_token = os.getenv("HF_TOKEN")
163
+ if not hf_token:
164
+ raise gr.Error("HF_TOKEN not found in environment variables. Please set it in your Space secrets.")
165
+
166
+ with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmpfile:
167
+ start_img.save(tmpfile.name)
168
+ tmp_path = tmpfile.name
169
+
170
+ progress(0.1, desc="Connecting to image generation API...")
171
+ client = Client("multimodalart/nano-banana-private")
172
+
173
+ progress(0.5, desc=f"Generating with prompt: '{gen_prompt}'...")
174
+ try:
175
+ result = client.predict(
176
+ prompt=gen_prompt,
177
+ images=[
178
+ {"image": handle_file(tmp_path)}
179
+ ],
180
+ manual_token=hf_token,
181
+ api_name="/unified_image_generator"
182
+ )
183
+ finally:
184
+ os.remove(tmp_path)
185
+
186
+ progress(1.0, desc="Done!")
187
+ print(result)
188
+ return result
189
+
190
+ def switch_to_upload_tab():
191
+ """Returns a gr.Tabs update to switch to the first tab."""
192
+ return gr.Tabs(selected="upload_tab")
193
+
194
+
195
+ def process_image_for_video(image: Image.Image) -> Image.Image:
196
  """
197
+ Resizes an image based on the following rules for video generation:
198
+ 1. The longest side will be scaled down to MAX_DIMENSION if it's larger.
199
+ 2. The shortest side will be scaled up to MIN_DIMENSION if it's smaller.
200
+ 3. The final dimensions will be rounded to the nearest multiple of DIMENSION_MULTIPLE.
201
+ 4. Square images are resized to a fixed SQUARE_SIZE.
202
+ The aspect ratio is preserved as closely as possible.
203
  """
204
+ width, height = image.size
205
+
206
+ # Rule 4: Handle square images
207
+ if width == height:
208
+ return image.resize((SQUARE_SIZE, SQUARE_SIZE), Image.Resampling.LANCZOS)
209
+
210
+ # Determine target dimensions while preserving aspect ratio
211
+ aspect_ratio = width / height
212
+ new_width, new_height = width, height
213
+
214
+ # Rule 1: Scale down if too large
215
+ if new_width > MAX_DIMENSION or new_height > MAX_DIMENSION:
216
+ if aspect_ratio > 1: # Landscape
217
+ scale = MAX_DIMENSION / new_width
218
+ else: # Portrait
219
+ scale = MAX_DIMENSION / new_height
220
+ new_width *= scale
221
+ new_height *= scale
222
+
223
+ # Rule 2: Scale up if too small
224
+ if new_width < MIN_DIMENSION or new_height < MIN_DIMENSION:
225
+ if aspect_ratio > 1: # Landscape
226
+ scale = MIN_DIMENSION / new_height
227
+ else: # Portrait
228
+ scale = MIN_DIMENSION / new_width
229
+ new_width *= scale
230
+ new_height *= scale
231
+
232
+ # Rule 3: Round to the nearest multiple of DIMENSION_MULTIPLE
233
+ final_width = int(round(new_width / DIMENSION_MULTIPLE) * DIMENSION_MULTIPLE)
234
+ final_height = int(round(new_height / DIMENSION_MULTIPLE) * DIMENSION_MULTIPLE)
235
 
236
+ # Ensure final dimensions are at least the minimum
237
+ final_width = max(final_width, MIN_DIMENSION if aspect_ratio < 1 else SQUARE_SIZE)
238
+ final_height = max(final_height, MIN_DIMENSION if aspect_ratio > 1 else SQUARE_SIZE)
239
+
240
+
241
+ return image.resize((final_width, final_height), Image.Resampling.LANCZOS)
242
+
243
+ def resize_and_crop_to_match(target_image, reference_image):
244
+ """Resizes and center-crops the target image to match the reference image's dimensions."""
245
+ ref_width, ref_height = reference_image.size
246
+ target_width, target_height = target_image.size
247
+ scale = max(ref_width / target_width, ref_height / target_height)
248
+ new_width, new_height = int(target_width * scale), int(target_height * scale)
249
+ resized = target_image.resize((new_width, new_height), Image.Resampling.LANCZOS)
250
+ left, top = (new_width - ref_width) // 2, (new_height - ref_height) // 2
251
+ return resized.crop((left, top, left + ref_width, top + ref_height))
252
 
253
+ def init_view():
254
+ return gr.update(interactive = True)
255
+
256
+ def output_video_change(output_video):
257
+ print('Log output: ' + str(output_video))
258
+
259
+ def generate_video(
260
+ start_image_pil,
261
+ end_image_pil,
262
  prompt,
263
+ negative_prompt=default_negative_prompt,
264
+ duration_seconds=2.1,
265
+ steps=8,
266
+ guidance_scale=1,
267
+ guidance_scale_2=1,
268
+ seed=42,
269
+ randomize_seed=True,
270
  progress=gr.Progress(track_tqdm=True)
271
  ):
272
+ start = time.time()
273
+ allocation_time = 120
274
+ factor = 1
275
 
276
+ if input_image_debug_value[0] is not None or end_image_debug_value[0] is not None or prompt_debug_value[0] is not None or total_second_length_debug_value[0] is not None or allocation_time_debug_value[0] is not None:
277
+ start_image_pil = input_image_debug_value[0]
278
+ end_image_pil = end_image_debug_value[0]
279
+ prompt = prompt_debug_value[0]
280
+ duration_seconds = total_second_length_debug_value[0]
281
+ allocation_time = min(allocation_time_debug_value[0], 60 * 10)
282
+ factor = 3.1
283
+
284
+ if start_image_pil is None or end_image_pil is None:
285
+ raise gr.Error("Please upload both a start and an end image.")
286
+
287
+ # Step 1: Process the start image to get our target dimensions based on the new rules.
288
+ processed_start_image = process_image_for_video(start_image_pil)
289
 
290
+ # Step 2: Make the end image match the *exact* dimensions of the processed start image.
291
+ processed_end_image = resize_and_crop_to_match(end_image_pil, processed_start_image)
292
 
293
+ target_height, target_width = processed_start_image.height, processed_start_image.width
294
+
295
+ # Handle seed and frame count
296
+ current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
297
+ num_frames = np.clip(int(round(duration_seconds * FIXED_FPS)), MIN_FRAMES_MODEL, MAX_FRAMES_MODEL)
298
+
299
+ progress(0.2, desc=f"Generating {num_frames} frames at {target_width}x{target_height} (seed: {current_seed})...")
300
+
301
+ progress(0.1, desc="Preprocessing images...")
302
+ print("Generate a video with the prompt: " + prompt)
303
+ output_frames_list = None
304
+ caught_error = None
305
+ while factor > 0 and int(allocation_time) > 0:
306
+ try:
307
+ output_frames_list = generate_video_on_gpu(
308
+ start_image_pil,
309
+ end_image_pil,
310
+ prompt,
311
+ negative_prompt,
312
+ duration_seconds,
313
+ steps,
314
+ guidance_scale,
315
+ guidance_scale_2,
316
+ seed,
317
+ randomize_seed,
318
+ progress,
319
+ allocation_time,
320
+ factor,
321
+ target_height,
322
+ target_width,
323
+ current_seed,
324
+ num_frames,
325
+ processed_start_image,
326
+ processed_end_image
327
+ )
328
+ factor = 0
329
+ caught_error = None
330
+ except BaseException as err:
331
+ print("An exception occurred: " + str(err))
332
+ caught_error = err
333
+ factor = 0
334
+ allocation_time = int(allocation_time) - 1
335
+ except:
336
+ caught_error = None
337
+ factor = 0
338
+ allocation_time = int(allocation_time) - 1
339
+
340
+ if caught_error is not None:
341
+ raise caught_error
342
+
343
+ progress(0.9, desc="Encoding and saving video...")
344
 
345
+ video_path = 'wan_' + datetime.now().strftime("%Y-%m-%d_%H-%M-%S.%f") + '.mp4'
346
+
347
+ export_to_video(output_frames_list, video_path, fps=FIXED_FPS)
348
+ set_mp4_comments_imageio_ffmpeg(video_path, f"Prompt: {prompt} | Negative Prompt: {negative_prompt}");
349
+ print("Video exported: " + video_path)
 
 
 
 
 
 
350
 
351
+ progress(1.0, desc="Done!")
352
+ end = time.time()
353
+ secondes = int(end - start)
354
+ minutes = math.floor(secondes / 60)
355
+ secondes = secondes - (minutes * 60)
356
+ hours = math.floor(minutes / 60)
357
+ minutes = minutes - (hours * 60)
358
+ information = ("Start the process again if you want a different result. " if randomize_seed else "") + \
359
+ "The video been generated in " + \
360
+ ((str(hours) + " h, ") if hours != 0 else "") + \
361
+ ((str(minutes) + " min, ") if hours != 0 or minutes != 0 else "") + \
362
+ str(secondes) + " sec. " + \
363
+ "The video resolution is " + str(target_width) + \
364
+ " pixels large and " + str(target_height) + \
365
+ " pixels high, so a resolution of " + f'{target_width * target_height:,}' + " pixels." + \
366
+ " Your prompt is saved into the metadata of the video."
367
+ return [video_path, gr.update(value = video_path, visible = True), current_seed, gr.update(value = information, visible = True), gr.update(interactive = False)]
368
+
369
+ def get_duration(
370
+ start_image_pil,
371
+ end_image_pil,
372
+ prompt,
373
+ negative_prompt,
374
+ duration_seconds,
375
+ steps,
376
+ guidance_scale,
377
+ guidance_scale_2,
378
+ seed,
379
+ randomize_seed,
380
+ progress,
381
+ allocation_time,
382
+ factor,
383
+ target_height,
384
+ target_width,
385
+ current_seed,
386
+ num_frames,
387
+ processed_start_image,
388
+ processed_end_image
389
+ ):
390
+ return allocation_time
391
+
392
+ @spaces.GPU(duration=get_duration)
393
+ def generate_video_on_gpu(
394
+ start_image_pil,
395
+ end_image_pil,
396
+ prompt,
397
+ negative_prompt,
398
+ duration_seconds,
399
+ steps,
400
+ guidance_scale,
401
+ guidance_scale_2,
402
+ seed,
403
+ randomize_seed,
404
+ progress,
405
+ allocation_time,
406
+ factor,
407
+ target_height,
408
+ target_width,
409
+ current_seed,
410
+ num_frames,
411
+ processed_start_image,
412
+ processed_end_image
413
+ ):
414
+ """
415
+ Generates a video by interpolating between a start and end image, guided by a text prompt,
416
+ using the diffusers Wan2.2 pipeline.
417
+ """
418
+
419
+ output_frames_list = pipe(
420
+ image=processed_start_image,
421
+ last_image=processed_end_image,
422
+ prompt=prompt,
423
+ negative_prompt=negative_prompt,
424
+ height=target_height,
425
+ width=target_width,
426
+ num_frames=int(num_frames * factor),
427
+ guidance_scale=float(guidance_scale),
428
+ guidance_scale_2=float(guidance_scale_2),
429
+ num_inference_steps=int(steps),
430
+ generator=torch.Generator(device="cuda").manual_seed(current_seed),
431
+ ).frames[0]
432
+
433
+ return output_frames_list
434
+
435
+
436
+ # --- 3. Gradio User Interface ---
437
+
438
+
439
+
440
+ js = """
441
+ function createGradioAnimation() {
442
+ window.addEventListener("beforeunload", function(e) {
443
+ if (document.getElementById('dummy_button_id') && !document.getElementById('dummy_button_id').disabled) {
444
+ var confirmationMessage = 'A process is still running. '
445
+ + 'If you leave before saving, your changes will be lost.';
446
 
447
+ (e || window.event).returnValue = confirmationMessage;
448
+ }
449
+ return confirmationMessage;
450
+ });
451
+ return 'Animation created'
452
  }
453
  """
454
 
455
+ # Gradio interface
456
+ with gr.Blocks(js=js) as app:
457
+ gr.Markdown("# Wan 2.2 First/Last Frame Video Fast")
458
+ gr.Markdown("Based on the [Wan 2.2 First/Last Frame workflow](https://www.reddit.com/r/StableDiffusion/comments/1me4306/psa_wan_22_does_first_frame_last_frame_out_of_the/), applied to 🧨 Diffusers + [lightx2v/Wan2.2-Lightning](https://huggingface.co/lightx2v/Wan2.2-Lightning) 8-step LoRA")
459
+
460
+ with gr.Row(elem_id="general_items"):
461
+ with gr.Column():
462
+ with gr.Group(elem_id="group_all"):
 
463
  with gr.Row():
464
+ start_image = gr.Image(type="pil", label="Start Frame", sources=["upload", "clipboard"])
465
+ # Capture the Tabs component in a variable and assign IDs to tabs
466
+ with gr.Tabs(elem_id="group_tabs") as tabs:
467
+ with gr.TabItem("Upload", id="upload_tab"):
468
+ end_image = gr.Image(type="pil", label="End Frame", sources=["upload", "clipboard"])
469
+ with gr.TabItem("Generate", id="generate_tab"):
470
+ generate_5seconds = gr.Button("Generate scene 5 seconds in the future", elem_id="fivesec")
471
+ gr.Markdown("Generate a custom end-frame with an edit model like [Nano Banana](https://huggingface.co/spaces/multimodalart/nano-banana) or [Qwen Image Edit](https://huggingface.co/spaces/multimodalart/Qwen-Image-Edit-Fast)", elem_id="or_item")
472
+ prompt = gr.Textbox(label="Prompt", info="Describe the transition between the two images")
473
+
474
  with gr.Accordion("Advanced Settings", open=False):
475
+ duration_seconds_input = gr.Slider(minimum=MIN_DURATION, maximum=MAX_DURATION, step=0.1, value=2.1, label="Video Duration (seconds)", info=f"Clamped to model's {MIN_FRAMES_MODEL}-{MAX_FRAMES_MODEL} frames at {FIXED_FPS}fps.")
476
+ negative_prompt_input = gr.Textbox(label="Negative Prompt", value=default_negative_prompt, lines=3)
477
+ steps_slider = gr.Slider(minimum=1, maximum=30, step=1, value=8, label="Inference Steps")
478
+ guidance_scale_input = gr.Slider(minimum=0.0, maximum=10.0, step=0.5, value=1.0, label="Guidance Scale - high noise")
479
+ guidance_scale_2_input = gr.Slider(minimum=0.0, maximum=10.0, step=0.5, value=1.0, label="Guidance Scale - low noise")
480
+ with gr.Row():
481
+ seed_input = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=42)
482
+ randomize_seed_checkbox = gr.Checkbox(label="Randomize seed", value=True)
483
+
484
+ generate_button = gr.Button("Generate Video", variant="primary")
485
+ dummy_button = gr.Button(elem_id = "dummy_button_id", visible = False, interactive = False)
486
+
487
+ with gr.Column():
488
+ output_video = gr.Video(label="Generated Video", autoplay = True, loop = True)
489
+ download_button = gr.DownloadButton(label="Download", visible = True)
490
+ video_information = gr.HTML(value = "", visible = True)
491
+
492
+ # Main video generation button
493
+ ui_inputs = [
494
+ start_image,
495
+ end_image,
496
+ prompt,
497
+ negative_prompt_input,
498
+ duration_seconds_input,
499
+ steps_slider,
500
+ guidance_scale_input,
501
+ guidance_scale_2_input,
502
+ seed_input,
503
+ randomize_seed_checkbox
504
+ ]
505
+ ui_outputs = [output_video, download_button, seed_input, video_information, dummy_button]
506
+
507
+ generate_button.click(fn = init_view, inputs = [], outputs = [dummy_button], queue = False, show_progress = False).success(
508
+ fn = generate_video,
509
+ inputs = ui_inputs,
510
+ outputs = ui_outputs
511
+ )
512
+
513
+ generate_5seconds.click(
514
+ fn=switch_to_upload_tab,
515
+ inputs=None,
516
+ outputs=[tabs]
517
+ ).then(
518
+ fn=lambda img: generate_end_frame(img, "this image is a still frame from a movie. generate a new frame with what happens on this scene 5 seconds in the future"),
519
+ inputs=[start_image],
520
+ outputs=[end_image]
521
+ ).success(
522
+ fn=generate_video,
523
+ inputs=ui_inputs,
524
+ outputs=ui_outputs
525
+ )
526
+
527
+ output_video.change(
528
+ fn=output_video_change,
529
+ inputs=[output_video],
530
+ outputs=[]
531
+ )
532
 
533
  with gr.Row(visible=False):
534
+ prompt_debug=gr.Textbox(label="Prompt Debug")
535
+ input_image_debug=gr.Image(type="pil", label="Image Debug")
536
+ end_image_debug=gr.Image(type="pil", label="End Image Debug")
537
+ total_second_length_debug=gr.Slider(label="Duration Debug", minimum=1, maximum=120, value=3.2, step=0.1)
538
+ allocation_time_debug=gr.Slider(label="Allocation Debug", minimum=1, maximum=1200, value=600, step=1)
539
+ information_debug = gr.HTML(value = "")
540
  gr.Examples(
541
+ examples=[["Schoolboy_without_backpack.webp", "Schoolboy_with_backpack.webp", "The schoolboy puts on his schoolbag."]],
542
+ inputs=[start_image, end_image, prompt],
543
+ outputs=ui_outputs,
544
+ fn=generate_video,
545
  run_on_click=True,
546
  cache_examples=True,
547
+ cache_mode='lazy',
548
  )
549
+
 
 
550
  gr.Examples(
551
  label = "Examples from demo",
552
+ examples = [
553
+ ["poli_tower.png", "tower_takes_off.png", "The man turns around."],
554
+ ["ugly_sonic.jpeg", "squatting_sonic.png", "पात्रं क्षेपणास्त्रं चकमाति।"],
555
+ ["Schoolboy_without_backpack.webp", "Schoolboy_with_backpack.webp", "The schoolboy puts on his schoolbag."],
556
  ],
557
+ inputs = [start_image, end_image, prompt],
558
+ outputs = ui_outputs,
559
+ fn = generate_video,
560
+ cache_examples = False,
561
  )
562
 
563
+ def handle_field_debug_change(
564
+ input_image_debug_data,
565
+ end_image_debug_data,
566
+ prompt_debug_data,
567
+ total_second_length_debug_data,
568
+ allocation_time_debug_data):
569
  input_image_debug_value[0] = input_image_debug_data
570
+ end_image_debug_value[0] = end_image_debug_data
571
  prompt_debug_value[0] = prompt_debug_data
572
+ total_second_length_debug_value[0] = total_second_length_debug_data
573
+ allocation_time_debug_value[0] = allocation_time_debug_data
574
  return []
575
 
576
  input_image_debug.upload(
577
  fn=handle_field_debug_change,
578
+ inputs=[input_image_debug, end_image_debug, prompt_debug, total_second_length_debug, allocation_time_debug],
579
+ outputs=[]
580
+ )
581
+
582
+ end_image_debug.upload(
583
+ fn=handle_field_debug_change,
584
+ inputs=[input_image_debug, end_image_debug, prompt_debug, total_second_length_debug, allocation_time_debug],
585
  outputs=[]
586
  )
587
 
588
  prompt_debug.change(
589
  fn=handle_field_debug_change,
590
+ inputs=[input_image_debug, end_image_debug, prompt_debug, total_second_length_debug, allocation_time_debug],
591
  outputs=[]
592
  )
593
+
594
+ total_second_length_debug.change(
595
+ fn=handle_field_debug_change,
596
+ inputs=[input_image_debug, end_image_debug, prompt_debug, total_second_length_debug, allocation_time_debug],
597
+ outputs=[]
 
598
  )
599
+
600
+ allocation_time_debug.change(
601
+ fn=handle_field_debug_change,
602
+ inputs=[input_image_debug, end_image_debug, prompt_debug, total_second_length_debug, allocation_time_debug],
603
+ outputs=[]
604
  )
605
 
606
+ if __name__ == "__main__":
607
+ app.launch(mcp_server=True, share=True)
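For reference, the sizing and frame-count rules that the new `process_image_for_video` and `generate_video` apply can be checked in isolation. The sketch below mirrors the constants and logic from the new app.py; `plan_dimensions` and `plan_frames` are illustrative helper names rather than functions from the commit, and the simplified resize omits the final `max(...)` clamp of the original.

```python
# Minimal sketch of the resize and duration->frame rules used in the new app.py.
# Constants copied from the file; helpers and sample values are illustrative.
MAX_DIMENSION = 832
MIN_DIMENSION = 480
DIMENSION_MULTIPLE = 16
SQUARE_SIZE = 480
FIXED_FPS = 24
MIN_FRAMES_MODEL = 8
MAX_FRAMES_MODEL = 81

def plan_dimensions(width: int, height: int) -> tuple[int, int]:
    """Same rules as process_image_for_video, applied to raw pixel sizes."""
    if width == height:                                  # rule 4: squares are fixed
        return SQUARE_SIZE, SQUARE_SIZE
    aspect_ratio = width / height
    new_w, new_h = float(width), float(height)
    if new_w > MAX_DIMENSION or new_h > MAX_DIMENSION:   # rule 1: scale down
        scale = MAX_DIMENSION / (new_w if aspect_ratio > 1 else new_h)
        new_w, new_h = new_w * scale, new_h * scale
    if new_w < MIN_DIMENSION or new_h < MIN_DIMENSION:   # rule 2: scale up
        scale = MIN_DIMENSION / (new_h if aspect_ratio > 1 else new_w)
        new_w, new_h = new_w * scale, new_h * scale
    final_w = int(round(new_w / DIMENSION_MULTIPLE) * DIMENSION_MULTIPLE)  # rule 3
    final_h = int(round(new_h / DIMENSION_MULTIPLE) * DIMENSION_MULTIPLE)
    return final_w, final_h

def plan_frames(duration_seconds: float) -> int:
    """Duration is converted to frames at 24 fps and clamped to [8, 81]."""
    return max(MIN_FRAMES_MODEL, min(MAX_FRAMES_MODEL, round(duration_seconds * FIXED_FPS)))

if __name__ == "__main__":
    print(plan_dimensions(1920, 1080))  # a 1080p start frame -> (848, 480)
    print(plan_frames(2.1))             # 2.1 s at 24 fps -> 50 frames
```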
optimization.py CHANGED
@@ -8,21 +8,49 @@ from typing import ParamSpec
8
  import spaces
9
  import torch
10
  from torch.utils._pytree import tree_map_only
 
 
 
11
 
12
  from optimization_utils import capture_component_call
13
  from optimization_utils import aoti_compile
 
14
 
15
 
16
  P = ParamSpec('P')
17
 
 
18
 
19
- TRANSFORMER_HIDDEN_DIM = torch.export.Dim('hidden', min=4096, max=8212)
 
20
21
  TRANSFORMER_DYNAMIC_SHAPES = {
22
- 'hidden_states': {1: TRANSFORMER_HIDDEN_DIM},
23
- 'img_ids': {0: TRANSFORMER_HIDDEN_DIM},
 
 
 
24
  }
25
 
 
 
 
26
  INDUCTOR_CONFIGS = {
27
  'conv_1x1_as_mm': True,
28
  'epilogue_fusion': False,
@@ -37,24 +65,69 @@ def optimize_pipeline_(pipeline: Callable[P, Any], *args: P.args, **kwargs: P.kw
37
 
38
  @spaces.GPU(duration=1500)
39
  def compile_transformer():
40
-
41
  with capture_component_call(pipeline, 'transformer') as call:
42
  pipeline(*args, **kwargs)
43
-
44
  dynamic_shapes = tree_map_only((torch.Tensor, bool), lambda t: None, call.kwargs)
45
  dynamic_shapes |= TRANSFORMER_DYNAMIC_SHAPES
46
 
47
- pipeline.transformer.fuse_qkv_projections()
48
-
49
- exported = torch.export.export(
 
 
 
 
50
  mod=pipeline.transformer,
51
  args=call.args,
52
  kwargs=call.kwargs,
53
  dynamic_shapes=dynamic_shapes,
54
  )
55
 
56
- return aoti_compile(exported, INDUCTOR_CONFIGS)
 
 
 
57
 
58
- transformer_config = pipeline.transformer.config
59
- pipeline.transformer = compile_transformer()
60
- pipeline.transformer.config = transformer_config # pyright: ignore[reportAttributeAccessIssue]
 
8
  import spaces
9
  import torch
10
  from torch.utils._pytree import tree_map_only
11
+ from torchao.quantization import quantize_
12
+ from torchao.quantization import Float8DynamicActivationFloat8WeightConfig
13
+ from torchao.quantization import Int8WeightOnlyConfig
14
 
15
  from optimization_utils import capture_component_call
16
  from optimization_utils import aoti_compile
17
+ from optimization_utils import drain_module_parameters
18
 
19
 
20
  P = ParamSpec('P')
21
 
22
+ # --- CORRECTED DYNAMIC SHAPING ---
23
 
24
+ # VAE temporal scale factor is 1, latent_frames = num_frames. Range is [8, 81].
25
+ LATENT_FRAMES_DIM = torch.export.Dim('num_latent_frames', min=8, max=81)
26
 
27
+ # The transformer has a patch_size of (1, 2, 2), which means the input latent height and width
28
+ # are effectively divided by 2. This creates constraints that fail if the symbolic tracer
29
+ # assumes odd numbers are possible.
30
+ #
31
+ # To solve this, we define the dynamic dimension for the *patched* (i.e., post-division) size,
32
+ # and then express the input shape as 2 * this dimension. This mathematically guarantees
33
+ # to the compiler that the input latent dimensions are always even, satisfying the constraints.
34
+
35
+ # App range for pixel dimensions: [480, 832]. VAE scale factor is 8.
36
+ # Latent dimension range: [480/8, 832/8] = [60, 104].
37
+ # Patched latent dimension range: [60/2, 104/2] = [30, 52].
38
+ LATENT_PATCHED_HEIGHT_DIM = torch.export.Dim('latent_patched_height', min=30, max=52)
39
+ LATENT_PATCHED_WIDTH_DIM = torch.export.Dim('latent_patched_width', min=30, max=52)
40
+
41
+ # Now, we define the dynamic shapes for the transformer's `hidden_states` input,
42
+ # which has the shape (batch_size, channels, num_frames, height, width).
43
  TRANSFORMER_DYNAMIC_SHAPES = {
44
+ 'hidden_states': {
45
+ 2: LATENT_FRAMES_DIM,
46
+ 3: 2 * LATENT_PATCHED_HEIGHT_DIM, # Guarantees even height
47
+ 4: 2 * LATENT_PATCHED_WIDTH_DIM, # Guarantees even width
48
+ },
49
  }
50
 
51
+ # --- END OF CORRECTION ---
52
+
53
+
54
  INDUCTOR_CONFIGS = {
55
  'conv_1x1_as_mm': True,
56
  'epilogue_fusion': False,
 
65
 
66
  @spaces.GPU(duration=1500)
67
  def compile_transformer():
68
+
69
+ # This LoRA fusion part remains the same
70
+ pipeline.load_lora_weights(
71
+ "Kijai/WanVideo_comfy",
72
+ weight_name="Lightx2v/lightx2v_I2V_14B_480p_cfg_step_distill_rank128_bf16.safetensors",
73
+ adapter_name="lightx2v"
74
+ )
75
+ kwargs_lora = {}
76
+ kwargs_lora["load_into_transformer_2"] = True
77
+ pipeline.load_lora_weights(
78
+ "Kijai/WanVideo_comfy",
79
+ weight_name="Lightx2v/lightx2v_I2V_14B_480p_cfg_step_distill_rank128_bf16.safetensors",
80
+ adapter_name="lightx2v_2", **kwargs_lora
81
+ )
82
+ pipeline.set_adapters(["lightx2v", "lightx2v_2"], adapter_weights=[1., 1.])
83
+ pipeline.fuse_lora(adapter_names=["lightx2v"], lora_scale=3., components=["transformer"])
84
+ pipeline.fuse_lora(adapter_names=["lightx2v_2"], lora_scale=1., components=["transformer_2"])
85
+ pipeline.unload_lora_weights()
86
+
87
+ # Capture a single call to get the args/kwargs structure
88
  with capture_component_call(pipeline, 'transformer') as call:
89
  pipeline(*args, **kwargs)
90
+
91
  dynamic_shapes = tree_map_only((torch.Tensor, bool), lambda t: None, call.kwargs)
92
  dynamic_shapes |= TRANSFORMER_DYNAMIC_SHAPES
93
 
94
+ # Quantization remains the same
95
+ quantize_(pipeline.transformer, Float8DynamicActivationFloat8WeightConfig())
96
+ quantize_(pipeline.transformer_2, Float8DynamicActivationFloat8WeightConfig())
97
+
98
+ # --- SIMPLIFIED COMPILATION ---
99
+
100
+ exported_1 = torch.export.export(
101
  mod=pipeline.transformer,
102
  args=call.args,
103
  kwargs=call.kwargs,
104
  dynamic_shapes=dynamic_shapes,
105
  )
106
+
107
+ exported_2 = torch.export.export(
108
+ mod=pipeline.transformer_2,
109
+ args=call.args,
110
+ kwargs=call.kwargs,
111
+ dynamic_shapes=dynamic_shapes,
112
+ )
113
+
114
+ compiled_1 = aoti_compile(exported_1, INDUCTOR_CONFIGS)
115
+ compiled_2 = aoti_compile(exported_2, INDUCTOR_CONFIGS)
116
+
117
+ # Return the two compiled models
118
+ return compiled_1, compiled_2
119
+
120
+
121
+ # Quantize text encoder (same as before)
122
+ quantize_(pipeline.text_encoder, Int8WeightOnlyConfig())
123
+
124
+ # Get the two dynamically-shaped compiled models
125
+ compiled_transformer_1, compiled_transformer_2 = compile_transformer()
126
 
127
+ # --- SIMPLIFIED ASSIGNMENT ---
128
+
129
+ pipeline.transformer.forward = compiled_transformer_1
130
+ drain_module_parameters(pipeline.transformer)
131
 
132
+ pipeline.transformer_2.forward = compiled_transformer_2
133
+ drain_module_parameters(pipeline.transformer_2)
 
optimization_utils.py CHANGED
@@ -10,7 +10,6 @@ from unittest.mock import patch
10
  import torch
11
  from torch._inductor.package.package import package_aoti
12
  from torch.export.pt2_archive._package import AOTICompiledModel
13
- from torch.export.pt2_archive._package_weights import TensorProperties
14
  from torch.export.pt2_archive._package_weights import Weights
15
 
16
 
@@ -21,31 +20,33 @@ INDUCTOR_CONFIGS_OVERRIDES = {
21
  }
22
 
23
24
  class ZeroGPUCompiledModel:
25
- def __init__(self, archive_file: torch.types.FileLike, weights: Weights, cuda: bool = False):
26
  self.archive_file = archive_file
27
  self.weights = weights
28
- if cuda:
29
- self.weights_to_cuda_()
30
  self.compiled_model: ContextVar[AOTICompiledModel | None] = ContextVar('compiled_model', default=None)
31
- def weights_to_cuda_(self):
32
- for name in self.weights:
33
- tensor, properties = self.weights.get_weight(name)
34
- self.weights[name] = (tensor.to('cuda'), properties)
35
  def __call__(self, *args, **kwargs):
36
  if (compiled_model := self.compiled_model.get()) is None:
37
- constants_map = {name: value[0] for name, value in self.weights.items()}
38
  compiled_model = cast(AOTICompiledModel, torch._inductor.aoti_load_package(self.archive_file))
39
- compiled_model.load_constants(constants_map, check_full_update=True, user_managed=True)
40
  self.compiled_model.set(compiled_model)
41
  return compiled_model(*args, **kwargs)
42
  def __reduce__(self):
43
- weight_dict: dict[str, tuple[torch.Tensor, TensorProperties]] = {}
44
- for name in self.weights:
45
- tensor, properties = self.weights.get_weight(name)
46
- tensor_ = torch.empty_like(tensor, device='cpu').pin_memory()
47
- weight_dict[name] = (tensor_.copy_(tensor).detach().share_memory_(), properties)
48
- return ZeroGPUCompiledModel, (self.archive_file, Weights(weight_dict), True)
49
 
50
 
51
  def aoti_compile(
@@ -61,7 +62,8 @@ def aoti_compile(
61
  files: list[str | Weights] = [file for file in artifacts if isinstance(file, str)]
62
  package_aoti(archive_file, files)
63
  weights, = (artifact for artifact in artifacts if isinstance(artifact, Weights))
64
- return ZeroGPUCompiledModel(archive_file, weights)
 
65
 
66
 
67
  @contextlib.contextmanager
@@ -94,3 +96,12 @@ def capture_component_call(
94
  except CapturedCallException as e:
95
  captured_call.args = e.args
96
  captured_call.kwargs = e.kwargs
10
  import torch
11
  from torch._inductor.package.package import package_aoti
12
  from torch.export.pt2_archive._package import AOTICompiledModel
 
13
  from torch.export.pt2_archive._package_weights import Weights
14
 
15
 
 
20
  }
21
 
22
 
23
+ class ZeroGPUWeights:
24
+ def __init__(self, constants_map: dict[str, torch.Tensor], to_cuda: bool = False):
25
+ if to_cuda:
26
+ self.constants_map = {name: tensor.to('cuda') for name, tensor in constants_map.items()}
27
+ else:
28
+ self.constants_map = constants_map
29
+ def __reduce__(self):
30
+ constants_map: dict[str, torch.Tensor] = {}
31
+ for name, tensor in self.constants_map.items():
32
+ tensor_ = torch.empty_like(tensor, device='cpu').pin_memory()
33
+ constants_map[name] = tensor_.copy_(tensor).detach().share_memory_()
34
+ return ZeroGPUWeights, (constants_map, True)
35
+
36
+
37
  class ZeroGPUCompiledModel:
38
+ def __init__(self, archive_file: torch.types.FileLike, weights: ZeroGPUWeights):
39
  self.archive_file = archive_file
40
  self.weights = weights
 
 
41
  self.compiled_model: ContextVar[AOTICompiledModel | None] = ContextVar('compiled_model', default=None)
 
 
 
 
42
  def __call__(self, *args, **kwargs):
43
  if (compiled_model := self.compiled_model.get()) is None:
 
44
  compiled_model = cast(AOTICompiledModel, torch._inductor.aoti_load_package(self.archive_file))
45
+ compiled_model.load_constants(self.weights.constants_map, check_full_update=True, user_managed=True)
46
  self.compiled_model.set(compiled_model)
47
  return compiled_model(*args, **kwargs)
48
  def __reduce__(self):
49
+ return ZeroGPUCompiledModel, (self.archive_file, self.weights)
 
 
 
 
 
50
 
51
 
52
  def aoti_compile(
 
62
  files: list[str | Weights] = [file for file in artifacts if isinstance(file, str)]
63
  package_aoti(archive_file, files)
64
  weights, = (artifact for artifact in artifacts if isinstance(artifact, Weights))
65
+ zerogpu_weights = ZeroGPUWeights({name: weights.get_weight(name)[0] for name in weights})
66
+ return ZeroGPUCompiledModel(archive_file, zerogpu_weights)
67
 
68
 
69
  @contextlib.contextmanager
 
96
  except CapturedCallException as e:
97
  captured_call.args = e.args
98
  captured_call.kwargs = e.kwargs
99
+
100
+
101
+ def drain_module_parameters(module: torch.nn.Module):
102
+ state_dict_meta = {name: {'device': tensor.device, 'dtype': tensor.dtype} for name, tensor in module.state_dict().items()}
103
+ state_dict = {name: torch.nn.Parameter(torch.empty_like(tensor, device='cpu')) for name, tensor in module.state_dict().items()}
104
+ module.load_state_dict(state_dict, assign=True)
105
+ for name, param in state_dict.items():
106
+ meta = state_dict_meta[name]
107
+ param.data = torch.Tensor([]).to(**meta)
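To see what the new `drain_module_parameters` buys, the same logic can be run on a plain `nn.Linear`: the parameter names and dtypes survive, but the weight storage is released, since inference is served by the AOTI-compiled callable that keeps its own copy of the weights. A small self-contained sketch follows; the `nn.Linear` and the size check are illustrative, not from the commit.

```python
# Illustration of drain_module_parameters on a throwaway module: after draining,
# the module still has its parameter names/dtypes but no weight storage left.
import torch

def drain_module_parameters(module: torch.nn.Module):
    # Same logic as the helper added to optimization_utils.py.
    meta = {name: {'device': t.device, 'dtype': t.dtype} for name, t in module.state_dict().items()}
    empty = {name: torch.nn.Parameter(torch.empty_like(t, device='cpu')) for name, t in module.state_dict().items()}
    module.load_state_dict(empty, assign=True)
    for name, param in empty.items():
        param.data = torch.Tensor([]).to(**meta[name])

linear = torch.nn.Linear(1024, 1024)
before = sum(p.numel() for p in linear.parameters())
drain_module_parameters(linear)
after = sum(p.numel() for p in linear.parameters())
print(before, '->', after)  # 1049600 -> 0: names kept, storage released
```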
requirements.txt CHANGED
@@ -1,5 +1,11 @@
1
- transformers
2
- git+https://github.com/huggingface/diffusers.git
3
- accelerate
4
- safetensors
5
- sentencepiece
1
+ git+https://github.com/YassineT-cdc/diffusers.git@wan22-loras_after
2
+
3
+ transformers==4.57.3
4
+ accelerate==1.12.0
5
+ safetensors==0.7.0
6
+ sentencepiece==0.2.1
7
+ peft==0.18.0
8
+ ftfy==6.3.1
9
+ imageio-ffmpeg==0.6.0
10
+ opencv-python==4.12.0.88
11
+ torchao==0.14.1