app.py CHANGED
@@ -61,42 +61,32 @@ allocation_time_debug_value = [None]

 default_negative_prompt = "Vibrant colors, overexposure, static, blurred details, subtitles, error, style, artwork, painting, image, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, mutilated, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still image, cluttered background, three legs, many people in the background, walking backwards, overexposure, jumpcut, crossfader, "

-print("Loading transformer...")
-
 transformer = WanTransformer3DModel.from_pretrained('cbensimon/Wan2.2-I2V-A14B-bf16-Diffusers',
     subfolder='transformer',
     torch_dtype=torch.bfloat16,
     device_map='cuda',
 )

-print("Loadingtransformer 2...")
-
 transformer_2 = WanTransformer3DModel.from_pretrained('cbensimon/Wan2.2-I2V-A14B-bf16-Diffusers',
     subfolder='transformer_2',
     torch_dtype=torch.bfloat16,
     device_map='cuda',
 )

-print("Loading models into memory. This may take a few minutes...")
-
 pipe = WanImageToVideoPipeline.from_pretrained(
     MODEL_ID,
     transformer = transformer,
     transformer_2 = transformer_2,
     torch_dtype=torch.bfloat16,
 )
-print("Loading scheduler...")
 pipe.scheduler = FlowMatchEulerDiscreteScheduler.from_config(pipe.scheduler.config, shift=8.0)
 pipe.to('cuda')

-print("Clean cache...")
 for i in range(3):
     gc.collect()
     torch.cuda.synchronize()
     torch.cuda.empty_cache()

-print("Optimizing pipeline...")
-
 optimize_pipeline_(pipe,
     image=Image.new('RGB', (MAX_DIMENSION, MIN_DIMENSION)),
     prompt='prompt',

@@ -104,7 +94,6 @@ optimize_pipeline_(pipe,
     width=MAX_DIMENSION,
     num_frames=MAX_FRAMES_MODEL,
 )
-print("All models loaded and optimized. Gradio app is ready.")

 # 20250508 pftq: for saving prompt to mp4 metadata comments
 def set_mp4_comments_imageio_ffmpeg(input_file, comments):
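For context when reading the hunk above: it uses names defined elsewhere in app.py that do not appear in the diff (MODEL_ID, MAX_DIMENSION, MIN_DIMENSION, MAX_FRAMES_MODEL, optimize_pipeline_, and the imports). Below is a minimal sketch of the assumed surrounding setup, not the Space's actual definitions: the imports assume a recent diffusers release with Wan 2.2 support, and the constant values and the optimize_pipeline_ stub are illustrative placeholders only.

# Minimal sketch of the setup the hunk assumes; placeholder values, not the Space's real ones.
import gc

import torch
from PIL import Image
from diffusers import (
    FlowMatchEulerDiscreteScheduler,
    WanImageToVideoPipeline,
    WanTransformer3DModel,
)

# Assumed constants (the real app.py defines these near the top with its own values).
MODEL_ID = "<base Wan2.2 I2V Diffusers repo id>"  # placeholder: actual repo id is set in app.py
MAX_DIMENSION = 832     # illustrative: largest output side in pixels
MIN_DIMENSION = 480     # illustrative: smallest output side in pixels
MAX_FRAMES_MODEL = 81   # illustrative: frame count used when warming up the pipeline

def optimize_pipeline_(pipe, **example_inputs):
    """Assumed placeholder: the real helper warms/compiles the pipeline with example inputs."""
    pass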