"""
Quick loader for INT8 quantized HunyuanImage-3.0 model.

Generated automatically by hunyuan_quantize_int8.py
"""

import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig


def load_quantized_hunyuan_int8(model_path=r"A:\Comfy25\ComfyUI_windows_portable\ComfyUI\models\HunyuanImage-3-INT8"):
    """Load the INT8 quantized HunyuanImage-3.0 model."""
    # LLM.int8() quantization; 6.0 is the bitsandbytes default outlier
    # threshold for mixed-precision decomposition.
    quant_config = BitsAndBytesConfig(
        load_in_8bit=True,
        llm_int8_threshold=6.0,
    )
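
    # Hypothetical alternative (not part of the generated script): 4-bit NF4
    # quantization roughly halves weight memory again, at some quality cost.
    # quant_config = BitsAndBytesConfig(
    #     load_in_4bit=True,
    #     bnb_4bit_quant_type="nf4",
    #     bnb_4bit_compute_dtype=torch.bfloat16,
    # )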

    # device_map="auto" lets accelerate place layers across available GPUs
    # (and CPU, if needed); trust_remote_code is required because
    # HunyuanImage-3.0 ships custom modeling code.
    model = AutoModelForCausalLM.from_pretrained(
        model_path,
        quantization_config=quant_config,
        device_map="auto",
        trust_remote_code=True,
        torch_dtype=torch.bfloat16,
        attn_implementation="sdpa",
    )

    # HunyuanImage-3.0's custom code attaches the tokenizer to the model
    # instance rather than using a separate AutoTokenizer.
    model.load_tokenizer(model_path)

    return model


if __name__ == "__main__":
    print("Loading INT8 quantized model...")
    model = load_quantized_hunyuan_int8()
    print("Model loaded successfully!")
    print(f"Device map: {model.hf_device_map}")

    if torch.cuda.is_available():
        print(f"GPU memory allocated: {torch.cuda.memory_allocated() / 1024**3:.2f} GB")
        print(f"GPU memory reserved: {torch.cuda.memory_reserved() / 1024**3:.2f} GB")
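
    # Hedged usage sketch: the upstream HunyuanImage-3.0 model card documents a
    # generate_image() method on the model itself; the argument names below
    # follow that card, not this generated script, so verify them against your
    # snapshot before uncommenting.
    # image = model.generate_image(prompt="A watercolor fox in a forest", stream=True)
    # image.save("hunyuan_int8_test.png")  # hypothetical output path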