from diffsynth.pipelines.qwen_image import QwenImagePipeline, ModelConfig
from diffsynth import load_state_dict
import torch

# Assemble the pipeline from the distilled DiT plus the base model's text encoder, VAE, and tokenizer.
pipe = QwenImagePipeline.from_pretrained(
    torch_dtype=torch.bfloat16,
    device="cuda",
    model_configs=[
        ModelConfig(model_id="DiffSynth-Studio/Qwen-Image-Distill-Full", origin_file_pattern="diffusion_pytorch_model*.safetensors"),
        ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="text_encoder/model*.safetensors"),
        ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="vae/diffusion_pytorch_model.safetensors"),
    ],
    tokenizer_config=ModelConfig(model_id="Qwen/Qwen-Image", origin_file_pattern="tokenizer/"),
)

# Load the fully fine-tuned checkpoint into the DiT.
state_dict = load_state_dict("models/train/Qwen-Image-Distill-Full_full/epoch-1.safetensors")
pipe.dit.load_state_dict(state_dict)

# Generate an image; the distilled model uses few steps and cfg_scale=1 (no classifier-free guidance).
prompt = "a dog"
image = pipe(prompt, seed=0, num_inference_steps=15, cfg_scale=1)
image.save("image.jpg")