```shell
accelerate launch examples/wanvideo/model_training/train.py \
  --dataset_base_path data/example_video_dataset \
  --dataset_metadata_path data/example_video_dataset/metadata_vap.csv \
  --data_file_keys "video,vap_video" \
  --height 480 \
  --width 832 \
  --num_frames 49 \
  --dataset_repeat 10 \
  --model_id_with_origin_paths "ByteDance/Video-As-Prompt-Wan2.1-14B:transformer/diffusion_pytorch_model*.safetensors,Wan-AI/Wan2.1-I2V-14B-720P:models_t5_umt5-xxl-enc-bf16.pth,Wan-AI/Wan2.1-I2V-14B-720P:Wan2.1_VAE.pth,Wan-AI/Wan2.1-I2V-14B-720P:models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth" \
  --learning_rate 1e-4 \
  --num_epochs 5 \
  --remove_prefix_in_ckpt "pipe.dit." \
  --output_path "./models/train/Video-As-Prompt-Wan2.1-14B_lora" \
  --lora_base_model "dit" \
  --lora_target_modules "q,k,v,o,ffn.0,ffn.2" \
  --lora_rank 32 \
  --extra_inputs "vap_video,input_image" \
  --use_gradient_checkpointing_offload
```
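
Here, `--data_file_keys "video,vap_video"` names the metadata columns that hold file paths, resolved relative to `--dataset_base_path`. As a minimal sketch of what `metadata_vap.csv` might contain (the header layout, the `prompt` column, and the file names are illustrative assumptions, not taken from the example dataset):

```shell
# Peek at the metadata file; the rows shown in the comments below are assumed for illustration.
head -n 3 data/example_video_dataset/metadata_vap.csv
# video,vap_video,prompt
# videos/sample_000.mp4,vap_videos/sample_000.mp4,"<caption for the target clip>"
# videos/sample_001.mp4,vap_videos/sample_001.mp4,"<caption for the target clip>"
```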