import torch, os, json
from diffsynth import load_state_dict
from diffsynth.pipelines.wan_video_new import WanVideoPipeline, ModelConfig
from diffsynth.trainers.utils import DiffusionTrainingModule, ModelLogger, launch_training_task, wan_parser
from diffsynth.trainers.unified_dataset import UnifiedDataset, LoadVideo, LoadAudio, ImageCropAndResize, ToAbsolutePath
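
# Disable tokenizer parallelism to silence the fork warning when dataloader workers are spawned.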
os.environ["TOKENIZERS_PARALLELISM"] = "false" |
|
|
|
|
|
|
|
|
|
|
|
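# Training wrapper that builds a WanVideoPipeline and exposes its diffusion training loss,
# supporting either full-model finetuning or LoRA finetuning.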
class WanTrainingModule(DiffusionTrainingModule):
    def __init__(
        self,
        model_paths=None, model_id_with_origin_paths=None, audio_processor_config=None,
        trainable_models=None,
        lora_base_model=None, lora_target_modules="q,k,v,o,ffn.0,ffn.2", lora_rank=32, lora_checkpoint=None,
        use_gradient_checkpointing=True,
        use_gradient_checkpointing_offload=False,
        extra_inputs=None,
        max_timestep_boundary=1.0,
        min_timestep_boundary=0.0,
    ):
        super().__init__()
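
        # Load the models. `audio_processor_config` is passed as "model_id:origin_file_pattern".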
        model_configs = self.parse_model_configs(model_paths, model_id_with_origin_paths, enable_fp8_training=False)
        if audio_processor_config is not None:
            audio_processor_config = ModelConfig(model_id=audio_processor_config.split(":")[0], origin_file_pattern=audio_processor_config.split(":")[1])
        self.pipe = WanVideoPipeline.from_pretrained(torch_dtype=torch.bfloat16, device="cpu", model_configs=model_configs, audio_processor_config=audio_processor_config)
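
        # Switch to training mode: freeze the pipeline, then mark the requested models
        # (or newly injected LoRA layers) as trainable.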
        self.switch_pipe_to_training_mode(
            self.pipe, trainable_models,
            lora_base_model, lora_target_modules, lora_rank, lora_checkpoint=lora_checkpoint,
            enable_fp8_training=False,
        )
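
        # Remaining training options.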
        self.use_gradient_checkpointing = use_gradient_checkpointing
        self.use_gradient_checkpointing_offload = use_gradient_checkpointing_offload
        self.extra_inputs = extra_inputs.split(",") if extra_inputs is not None else []
        self.max_timestep_boundary = max_timestep_boundary
        self.min_timestep_boundary = min_timestep_boundary


    def forward_preprocess(self, data):
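        # CFG-sensitive inputs: only the positive prompt is needed during training.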
        inputs_posi = {"prompt": data["prompt"]}
        inputs_nega = {}
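
        # CFG-insensitive inputs shared by the positive and negative branches.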
        inputs_shared = {
            "input_video": data["video"],
            "height": data["video"][0].size[1],
            "width": data["video"][0].size[0],
            "num_frames": len(data["video"]),
            "cfg_scale": 1,
            "tiled": False,
            "rand_device": self.pipe.device,
            "use_gradient_checkpointing": self.use_gradient_checkpointing,
            "use_gradient_checkpointing_offload": self.use_gradient_checkpointing_offload,
            "cfg_merge": False,
            "vace_scale": 1,
            "max_timestep_boundary": self.max_timestep_boundary,
            "min_timestep_boundary": self.min_timestep_boundary,
        }
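
        # Extra inputs requested through `extra_inputs`, e.g. first/last frame conditioning
        # or reference images for image-conditioned training.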
        for extra_input in self.extra_inputs:
            if extra_input == "input_image":
                inputs_shared["input_image"] = data["video"][0]
            elif extra_input == "end_image":
                inputs_shared["end_image"] = data["video"][-1]
            elif extra_input == "reference_image" or extra_input == "vace_reference_image":
                inputs_shared[extra_input] = data[extra_input][0]
            else:
                inputs_shared[extra_input] = data[extra_input]
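
        # Run the pipeline units to convert the raw inputs (video frames, prompt, audio, ...)
        # into the tensors consumed by the denoising model.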
        for unit in self.pipe.units:
            inputs_shared, inputs_posi, inputs_nega = self.pipe.unit_runner(unit, self.pipe, inputs_shared, inputs_posi, inputs_nega)
        return {**inputs_shared, **inputs_posi}


    def forward(self, data, inputs=None):
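        # Preprocess the raw batch if needed, then compute the diffusion training loss.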
        if inputs is None:
            inputs = self.forward_preprocess(data)
        models = {name: getattr(self.pipe, name) for name in self.pipe.in_iteration_models}
        loss = self.pipe.training_loss(**models, **inputs)
        return loss


if __name__ == "__main__":
    parser = wan_parser()
    args = parser.parse_args()
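    # Dataset: the main video is resized so height and width are multiples of 16 and the frame
    # count satisfies 4k+1; the face video and audio columns are handled by dedicated operators.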
    dataset = UnifiedDataset(
        base_path=args.dataset_base_path,
        metadata_path=args.dataset_metadata_path,
        repeat=args.dataset_repeat,
        data_file_keys=args.data_file_keys.split(","),
        main_data_operator=UnifiedDataset.default_video_operator(
            base_path=args.dataset_base_path,
            max_pixels=args.max_pixels,
            height=args.height,
            width=args.width,
            height_division_factor=16,
            width_division_factor=16,
            num_frames=args.num_frames,
            time_division_factor=4,
            time_division_remainder=1,
        ),
        special_operator_map={
            "animate_face_video": ToAbsolutePath(args.dataset_base_path) >> LoadVideo(args.num_frames, 4, 1, frame_processor=ImageCropAndResize(512, 512, None, 16, 16)),
            "input_audio": ToAbsolutePath(args.dataset_base_path) >> LoadAudio(sr=16000),
        },
    )
    model = WanTrainingModule(
        model_paths=args.model_paths,
        model_id_with_origin_paths=args.model_id_with_origin_paths,
        audio_processor_config=args.audio_processor_config,
        trainable_models=args.trainable_models,
        lora_base_model=args.lora_base_model,
        lora_target_modules=args.lora_target_modules,
        lora_rank=args.lora_rank,
        lora_checkpoint=args.lora_checkpoint,
        use_gradient_checkpointing_offload=args.use_gradient_checkpointing_offload,
        extra_inputs=args.extra_inputs,
        max_timestep_boundary=args.max_timestep_boundary,
        min_timestep_boundary=args.min_timestep_boundary,
    )
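    # Checkpoints are written to args.output_path; remove_prefix_in_ckpt controls which key
    # prefix is stripped from the saved state dict.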
    model_logger = ModelLogger(
        args.output_path,
        remove_prefix_in_ckpt=args.remove_prefix_in_ckpt,
    )
    launch_training_task(dataset, model, model_logger, args=args)
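
# Example launch (illustrative sketch only: the flag names are assumed to mirror the args.*
# attributes read above via wan_parser(), and every value is a placeholder):
#
#   accelerate launch path/to/this_script.py \
#     --dataset_base_path <path/to/dataset> \
#     --dataset_metadata_path <path/to/metadata> \
#     --model_id_with_origin_paths "<model_id>:<file_pattern>" \
#     --audio_processor_config "<model_id>:<file_pattern>" \
#     --lora_base_model <model_name> --lora_rank 32 \
#     --output_path <path/to/output>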