import itertools
import math
from typing import Any, Dict, List, Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange, repeat

from diffusers.models.embeddings import get_1d_rotary_pos_embed

# The wildcard import provides the diffusers building blocks used throughout this module
# (Attention, RMSNorm, Timesteps, TimestepEmbedding, LuminaFeedForward, LuminaRMSNormZero,
# LuminaLayerNormContinuous, apply_rotary_emb, the model mixins, and the `logging` utility).
from diffusers.models.transformers.transformer_lumina2 import *

logger = logging.get_logger(__name__)
|
|
class Lumina2CombinedTimestepCaptionEmbedding(nn.Module):
    def __init__(
        self,
        hidden_size: int = 4096,
        cap_feat_dim: int = 2048,
        frequency_embedding_size: int = 256,
        norm_eps: float = 1e-5,
    ) -> None:
        super().__init__()

        self.time_proj = Timesteps(
            num_channels=frequency_embedding_size, flip_sin_to_cos=True, downscale_freq_shift=0.0
        )

        self.timestep_embedder = TimestepEmbedding(
            in_channels=frequency_embedding_size, time_embed_dim=min(hidden_size, 1024)
        )

        self.caption_embedder = nn.Sequential(
            RMSNorm(cap_feat_dim, eps=norm_eps), nn.Linear(cap_feat_dim, hidden_size, bias=True)
        )

    def forward(
        self, hidden_states: torch.Tensor, timestep: torch.Tensor, encoder_hidden_states: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        timestep_proj = self.time_proj(timestep).type_as(hidden_states[0])
        time_embed = self.timestep_embedder(timestep_proj)
        caption_embed = self.caption_embedder(encoder_hidden_states)
        return time_embed, caption_embed
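# Illustrative sketch (not part of the model): the tensor shapes flowing through
# Lumina2CombinedTimestepCaptionEmbedding. The tiny hyperparameters are arbitrary
# assumptions chosen for readability, not the values of any released checkpoint.
def _sketch_time_caption_embedding():
    embed = Lumina2CombinedTimestepCaptionEmbedding(hidden_size=64, cap_feat_dim=32)
    latents = [torch.randn(4, 8, 8)]       # only used to pick the working dtype via type_as
    timestep = torch.rand(2)                # (batch,)
    caption_feats = torch.randn(2, 7, 32)   # (batch, caption_len, cap_feat_dim)
    time_embed, caption_embed = embed(latents, timestep, caption_feats)
    # time_embed: (2, min(hidden_size, 1024)) == (2, 64); caption_embed: (2, 7, 64)
    return time_embed.shape, caption_embed.shape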
|
|
class Lumina2AttnProcessor2_0:
    r"""
    Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0). This is
    used in the Lumina2Transformer2DModel model. It applies normalization and RoPE on query and key vectors.
    """

    def __init__(self):
        if not hasattr(F, "scaled_dot_product_attention"):
            raise ImportError("Lumina2AttnProcessor2_0 requires PyTorch 2.0. To use it, please upgrade PyTorch to 2.0.")

    def __call__(
        self,
        attn: Attention,
        hidden_states: torch.Tensor,
        encoder_hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        image_rotary_emb: Optional[torch.Tensor] = None,
        base_sequence_length: Optional[int] = None,
    ) -> torch.Tensor:
        batch_size, sequence_length, _ = hidden_states.shape

        # Get query, key and value projections
        query = attn.to_q(hidden_states)
        key = attn.to_k(encoder_hidden_states)
        value = attn.to_v(encoder_hidden_states)

        query_dim = query.shape[-1]
        inner_dim = key.shape[-1]
        head_dim = query_dim // attn.heads
        dtype = query.dtype

        # Number of key-value heads (may be smaller than the number of query heads)
        kv_heads = inner_dim // head_dim

        query = query.view(batch_size, -1, attn.heads, head_dim)
        key = key.view(batch_size, -1, kv_heads, head_dim)
        value = value.view(batch_size, -1, kv_heads, head_dim)

        # Apply query-key normalization if configured
        if attn.norm_q is not None:
            query = attn.norm_q(query)
        if attn.norm_k is not None:
            key = attn.norm_k(key)

        # Apply RoPE if rotary embeddings were provided
        if image_rotary_emb is not None:
            query = apply_rotary_emb(query, image_rotary_emb, use_real=False)
            key = apply_rotary_emb(key, image_rotary_emb, use_real=False)

        query, key = query.to(dtype), key.to(dtype)

        # Proportional attention: scale the softmax relative to the base sequence length
        if base_sequence_length is not None:
            softmax_scale = math.sqrt(math.log(sequence_length, base_sequence_length)) * attn.scale
        else:
            softmax_scale = attn.scale

        # Grouped-query attention (GQA): repeat key/value heads to match the query heads
        n_rep = attn.heads // kv_heads
        if n_rep >= 1:
            key = key.unsqueeze(3).repeat(1, 1, 1, n_rep, 1).flatten(2, 3)
            value = value.unsqueeze(3).repeat(1, 1, 1, n_rep, 1).flatten(2, 3)

        # scaled_dot_product_attention expects the mask shape (batch, heads, query_len, key_len)
        if attention_mask is not None:
            attention_mask = attention_mask.bool().view(batch_size, 1, 1, -1)

        query = query.transpose(1, 2)
        key = key.transpose(1, 2)
        value = value.transpose(1, 2)

        hidden_states = F.scaled_dot_product_attention(
            query, key, value, attn_mask=attention_mask, scale=softmax_scale
        )
        hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
        hidden_states = hidden_states.type_as(query)

        # Output projection followed by dropout
        hidden_states = attn.to_out[0](hidden_states)
        hidden_states = attn.to_out[1](hidden_states)
        return hidden_states
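# Illustrative sketch (not part of the model): the two tricks used by the processor above,
# shown on toy tensors. Shapes and lengths are arbitrary assumptions for readability.
def _sketch_gqa_and_proportional_scale():
    batch, seq_len, heads, kv_heads, head_dim = 2, 10, 8, 2, 16

    # Grouped-query attention: each key/value head serves heads // kv_heads query heads,
    # so K (and V) are repeated along the head axis before scaled_dot_product_attention.
    key = torch.randn(batch, seq_len, kv_heads, head_dim)
    n_rep = heads // kv_heads
    key = key.unsqueeze(3).repeat(1, 1, 1, n_rep, 1).flatten(2, 3)  # -> (batch, seq_len, heads, head_dim)

    # Proportional attention: the usual softmax scale is rescaled by
    # sqrt(log_base(seq_len)) when a base_sequence_length is supplied.
    base_sequence_length = 256
    base_scale = head_dim**-0.5
    softmax_scale = math.sqrt(math.log(1024, base_sequence_length)) * base_scale

    return key.shape, softmax_scale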
|
|
class Lumina2TransformerBlock(nn.Module):
    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        num_kv_heads: int,
        multiple_of: int,
        ffn_dim_multiplier: float,
        norm_eps: float,
        modulation: bool = True,
    ) -> None:
        super().__init__()
        self.head_dim = dim // num_attention_heads
        self.dim = dim
        self.modulation = modulation

        self.attn = Attention(
            query_dim=dim,
            cross_attention_dim=None,
            dim_head=dim // num_attention_heads,
            qk_norm="rms_norm",
            heads=num_attention_heads,
            kv_heads=num_kv_heads,
            eps=1e-5,
            bias=False,
            out_bias=False,
            processor=Lumina2AttnProcessor2_0(),
        )

        self.feed_forward = LuminaFeedForward(
            dim=dim,
            inner_dim=4 * dim,
            multiple_of=multiple_of,
            ffn_dim_multiplier=ffn_dim_multiplier,
        )

        if modulation:
            self.norm1 = LuminaRMSNormZero(
                embedding_dim=dim,
                norm_eps=norm_eps,
                norm_elementwise_affine=True,
            )
        else:
            self.norm1 = RMSNorm(dim, eps=norm_eps)
        self.ffn_norm1 = RMSNorm(dim, eps=norm_eps)

        self.norm2 = RMSNorm(dim, eps=norm_eps)
        self.ffn_norm2 = RMSNorm(dim, eps=norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        image_rotary_emb: torch.Tensor,
        temb: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        if self.modulation:
            norm_hidden_states, gate_msa, scale_mlp, gate_mlp = self.norm1(hidden_states, temb)
            attn_output = self.attn(
                hidden_states=norm_hidden_states,
                encoder_hidden_states=norm_hidden_states,
                attention_mask=attention_mask,
                image_rotary_emb=image_rotary_emb,
            )
            hidden_states = hidden_states + gate_msa.unsqueeze(1).tanh() * self.norm2(attn_output)
            mlp_output = self.feed_forward(self.ffn_norm1(hidden_states) * (1 + scale_mlp.unsqueeze(1)))
            hidden_states = hidden_states + gate_mlp.unsqueeze(1).tanh() * self.ffn_norm2(mlp_output)
        else:
            norm_hidden_states = self.norm1(hidden_states)
            attn_output = self.attn(
                hidden_states=norm_hidden_states,
                encoder_hidden_states=norm_hidden_states,
                attention_mask=attention_mask,
                image_rotary_emb=image_rotary_emb,
            )
            hidden_states = hidden_states + self.norm2(attn_output)
            mlp_output = self.feed_forward(self.ffn_norm1(hidden_states))
            hidden_states = hidden_states + self.ffn_norm2(mlp_output)

        return hidden_states
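# Illustrative sketch (not part of the model): the tanh-gated residual update used in the
# modulated branch above, on toy tensors. In the real block `gate` and `scale` come from
# the timestep embedding via LuminaRMSNormZero; here they are random stand-ins.
def _sketch_gated_residual():
    hidden_states = torch.randn(2, 5, 8)   # (batch, seq, dim)
    branch_output = torch.randn(2, 5, 8)   # e.g. post-norm attention or MLP output
    gate = torch.randn(2, 8)               # per-sample gate, broadcast over the sequence axis

    out = hidden_states + gate.unsqueeze(1).tanh() * branch_output
    # With a zero gate the update reduces to the identity, since tanh(0) == 0.
    identity = hidden_states + torch.zeros(2, 8).unsqueeze(1).tanh() * branch_output
    assert torch.equal(identity, hidden_states)
    return out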
|
|
class Lumina2RotaryPosEmbed(nn.Module):
    def __init__(self, theta: int, axes_dim: List[int], axes_lens: List[int] = (300, 512, 512), patch_size: int = 2):
        super().__init__()
        self.theta = theta
        self.axes_dim = axes_dim
        self.axes_lens = axes_lens
        self.patch_size = patch_size

        self.freqs_cis = self._precompute_freqs_cis(axes_dim, axes_lens, theta)

    def _precompute_freqs_cis(self, axes_dim: List[int], axes_lens: List[int], theta: int) -> List[torch.Tensor]:
        freqs_cis = []
        freqs_dtype = torch.float32 if torch.backends.mps.is_available() else torch.float64
        for d, e in zip(axes_dim, axes_lens):
            emb = get_1d_rotary_pos_embed(d, e, theta=self.theta, freqs_dtype=freqs_dtype)
            freqs_cis.append(emb)
        return freqs_cis

    def _get_freqs_cis(self, ids: torch.Tensor) -> torch.Tensor:
        device = ids.device
        if ids.device.type == "mps":
            ids = ids.to("cpu")

        result = []
        for i in range(len(self.axes_dim)):
            freqs = self.freqs_cis[i].to(ids.device)
            index = ids[:, :, i : i + 1].repeat(1, 1, freqs.shape[-1]).to(torch.int64)
            result.append(torch.gather(freqs.unsqueeze(0).repeat(index.shape[0], 1, 1), dim=1, index=index))
        return torch.cat(result, dim=-1).to(device)

    def forward(
        self,
        attention_mask,
        l_effective_ref_img_len,
        l_effective_img_len,
        ref_img_sizes,
        img_sizes,
        device,
    ):
        batch_size = len(attention_mask)
        p = self.patch_size

        encoder_seq_len = attention_mask.shape[1]
        l_effective_cap_len = attention_mask.sum(dim=1).tolist()

        seq_lengths = [
            cap_len + sum(ref_img_len) + img_len
            for cap_len, ref_img_len, img_len in zip(l_effective_cap_len, l_effective_ref_img_len, l_effective_img_len)
        ]

        max_seq_len = max(seq_lengths)
        max_ref_img_len = max([sum(ref_img_len) for ref_img_len in l_effective_ref_img_len])
        max_img_len = max(l_effective_img_len)

        # Build 3-axis position IDs: axis 0 carries the caption index (and a shift for image
        # tokens), axes 1 and 2 carry the row/column of each image token.
        position_ids = torch.zeros(batch_size, max_seq_len, 3, dtype=torch.int32, device=device)

        for i, (cap_seq_len, seq_len) in enumerate(zip(l_effective_cap_len, seq_lengths)):
            # Caption tokens use the same running index on all three axes.
            position_ids[i, :cap_seq_len] = repeat(
                torch.arange(cap_seq_len, dtype=torch.int32, device=device), "l -> l 3"
            )

            pe_shift = cap_seq_len
            pe_shift_len = cap_seq_len

            if ref_img_sizes[i] is not None:
                for ref_img_size, ref_img_len in zip(ref_img_sizes[i], l_effective_ref_img_len[i]):
                    H, W = ref_img_size
                    ref_H_tokens, ref_W_tokens = H // p, W // p
                    assert ref_H_tokens * ref_W_tokens == ref_img_len

                    row_ids = repeat(
                        torch.arange(ref_H_tokens, dtype=torch.int32, device=device), "h -> h w", w=ref_W_tokens
                    ).flatten()
                    col_ids = repeat(
                        torch.arange(ref_W_tokens, dtype=torch.int32, device=device), "w -> h w", h=ref_H_tokens
                    ).flatten()
                    position_ids[i, pe_shift_len : pe_shift_len + ref_img_len, 0] = pe_shift
                    position_ids[i, pe_shift_len : pe_shift_len + ref_img_len, 1] = row_ids
                    position_ids[i, pe_shift_len : pe_shift_len + ref_img_len, 2] = col_ids

                    pe_shift += max(ref_H_tokens, ref_W_tokens)
                    pe_shift_len += ref_img_len

            H, W = img_sizes[i]
            H_tokens, W_tokens = H // p, W // p
            assert H_tokens * W_tokens == l_effective_img_len[i]

            row_ids = repeat(torch.arange(H_tokens, dtype=torch.int32, device=device), "h -> h w", w=W_tokens).flatten()
            col_ids = repeat(torch.arange(W_tokens, dtype=torch.int32, device=device), "w -> h w", h=H_tokens).flatten()

            assert pe_shift_len + l_effective_img_len[i] == seq_len
            position_ids[i, pe_shift_len:seq_len, 0] = pe_shift
            position_ids[i, pe_shift_len:seq_len, 1] = row_ids
            position_ids[i, pe_shift_len:seq_len, 2] = col_ids

        freqs_cis = self._get_freqs_cis(position_ids)

        # Split the joint rotary table back into caption / reference-image / noise-image pieces.
        cap_freqs_cis = torch.zeros(
            batch_size, encoder_seq_len, freqs_cis.shape[-1], device=device, dtype=freqs_cis.dtype
        )
        ref_img_freqs_cis = torch.zeros(
            batch_size, max_ref_img_len, freqs_cis.shape[-1], device=device, dtype=freqs_cis.dtype
        )
        img_freqs_cis = torch.zeros(
            batch_size, max_img_len, freqs_cis.shape[-1], device=device, dtype=freqs_cis.dtype
        )

        for i, (cap_seq_len, ref_img_len, img_len, seq_len) in enumerate(
            zip(l_effective_cap_len, l_effective_ref_img_len, l_effective_img_len, seq_lengths)
        ):
            cap_freqs_cis[i, :cap_seq_len] = freqs_cis[i, :cap_seq_len]
            ref_img_freqs_cis[i, : sum(ref_img_len)] = freqs_cis[i, cap_seq_len : cap_seq_len + sum(ref_img_len)]
            img_freqs_cis[i, :img_len] = freqs_cis[
                i, cap_seq_len + sum(ref_img_len) : cap_seq_len + sum(ref_img_len) + img_len
            ]

        return (
            cap_freqs_cis,
            ref_img_freqs_cis,
            img_freqs_cis,
            freqs_cis,
            l_effective_cap_len,
            seq_lengths,
        )
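# Illustrative sketch (not part of the model): how the 3-axis position IDs above are laid out
# for one sample with a 4-token caption and a single 2x3-token image (no reference images).
# The sizes are arbitrary assumptions chosen for readability.
def _sketch_position_ids():
    cap_len, h_tokens, w_tokens = 4, 2, 3
    seq_len = cap_len + h_tokens * w_tokens

    position_ids = torch.zeros(seq_len, 3, dtype=torch.int32)
    # Caption tokens: the same running index on all three axes.
    position_ids[:cap_len] = repeat(torch.arange(cap_len, dtype=torch.int32), "l -> l 3")
    # Image tokens: axis 0 is a constant shift (cap_len), axes 1 and 2 are row/column indices.
    position_ids[cap_len:, 0] = cap_len
    position_ids[cap_len:, 1] = repeat(torch.arange(h_tokens, dtype=torch.int32), "h -> h w", w=w_tokens).flatten()
    position_ids[cap_len:, 2] = repeat(torch.arange(w_tokens, dtype=torch.int32), "w -> h w", h=h_tokens).flatten()
    return position_ids  # shape (10, 3)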
|
|
class Lumina2Transformer2DModel(ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin):
    r"""
    Lumina2NextDiT: Diffusion model with a Transformer backbone.

    Parameters:
        sample_size (`int`): The width of the latent images. This is fixed during training since
            it is used to learn a number of position embeddings.
        patch_size (`int`, *optional*, defaults to 2):
            The size of each patch in the image. This parameter defines the resolution of patches fed into the model.
        in_channels (`int`, *optional*, defaults to 16):
            The number of input channels for the model. Typically, this matches the number of channels in the input
            images.
        out_channels (`int`, *optional*):
            The number of output channels. Defaults to `in_channels` when not set.
        hidden_size (`int`, *optional*, defaults to 2304):
            The dimensionality of the hidden layers in the model. This parameter determines the width of the model's
            hidden representations.
        num_layers (`int`, *optional*, defaults to 26):
            The number of layers in the model. This defines the depth of the neural network.
        num_refiner_layers (`int`, *optional*, defaults to 2):
            The number of refiner blocks used for the noise, context, and reference-image refinement stages.
        num_attention_heads (`int`, *optional*, defaults to 24):
            The number of attention heads in each attention layer. This parameter specifies how many separate
            attention mechanisms are used.
        num_kv_heads (`int`, *optional*, defaults to 8):
            The number of key-value heads in the attention mechanism, if different from the number of attention heads.
            If None, it defaults to num_attention_heads.
        multiple_of (`int`, *optional*, defaults to 256):
            A factor that the hidden size should be a multiple of. This can help optimize certain hardware
            configurations.
        ffn_dim_multiplier (`float`, *optional*):
            A multiplier for the dimensionality of the feed-forward network. If None, it uses a default value based on
            the model configuration.
        norm_eps (`float`, *optional*, defaults to 1e-5):
            A small value added to the denominator for numerical stability in normalization layers.
        scaling_factor (`float`, *optional*, defaults to 1.0):
            A scaling factor applied to certain parameters or layers in the model. This can be used for adjusting the
            overall scale of the model's operations.
        axes_dim_rope (`Tuple[int, int, int]`, *optional*, defaults to `(32, 32, 32)`):
            The rotary embedding dimensions allocated to the three position axes.
        axes_lens (`Tuple[int, int, int]`, *optional*, defaults to `(300, 512, 512)`):
            The maximum number of positions precomputed for each rotary axis.
        cap_feat_dim (`int`, *optional*, defaults to 1024):
            The dimensionality of the caption features produced by the text encoder.
    """

    _supports_gradient_checkpointing = True
    _no_split_modules = ["Lumina2TransformerBlock"]
    _skip_layerwise_casting_patterns = ["x_embedder", "norm"]
|
    @register_to_config
    def __init__(
        self,
        sample_size: int = 128,
        patch_size: int = 2,
        in_channels: int = 16,
        out_channels: Optional[int] = None,
        hidden_size: int = 2304,
        num_layers: int = 26,
        num_refiner_layers: int = 2,
        num_attention_heads: int = 24,
        num_kv_heads: int = 8,
        multiple_of: int = 256,
        ffn_dim_multiplier: Optional[float] = None,
        norm_eps: float = 1e-5,
        scaling_factor: float = 1.0,
        axes_dim_rope: Tuple[int, int, int] = (32, 32, 32),
        axes_lens: Tuple[int, int, int] = (300, 512, 512),
        cap_feat_dim: int = 1024,
    ) -> None:
        super().__init__()
        self.out_channels = out_channels or in_channels

        # 1. Positional, patch & conditional embeddings
        self.rope_embedder = Lumina2RotaryPosEmbed(
            theta=10000, axes_dim=axes_dim_rope, axes_lens=axes_lens, patch_size=patch_size
        )

        self.x_embedder = nn.Linear(in_features=patch_size * patch_size * in_channels, out_features=hidden_size)

        self.time_caption_embed = Lumina2CombinedTimestepCaptionEmbedding(
            hidden_size=hidden_size, cap_feat_dim=cap_feat_dim, norm_eps=norm_eps
        )

        # 2. Noise and context refinement blocks
        self.noise_refiner = nn.ModuleList(
            [
                Lumina2TransformerBlock(
                    hidden_size,
                    num_attention_heads,
                    num_kv_heads,
                    multiple_of,
                    ffn_dim_multiplier,
                    norm_eps,
                    modulation=True,
                )
                for _ in range(num_refiner_layers)
            ]
        )

        self.context_refiner = nn.ModuleList(
            [
                Lumina2TransformerBlock(
                    hidden_size,
                    num_attention_heads,
                    num_kv_heads,
                    multiple_of,
                    ffn_dim_multiplier,
                    norm_eps,
                    modulation=False,
                )
                for _ in range(num_refiner_layers)
            ]
        )

        # 3. Transformer blocks
        self.layers = nn.ModuleList(
            [
                Lumina2TransformerBlock(
                    hidden_size,
                    num_attention_heads,
                    num_kv_heads,
                    multiple_of,
                    ffn_dim_multiplier,
                    norm_eps,
                    modulation=True,
                )
                for _ in range(num_layers)
            ]
        )

        # 4. Output norm & projection
        self.norm_out = LuminaLayerNormContinuous(
            embedding_dim=hidden_size,
            conditioning_embedding_dim=min(hidden_size, 1024),
            elementwise_affine=False,
            eps=1e-6,
            bias=True,
            out_dim=patch_size * patch_size * self.out_channels,
        )

        self.gradient_checkpointing = False

        # Keep the constructor arguments needed to build the reference-image branch later
        # in `initialize_ref_weights`.
        self.args_dict = {
            "patch_size": patch_size,
            "in_channels": in_channels,
            "hidden_size": hidden_size,
            "num_attention_heads": num_attention_heads,
            "num_kv_heads": num_kv_heads,
            "multiple_of": multiple_of,
            "ffn_dim_multiplier": ffn_dim_multiplier,
            "norm_eps": norm_eps,
            "num_refiner_layers": num_refiner_layers,
        }
|
    def initialize_ref_weights(self) -> None:
        """
        Create and initialize the reference-image branch (patch embedder, refiner blocks, and
        image-index embedding) that is not built in `__init__`.

        The reference patch embedder uses Xavier-uniform weights with zero bias; the image-index
        embedding is drawn from a normal distribution with std 0.02.
        """
        patch_size, in_channels, hidden_size, num_attention_heads, num_kv_heads, multiple_of, ffn_dim_multiplier, norm_eps, num_refiner_layers = (
            self.args_dict[k]
            for k in [
                "patch_size",
                "in_channels",
                "hidden_size",
                "num_attention_heads",
                "num_kv_heads",
                "multiple_of",
                "ffn_dim_multiplier",
                "norm_eps",
                "num_refiner_layers",
            ]
        )
        with torch.no_grad():
            self.ref_image_patch_embedder = nn.Linear(
                in_features=self.x_embedder.in_features,
                out_features=hidden_size,
            )
            self.ref_image_refiner = nn.ModuleList(
                [
                    Lumina2TransformerBlock(
                        hidden_size,
                        num_attention_heads,
                        num_kv_heads,
                        multiple_of,
                        ffn_dim_multiplier,
                        norm_eps,
                        modulation=True,
                    )
                    for _ in range(num_refiner_layers)
                ]
            )
            nn.init.xavier_uniform_(self.ref_image_patch_embedder.weight)
            nn.init.constant_(self.ref_image_patch_embedder.bias, 0.0)

            self.image_index_embedding = nn.Parameter(torch.randn(5, hidden_size))
            nn.init.normal_(self.image_index_embedding, std=0.02)
|
    def img_patch_embed_and_refine(
        self,
        hidden_states,
        ref_image_hidden_states,
        padded_img_mask,
        padded_ref_img_mask,
        noise_rotary_emb,
        ref_img_rotary_emb,
        l_effective_ref_img_len,
        l_effective_img_len,
        temb,
    ):
        batch_size = len(hidden_states)
        max_combined_img_len = max(
            [img_len + sum(ref_img_len) for img_len, ref_img_len in zip(l_effective_img_len, l_effective_ref_img_len)]
        )

        hidden_states = self.x_embedder(hidden_states)
        ref_image_hidden_states = self.ref_image_patch_embedder(ref_image_hidden_states)

        # Add a learned per-reference-image index embedding to each reference image's tokens.
        for i in range(batch_size):
            shift = 0
            for j, ref_img_len in enumerate(l_effective_ref_img_len[i]):
                ref_image_hidden_states[i, shift : shift + ref_img_len, :] = (
                    ref_image_hidden_states[i, shift : shift + ref_img_len, :] + self.image_index_embedding[j]
                )
                shift += ref_img_len

        for layer in self.noise_refiner:
            hidden_states = layer(hidden_states, padded_img_mask, noise_rotary_emb, temb)

        # Re-batch the reference images so that each reference image becomes its own sequence.
        flat_l_effective_ref_img_len = list(itertools.chain(*l_effective_ref_img_len))
        num_ref_images = len(flat_l_effective_ref_img_len)
        max_ref_img_len = max(flat_l_effective_ref_img_len)

        batch_ref_img_mask = ref_image_hidden_states.new_zeros(num_ref_images, max_ref_img_len, dtype=torch.bool)
        batch_ref_image_hidden_states = ref_image_hidden_states.new_zeros(
            num_ref_images, max_ref_img_len, self.config.hidden_size
        )
        batch_ref_img_rotary_emb = hidden_states.new_zeros(
            num_ref_images, max_ref_img_len, ref_img_rotary_emb.shape[-1], dtype=ref_img_rotary_emb.dtype
        )
        batch_temb = temb.new_zeros(num_ref_images, *temb.shape[1:], dtype=temb.dtype)

        idx = 0
        for i in range(batch_size):
            shift = 0
            for ref_img_len in l_effective_ref_img_len[i]:
                batch_ref_img_mask[idx, :ref_img_len] = True
                batch_ref_image_hidden_states[idx, :ref_img_len] = ref_image_hidden_states[i, shift : shift + ref_img_len]
                batch_ref_img_rotary_emb[idx, :ref_img_len] = ref_img_rotary_emb[i, shift : shift + ref_img_len]
                batch_temb[idx] = temb[i]
                shift += ref_img_len
                idx += 1

        for layer in self.ref_image_refiner:
            batch_ref_image_hidden_states = layer(
                batch_ref_image_hidden_states, batch_ref_img_mask, batch_ref_img_rotary_emb, batch_temb
            )

        # Scatter the refined reference tokens back into the per-sample layout.
        idx = 0
        for i in range(batch_size):
            shift = 0
            for ref_img_len in l_effective_ref_img_len[i]:
                ref_image_hidden_states[i, shift : shift + ref_img_len] = batch_ref_image_hidden_states[idx, :ref_img_len]
                shift += ref_img_len
                idx += 1

        # Concatenate reference-image tokens and noise-image tokens per sample.
        combined_img_hidden_states = hidden_states.new_zeros(batch_size, max_combined_img_len, self.config.hidden_size)
        for i, (ref_img_len, img_len) in enumerate(zip(l_effective_ref_img_len, l_effective_img_len)):
            combined_img_hidden_states[i, : sum(ref_img_len)] = ref_image_hidden_states[i, : sum(ref_img_len)]
            combined_img_hidden_states[i, sum(ref_img_len) : sum(ref_img_len) + img_len] = hidden_states[i, :img_len]

        return combined_img_hidden_states
|
    def flat_and_pad_to_seq(self, hidden_states, ref_image_hidden_states):
        batch_size = len(hidden_states)
        p = self.config.patch_size
        device = hidden_states[0].device

        img_sizes = [(img.size(1), img.size(2)) for img in hidden_states]
        l_effective_img_len = [(H // p) * (W // p) for (H, W) in img_sizes]

        if ref_image_hidden_states is not None:
            ref_img_sizes = [
                [(img.size(1), img.size(2)) for img in imgs] if imgs is not None else None
                for imgs in ref_image_hidden_states
            ]
            l_effective_ref_img_len = [
                [(ref_img_size[0] // p) * (ref_img_size[1] // p) for ref_img_size in _ref_img_sizes]
                if _ref_img_sizes is not None
                else [0]
                for _ref_img_sizes in ref_img_sizes
            ]
        else:
            ref_img_sizes = [None for _ in range(batch_size)]
            l_effective_ref_img_len = [[0] for _ in range(batch_size)]

        max_ref_img_len = max([sum(ref_img_len) for ref_img_len in l_effective_ref_img_len])
        max_img_len = max(l_effective_img_len)

        # Patchify every reference image and concatenate the patch tokens per sample.
        flat_ref_img_hidden_states = []
        for i in range(batch_size):
            if ref_img_sizes[i] is not None:
                imgs = []
                for ref_img in ref_image_hidden_states[i]:
                    C, H, W = ref_img.size()
                    ref_img = rearrange(ref_img, 'c (h p1) (w p2) -> (h w) (p1 p2 c)', p1=p, p2=p)
                    imgs.append(ref_img)

                img = torch.cat(imgs, dim=0)
                flat_ref_img_hidden_states.append(img)
            else:
                flat_ref_img_hidden_states.append(None)

        # Patchify the noise latents.
        flat_hidden_states = []
        for i in range(batch_size):
            img = hidden_states[i]
            C, H, W = img.size()

            img = rearrange(img, 'c (h p1) (w p2) -> (h w) (p1 p2 c)', p1=p, p2=p)
            flat_hidden_states.append(img)

        # Right-pad all sequences to the longest length in the batch and build boolean masks.
        padded_ref_img_hidden_states = torch.zeros(
            batch_size, max_ref_img_len, flat_hidden_states[0].shape[-1], device=device, dtype=flat_hidden_states[0].dtype
        )
        padded_ref_img_mask = torch.zeros(batch_size, max_ref_img_len, dtype=torch.bool, device=device)
        for i in range(batch_size):
            if ref_img_sizes[i] is not None:
                padded_ref_img_hidden_states[i, : sum(l_effective_ref_img_len[i])] = flat_ref_img_hidden_states[i]
                padded_ref_img_mask[i, : sum(l_effective_ref_img_len[i])] = True

        padded_hidden_states = torch.zeros(
            batch_size, max_img_len, flat_hidden_states[0].shape[-1], device=device, dtype=flat_hidden_states[0].dtype
        )
        padded_img_mask = torch.zeros(batch_size, max_img_len, dtype=torch.bool, device=device)
        for i in range(batch_size):
            padded_hidden_states[i, : l_effective_img_len[i]] = flat_hidden_states[i]
            padded_img_mask[i, : l_effective_img_len[i]] = True

        return (
            padded_hidden_states,
            padded_ref_img_hidden_states,
            padded_img_mask,
            padded_ref_img_mask,
            l_effective_ref_img_len,
            l_effective_img_len,
            ref_img_sizes,
            img_sizes,
        )
|
    def forward(
        self,
        hidden_states: torch.Tensor,
        timestep: torch.Tensor,
        encoder_hidden_states: torch.Tensor,
        encoder_attention_mask: torch.Tensor,
        ref_image_hidden_states: Optional[List[List[torch.Tensor]]] = None,
        attention_kwargs: Optional[Dict[str, Any]] = None,
        return_dict: bool = True,
    ) -> Union[torch.Tensor, Transformer2DModelOutput]:
        if attention_kwargs is not None:
            attention_kwargs = attention_kwargs.copy()
            lora_scale = attention_kwargs.pop("scale", 1.0)
        else:
            lora_scale = 1.0

        if USE_PEFT_BACKEND:
            # weight the LoRA layers by setting `lora_scale` for each PEFT layer
            scale_lora_layers(self, lora_scale)
        else:
            if attention_kwargs is not None and attention_kwargs.get("scale", None) is not None:
                logger.warning(
                    "Passing `scale` via `attention_kwargs` when not using the PEFT backend is ineffective."
                )

        batch_size = len(hidden_states)
        is_hidden_states_tensor = isinstance(hidden_states, torch.Tensor)

        if is_hidden_states_tensor:
            assert hidden_states.ndim == 4
            hidden_states = [_hidden_states for _hidden_states in hidden_states]

        device = hidden_states[0].device

        # 1. Condition, positional & patch embedding
        temb, encoder_hidden_states = self.time_caption_embed(hidden_states, timestep, encoder_hidden_states)

        (
            hidden_states,
            ref_image_hidden_states,
            img_mask,
            ref_img_mask,
            l_effective_ref_img_len,
            l_effective_img_len,
            ref_img_sizes,
            img_sizes,
        ) = self.flat_and_pad_to_seq(hidden_states, ref_image_hidden_states)

        (
            context_rotary_emb,
            ref_img_rotary_emb,
            noise_rotary_emb,
            rotary_emb,
            encoder_seq_lengths,
            seq_lengths,
        ) = self.rope_embedder(
            encoder_attention_mask,
            l_effective_ref_img_len,
            l_effective_img_len,
            ref_img_sizes,
            img_sizes,
            device,
        )

        # 2. Context and image refinement
        for layer in self.context_refiner:
            encoder_hidden_states = layer(encoder_hidden_states, encoder_attention_mask, context_rotary_emb)

        combined_img_hidden_states = self.img_patch_embed_and_refine(
            hidden_states,
            ref_image_hidden_states,
            img_mask,
            ref_img_mask,
            noise_rotary_emb,
            ref_img_rotary_emb,
            l_effective_ref_img_len,
            l_effective_img_len,
            temb,
        )

        # 3. Build the joint caption + image sequence
        max_seq_len = max(seq_lengths)
        use_mask = len(set(seq_lengths)) > 1

        attention_mask = hidden_states.new_zeros(batch_size, max_seq_len, dtype=torch.bool)
        joint_hidden_states = hidden_states.new_zeros(batch_size, max_seq_len, self.config.hidden_size)
        for i, (encoder_seq_len, seq_len) in enumerate(zip(encoder_seq_lengths, seq_lengths)):
            attention_mask[i, :seq_len] = True
            joint_hidden_states[i, :encoder_seq_len] = encoder_hidden_states[i, :encoder_seq_len]
            joint_hidden_states[i, encoder_seq_len:seq_len] = combined_img_hidden_states[i, : seq_len - encoder_seq_len]

        hidden_states = joint_hidden_states

        # 4. Transformer blocks
        for layer in self.layers:
            if torch.is_grad_enabled() and self.gradient_checkpointing:
                hidden_states = self._gradient_checkpointing_func(
                    layer, hidden_states, attention_mask if use_mask else None, rotary_emb, temb
                )
            else:
                hidden_states = layer(hidden_states, attention_mask if use_mask else None, rotary_emb, temb)

        # 5. Output norm & unpatchify
        hidden_states = self.norm_out(hidden_states, temb)

        p = self.config.patch_size
        output = []
        for i, (img_size, img_len, seq_len) in enumerate(zip(img_sizes, l_effective_img_len, seq_lengths)):
            height, width = img_size
            output.append(
                rearrange(
                    hidden_states[i][seq_len - img_len : seq_len],
                    '(h w) (p1 p2 c) -> c (h p1) (w p2)',
                    h=height // p,
                    w=width // p,
                    p1=p,
                    p2=p,
                )
            )
        if is_hidden_states_tensor:
            output = torch.stack(output, dim=0)

        if USE_PEFT_BACKEND:
            # remove `lora_scale` from each PEFT layer
            unscale_lora_layers(self, lora_scale)

        if not return_dict:
            return (output,)
        return Transformer2DModelOutput(sample=output)
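# Illustrative, self-contained sketches (not used by the model). The tiny hyperparameters are
# assumptions chosen so the checks run quickly; they do not correspond to a released checkpoint.
def _sketch_patchify_roundtrip(patch_size: int = 2):
    # Round-trip of the patchify/unpatchify rearranges used in `flat_and_pad_to_seq` and `forward`.
    latent = torch.randn(16, 8, 12)  # (channels, height, width)
    tokens = rearrange(latent, "c (h p1) (w p2) -> (h w) (p1 p2 c)", p1=patch_size, p2=patch_size)
    restored = rearrange(
        tokens,
        "(h w) (p1 p2 c) -> c (h p1) (w p2)",
        h=latent.shape[1] // patch_size,
        w=latent.shape[2] // patch_size,
        p1=patch_size,
        p2=patch_size,
    )
    assert torch.equal(latent, restored)
    return tokens.shape  # (24, 64): 24 patch tokens of dimension patch_size * patch_size * channels


if __name__ == "__main__":
    # Smoke check: build a deliberately tiny model (head_dim must equal sum(axes_dim_rope) = 96)
    # and attach the reference-image branch. No forward pass is run here.
    _sketch_patchify_roundtrip()
    model = Lumina2Transformer2DModel(
        in_channels=4,
        hidden_size=192,
        num_layers=1,
        num_refiner_layers=1,
        num_attention_heads=2,
        num_kv_heads=2,
        cap_feat_dim=32,
    )
    model.initialize_ref_weights()
    print(sum(p.numel() for p in model.parameters()))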
|
|
|