|
|
|
@ -12,31 +12,14 @@
|
|
|
|
|
# See the License for the specific language governing permissions and
|
|
|
|
|
# limitations under the License.
|
|
|
|
|
import math
|
|
|
|
|
from dataclasses import dataclass
|
|
|
|
|
from typing import Optional
|
|
|
|
|
|
|
|
|
|
import torch
|
|
|
|
|
import torch.nn.functional as F
|
|
|
|
|
from torch import nn
|
|
|
|
|
|
|
|
|
|
from ..configuration_utils import ConfigMixin, register_to_config
|
|
|
|
|
from ..models.embeddings import ImagePositionalEmbeddings
|
|
|
|
|
from ..utils import BaseOutput
|
|
|
|
|
from ..utils.import_utils import is_xformers_available
|
|
|
|
|
from .cross_attention import CrossAttention
|
|
|
|
|
from .modeling_utils import ModelMixin
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@dataclass
class Transformer2DModelOutput(BaseOutput):
    """
    Output container returned by [`Transformer2DModel.forward`] when `return_dict=True`.

    Args:
        sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` or `(batch size, num_vector_embeds - 1, num_latent_pixels)` if [`Transformer2DModel`] is discrete):
            Hidden states conditioned on `encoder_hidden_states` input. If discrete, returns probability distributions
            for the unnoised latent pixels.
    """

    # Continuous mode: denoised feature map; discrete mode: log-probabilities over
    # the non-masked vector-embedding classes for every latent pixel.
    sample: torch.FloatTensor
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if is_xformers_available():
|
|
|
|
@ -46,213 +29,6 @@ else:
|
|
|
|
|
xformers = None
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class Transformer2DModel(ModelMixin, ConfigMixin):
    """
    Transformer model for image-like data. Takes either discrete (classes of vector embeddings) or continuous (actual
    embeddings) inputs.

    When input is continuous: First, project the input (aka embedding) and reshape to b, t, d. Then apply standard
    transformer action. Finally, reshape to image.

    When input is discrete: First, input (classes of latent pixels) is converted to embeddings and has positional
    embeddings applied, see `ImagePositionalEmbeddings`. Then apply standard transformer action. Finally, predict
    classes of unnoised image.

    Note that it is assumed one of the input classes is the masked latent pixel. The predicted classes of the unnoised
    image do not contain a prediction for the masked pixel as the unnoised image cannot be masked.

    Parameters:
        num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention.
        attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head.
        in_channels (`int`, *optional*):
            Pass if the input is continuous. The number of channels in the input and output.
        num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use.
        dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
        norm_num_groups (`int`, *optional*, defaults to 32):
            Number of groups for the input `GroupNorm` (continuous mode only).
        cross_attention_dim (`int`, *optional*): The number of encoder_hidden_states dimensions to use.
        attention_bias (`bool`, *optional*):
            Configure if the TransformerBlocks' attention should contain a bias parameter.
        sample_size (`int`, *optional*): Pass if the input is discrete. The width of the latent images.
            Note that this is fixed at training time as it is used for learning a number of position embeddings. See
            `ImagePositionalEmbeddings`.
        num_vector_embeds (`int`, *optional*):
            Pass if the input is discrete. The number of classes of the vector embeddings of the latent pixels.
            Includes the class for the masked latent pixel.
        activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
        num_embeds_ada_norm ( `int`, *optional*): Pass if at least one of the norm_layers is `AdaLayerNorm`.
            The number of diffusion steps used during training. Note that this is fixed at training time as it is used
            to learn a number of embeddings that are added to the hidden states. During inference, you can denoise for
            up to but not more than steps than `num_embeds_ada_norm`.
        use_linear_projection (`bool`, *optional*, defaults to `False`):
            If `True`, use `nn.Linear` instead of 1x1 `nn.Conv2d` for the input/output projections.
        only_cross_attention (`bool`, *optional*, defaults to `False`):
            Whether the transformer blocks should only use cross-attention.
        upcast_attention (`bool`, *optional*, defaults to `False`):
            Whether attention should be computed in float32 for numerical stability.
    """

    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
        use_linear_projection: bool = False,
        only_cross_attention: bool = False,
        upcast_attention: bool = False,
    ):
        super().__init__()
        self.use_linear_projection = use_linear_projection
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        # 1. Transformer2DModel can process both standard continuous images of shape
        # `(batch_size, num_channels, width, height)` as well as quantized image embeddings of
        # shape `(batch_size, num_image_vectors)`.
        # Define whether input is continuous or discrete depending on configuration.
        self.is_input_continuous = in_channels is not None
        self.is_input_vectorized = num_vector_embeds is not None

        # Exactly one of the two input modes must be selected.
        if self.is_input_continuous and self.is_input_vectorized:
            raise ValueError(
                f"Cannot define both `in_channels`: {in_channels} and `num_vector_embeds`: {num_vector_embeds}. Make"
                " sure that either `in_channels` or `num_vector_embeds` is None."
            )
        elif not self.is_input_continuous and not self.is_input_vectorized:
            raise ValueError(
                f"Has to define either `in_channels`: {in_channels} or `num_vector_embeds`: {num_vector_embeds}. Make"
                " sure that either `in_channels` or `num_vector_embeds` is not None."
            )

        # 2. Define input layers
        if self.is_input_continuous:
            self.in_channels = in_channels

            self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
            if use_linear_projection:
                self.proj_in = nn.Linear(in_channels, inner_dim)
            else:
                self.proj_in = nn.Conv2d(in_channels, inner_dim, kernel_size=1, stride=1, padding=0)
        elif self.is_input_vectorized:
            assert sample_size is not None, "Transformer2DModel over discrete input must provide sample_size"
            assert num_vector_embeds is not None, "Transformer2DModel over discrete input must provide num_embed"

            self.height = sample_size
            self.width = sample_size
            self.num_vector_embeds = num_vector_embeds
            self.num_latent_pixels = self.height * self.width

            # Learned class + positional embedding that lifts LongTensor pixel classes to `inner_dim` features.
            self.latent_image_embedding = ImagePositionalEmbeddings(
                num_embed=num_vector_embeds, embed_dim=inner_dim, height=self.height, width=self.width
            )

        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    cross_attention_dim=cross_attention_dim,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                    attention_bias=attention_bias,
                    only_cross_attention=only_cross_attention,
                    upcast_attention=upcast_attention,
                )
                for d in range(num_layers)
            ]
        )

        # 4. Define output layers
        if self.is_input_continuous:
            if use_linear_projection:
                # BUGFIX: `proj_out` maps transformer features (`inner_dim`) back to image channels
                # (`in_channels`); the original `nn.Linear(in_channels, inner_dim)` had the argument
                # order swapped and only worked by accident when inner_dim == in_channels.
                self.proj_out = nn.Linear(inner_dim, in_channels)
            else:
                self.proj_out = nn.Conv2d(inner_dim, in_channels, kernel_size=1, stride=1, padding=0)
        elif self.is_input_vectorized:
            self.norm_out = nn.LayerNorm(inner_dim)
            # `num_vector_embeds - 1`: no logit is produced for the masked-pixel class.
            self.out = nn.Linear(inner_dim, self.num_vector_embeds - 1)

    def forward(
        self,
        hidden_states,
        encoder_hidden_states=None,
        timestep=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        """
        Args:
            hidden_states ( When discrete, `torch.LongTensor` of shape `(batch size, num latent pixels)`.
                When continuous, `torch.FloatTensor` of shape `(batch size, channel, height, width)`): Input
                hidden_states
            encoder_hidden_states ( `torch.LongTensor` of shape `(batch size, encoder_hidden_states dim)`, *optional*):
                Conditional embeddings for cross attention layer. If not given, cross-attention defaults to
                self-attention.
            timestep ( `torch.long`, *optional*):
                Optional timestep to be applied as an embedding in AdaLayerNorm's. Used to indicate denoising step.
            cross_attention_kwargs (`dict`, *optional*):
                Keyword arguments forwarded to each transformer block's attention layers.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`models.attention.Transformer2DModelOutput`] instead of a plain tuple.

        Returns:
            [`~models.attention.Transformer2DModelOutput`] or `tuple`: [`~models.attention.Transformer2DModelOutput`]
            if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the sample
            tensor.
        """
        # 1. Input: project/reshape the image to a (batch, tokens, features) sequence.
        if self.is_input_continuous:
            batch, channel, height, width = hidden_states.shape
            residual = hidden_states

            hidden_states = self.norm(hidden_states)
            if not self.use_linear_projection:
                # Conv projection operates on NCHW, so project first, then flatten to tokens.
                hidden_states = self.proj_in(hidden_states)
                inner_dim = hidden_states.shape[1]
                hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * width, inner_dim)
            else:
                # Linear projection operates on the last dim, so flatten to tokens first.
                inner_dim = hidden_states.shape[1]
                hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * width, inner_dim)
                hidden_states = self.proj_in(hidden_states)
        elif self.is_input_vectorized:
            hidden_states = self.latent_image_embedding(hidden_states)

        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
            )

        # 3. Output: undo the input projection/reshape (mirror of step 1).
        if self.is_input_continuous:
            if not self.use_linear_projection:
                hidden_states = hidden_states.reshape(batch, height, width, inner_dim).permute(0, 3, 1, 2).contiguous()
                hidden_states = self.proj_out(hidden_states)
            else:
                hidden_states = self.proj_out(hidden_states)
                # NOTE(review): after the fixed proj_out the last dim is `in_channels`; the reshape
                # below uses `inner_dim`, which matches only when in_channels == inner_dim — the
                # configuration all known callers use. TODO confirm before generalizing.
                hidden_states = hidden_states.reshape(batch, height, width, inner_dim).permute(0, 3, 1, 2).contiguous()

            output = hidden_states + residual
        elif self.is_input_vectorized:
            hidden_states = self.norm_out(hidden_states)
            logits = self.out(hidden_states)
            # (batch, self.num_vector_embeds - 1, self.num_latent_pixels)
            logits = logits.permute(0, 2, 1)

            # log(p(x_0)) — computed in float64 for numerical stability, then cast back.
            output = F.log_softmax(logits.double(), dim=1).float()

        if not return_dict:
            return (output,)

        return Transformer2DModelOutput(sample=output)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class AttentionBlock(nn.Module):
|
|
|
|
|
"""
|
|
|
|
|
An attention block that allows spatial positions to attend to each other. Originally ported from here, but adapted
|
|
|
|
@ -624,136 +400,3 @@ class AdaLayerNorm(nn.Module):
|
|
|
|
|
scale, shift = torch.chunk(emb, 2)
|
|
|
|
|
x = self.norm(x) * (1 + scale) + shift
|
|
|
|
|
return x
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class DualTransformer2DModel(nn.Module):
    """
    Dual transformer wrapper that combines two `Transformer2DModel`s for mixed inference.

    Parameters:
        num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention.
        attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head.
        in_channels (`int`, *optional*):
            Pass if the input is continuous. The number of channels in the input and output.
        num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use.
        dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
        norm_num_groups (`int`, *optional*, defaults to 32):
            Number of groups for the inner transformers' input `GroupNorm`.
        cross_attention_dim (`int`, *optional*): The number of encoder_hidden_states dimensions to use.
        attention_bias (`bool`, *optional*):
            Configure if the TransformerBlocks' attention should contain a bias parameter.
        sample_size (`int`, *optional*): Pass if the input is discrete. The width of the latent images.
            Note that this is fixed at training time as it is used for learning a number of position embeddings. See
            `ImagePositionalEmbeddings`.
        num_vector_embeds (`int`, *optional*):
            Pass if the input is discrete. The number of classes of the vector embeddings of the latent pixels.
            Includes the class for the masked latent pixel.
        activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
        num_embeds_ada_norm ( `int`, *optional*): Pass if at least one of the norm_layers is `AdaLayerNorm`.
            The number of diffusion steps used during training. Note that this is fixed at training time as it is used
            to learn a number of embeddings that are added to the hidden states. During inference, you can denoise for
            up to but not more than steps than `num_embeds_ada_norm`.
    """

    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()

        # Two identically-configured transformers; which one handles which condition
        # slice is decided at inference time via `transformer_index_for_condition`.
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        """
        Args:
            hidden_states ( When discrete, `torch.LongTensor` of shape `(batch size, num latent pixels)`.
                When continuous, `torch.FloatTensor` of shape `(batch size, channel, height, width)`): Input
                hidden_states
            encoder_hidden_states ( `torch.LongTensor` of shape `(batch size, encoder_hidden_states dim)`, *optional*):
                Conditional embeddings for cross attention layer. If not given, cross-attention defaults to
                self-attention.
            timestep ( `torch.long`, *optional*):
                Optional timestep to be applied as an embedding in AdaLayerNorm's. Used to indicate denoising step.
            attention_mask (`torch.FloatTensor`, *optional*):
                Optional attention mask to be applied in CrossAttention
            cross_attention_kwargs (`dict`, *optional*):
                Keyword arguments forwarded to the inner transformers' attention layers.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`models.attention.Transformer2DModelOutput`] instead of a plain tuple.

        Returns:
            [`~models.attention.Transformer2DModelOutput`] or `tuple`: [`~models.attention.Transformer2DModelOutput`]
            if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the sample
            tensor.
        """
        residual = hidden_states

        # attention_mask is accepted for interface compatibility but not used yet.
        # Run each condition slice through its assigned transformer and keep only the
        # residual delta each one contributes on top of the shared input.
        deltas = []
        token_offset = 0
        for cond_idx in range(2):
            cond_len = self.condition_lengths[cond_idx]
            condition_tokens = encoder_hidden_states[:, token_offset : token_offset + cond_len]
            chosen = self.transformer_index_for_condition[cond_idx]
            transformed = self.transformers[chosen](
                residual,
                encoder_hidden_states=condition_tokens,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            deltas.append(transformed - residual)
            token_offset += cond_len

        # Blend the two deltas and re-apply the shared residual.
        output_states = deltas[0] * self.mix_ratio + deltas[1] * (1 - self.mix_ratio)
        output_states = output_states + residual

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
|
|
|
|
|