import inspect
import re
from typing import Callable, List, Optional, Union

import numpy as np
import torch

import PIL
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, logging
from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

re_attention = re.compile(
    r"""
\\\(|
\\\)|
\\\[|
\\]|
\\\\|
\\|
\(|
\[|
:([+-]?[.\d]+)\)|
\)|
]|
[^\\()\[\]:]+|
:
""",
    re.X,
)


def parse_prompt_attention(text):
    r"""
    Parses a string with attention tokens and returns a list of pairs: text and its associated weight.
    Accepted tokens are:
      (abc) - increases attention to abc by a multiplier of 1.1
      (abc:3.12) - increases attention to abc by a multiplier of 3.12
      [abc] - decreases attention to abc by a multiplier of 1.1
      \( - literal character '('
      \[ - literal character '['
      \) - literal character ')'
      \] - literal character ']'
      \\ - literal character '\'
      anything else - just text
    >>> parse_prompt_attention('normal text')
    [['normal text', 1.0]]
    >>> parse_prompt_attention('an (important) word')
    [['an ', 1.0], ['important', 1.1], [' word', 1.0]]
    >>> parse_prompt_attention('(unbalanced')
    [['unbalanced', 1.1]]
    >>> parse_prompt_attention('\(literal\]')
    [['(literal]', 1.0]]
    >>> parse_prompt_attention('(unnecessary)(parens)')
    [['unnecessaryparens', 1.1]]
    >>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).')
    [['a ', 1.0],
     ['house', 1.5730000000000004],
     [' ', 1.1],
     ['on', 1.0],
     [' a ', 1.1],
     ['hill', 0.55],
     [', sun, ', 1.1],
     ['sky', 1.4641000000000006],
     ['.', 1.1]]
    """

    res = []
    round_brackets = []
    square_brackets = []

    round_bracket_multiplier = 1.1
    square_bracket_multiplier = 1 / 1.1

    def multiply_range(start_position, multiplier):
        for p in range(start_position, len(res)):
            res[p][1] *= multiplier

    for m in re_attention.finditer(text):
        text = m.group(0)
        weight = m.group(1)

        if text.startswith("\\"):
            res.append([text[1:], 1.0])
        elif text == "(":
            round_brackets.append(len(res))
        elif text == "[":
            square_brackets.append(len(res))
        elif weight is not None and len(round_brackets) > 0:
            multiply_range(round_brackets.pop(), float(weight))
        elif text == ")" and len(round_brackets) > 0:
            multiply_range(round_brackets.pop(), round_bracket_multiplier)
        elif text == "]" and len(square_brackets) > 0:
            multiply_range(square_brackets.pop(), square_bracket_multiplier)
        else:
            res.append([text, 1.0])

    for pos in round_brackets:
        multiply_range(pos, round_bracket_multiplier)

    for pos in square_brackets:
        multiply_range(pos, square_bracket_multiplier)

    if len(res) == 0:
        res = [["", 1.0]]

    # merge runs of identical weights
    i = 0
    while i + 1 < len(res):
        if res[i][1] == res[i + 1][1]:
            res[i][0] += res[i + 1][0]
            res.pop(i + 1)
        else:
            i += 1

    return res
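
# Illustrative sketch of how nesting composes in the parser above: each
# unescaped pair of round brackets multiplies the enclosed weight by 1.1,
# and each pair of square brackets divides it by 1.1:
#
#   parse_prompt_attention("a ((cat)) in [rain]")
#   # -> [['a ', 1.0], ['cat', 1.2100000000000002], [' in ', 1.0],
#   #     ['rain', 0.9090909090909091]]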


def get_prompts_with_weights(pipe: DiffusionPipeline, prompt: List[str], max_length: int):
    r"""
    Tokenize a list of prompts and return the tokens together with the weight of each token.

    No padding, starting or ending token is included.
    """
    tokens = []
    weights = []
    truncated = False
    for text in prompt:
        texts_and_weights = parse_prompt_attention(text)
        text_token = []
        text_weight = []
        for word, weight in texts_and_weights:
            # tokenize and discard the starting and the ending token
            token = pipe.tokenizer(word).input_ids[1:-1]
            text_token += token
            # copy the weight by length of token
            text_weight += [weight] * len(token)
            # stop if the text is too long (longer than truncation limit)
            if len(text_token) > max_length:
                truncated = True
                break
        # truncate
        if len(text_token) > max_length:
            truncated = True
            text_token = text_token[:max_length]
            text_weight = text_weight[:max_length]
        tokens.append(text_token)
        weights.append(text_weight)
    if truncated:
        logger.warning("Prompt was truncated. Try to shorten the prompt or increase max_embeddings_multiples")
    return tokens, weights
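
# Minimal usage sketch (illustrative; assumes a local CLIP tokenizer — the
# stub stands in for a full pipeline, since only `pipe.tokenizer` is used here):
#
#   from types import SimpleNamespace
#   from transformers import CLIPTokenizer
#
#   tok = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
#   stub = SimpleNamespace(tokenizer=tok)
#   tokens, weights = get_prompts_with_weights(stub, ["a (red:1.5) car"], max_length=75)
#   # weights[0] holds 1.5 for every sub-token of "red" and 1.0 elsewhere,
#   # aligned one-to-one with tokens[0].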


def pad_tokens_and_weights(tokens, weights, max_length, bos, eos, no_boseos_middle=True, chunk_length=77):
    r"""
    Pad the tokens (with starting and ending tokens) and weights (with 1.0) to max_length.
    """
    max_embeddings_multiples = (max_length - 2) // (chunk_length - 2)
    weights_length = max_length if no_boseos_middle else max_embeddings_multiples * chunk_length
    for i in range(len(tokens)):
        tokens[i] = [bos] + tokens[i] + [eos] * (max_length - 1 - len(tokens[i]))
        if no_boseos_middle:
            weights[i] = [1.0] + weights[i] + [1.0] * (max_length - 1 - len(weights[i]))
        else:
            w = []
            if len(weights[i]) == 0:
                w = [1.0] * weights_length
            else:
                for j in range(max_embeddings_multiples):
                    w.append(1.0)  # weight for starting token in this chunk
                    w += weights[i][j * (chunk_length - 2) : min(len(weights[i]), (j + 1) * (chunk_length - 2))]
                    w.append(1.0)  # weight for ending token in this chunk
                w += [1.0] * (weights_length - len(w))
            weights[i] = w[:]

    return tokens, weights
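
# Worked example of the padding arithmetic (values follow directly from the
# code above): with chunk_length=77 and max_length=(77 - 2) * 2 + 2 = 152,
# a prompt of 80 content tokens becomes [bos] + 80 ids + 71 * [eos], i.e.
# 152 entries. With no_boseos_middle=False, the weights are regrouped into
# chunks of 77: a leading 1.0 for bos, up to 75 content weights, a trailing
# 1.0 for eos, then 1.0-padding up to weights_length = 2 * 77 = 154 entries.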


def get_unweighted_text_embeddings(
    pipe: DiffusionPipeline,
    text_input: torch.Tensor,
    chunk_length: int,
    no_boseos_middle: Optional[bool] = True,
):
    """
    When the token sequence is longer than the capacity of the text encoder,
    it is split into chunks and sent to the text encoder individually.
    """
    max_embeddings_multiples = (text_input.shape[1] - 2) // (chunk_length - 2)
    if max_embeddings_multiples > 1:
        text_embeddings = []
        for i in range(max_embeddings_multiples):
            # extract the i-th chunk
            text_input_chunk = text_input[:, i * (chunk_length - 2) : (i + 1) * (chunk_length - 2) + 2].clone()

            # cover the head and the tail by the starting and the ending tokens
            text_input_chunk[:, 0] = text_input[0, 0]
            text_input_chunk[:, -1] = text_input[0, -1]
            text_embedding = pipe.text_encoder(text_input_chunk)[0]

            if no_boseos_middle:
                if i == 0:
                    # discard the ending token
                    text_embedding = text_embedding[:, :-1]
                elif i == max_embeddings_multiples - 1:
                    # discard the starting token
                    text_embedding = text_embedding[:, 1:]
                else:
                    # discard both starting and ending tokens
                    text_embedding = text_embedding[:, 1:-1]

            text_embeddings.append(text_embedding)
        text_embeddings = torch.cat(text_embeddings, dim=1)
    else:
        text_embeddings = pipe.text_encoder(text_input)[0]
    return text_embeddings
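
# Chunking arithmetic, as implemented above: with chunk_length=77 a padded
# input of shape (batch, 152) yields (152 - 2) // (77 - 2) = 2 chunks. Each
# chunk covers 75 content positions plus a shared bos/eos, is encoded
# separately, and the per-chunk embeddings are concatenated along the
# sequence dimension.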


def get_weighted_text_embeddings(
    pipe: DiffusionPipeline,
    prompt: Union[str, List[str]],
    uncond_prompt: Optional[Union[str, List[str]]] = None,
    max_embeddings_multiples: Optional[int] = 1,
    no_boseos_middle: Optional[bool] = False,
    skip_parsing: Optional[bool] = False,
    skip_weighting: Optional[bool] = False,
    **kwargs,
):
    r"""
    Prompts can be assigned with local weights using brackets. For example,
    prompt 'A (very beautiful) masterpiece' highlights the words 'very beautiful',
    and the embedding tokens corresponding to the words get multiplied by a constant, 1.1.

    Also, to regularize the embedding, the weighted embedding is scaled to preserve the original mean.

    Args:
        pipe (`DiffusionPipeline`):
            Pipe to provide access to the tokenizer and the text encoder.
        prompt (`str` or `List[str]`):
            The prompt or prompts to guide the image generation.
        uncond_prompt (`str` or `List[str]`):
            The unconditional prompt or prompts to guide the image generation. If an unconditional prompt
            is provided, the embeddings of prompt and uncond_prompt are concatenated.
        max_embeddings_multiples (`int`, *optional*, defaults to `1`):
            The max multiple length of prompt embeddings compared to the max output length of text encoder.
        no_boseos_middle (`bool`, *optional*, defaults to `False`):
            When the token sequence spans multiple chunks of the text encoder's capacity, whether to keep
            the starting and ending tokens in each of the middle chunks.
        skip_parsing (`bool`, *optional*, defaults to `False`):
            Skip the parsing of brackets.
        skip_weighting (`bool`, *optional*, defaults to `False`):
            Skip the weighting. When parsing is skipped, this is forced to `True`.
    """
    max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2
    if isinstance(prompt, str):
        prompt = [prompt]

    if not skip_parsing:
        prompt_tokens, prompt_weights = get_prompts_with_weights(pipe, prompt, max_length - 2)
        if uncond_prompt is not None:
            if isinstance(uncond_prompt, str):
                uncond_prompt = [uncond_prompt]
            uncond_tokens, uncond_weights = get_prompts_with_weights(pipe, uncond_prompt, max_length - 2)
    else:
        prompt_tokens = [
            token[1:-1] for token in pipe.tokenizer(prompt, max_length=max_length, truncation=True).input_ids
        ]
        prompt_weights = [[1.0] * len(token) for token in prompt_tokens]
        if uncond_prompt is not None:
            if isinstance(uncond_prompt, str):
                uncond_prompt = [uncond_prompt]
            uncond_tokens = [
                token[1:-1]
                for token in pipe.tokenizer(uncond_prompt, max_length=max_length, truncation=True).input_ids
            ]
            uncond_weights = [[1.0] * len(token) for token in uncond_tokens]

    # round up the longest length of tokens to a multiple of (model_max_length - 2)
    max_length = max([len(token) for token in prompt_tokens])
    if uncond_prompt is not None:
        max_length = max(max_length, max([len(token) for token in uncond_tokens]))

    max_embeddings_multiples = min(
        max_embeddings_multiples,
        (max_length - 1) // (pipe.tokenizer.model_max_length - 2) + 1,
    )
    max_embeddings_multiples = max(1, max_embeddings_multiples)
    max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2

    # pad the length of tokens and weights
    bos = pipe.tokenizer.bos_token_id
    eos = pipe.tokenizer.eos_token_id
    prompt_tokens, prompt_weights = pad_tokens_and_weights(
        prompt_tokens,
        prompt_weights,
        max_length,
        bos,
        eos,
        no_boseos_middle=no_boseos_middle,
        chunk_length=pipe.tokenizer.model_max_length,
    )
    prompt_tokens = torch.tensor(prompt_tokens, dtype=torch.long, device=pipe.device)
    if uncond_prompt is not None:
        uncond_tokens, uncond_weights = pad_tokens_and_weights(
            uncond_tokens,
            uncond_weights,
            max_length,
            bos,
            eos,
            no_boseos_middle=no_boseos_middle,
            chunk_length=pipe.tokenizer.model_max_length,
        )
        uncond_tokens = torch.tensor(uncond_tokens, dtype=torch.long, device=pipe.device)

    # get the embeddings
    text_embeddings = get_unweighted_text_embeddings(
        pipe,
        prompt_tokens,
        pipe.tokenizer.model_max_length,
        no_boseos_middle=no_boseos_middle,
    )
    prompt_weights = torch.tensor(prompt_weights, dtype=text_embeddings.dtype, device=pipe.device)
    if uncond_prompt is not None:
        uncond_embeddings = get_unweighted_text_embeddings(
            pipe,
            uncond_tokens,
            pipe.tokenizer.model_max_length,
            no_boseos_middle=no_boseos_middle,
        )
        uncond_weights = torch.tensor(uncond_weights, dtype=uncond_embeddings.dtype, device=pipe.device)

    # assign weights to the prompts and normalize in the sense of mean
    # TODO: should we normalize by chunk or as a whole (current implementation)?
    if (not skip_parsing) and (not skip_weighting):
        previous_mean = text_embeddings.mean(axis=[-2, -1])
        text_embeddings *= prompt_weights.unsqueeze(-1)
        text_embeddings *= (previous_mean / text_embeddings.mean(axis=[-2, -1])).unsqueeze(-1).unsqueeze(-1)
        if uncond_prompt is not None:
            previous_mean = uncond_embeddings.mean(axis=[-2, -1])
            uncond_embeddings *= uncond_weights.unsqueeze(-1)
            uncond_embeddings *= (previous_mean / uncond_embeddings.mean(axis=[-2, -1])).unsqueeze(-1).unsqueeze(-1)

    if uncond_prompt is not None:
        return text_embeddings, uncond_embeddings
    return text_embeddings, None
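
# Usage sketch (illustrative; any Stable Diffusion pipeline exposing
# `tokenizer`, `text_encoder`, and `device` works here):
#
#   text_emb, uncond_emb = get_weighted_text_embeddings(
#       pipe,
#       prompt="a (very beautiful:1.3) landscape",
#       uncond_prompt="",
#       max_embeddings_multiples=3,
#   )
#   # The sequence length grows with max_embeddings_multiples; both tensors
#   # can then feed classifier-free guidance as in `__call__` below.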


def preprocess_image(image):
    w, h = image.size
    w, h = map(lambda x: x - x % 32, (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL.Image.LANCZOS)
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
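
# Shape walk-through for the function above: a 513x768 RGB PIL image is
# snapped down to 512x768, scaled to [0, 1], reordered from HWC into a
# (1, 3, 768, 512) NCHW tensor, and finally mapped to [-1, 1] for the VAE.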


def preprocess_mask(mask):
    mask = mask.convert("L")
    w, h = mask.size
    w, h = map(lambda x: x - x % 32, (w, h))  # resize to integer multiple of 32
    mask = mask.resize((w // 8, h // 8), resample=PIL.Image.NEAREST)
    mask = np.array(mask).astype(np.float32) / 255.0
    mask = np.tile(mask, (4, 1, 1))  # replicate across the 4 latent channels
    mask = mask[None]  # add a batch dimension
    mask = 1 - mask  # repaint white, keep black
    mask = torch.from_numpy(mask)
    return mask
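
# For a 512x512 mask this yields a (1, 4, 64, 64) tensor: downsampled by 8
# to match the latent resolution, tiled over the 4 latent channels, and
# inverted so white pixels become 0 (take the newly denoised latents, i.e.
# get repainted) while black pixels stay 1 (keep the original latents).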


class StableDiffusionLongPromptWeightingPipeline(DiffusionPipeline):
    r"""
    Pipeline for text-to-image generation using Stable Diffusion without a token length limit, with support for
    parsing weights in the prompt.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

    Args:
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
        text_encoder ([`CLIPTextModel`]):
            Frozen text-encoder. Stable Diffusion uses the text portion of
            [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
            the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
        tokenizer (`CLIPTokenizer`):
            Tokenizer of class
            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
        unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
            [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
        safety_checker ([`StableDiffusionSafetyChecker`]):
            Classification module that estimates whether generated images could be considered offensive or harmful.
            Please refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details.
        feature_extractor ([`CLIPFeatureExtractor`]):
            Model that extracts features from generated images to be used as inputs for the `safety_checker`.
    """

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPFeatureExtractor,
    ):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly, as leaving `steps_offset` might lead to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend keeping the safety filter enabled in all public-facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        r"""
        Enable sliced attention computation.

        When this option is enabled, the attention module will split the input tensor in slices, to compute attention
        in several steps. This is useful to save some memory in exchange for a small speed decrease.

        Args:
            slice_size (`str` or `int`, *optional*, defaults to `"auto"`):
                When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
                a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case,
                `attention_head_dim` must be a multiple of `slice_size`.
        """
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        r"""
        Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go
        back to computing attention in one step.
        """
        # set slice_size = `None` to disable `attention slicing`
        self.enable_attention_slicing(None)
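
    # Usage note (illustrative): `pipe.enable_attention_slicing()` uses
    # slice_size = attention_head_dim // 2, computing attention in two steps;
    # an explicit `pipe.enable_attention_slicing(1)` yields the most slices
    # and the lowest peak memory, at a larger speed cost.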

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        negative_prompt: Optional[Union[str, List[str]]] = None,
        init_image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        mask_image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        strength: float = 0.8,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        max_embeddings_multiples: Optional[int] = 3,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: Optional[int] = 1,
        **kwargs,
    ):
        r"""
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`):
                The prompt or prompts to guide the image generation.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
                if `guidance_scale` is less than `1`).
            init_image (`torch.FloatTensor` or `PIL.Image.Image`):
                `Image`, or tensor representing an image batch, that will be used as the starting point for the
                process.
            mask_image (`torch.FloatTensor` or `PIL.Image.Image`):
                `Image`, or tensor representing an image batch, to mask `init_image`. White pixels in the mask will be
                replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a
                PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should
                contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`.
            height (`int`, *optional*, defaults to 512):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to 512):
                The width in pixels of the generated image.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2 of the [Imagen
                paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. A higher guidance scale encourages the model to generate images that are closely linked to the
                text `prompt`, usually at the expense of lower image quality.
            strength (`float`, *optional*, defaults to 0.8):
                Conceptually, indicates how much to transform the reference `init_image`. Must be between 0 and 1.
                `init_image` will be used as a starting point, adding more noise to it the larger the `strength`. The
                number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added
                noise will be maximum and the denoising process will run for the full number of iterations specified in
                `num_inference_steps`. A value of 1, therefore, essentially ignores `init_image`.
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator`, *optional*):
                A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
                deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `generator`.
            max_embeddings_multiples (`int`, *optional*, defaults to `3`):
                The max multiple length of prompt embeddings compared to the max output length of text encoder.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will be
                called at every step.

        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a
            `tuple`. When returning a tuple, the first element is a list with the generated images, and the second
            element is a list of `bool`s denoting whether the corresponding generated image likely represents
            "not-safe-for-work" (nsfw) content, according to the `safety_checker`.
        """

        if isinstance(prompt, str):
            batch_size = 1
            prompt = [prompt]
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings

        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if negative_prompt is None:
            negative_prompt = [""] * batch_size
        elif isinstance(negative_prompt, str):
            negative_prompt = [negative_prompt] * batch_size
        if batch_size != len(negative_prompt):
            raise ValueError(
                f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                " the batch size of `prompt`."
            )

        text_embeddings, uncond_embeddings = get_weighted_text_embeddings(
            pipe=self,
            prompt=prompt,
            uncond_prompt=negative_prompt if do_classifier_free_guidance else None,
            max_embeddings_multiples=max_embeddings_multiples,
            **kwargs,
        )
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        if do_classifier_free_guidance:
            bs_embed, seq_len, _ = uncond_embeddings.shape
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        latents_dtype = text_embeddings.dtype
        init_latents_orig = None
        mask = None
        noise = None

        if init_image is None:
            # get the initial random noise unless the user supplied it

            # Unlike in other pipelines, latents need to be generated in the target device
            # for 1-to-1 results reproducibility with the CompVis implementation.
            # However this currently doesn't work in `mps`.
            latents_shape = (
                batch_size * num_images_per_prompt,
                self.unet.in_channels,
                height // 8,
                width // 8,
            )

            if latents is None:
                if self.device.type == "mps":
                    # randn does not exist on mps
                    latents = torch.randn(
                        latents_shape,
                        generator=generator,
                        device="cpu",
                        dtype=latents_dtype,
                    ).to(self.device)
                else:
                    latents = torch.randn(
                        latents_shape,
                        generator=generator,
                        device=self.device,
                        dtype=latents_dtype,
                    )
            else:
                if latents.shape != latents_shape:
                    raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
                latents = latents.to(self.device)

            timesteps = self.scheduler.timesteps.to(self.device)

            # scale the initial noise by the standard deviation required by the scheduler
            latents = latents * self.scheduler.init_noise_sigma
        else:
            if isinstance(init_image, PIL.Image.Image):
                init_image = preprocess_image(init_image)
            # encode the init image into latents and scale the latents
            init_image = init_image.to(device=self.device, dtype=latents_dtype)
            init_latent_dist = self.vae.encode(init_image).latent_dist
            init_latents = init_latent_dist.sample(generator=generator)
            init_latents = 0.18215 * init_latents
            init_latents = torch.cat([init_latents] * batch_size * num_images_per_prompt, dim=0)
            init_latents_orig = init_latents

            # preprocess mask
            if mask_image is not None:
                if isinstance(mask_image, PIL.Image.Image):
                    mask_image = preprocess_mask(mask_image)
                mask_image = mask_image.to(device=self.device, dtype=latents_dtype)
                mask = torch.cat([mask_image] * batch_size * num_images_per_prompt)

                # check sizes
                if mask.shape != init_latents.shape:
                    raise ValueError("The mask and init_image should be the same size!")

            # get the original timestep using init_timestep
            offset = self.scheduler.config.get("steps_offset", 0)
            init_timestep = int(num_inference_steps * strength) + offset
            init_timestep = min(init_timestep, num_inference_steps)

            timesteps = self.scheduler.timesteps[-init_timestep]
            timesteps = torch.tensor([timesteps] * batch_size * num_images_per_prompt, device=self.device)

            # add noise to latents using the timesteps
            if self.device.type == "mps":
                # randn does not exist on mps
                noise = torch.randn(
                    init_latents.shape,
                    generator=generator,
                    device="cpu",
                    dtype=latents_dtype,
                ).to(self.device)
            else:
                noise = torch.randn(
                    init_latents.shape,
                    generator=generator,
                    device=self.device,
                    dtype=latents_dtype,
                )
            latents = self.scheduler.add_noise(init_latents, noise, timesteps)

            t_start = max(num_inference_steps - init_timestep + offset, 0)
            timesteps = self.scheduler.timesteps[t_start:].to(self.device)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in the DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            if mask is not None:
                # masking: keep the unmasked region on the original image's noise trajectory
                init_latents_proper = self.scheduler.add_noise(init_latents_orig, noise, torch.tensor([t]))
                latents = (init_latents_proper * mask) + (latents * (1 - mask))

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
                self.device
            )
            image, has_nsfw_concept = self.safety_checker(
                images=image,
                clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype),
            )
        else:
            has_nsfw_concept = None

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
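
    # Dispatch note: `__call__` performs plain text-to-image when `init_image`
    # is None, image-to-image when `init_image` is given, and inpainting when
    # `mask_image` is given as well. The `text2img`, `img2img`, and `inpaint`
    # methods below are thin wrappers that forward to it.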

    def text2img(
        self,
        prompt: Union[str, List[str]],
        negative_prompt: Optional[Union[str, List[str]]] = None,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        max_embeddings_multiples: Optional[int] = 3,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: Optional[int] = 1,
        **kwargs,
    ):
        r"""
        Function for text-to-image generation.

        Args:
            prompt (`str` or `List[str]`):
                The prompt or prompts to guide the image generation.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
                if `guidance_scale` is less than `1`).
            height (`int`, *optional*, defaults to 512):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to 512):
                The width in pixels of the generated image.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2 of the [Imagen
                paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. A higher guidance scale encourages the model to generate images that are closely linked to the
                text `prompt`, usually at the expense of lower image quality.
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator`, *optional*):
                A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
                deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `generator`.
            max_embeddings_multiples (`int`, *optional*, defaults to `3`):
                The max multiple length of prompt embeddings compared to the max output length of text encoder.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will be
                called at every step.

        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a
            `tuple`. When returning a tuple, the first element is a list with the generated images, and the second
            element is a list of `bool`s denoting whether the corresponding generated image likely represents
            "not-safe-for-work" (nsfw) content, according to the `safety_checker`.
        """
        return self.__call__(
            prompt=prompt,
            negative_prompt=negative_prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            max_embeddings_multiples=max_embeddings_multiples,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
            **kwargs,
        )

    def img2img(
        self,
        init_image: Union[torch.FloatTensor, PIL.Image.Image],
        prompt: Union[str, List[str]],
        negative_prompt: Optional[Union[str, List[str]]] = None,
        strength: float = 0.8,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        num_images_per_prompt: Optional[int] = 1,
        eta: Optional[float] = 0.0,
        generator: Optional[torch.Generator] = None,
        max_embeddings_multiples: Optional[int] = 3,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: Optional[int] = 1,
        **kwargs,
    ):
        r"""
        Function for image-to-image generation.

        Args:
            init_image (`torch.FloatTensor` or `PIL.Image.Image`):
                `Image`, or tensor representing an image batch, that will be used as the starting point for the
                process.
            prompt (`str` or `List[str]`):
                The prompt or prompts to guide the image generation.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
                if `guidance_scale` is less than `1`).
            strength (`float`, *optional*, defaults to 0.8):
                Conceptually, indicates how much to transform the reference `init_image`. Must be between 0 and 1.
                `init_image` will be used as a starting point, adding more noise to it the larger the `strength`. The
                number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added
                noise will be maximum and the denoising process will run for the full number of iterations specified in
                `num_inference_steps`. A value of 1, therefore, essentially ignores `init_image`.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference. This parameter will be modulated by `strength`.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2 of the [Imagen
                paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. A higher guidance scale encourages the model to generate images that are closely linked to the
                text `prompt`, usually at the expense of lower image quality.
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator`, *optional*):
                A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
                deterministic.
            max_embeddings_multiples (`int`, *optional*, defaults to `3`):
                The max multiple length of prompt embeddings compared to the max output length of text encoder.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will be
                called at every step.

        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a
            `tuple`. When returning a tuple, the first element is a list with the generated images, and the second
            element is a list of `bool`s denoting whether the corresponding generated image likely represents
            "not-safe-for-work" (nsfw) content, according to the `safety_checker`.
        """
        return self.__call__(
            prompt=prompt,
            negative_prompt=negative_prompt,
            init_image=init_image,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            strength=strength,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            max_embeddings_multiples=max_embeddings_multiples,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
            **kwargs,
        )

    def inpaint(
        self,
        init_image: Union[torch.FloatTensor, PIL.Image.Image],
        mask_image: Union[torch.FloatTensor, PIL.Image.Image],
        prompt: Union[str, List[str]],
        negative_prompt: Optional[Union[str, List[str]]] = None,
        strength: float = 0.8,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        num_images_per_prompt: Optional[int] = 1,
        eta: Optional[float] = 0.0,
        generator: Optional[torch.Generator] = None,
        max_embeddings_multiples: Optional[int] = 3,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: Optional[int] = 1,
        **kwargs,
    ):
        r"""
        Function for inpainting.

        Args:
            init_image (`torch.FloatTensor` or `PIL.Image.Image`):
                `Image`, or tensor representing an image batch, that will be used as the starting point for the
                process. This is the image whose masked region will be inpainted.
            mask_image (`torch.FloatTensor` or `PIL.Image.Image`):
                `Image`, or tensor representing an image batch, to mask `init_image`. White pixels in the mask will be
                replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a
                PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should
                contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`.
            prompt (`str` or `List[str]`):
                The prompt or prompts to guide the image generation.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
                if `guidance_scale` is less than `1`).
            strength (`float`, *optional*, defaults to 0.8):
                Conceptually, indicates how much to inpaint the masked area. Must be between 0 and 1. When `strength`
                is 1, the denoising process will be run on the masked area for the full number of iterations specified
                in `num_inference_steps`. `init_image` will be used as a reference for the masked area, adding more
                noise to that region the larger the `strength`. If `strength` is 0, no inpainting will occur.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The reference number of denoising steps. More denoising steps usually lead to a higher quality image at
                the expense of slower inference. This parameter will be modulated by `strength`, as explained above.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2 of the [Imagen
                paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. A higher guidance scale encourages the model to generate images that are closely linked to the
                text `prompt`, usually at the expense of lower image quality.
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator`, *optional*):
                A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
                deterministic.
            max_embeddings_multiples (`int`, *optional*, defaults to `3`):
                The max multiple length of prompt embeddings compared to the max output length of text encoder.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will be
                called at every step.

        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a
            `tuple`. When returning a tuple, the first element is a list with the generated images, and the second
            element is a list of `bool`s denoting whether the corresponding generated image likely represents
            "not-safe-for-work" (nsfw) content, according to the `safety_checker`.
        """
        return self.__call__(
            prompt=prompt,
            negative_prompt=negative_prompt,
            init_image=init_image,
            mask_image=mask_image,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            strength=strength,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            max_embeddings_multiples=max_embeddings_multiples,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
            **kwargs,
        )
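

# Usage sketch (illustrative). This module follows the diffusers community
# pipeline layout; one common way to load such a file is via the
# `custom_pipeline` argument, though the exact identifier depends on how
# the file is published:
#
#   from diffusers import DiffusionPipeline
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4",
#       custom_pipeline="lpw_stable_diffusion",
#   )
#   image = pipe.text2img(
#       "a photo of an (astronaut:1.2) riding a [brown] horse, highly detailed",
#       max_embeddings_multiples=3,
#   ).images[0]
#
#   # img2img and inpaint follow the same pattern, passing `init_image`
#   # (and `mask_image` for inpainting) as documented in the methods above.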