import inspect
import re
from typing import Callable, List, Optional, Union

import numpy as np
import torch

import diffusers
import PIL.Image  # import the submodule explicitly so PIL.Image attributes are always available
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, SchedulerMixin
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import deprecate, logging
from packaging import version
from transformers import CLIPFeatureExtractor, CLIPTokenizer


try:
    from diffusers.pipelines.onnx_utils import ORT_TO_NP_TYPE
except ImportError:
    ORT_TO_NP_TYPE = {
        "tensor(bool)": np.bool_,
        "tensor(int8)": np.int8,
        "tensor(uint8)": np.uint8,
        "tensor(int16)": np.int16,
        "tensor(uint16)": np.uint16,
        "tensor(int32)": np.int32,
        "tensor(uint32)": np.uint32,
        "tensor(int64)": np.int64,
        "tensor(uint64)": np.uint64,
        "tensor(float16)": np.float16,
        "tensor(float)": np.float32,
        "tensor(double)": np.float64,
    }

try:
    from diffusers.utils import PIL_INTERPOLATION
except ImportError:
    if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
        PIL_INTERPOLATION = {
            "linear": PIL.Image.Resampling.BILINEAR,
            "bilinear": PIL.Image.Resampling.BILINEAR,
            "bicubic": PIL.Image.Resampling.BICUBIC,
            "lanczos": PIL.Image.Resampling.LANCZOS,
            "nearest": PIL.Image.Resampling.NEAREST,
        }
    else:
        PIL_INTERPOLATION = {
            "linear": PIL.Image.LINEAR,
            "bilinear": PIL.Image.BILINEAR,
            "bicubic": PIL.Image.BICUBIC,
            "lanczos": PIL.Image.LANCZOS,
            "nearest": PIL.Image.NEAREST,
        }
# ------------------------------------------------------------------------------

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

re_attention = re.compile(
    r"""
\\\(|
\\\)|
\\\[|
\\]|
\\\\|
\\|
\(|
\[|
:([+-]?[.\d]+)\)|
\)|
]|
[^\\()\[\]:]+|
:
""",
    re.X,
)
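
# With re.X, the pattern above tokenizes a prompt into: escaped literals
# (\( \) \[ \] \\), opening brackets, an explicit ":weight)" suffix (group 1
# captures the numeric weight), closing brackets, plain text runs, and stray
# colons.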


def parse_prompt_attention(text):
    r"""
    Parses a string with attention tokens and returns a list of pairs: text and its associated weight.
    Accepted tokens are:
      (abc) - increases attention to abc by a multiplier of 1.1
      (abc:3.12) - increases attention to abc by a multiplier of 3.12
      [abc] - decreases attention to abc by a multiplier of 1.1
      \( - literal character '('
      \[ - literal character '['
      \) - literal character ')'
      \] - literal character ']'
      \\ - literal character '\'
      anything else - just text
    >>> parse_prompt_attention('normal text')
    [['normal text', 1.0]]
    >>> parse_prompt_attention('an (important) word')
    [['an ', 1.0], ['important', 1.1], [' word', 1.0]]
    >>> parse_prompt_attention('(unbalanced')
    [['unbalanced', 1.1]]
    >>> parse_prompt_attention('\(literal\]')
    [['(literal]', 1.0]]
    >>> parse_prompt_attention('(unnecessary)(parens)')
    [['unnecessaryparens', 1.1]]
    >>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).')
    [['a ', 1.0],
     ['house', 1.5730000000000004],
     [' ', 1.1],
     ['on', 1.0],
     [' a ', 1.1],
     ['hill', 0.55],
     [', sun, ', 1.1],
     ['sky', 1.4641000000000006],
     ['.', 1.1]]
    """

    res = []
    round_brackets = []
    square_brackets = []

    round_bracket_multiplier = 1.1
    square_bracket_multiplier = 1 / 1.1

    def multiply_range(start_position, multiplier):
        for p in range(start_position, len(res)):
            res[p][1] *= multiplier

    for m in re_attention.finditer(text):
        text = m.group(0)
        weight = m.group(1)

        if text.startswith("\\"):
            res.append([text[1:], 1.0])
        elif text == "(":
            round_brackets.append(len(res))
        elif text == "[":
            square_brackets.append(len(res))
        elif weight is not None and len(round_brackets) > 0:
            multiply_range(round_brackets.pop(), float(weight))
        elif text == ")" and len(round_brackets) > 0:
            multiply_range(round_brackets.pop(), round_bracket_multiplier)
        elif text == "]" and len(square_brackets) > 0:
            multiply_range(square_brackets.pop(), square_bracket_multiplier)
        else:
            res.append([text, 1.0])

    for pos in round_brackets:
        multiply_range(pos, round_bracket_multiplier)

    for pos in square_brackets:
        multiply_range(pos, square_bracket_multiplier)

    if len(res) == 0:
        res = [["", 1.0]]

    # merge runs of identical weights
    i = 0
    while i + 1 < len(res):
        if res[i][1] == res[i + 1][1]:
            res[i][0] += res[i + 1][0]
            res.pop(i + 1)
        else:
            i += 1

    return res
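
# Quick illustration (comment-only sketch, not an executed doctest): nested
# round brackets compound multiplicatively, so '((x))' weights x by
# 1.1 * 1.1 (~1.21), while an explicit '(y:0.5)' overrides the default
# multiplier with 0.5:
#   parse_prompt_attention('((x)) and (y:0.5)')
#   -> [['x', 1.21...], [' and ', 1.0], ['y', 0.5]]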


def get_prompts_with_weights(pipe, prompt: List[str], max_length: int):
    r"""
    Tokenize a list of prompts and return the tokens along with each token's weight.

    No padding, starting or ending token is included.
    """
    tokens = []
    weights = []
    truncated = False
    for text in prompt:
        texts_and_weights = parse_prompt_attention(text)
        text_token = []
        text_weight = []
        for word, weight in texts_and_weights:
            # tokenize and discard the starting and the ending token
            token = pipe.tokenizer(word, return_tensors="np").input_ids[0, 1:-1]
            text_token += list(token)
            # copy the weight by length of token
            text_weight += [weight] * len(token)
            # stop if the text is too long (longer than truncation limit)
            if len(text_token) > max_length:
                truncated = True
                break
        # truncate
        if len(text_token) > max_length:
            truncated = True
            text_token = text_token[:max_length]
            text_weight = text_weight[:max_length]
        tokens.append(text_token)
        weights.append(text_weight)
    if truncated:
        logger.warning("Prompt was truncated. Try to shorten the prompt or increase max_embeddings_multiples")
    return tokens, weights


def pad_tokens_and_weights(tokens, weights, max_length, bos, eos, no_boseos_middle=True, chunk_length=77):
    r"""
    Pad the tokens (with starting and ending tokens) and weights (with 1.0) to max_length.
    """
    max_embeddings_multiples = (max_length - 2) // (chunk_length - 2)
    weights_length = max_length if no_boseos_middle else max_embeddings_multiples * chunk_length
    for i in range(len(tokens)):
        tokens[i] = [bos] + tokens[i] + [eos] * (max_length - 1 - len(tokens[i]))
        if no_boseos_middle:
            weights[i] = [1.0] + weights[i] + [1.0] * (max_length - 1 - len(weights[i]))
        else:
            w = []
            if len(weights[i]) == 0:
                w = [1.0] * weights_length
            else:
                for j in range(max_embeddings_multiples):
                    w.append(1.0)  # weight for starting token in this chunk
                    w += weights[i][j * (chunk_length - 2) : min(len(weights[i]), (j + 1) * (chunk_length - 2))]
                    w.append(1.0)  # weight for ending token in this chunk
                w += [1.0] * (weights_length - len(w))
            weights[i] = w[:]

    return tokens, weights
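
# The chunk arithmetic above assumes a CLIP-style encoder: each chunk_length
# window spends 2 slots on BOS/EOS, so a prompt padded to N windows holds
# N * (chunk_length - 2) content tokens plus one shared BOS/EOS pair, i.e.
# max_length = N * (chunk_length - 2) + 2. For example, N = 3 and
# chunk_length = 77 give max_length = 227.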


def get_unweighted_text_embeddings(
    pipe,
    text_input: np.ndarray,
    chunk_length: int,
    no_boseos_middle: Optional[bool] = True,
):
    """
    When the token sequence is longer than the capacity of the text encoder,
    it is split into chunks and each chunk is sent to the text encoder individually.
    """
    max_embeddings_multiples = (text_input.shape[1] - 2) // (chunk_length - 2)
    if max_embeddings_multiples > 1:
        text_embeddings = []
        for i in range(max_embeddings_multiples):
            # extract the i-th chunk
            text_input_chunk = text_input[:, i * (chunk_length - 2) : (i + 1) * (chunk_length - 2) + 2].copy()

            # cover the head and the tail by the starting and the ending tokens
            text_input_chunk[:, 0] = text_input[0, 0]
            text_input_chunk[:, -1] = text_input[0, -1]

            text_embedding = pipe.text_encoder(input_ids=text_input_chunk)[0]

            if no_boseos_middle:
                if i == 0:
                    # discard the ending token
                    text_embedding = text_embedding[:, :-1]
                elif i == max_embeddings_multiples - 1:
                    # discard the starting token
                    text_embedding = text_embedding[:, 1:]
                else:
                    # discard both starting and ending tokens
                    text_embedding = text_embedding[:, 1:-1]

            text_embeddings.append(text_embedding)
        text_embeddings = np.concatenate(text_embeddings, axis=1)
    else:
        text_embeddings = pipe.text_encoder(input_ids=text_input)[0]
    return text_embeddings
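
# Shape sketch (assuming CLIP's 77-token window and a 768-dim hidden size):
# a (1, 227) token array is encoded as three (1, 77, 768) chunks; with
# no_boseos_middle=True the interior BOS/EOS positions are dropped (76 + 75 +
# 76 tokens) before the chunks are concatenated back into a single
# (1, 227, 768) embedding.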


def get_weighted_text_embeddings(
    pipe,
    prompt: Union[str, List[str]],
    uncond_prompt: Optional[Union[str, List[str]]] = None,
    max_embeddings_multiples: Optional[int] = 4,
    no_boseos_middle: Optional[bool] = False,
    skip_parsing: Optional[bool] = False,
    skip_weighting: Optional[bool] = False,
    **kwargs,
):
    r"""
    Prompts can be assigned local weights using brackets. For example, the
    prompt 'A (very beautiful) masterpiece' highlights the words 'very beautiful',
    and the embedding tokens corresponding to those words are multiplied by a constant, 1.1.

    Also, to regularize the embedding, the weighted embedding is scaled back to preserve the original mean.

    Args:
        pipe (`OnnxStableDiffusionPipeline`):
            Pipe to provide access to the tokenizer and the text encoder.
        prompt (`str` or `List[str]`):
            The prompt or prompts to guide the image generation.
        uncond_prompt (`str` or `List[str]`):
            The unconditional prompt or prompts to guide the image generation. If an unconditional prompt
            is provided, the embeddings of prompt and uncond_prompt are returned together.
        max_embeddings_multiples (`int`, *optional*, defaults to `4`):
            The max multiple length of prompt embeddings compared to the max output length of text encoder.
        no_boseos_middle (`bool`, *optional*, defaults to `False`):
            If the text token length is a multiple of the text encoder capacity, whether to keep the starting
            and ending tokens of each chunk in the middle.
        skip_parsing (`bool`, *optional*, defaults to `False`):
            Skip the parsing of brackets.
        skip_weighting (`bool`, *optional*, defaults to `False`):
            Skip the weighting. When the parsing is skipped, it is forced to `True`.
    """
    max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2
    if isinstance(prompt, str):
        prompt = [prompt]

    if not skip_parsing:
        prompt_tokens, prompt_weights = get_prompts_with_weights(pipe, prompt, max_length - 2)
        if uncond_prompt is not None:
            if isinstance(uncond_prompt, str):
                uncond_prompt = [uncond_prompt]
            uncond_tokens, uncond_weights = get_prompts_with_weights(pipe, uncond_prompt, max_length - 2)
    else:
        prompt_tokens = [
            token[1:-1]
            for token in pipe.tokenizer(prompt, max_length=max_length, truncation=True, return_tensors="np").input_ids
        ]
        prompt_weights = [[1.0] * len(token) for token in prompt_tokens]
        if uncond_prompt is not None:
            if isinstance(uncond_prompt, str):
                uncond_prompt = [uncond_prompt]
            uncond_tokens = [
                token[1:-1]
                for token in pipe.tokenizer(
                    uncond_prompt,
                    max_length=max_length,
                    truncation=True,
                    return_tensors="np",
                ).input_ids
            ]
            uncond_weights = [[1.0] * len(token) for token in uncond_tokens]

    # round up the longest length of tokens to a multiple of (model_max_length - 2)
    max_length = max([len(token) for token in prompt_tokens])
    if uncond_prompt is not None:
        max_length = max(max_length, max([len(token) for token in uncond_tokens]))

    max_embeddings_multiples = min(
        max_embeddings_multiples,
        (max_length - 1) // (pipe.tokenizer.model_max_length - 2) + 1,
    )
    max_embeddings_multiples = max(1, max_embeddings_multiples)
    max_length = (pipe.tokenizer.model_max_length - 2) * max_embeddings_multiples + 2

    # pad the length of tokens and weights
    bos = pipe.tokenizer.bos_token_id
    eos = pipe.tokenizer.eos_token_id
    prompt_tokens, prompt_weights = pad_tokens_and_weights(
        prompt_tokens,
        prompt_weights,
        max_length,
        bos,
        eos,
        no_boseos_middle=no_boseos_middle,
        chunk_length=pipe.tokenizer.model_max_length,
    )
    prompt_tokens = np.array(prompt_tokens, dtype=np.int32)
    if uncond_prompt is not None:
        uncond_tokens, uncond_weights = pad_tokens_and_weights(
            uncond_tokens,
            uncond_weights,
            max_length,
            bos,
            eos,
            no_boseos_middle=no_boseos_middle,
            chunk_length=pipe.tokenizer.model_max_length,
        )
        uncond_tokens = np.array(uncond_tokens, dtype=np.int32)

    # get the embeddings
    text_embeddings = get_unweighted_text_embeddings(
        pipe,
        prompt_tokens,
        pipe.tokenizer.model_max_length,
        no_boseos_middle=no_boseos_middle,
    )
    prompt_weights = np.array(prompt_weights, dtype=text_embeddings.dtype)
    if uncond_prompt is not None:
        uncond_embeddings = get_unweighted_text_embeddings(
            pipe,
            uncond_tokens,
            pipe.tokenizer.model_max_length,
            no_boseos_middle=no_boseos_middle,
        )
        uncond_weights = np.array(uncond_weights, dtype=uncond_embeddings.dtype)

    # assign weights to the prompts and normalize in the sense of mean
    # TODO: should we normalize by chunk or in a whole (current implementation)?
    if (not skip_parsing) and (not skip_weighting):
        previous_mean = text_embeddings.mean(axis=(-2, -1))
        text_embeddings *= prompt_weights[:, :, None]
        text_embeddings *= (previous_mean / text_embeddings.mean(axis=(-2, -1)))[:, None, None]
        if uncond_prompt is not None:
            previous_mean = uncond_embeddings.mean(axis=(-2, -1))
            uncond_embeddings *= uncond_weights[:, :, None]
            uncond_embeddings *= (previous_mean / uncond_embeddings.mean(axis=(-2, -1)))[:, None, None]

    # For classifier-free guidance the caller needs both the conditional and the
    # unconditional embeddings; they are returned separately here and concatenated
    # into a single batch by the caller. Always return a 2-tuple so that callers
    # can unpack it unconditionally.
    if uncond_prompt is not None:
        return text_embeddings, uncond_embeddings

    return text_embeddings, None
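
# Usage sketch (illustrative; `pipe` is assumed to be an already-loaded
# OnnxStableDiffusionPipeline):
#   cond, uncond = get_weighted_text_embeddings(
#       pipe, "a (very beautiful:1.2) landscape", uncond_prompt=""
#   )
#   # both arrays have identical shape, e.g. (1, 77, 768) for a short prompt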


def preprocess_image(image):
    w, h = image.size
    w, h = map(lambda x: x - x % 32, (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    return 2.0 * image - 1.0
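
# preprocess_image maps an RGB PIL image to a float32 NCHW array in [-1, 1],
# the input range the VAE encoder expects; e.g. a 513x768 image is first
# resized down to 512x768 so both sides are multiples of 32.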


def preprocess_mask(mask, scale_factor=8):
    mask = mask.convert("L")
    w, h = mask.size
    w, h = map(lambda x: x - x % 32, (w, h))  # resize to integer multiple of 32
    mask = mask.resize((w // scale_factor, h // scale_factor), resample=PIL_INTERPOLATION["nearest"])
    mask = np.array(mask).astype(np.float32) / 255.0
    mask = np.tile(mask, (4, 1, 1))  # broadcast the mask across the 4 latent channels
    mask = mask[None]  # add a batch dimension (the original identity transpose here was a no-op)
    mask = 1 - mask  # repaint white, keep black
    return mask


class OnnxStableDiffusionLongPromptWeightingPipeline(OnnxStableDiffusionPipeline):
    r"""
    Pipeline for text-to-image generation using Stable Diffusion without a token length limit, with support for
    parsing weights in the prompt.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
    """
    if version.parse(version.parse(diffusers.__version__).base_version) >= version.parse("0.9.0"):

        def __init__(
            self,
            vae_encoder: OnnxRuntimeModel,
            vae_decoder: OnnxRuntimeModel,
            text_encoder: OnnxRuntimeModel,
            tokenizer: CLIPTokenizer,
            unet: OnnxRuntimeModel,
            scheduler: SchedulerMixin,
            safety_checker: OnnxRuntimeModel,
            feature_extractor: CLIPFeatureExtractor,
            requires_safety_checker: bool = True,
        ):
            super().__init__(
                vae_encoder=vae_encoder,
                vae_decoder=vae_decoder,
                text_encoder=text_encoder,
                tokenizer=tokenizer,
                unet=unet,
                scheduler=scheduler,
                safety_checker=safety_checker,
                feature_extractor=feature_extractor,
                requires_safety_checker=requires_safety_checker,
            )
            self.__init__additional__()

    else:

        def __init__(
            self,
            vae_encoder: OnnxRuntimeModel,
            vae_decoder: OnnxRuntimeModel,
            text_encoder: OnnxRuntimeModel,
            tokenizer: CLIPTokenizer,
            unet: OnnxRuntimeModel,
            scheduler: SchedulerMixin,
            safety_checker: OnnxRuntimeModel,
            feature_extractor: CLIPFeatureExtractor,
        ):
            super().__init__(
                vae_encoder=vae_encoder,
                vae_decoder=vae_decoder,
                text_encoder=text_encoder,
                tokenizer=tokenizer,
                unet=unet,
                scheduler=scheduler,
                safety_checker=safety_checker,
                feature_extractor=feature_extractor,
            )
            self.__init__additional__()

    def __init__additional__(self):
        self.unet_in_channels = 4
        self.vae_scale_factor = 8
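        # These defaults match Stable Diffusion v1 (4 latent channels, 8x VAE
        # downsampling); adjust them if your exported ONNX models differ.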

    def _encode_prompt(
        self,
        prompt,
        num_images_per_prompt,
        do_classifier_free_guidance,
        negative_prompt,
        max_embeddings_multiples,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `list(int)`):
                prompt to be encoded
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            do_classifier_free_guidance (`bool`):
                whether to use classifier free guidance or not
            negative_prompt (`str` or `List[str]`):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
                if `guidance_scale` is less than `1`).
            max_embeddings_multiples (`int`, *optional*, defaults to `3`):
                The max multiple length of prompt embeddings compared to the max output length of text encoder.
        """
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        if negative_prompt is None:
            negative_prompt = [""] * batch_size
        elif isinstance(negative_prompt, str):
            negative_prompt = [negative_prompt] * batch_size
        if batch_size != len(negative_prompt):
            raise ValueError(
                f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                " the batch size of `prompt`."
            )

        text_embeddings, uncond_embeddings = get_weighted_text_embeddings(
            pipe=self,
            prompt=prompt,
            uncond_prompt=negative_prompt if do_classifier_free_guidance else None,
            max_embeddings_multiples=max_embeddings_multiples,
        )

        text_embeddings = text_embeddings.repeat(num_images_per_prompt, 0)
        if do_classifier_free_guidance:
            uncond_embeddings = uncond_embeddings.repeat(num_images_per_prompt, 0)
            text_embeddings = np.concatenate([uncond_embeddings, text_embeddings])

        return text_embeddings

    def check_inputs(self, prompt, height, width, strength, callback_steps):
        if not isinstance(prompt, str) and not isinstance(prompt, list):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if strength < 0 or strength > 1:
            raise ValueError(f"The value of `strength` should be in [0.0, 1.0] but is {strength}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

    def get_timesteps(self, num_inference_steps, strength, is_text2img):
        if is_text2img:
            return self.scheduler.timesteps, num_inference_steps
        else:
            # get the original timestep using init_timestep
            offset = self.scheduler.config.get("steps_offset", 0)
            init_timestep = int(num_inference_steps * strength) + offset
            init_timestep = min(init_timestep, num_inference_steps)

            t_start = max(num_inference_steps - init_timestep + offset, 0)
            timesteps = self.scheduler.timesteps[t_start:]
            return timesteps, num_inference_steps - t_start
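
        # Worked example (illustrative): with num_inference_steps=50,
        # strength=0.8 and steps_offset=1, init_timestep = int(50 * 0.8) + 1 = 41
        # and t_start = 50 - 41 + 1 = 10, so img2img denoises over the last 40
        # scheduler timesteps.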

    def run_safety_checker(self, image):
        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(
                self.numpy_to_pil(image), return_tensors="np"
            ).pixel_values.astype(image.dtype)
            # Calling the safety_checker on a batch larger than 1 raises an error,
            # so run it one image at a time.
            images, has_nsfw_concept = [], []
            for i in range(image.shape[0]):
                image_i, has_nsfw_concept_i = self.safety_checker(
                    clip_input=safety_checker_input[i : i + 1], images=image[i : i + 1]
                )
                images.append(image_i)
                has_nsfw_concept.append(has_nsfw_concept_i[0])
            image = np.concatenate(images)
        else:
            has_nsfw_concept = None
        return image, has_nsfw_concept

    def decode_latents(self, latents):
        latents = 1 / 0.18215 * latents
        # Decode one latent at a time: a half-precision VAE decoder can produce
        # incorrect results when the batch size is larger than 1.
        image = np.concatenate(
            [self.vae_decoder(latent_sample=latents[i : i + 1])[0] for i in range(latents.shape[0])]
        )
        image = np.clip(image / 2 + 0.5, 0, 1)
        image = image.transpose((0, 2, 3, 1))
        return image
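
    # 0.18215 is the latent scaling factor of the Stable Diffusion v1 VAE:
    # latents are divided by it before decoding here and multiplied by it
    # after encoding in prepare_latents below.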

    def prepare_extra_step_kwargs(self, generator, eta):
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in the DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]

        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        return extra_step_kwargs

    def prepare_latents(self, image, timestep, batch_size, height, width, dtype, generator, latents=None):
        if image is None:
            shape = (
                batch_size,
                self.unet_in_channels,
                height // self.vae_scale_factor,
                width // self.vae_scale_factor,
            )

            if latents is None:
                latents = torch.randn(shape, generator=generator, device="cpu").numpy().astype(dtype)
            else:
                if latents.shape != shape:
                    raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")

            # scale the initial noise by the standard deviation required by the scheduler
            latents = (torch.from_numpy(latents) * self.scheduler.init_noise_sigma).numpy()
            return latents, None, None
        else:
            init_latents = self.vae_encoder(sample=image)[0]
            init_latents = 0.18215 * init_latents
            init_latents = np.concatenate([init_latents] * batch_size, axis=0)
            init_latents_orig = init_latents
            shape = init_latents.shape

            # add noise to latents using the timesteps
            noise = torch.randn(shape, generator=generator, device="cpu").numpy().astype(dtype)
            latents = self.scheduler.add_noise(
                torch.from_numpy(init_latents), torch.from_numpy(noise), timestep
            ).numpy()
            return latents, init_latents_orig, noise
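
    # prepare_latents returns (latents, init_latents_orig, noise); the last two
    # are None for text2img and are reused during inpainting to re-noise the
    # unmasked region at every denoising step.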

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        negative_prompt: Optional[Union[str, List[str]]] = None,
        image: Optional[Union[np.ndarray, PIL.Image.Image]] = None,
        mask_image: Optional[Union[np.ndarray, PIL.Image.Image]] = None,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        strength: float = 0.8,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[np.ndarray] = None,
        max_embeddings_multiples: Optional[int] = 3,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
        is_cancelled_callback: Optional[Callable[[], bool]] = None,
        callback_steps: Optional[int] = 1,
        **kwargs,
    ):
        r"""
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`):
                The prompt or prompts to guide the image generation.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
                if `guidance_scale` is less than `1`).
            image (`np.ndarray` or `PIL.Image.Image`):
                `Image`, or tensor representing an image batch, that will be used as the starting point for the
                process.
            mask_image (`np.ndarray` or `PIL.Image.Image`):
                `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
                replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a
                PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should
                contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`.
            height (`int`, *optional*, defaults to 512):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to 512):
                The width in pixels of the generated image.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of the [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. A higher guidance scale encourages images that are closely linked to the text `prompt`, usually
                at the expense of lower image quality.
            strength (`float`, *optional*, defaults to 0.8):
                Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1.
                `image` will be used as a starting point, adding more noise to it the larger the `strength`. The
                number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added
                noise will be maximum and the denoising process will run for the full number of iterations specified in
                `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator`, *optional*):
                A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
                deterministic.
            latents (`np.ndarray`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `generator`.
            max_embeddings_multiples (`int`, *optional*, defaults to `3`):
                The max multiple length of prompt embeddings compared to the max output length of text encoder.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`.
            is_cancelled_callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. If the function returns
                `True`, the inference will be cancelled.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will be
                called at every step.

        Returns:
            `None` if cancelled by `is_cancelled_callback`, otherwise
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a
            `tuple`. When returning a tuple, the first element is a list with the generated images, and the second
            element is a list of `bool`s denoting whether the corresponding generated image likely represents
            "not-safe-for-work" (nsfw) content, according to the `safety_checker`.
        """
        message = "Please use `image` instead of `init_image`."
        init_image = deprecate("init_image", "0.13.0", message, take_from=kwargs)
        image = init_image or image

        # 0. Default height and width to unet
        height = height or self.unet.config.sample_size * self.vae_scale_factor
        width = width or self.unet.config.sample_size * self.vae_scale_factor

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(prompt, height, width, strength, callback_steps)

        # 2. Define call parameters
        batch_size = 1 if isinstance(prompt, str) else len(prompt)
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        # 3. Encode input prompt
        text_embeddings = self._encode_prompt(
            prompt,
            num_images_per_prompt,
            do_classifier_free_guidance,
            negative_prompt,
            max_embeddings_multiples,
        )
        dtype = text_embeddings.dtype

        # 4. Preprocess image and mask
        if isinstance(image, PIL.Image.Image):
            image = preprocess_image(image)
        if image is not None:
            image = image.astype(dtype)
        if isinstance(mask_image, PIL.Image.Image):
            mask_image = preprocess_mask(mask_image, self.vae_scale_factor)
        if mask_image is not None:
            mask = mask_image.astype(dtype)
            mask = np.concatenate([mask] * batch_size * num_images_per_prompt)
        else:
            mask = None

        # 5. set timesteps
        self.scheduler.set_timesteps(num_inference_steps)
        timestep_dtype = next(
            (input.type for input in self.unet.model.get_inputs() if input.name == "timestep"), "tensor(float)"
        )
        timestep_dtype = ORT_TO_NP_TYPE[timestep_dtype]
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, image is None)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)

        # 6. Prepare latent variables
        latents, init_latents_orig, noise = self.prepare_latents(
            image,
            latent_timestep,
            batch_size * num_images_per_prompt,
            height,
            width,
            dtype,
            generator,
            latents,
        )

        # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # 8. Denoising loop
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = np.concatenate([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(torch.from_numpy(latent_model_input), t)
            latent_model_input = latent_model_input.numpy()

            # predict the noise residual
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=np.array([t], dtype=timestep_dtype),
                encoder_hidden_states=text_embeddings,
            )
            noise_pred = noise_pred[0]

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = np.split(noise_pred, 2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            scheduler_output = self.scheduler.step(
                torch.from_numpy(noise_pred), t, torch.from_numpy(latents), **extra_step_kwargs
            )
            latents = scheduler_output.prev_sample.numpy()

            if mask is not None:
                # masking
                init_latents_proper = self.scheduler.add_noise(
                    torch.from_numpy(init_latents_orig),
                    torch.from_numpy(noise),
                    t,
                ).numpy()
                latents = (init_latents_proper * mask) + (latents * (1 - mask))

            # call the callback, if provided
            if i % callback_steps == 0:
                if callback is not None:
                    callback(i, t, latents)
                if is_cancelled_callback is not None and is_cancelled_callback():
                    return None

        # 9. Post-processing
        image = self.decode_latents(latents)

        # 10. Run safety checker
        image, has_nsfw_concept = self.run_safety_checker(image)

        # 11. Convert to PIL
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image, has_nsfw_concept

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
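
    # Usage sketch (hedged: the model id is illustrative, and the custom
    # pipeline name assumes this file is registered as the community
    # `lpw_stable_diffusion_onnx` pipeline):
    #
    #   from diffusers import DiffusionPipeline
    #   pipe = DiffusionPipeline.from_pretrained(
    #       "runwayml/stable-diffusion-v1-5",
    #       revision="onnx",
    #       provider="CPUExecutionProvider",
    #       custom_pipeline="lpw_stable_diffusion_onnx",
    #   )
    #   image = pipe(prompt="a (masterpiece:1.2) mountain landscape").images[0]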

    def text2img(
        self,
        prompt: Union[str, List[str]],
        negative_prompt: Optional[Union[str, List[str]]] = None,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[np.ndarray] = None,
        max_embeddings_multiples: Optional[int] = 3,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
        callback_steps: Optional[int] = 1,
        **kwargs,
    ):
        r"""
        Function for text-to-image generation.
        Args:
            prompt (`str` or `List[str]`):
                The prompt or prompts to guide the image generation.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
                if `guidance_scale` is less than `1`).
            height (`int`, *optional*, defaults to 512):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to 512):
                The width in pixels of the generated image.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of the [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. A higher guidance scale encourages images that are closely linked to the text `prompt`, usually
                at the expense of lower image quality.
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator`, *optional*):
                A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
                deterministic.
            latents (`np.ndarray`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `generator`.
            max_embeddings_multiples (`int`, *optional*, defaults to `3`):
                The max multiple length of prompt embeddings compared to the max output length of text encoder.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will be
                called at every step.
        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a
            `tuple`. When returning a tuple, the first element is a list with the generated images, and the second
            element is a list of `bool`s denoting whether the corresponding generated image likely represents
            "not-safe-for-work" (nsfw) content, according to the `safety_checker`.
        """
        return self.__call__(
            prompt=prompt,
            negative_prompt=negative_prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            max_embeddings_multiples=max_embeddings_multiples,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
            **kwargs,
        )

    def img2img(
        self,
        image: Union[np.ndarray, PIL.Image.Image],
        prompt: Union[str, List[str]],
        negative_prompt: Optional[Union[str, List[str]]] = None,
        strength: float = 0.8,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        num_images_per_prompt: Optional[int] = 1,
        eta: Optional[float] = 0.0,
        generator: Optional[torch.Generator] = None,
        max_embeddings_multiples: Optional[int] = 3,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
        callback_steps: Optional[int] = 1,
        **kwargs,
    ):
        r"""
        Function for image-to-image generation.
        Args:
            image (`np.ndarray` or `PIL.Image.Image`):
                `Image`, or ndarray representing an image batch, that will be used as the starting point for the
                process.
            prompt (`str` or `List[str]`):
                The prompt or prompts to guide the image generation.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
                if `guidance_scale` is less than `1`).
            strength (`float`, *optional*, defaults to 0.8):
                Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1.
                `image` will be used as a starting point, adding more noise to it the larger the `strength`. The
                number of denoising steps depends on the amount of noise initially added. When `strength` is 1, added
                noise will be maximum and the denoising process will run for the full number of iterations specified in
                `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference. This parameter will be modulated by `strength`.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of the [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. A higher guidance scale encourages images that are closely linked to the text `prompt`, usually
                at the expense of lower image quality.
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator`, *optional*):
                A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
                deterministic.
            max_embeddings_multiples (`int`, *optional*, defaults to `3`):
                The max multiple length of prompt embeddings compared to the max output length of text encoder.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will be
                called at every step.
        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a
            `tuple`. When returning a tuple, the first element is a list with the generated images, and the second
            element is a list of `bool`s denoting whether the corresponding generated image likely represents
            "not-safe-for-work" (nsfw) content, according to the `safety_checker`.
        """
        return self.__call__(
            prompt=prompt,
            negative_prompt=negative_prompt,
            image=image,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            strength=strength,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            max_embeddings_multiples=max_embeddings_multiples,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
            **kwargs,
        )
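
    # img2img sketch (hedged: assumes `pipe` is loaded as in the __call__
    # example above and `init_img` is a PIL.Image):
    #   out = pipe.img2img(image=init_img, prompt="a (watercolor:1.3) portrait",
    #                      strength=0.6)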

    def inpaint(
        self,
        image: Union[np.ndarray, PIL.Image.Image],
        mask_image: Union[np.ndarray, PIL.Image.Image],
        prompt: Union[str, List[str]],
        negative_prompt: Optional[Union[str, List[str]]] = None,
        strength: float = 0.8,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        num_images_per_prompt: Optional[int] = 1,
        eta: Optional[float] = 0.0,
        generator: Optional[torch.Generator] = None,
        max_embeddings_multiples: Optional[int] = 3,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
        callback_steps: Optional[int] = 1,
        **kwargs,
    ):
        r"""
        Function for inpainting.
        Args:
            image (`np.ndarray` or `PIL.Image.Image`):
                `Image`, or tensor representing an image batch, that will be used as the starting point for the
                process. This is the image whose masked region will be inpainted.
            mask_image (`np.ndarray` or `PIL.Image.Image`):
                `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
                replaced by noise and therefore repainted, while black pixels will be preserved. If `mask_image` is a
                PIL image, it will be converted to a single channel (luminance) before use. If it's a tensor, it should
                contain one color channel (L) instead of 3, so the expected shape would be `(B, H, W, 1)`.
            prompt (`str` or `List[str]`):
                The prompt or prompts to guide the image generation.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
                if `guidance_scale` is less than `1`).
            strength (`float`, *optional*, defaults to 0.8):
                Conceptually, indicates how much to inpaint the masked area. Must be between 0 and 1. When `strength`
                is 1, the denoising process will be run on the masked area for the full number of iterations specified
                in `num_inference_steps`. `image` will be used as a reference for the masked area, adding more
                noise to that region the larger the `strength`. If `strength` is 0, no inpainting will occur.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The reference number of denoising steps. More denoising steps usually lead to a higher quality image at
                the expense of slower inference. This parameter will be modulated by `strength`, as explained above.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of the [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. A higher guidance scale encourages images that are closely linked to the text `prompt`, usually
                at the expense of lower image quality.
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`], will be ignored for others.
            generator (`torch.Generator`, *optional*):
                A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
                deterministic.
            max_embeddings_multiples (`int`, *optional*, defaults to `3`):
                The max multiple length of prompt embeddings compared to the max output length of text encoder.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function will be
                called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function will be called. If not specified, the callback will be
                called at every step.
        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a
            `tuple`. When returning a tuple, the first element is a list with the generated images, and the second
            element is a list of `bool`s denoting whether the corresponding generated image likely represents
            "not-safe-for-work" (nsfw) content, according to the `safety_checker`.
        """
        return self.__call__(
            prompt=prompt,
            negative_prompt=negative_prompt,
            image=image,
            mask_image=mask_image,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            strength=strength,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            max_embeddings_multiples=max_embeddings_multiples,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
            **kwargs,
        )
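
    # inpaint sketch (hedged: `init_img` and `mask` are PIL images of the same
    # size; white mask pixels are repainted, black pixels are preserved):
    #   out = pipe.inpaint(image=init_img, mask_image=mask,
    #                      prompt="a (red:1.2) sports car", strength=0.8)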