Post release 0.14
parent b2c1e0d6d4
commit 3231712b7d
@@ -22,7 +22,7 @@ from diffusers.models import AutoencoderKL, UNet2DConditionModel
 from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
 from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
 from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
-from diffusers.utils import deprecate, logging
+from diffusers.utils import logging


 if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
@@ -184,10 +184,6 @@ class ImagicStableDiffusionPipeline(DiffusionPipeline):
             list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
             (nsfw) content, according to the `safety_checker`.
         """
-        message = "Please use `image` instead of `init_image`."
-        init_image = deprecate("init_image", "0.14.0", message, take_from=kwargs)
-        image = init_image or image
-
         accelerator = Accelerator(
             gradient_accumulation_steps=1,
             mixed_precision="fp16",
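The block removed above (and repeated in several pipelines below) leaned on the `diffusers.utils.deprecate` helper to keep the old `init_image` keyword alive until version 0.14.0. A simplified stand-in, not the library's exact implementation, of what that shim did:

import warnings

def pop_deprecated_kwarg(kwargs: dict, old_name: str, removed_in: str, message: str):
    # Pop the legacy keyword out of **kwargs, warn if the caller still uses it,
    # and hand the value back so it can be routed into the new argument.
    value = kwargs.pop(old_name, None)
    if value is not None:
        warnings.warn(
            f"`{old_name}` is deprecated and will be removed in version {removed_in}. {message}",
            FutureWarning,
        )
    return value

# Sketch of the old shim inside __call__:
# init_image = pop_deprecated_kwarg(kwargs, "init_image", "0.14.0", "Please use `image` instead of `init_image`.")
# image = init_image or image

Once the tree is on 0.14.0.dev0 the shim has reached its removal version, so this commit deletes it together with the `**kwargs,` catch-alls that only existed to feed it.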
@@ -346,7 +342,6 @@ class ImagicStableDiffusionPipeline(DiffusionPipeline):
         return_dict: bool = True,
         guidance_scale: float = 7.5,
         eta: float = 0.0,
-        **kwargs,
     ):
         r"""
         Function invoked when calling the pipeline for generation.
@@ -12,7 +12,7 @@ import diffusers
 from diffusers import SchedulerMixin, StableDiffusionPipeline
 from diffusers.models import AutoencoderKL, UNet2DConditionModel
 from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput, StableDiffusionSafetyChecker
-from diffusers.utils import deprecate, logging
+from diffusers.utils import logging


 try:
@@ -252,7 +252,6 @@ def get_weighted_text_embeddings(
     no_boseos_middle: Optional[bool] = False,
     skip_parsing: Optional[bool] = False,
     skip_weighting: Optional[bool] = False,
-    **kwargs,
 ):
     r"""
     Prompts can be assigned with local weights using brackets. For example,
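The docstring above describes bracket weighting in prompts. A hedged usage sketch of that syntax through the community long-prompt-weighting pipeline (the checkpoint id is illustrative; `(word:1.3)` raises a token's weight, `[word]` lowers it):

import torch

from diffusers import DiffusionPipeline

# Load the community "lpw_stable_diffusion" pipeline on top of a Stable Diffusion checkpoint.
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    custom_pipeline="lpw_stable_diffusion",
    torch_dtype=torch.float16,
).to("cuda")

# Parentheses up-weight, square brackets down-weight the enclosed tokens.
image = pipe(
    prompt="a portrait of a knight, (ornate golden armor:1.3), [blurry]",
    num_inference_steps=30,
).images[0]
image.save("knight.png")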
@@ -682,7 +681,6 @@ class StableDiffusionLongPromptWeightingPipeline(StableDiffusionPipeline):
         callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
         is_cancelled_callback: Optional[Callable[[], bool]] = None,
         callback_steps: int = 1,
-        **kwargs,
     ):
         r"""
         Function invoked when calling the pipeline for generation.
@@ -758,10 +756,6 @@ class StableDiffusionLongPromptWeightingPipeline(StableDiffusionPipeline):
             list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
             (nsfw) content, according to the `safety_checker`.
         """
-        message = "Please use `image` instead of `init_image`."
-        init_image = deprecate("init_image", "0.14.0", message, take_from=kwargs)
-        image = init_image or image
-
         # 0. Default height and width to unet
         height = height or self.unet.config.sample_size * self.vae_scale_factor
         width = width or self.unet.config.sample_size * self.vae_scale_factor
@@ -884,7 +878,6 @@ class StableDiffusionLongPromptWeightingPipeline(StableDiffusionPipeline):
         callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
         is_cancelled_callback: Optional[Callable[[], bool]] = None,
         callback_steps: int = 1,
-        **kwargs,
     ):
         r"""
         Function for text-to-image generation.
@@ -960,7 +953,6 @@ class StableDiffusionLongPromptWeightingPipeline(StableDiffusionPipeline):
             callback=callback,
             is_cancelled_callback=is_cancelled_callback,
             callback_steps=callback_steps,
-            **kwargs,
         )

     def img2img(
@@ -980,7 +972,6 @@ class StableDiffusionLongPromptWeightingPipeline(StableDiffusionPipeline):
         callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
         is_cancelled_callback: Optional[Callable[[], bool]] = None,
         callback_steps: int = 1,
-        **kwargs,
     ):
         r"""
         Function for image-to-image generation.
@@ -1056,7 +1047,6 @@ class StableDiffusionLongPromptWeightingPipeline(StableDiffusionPipeline):
             callback=callback,
             is_cancelled_callback=is_cancelled_callback,
             callback_steps=callback_steps,
-            **kwargs,
         )

     def inpaint(
@@ -1077,7 +1067,6 @@ class StableDiffusionLongPromptWeightingPipeline(StableDiffusionPipeline):
         callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
         is_cancelled_callback: Optional[Callable[[], bool]] = None,
         callback_steps: int = 1,
-        **kwargs,
     ):
         r"""
         Function for inpaint.
@@ -1158,5 +1147,4 @@ class StableDiffusionLongPromptWeightingPipeline(StableDiffusionPipeline):
             callback=callback,
             is_cancelled_callback=is_cancelled_callback,
             callback_steps=callback_steps,
-            **kwargs,
         )
@@ -11,7 +11,7 @@ from transformers import CLIPFeatureExtractor, CLIPTokenizer
 import diffusers
 from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, SchedulerMixin
 from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
-from diffusers.utils import deprecate, logging
+from diffusers.utils import logging


 try:
@@ -744,10 +744,6 @@ class OnnxStableDiffusionLongPromptWeightingPipeline(OnnxStableDiffusionPipeline
             list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
             (nsfw) content, according to the `safety_checker`.
         """
-        message = "Please use `image` instead of `init_image`."
-        init_image = deprecate("init_image", "0.14.0", message, take_from=kwargs)
-        image = init_image or image
-
         # 0. Default height and width to unet
         height = height or self.unet.config.sample_size * self.vae_scale_factor
         width = width or self.unet.config.sample_size * self.vae_scale_factor
@@ -47,7 +47,7 @@ from diffusers.utils.import_utils import is_xformers_available


 # Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.13.0")
+check_min_version("0.14.0.dev0")

 logger = get_logger(__name__)
@@ -36,7 +36,7 @@ from diffusers.utils import check_min_version


 # Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.13.0")
+check_min_version("0.14.0.dev0")

 # Cache compiled models across invocations of this script.
 cc.initialize_cache(os.path.expanduser("~/.cache/jax/compilation_cache"))
@@ -54,7 +54,7 @@ from diffusers.utils.import_utils import is_xformers_available


 # Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.13.0")
+check_min_version("0.14.0.dev0")

 logger = get_logger(__name__)
@@ -47,7 +47,7 @@ from diffusers.utils.import_utils import is_xformers_available


 # Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.13.0")
+check_min_version("0.14.0.dev0")

 logger = get_logger(__name__, log_level="INFO")
@@ -34,7 +34,7 @@ from diffusers.utils import check_min_version


 # Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.13.0")
+check_min_version("0.14.0.dev0")

 logger = logging.getLogger(__name__)
@@ -48,7 +48,7 @@ from diffusers.utils.import_utils import is_xformers_available


 # Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.13.0")
+check_min_version("0.14.0.dev0")

 logger = get_logger(__name__)
@@ -74,7 +74,7 @@ else:


 # Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.13.0")
+check_min_version("0.14.0.dev0")

 logger = get_logger(__name__)
@@ -57,7 +57,7 @@ else:
 # ------------------------------------------------------------------------------

 # Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.13.0")
+check_min_version("0.14.0.dev0")

 logger = logging.getLogger(__name__)
@@ -27,7 +27,7 @@ from diffusers.utils import check_min_version, is_tensorboard_available, is_wand


 # Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.13.0")
+check_min_version("0.14.0.dev0")

 logger = get_logger(__name__, log_level="INFO")
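Each example script above pins the development version with the same guard. A simplified stand-in for what `check_min_version` enforces, not the library's exact implementation:

from packaging import version

import diffusers

def check_min_version_sketch(min_version: str) -> None:
    # Refuse to run against an older diffusers install than the scripts were written for.
    if version.parse(diffusers.__version__) < version.parse(min_version):
        raise ImportError(
            f"These example scripts require diffusers>={min_version}, "
            f"but {diffusers.__version__} is installed; install diffusers from source "
            "to get a .dev0 version."
        )

check_min_version_sketch("0.14.0.dev0")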
setup.py

@@ -219,7 +219,7 @@ install_requires = [

 setup(
     name="diffusers",
-    version="0.13.0",  # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots)
+    version="0.14.0.dev0",  # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots)
     description="Diffusers",
     long_description=open("README.md", "r", encoding="utf-8").read(),
     long_description_content_type="text/markdown",
@@ -1,4 +1,4 @@
-__version__ = "0.13.0"
+__version__ = "0.14.0.dev0"

 from .configuration_utils import ConfigMixin
 from .utils import (
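The setup.py and package `__version__` bumps follow the post-release convention visible in this diff: once a release x.y.z is tagged, the development branch moves to x.(y+1).0.dev0. A small sketch of that rule:

from packaging.version import Version

def next_dev_version(released: str) -> str:
    # After tagging a release, main advances to the next minor version with a .dev0 suffix.
    v = Version(released)
    return f"{v.major}.{v.minor + 1}.0.dev0"

assert next_dev_version("0.13.0") == "0.14.0.dev0"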
@@ -15,7 +15,7 @@ from ...schedulers import (
     LMSDiscreteScheduler,
     PNDMScheduler,
 )
-from ...utils import PIL_INTERPOLATION, deprecate, randn_tensor
+from ...utils import PIL_INTERPOLATION, randn_tensor
 from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput

@@ -72,7 +72,6 @@ class LDMSuperResolutionPipeline(DiffusionPipeline):
         generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
         output_type: Optional[str] = "pil",
         return_dict: bool = True,
-        **kwargs,
     ) -> Union[Tuple, ImagePipelineOutput]:
         r"""
         Args:
@@ -100,10 +99,6 @@ class LDMSuperResolutionPipeline(DiffusionPipeline):
             [`~pipelines.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if `return_dict` is
             True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images.
         """
-        message = "Please use `image` instead of `init_image`."
-        init_image = deprecate("init_image", "0.14.0", message, take_from=kwargs)
-        image = init_image or image
-
         if isinstance(image, PIL.Image.Image):
             batch_size = 1
         elif isinstance(image, torch.Tensor):
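After this change the super-resolution pipeline only accepts the `image` argument. A hedged usage sketch (checkpoint id as used in the diffusers docs; input size and sampler settings are illustrative):

import torch
from PIL import Image

from diffusers import LDMSuperResolutionPipeline

pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
pipe = pipe.to("cuda")

# Pass the low-resolution input via `image=` (the old `init_image=` keyword is gone).
low_res = Image.open("low_res.png").convert("RGB").resize((128, 128))
upscaled = pipe(image=low_res, num_inference_steps=100, eta=1.0).images[0]
upscaled.save("upscaled.png")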
@@ -582,7 +582,6 @@ class CycleDiffusionPipeline(DiffusionPipeline):
         return_dict: bool = True,
         callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
         callback_steps: int = 1,
-        **kwargs,
     ):
         r"""
         Function invoked when calling the pipeline for generation.
@@ -646,10 +645,6 @@ class CycleDiffusionPipeline(DiffusionPipeline):
             list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
             (nsfw) content, according to the `safety_checker`.
         """
-        message = "Please use `image` instead of `init_image`."
-        init_image = deprecate("init_image", "0.14.0", message, take_from=kwargs)
-        image = init_image or image
-
         # 1. Check inputs
         self.check_inputs(prompt, strength, callback_steps)
@@ -253,7 +253,6 @@ class OnnxStableDiffusionImg2ImgPipeline(DiffusionPipeline):
         return_dict: bool = True,
         callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
         callback_steps: int = 1,
-        **kwargs,
     ):
         r"""
         Function invoked when calling the pipeline for generation.
@@ -309,10 +308,6 @@ class OnnxStableDiffusionImg2ImgPipeline(DiffusionPipeline):
             list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
             (nsfw) content, according to the `safety_checker`.
         """
-        message = "Please use `image` instead of `init_image`."
-        init_image = deprecate("init_image", "0.14.0", message, take_from=kwargs)
-        image = init_image or image
-
         if isinstance(prompt, str):
             batch_size = 1
         elif isinstance(prompt, list):
@@ -240,7 +240,6 @@ class OnnxStableDiffusionInpaintPipelineLegacy(DiffusionPipeline):
         return_dict: bool = True,
         callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
         callback_steps: int = 1,
-        **kwargs,
     ):
         r"""
         Function invoked when calling the pipeline for generation.
@@ -301,10 +300,6 @@ class OnnxStableDiffusionInpaintPipelineLegacy(DiffusionPipeline):
             list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
             (nsfw) content, according to the `safety_checker`.
         """
-        message = "Please use `image` instead of `init_image`."
-        init_image = deprecate("init_image", "0.14.0", message, take_from=kwargs)
-        image = init_image or image
-
         if isinstance(prompt, str):
             batch_size = 1
         elif isinstance(prompt, list):
@@ -572,7 +572,6 @@ class StableDiffusionImg2ImgPipeline(DiffusionPipeline):
         return_dict: bool = True,
         callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
         callback_steps: int = 1,
-        **kwargs,
     ):
         r"""
         Function invoked when calling the pipeline for generation.
@@ -639,10 +638,6 @@ class StableDiffusionImg2ImgPipeline(DiffusionPipeline):
             list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
             (nsfw) content, according to the `safety_checker`.
         """
-        message = "Please use `image` instead of `init_image`."
-        init_image = deprecate("init_image", "0.14.0", message, take_from=kwargs)
-        image = init_image or image
-
         # 1. Check inputs. Raise error if not correct
         self.check_inputs(prompt, strength, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds)
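For callers of the image-to-image pipeline the migration is just a keyword rename. A hedged before/after sketch (checkpoint id and prompt are illustrative):

import torch
from PIL import Image

from diffusers import StableDiffusionImg2ImgPipeline

pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

init = Image.open("sketch.png").convert("RGB").resize((512, 512))

# Before 0.14: pipe(prompt=..., init_image=init) still worked behind a deprecation warning.
# From 0.14 on, only the `image` keyword is accepted:
result = pipe(prompt="a fantasy landscape, detailed matte painting", image=init, strength=0.75)
result.images[0].save("out.png")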
@@ -530,7 +530,6 @@ class StableDiffusionInpaintPipelineLegacy(DiffusionPipeline):
         return_dict: bool = True,
         callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
         callback_steps: int = 1,
-        **kwargs,
     ):
         r"""
         Function invoked when calling the pipeline for generation.
@@ -603,10 +602,6 @@ class StableDiffusionInpaintPipelineLegacy(DiffusionPipeline):
             list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
             (nsfw) content, according to the `safety_checker`.
         """
-        message = "Please use `image` instead of `init_image`."
-        init_image = deprecate("init_image", "0.14.0", message, take_from=kwargs)
-        image = init_image or image
-
         # 1. Check inputs
         self.check_inputs(prompt, strength, callback_steps)