From 09779cbb4046b0afa7cc3da043c928dc4866d59a Mon Sep 17 00:00:00 2001
From: Patrick von Platen
Date: Wed, 25 Jan 2023 18:59:02 +0200
Subject: [PATCH] [Bump version] 0.13.0dev0 & Deprecate `predict_epsilon` (#2109)

* [Bump version] 0.13

* Bump model up

* up
---
 examples/community/imagic_stable_diffusion.py |  2 +-
 examples/community/lpw_stable_diffusion.py    |  2 +-
 .../community/lpw_stable_diffusion_onnx.py    |  2 +-
 examples/dreambooth/train_dreambooth.py       |  2 +-
 examples/dreambooth/train_dreambooth_flax.py  |  2 +-
 examples/dreambooth/train_dreambooth_lora.py  |  2 +-
 examples/text_to_image/train_text_to_image.py |  2 +-
 .../text_to_image/train_text_to_image_flax.py |  2 +-
 .../text_to_image/train_text_to_image_lora.py |  2 +-
 .../textual_inversion/textual_inversion.py    |  2 +-
 .../textual_inversion_flax.py                 |  2 +-
 .../train_unconditional.py                    |  2 +-
 .../train_unconditional_ort.py                |  2 +-
 setup.py                                      |  2 +-
 src/diffusers/__init__.py                     |  2 +-
 .../pipeline_alt_diffusion_img2img.py         |  2 +-
 src/diffusers/pipelines/ddpm/pipeline_ddpm.py | 28 +----------------
 ...peline_latent_diffusion_superresolution.py |  2 +-
 .../pipeline_cycle_diffusion.py               |  2 +-
 .../pipeline_onnx_stable_diffusion_img2img.py |  2 +-
 ...ne_onnx_stable_diffusion_inpaint_legacy.py |  2 +-
 .../pipeline_stable_diffusion_img2img.py      |  2 +-
 ...ipeline_stable_diffusion_inpaint_legacy.py |  2 +-
 src/diffusers/schedulers/scheduling_ddim.py   | 12 +-------
 .../schedulers/scheduling_ddim_flax.py        | 11 -------
 src/diffusers/schedulers/scheduling_ddpm.py   | 25 ++-------------
 .../schedulers/scheduling_ddpm_flax.py        | 11 -------
 .../scheduling_dpmsolver_multistep.py         | 11 -------
 .../scheduling_dpmsolver_multistep_flax.py    | 11 -------
 tests/pipelines/ddpm/test_ddpm.py             | 27 ----------------
 tests/test_config.py                          | 11 -------
 tests/test_scheduler.py                       | 31 +------------------
 tests/test_scheduler_flax.py                  | 18 +---------
 33 files changed, 28 insertions(+), 212 deletions(-)

diff --git a/examples/community/imagic_stable_diffusion.py b/examples/community/imagic_stable_diffusion.py
index c40f1a05..d10c1bfa 100644
--- a/examples/community/imagic_stable_diffusion.py
+++ b/examples/community/imagic_stable_diffusion.py
@@ -185,7 +185,7 @@ class ImagicStableDiffusionPipeline(DiffusionPipeline):
             (nsfw) content, according to the `safety_checker`.
         """
         message = "Please use `image` instead of `init_image`."
-        init_image = deprecate("init_image", "0.13.0", message, take_from=kwargs)
+        init_image = deprecate("init_image", "0.14.0", message, take_from=kwargs)
         image = init_image or image

         accelerator = Accelerator(
diff --git a/examples/community/lpw_stable_diffusion.py b/examples/community/lpw_stable_diffusion.py
index 90a33074..cd094967 100644
--- a/examples/community/lpw_stable_diffusion.py
+++ b/examples/community/lpw_stable_diffusion.py
@@ -759,7 +759,7 @@ class StableDiffusionLongPromptWeightingPipeline(StableDiffusionPipeline):
             (nsfw) content, according to the `safety_checker`.
         """
         message = "Please use `image` instead of `init_image`."
-        init_image = deprecate("init_image", "0.13.0", message, take_from=kwargs)
+        init_image = deprecate("init_image", "0.14.0", message, take_from=kwargs)
         image = init_image or image

         # 0. Default height and width to unet
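# Migration sketch (illustrative, not part of the diff): the community-pipeline hunks above, and the
# pipeline hunks later in this patch, only move the removal target of the deprecated `init_image`
# argument from 0.13.0 to 0.14.0. Callers should already pass `image`. Assumptions: a standard
# img2img pipeline, an example checkpoint id, and a local input file.
from PIL import Image

from diffusers import StableDiffusionImg2ImgPipeline

pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
init = Image.open("sketch.png").convert("RGB").resize((768, 512))
# Pass the input as `image=`; `init_image=` keeps working until 0.14.0 but emits a deprecation warning.
result = pipe(prompt="a fantasy landscape", image=init, strength=0.75).images[0]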
diff --git a/examples/community/lpw_stable_diffusion_onnx.py b/examples/community/lpw_stable_diffusion_onnx.py
index f37bd6e8..80204ccb 100644
--- a/examples/community/lpw_stable_diffusion_onnx.py
+++ b/examples/community/lpw_stable_diffusion_onnx.py
@@ -745,7 +745,7 @@ class OnnxStableDiffusionLongPromptWeightingPipeline(OnnxStableDiffusionPipeline
             (nsfw) content, according to the `safety_checker`.
         """
         message = "Please use `image` instead of `init_image`."
-        init_image = deprecate("init_image", "0.13.0", message, take_from=kwargs)
+        init_image = deprecate("init_image", "0.14.0", message, take_from=kwargs)
         image = init_image or image

         # 0. Default height and width to unet
diff --git a/examples/dreambooth/train_dreambooth.py b/examples/dreambooth/train_dreambooth.py
index e699e2e6..fb8eef9b 100644
--- a/examples/dreambooth/train_dreambooth.py
+++ b/examples/dreambooth/train_dreambooth.py
@@ -46,7 +46,7 @@ from transformers import AutoTokenizer, PretrainedConfig

 # Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.12.0")
+check_min_version("0.13.0.dev0")

 logger = get_logger(__name__)
diff --git a/examples/dreambooth/train_dreambooth_flax.py b/examples/dreambooth/train_dreambooth_flax.py
index c557c2a0..247de851 100644
--- a/examples/dreambooth/train_dreambooth_flax.py
+++ b/examples/dreambooth/train_dreambooth_flax.py
@@ -36,7 +36,7 @@ from transformers import CLIPFeatureExtractor, CLIPTokenizer, FlaxCLIPTextModel,

 # Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.12.0")
+check_min_version("0.13.0.dev0")

 # Cache compiled models across invocations of this script.
 cc.initialize_cache(os.path.expanduser("~/.cache/jax/compilation_cache"))
diff --git a/examples/dreambooth/train_dreambooth_lora.py b/examples/dreambooth/train_dreambooth_lora.py
index 4c5a2bef..d30cc1c9 100644
--- a/examples/dreambooth/train_dreambooth_lora.py
+++ b/examples/dreambooth/train_dreambooth_lora.py
@@ -54,7 +54,7 @@ from transformers import AutoTokenizer, PretrainedConfig

 # Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.12.0")
+check_min_version("0.13.0.dev0")

 logger = get_logger(__name__)
diff --git a/examples/text_to_image/train_text_to_image.py b/examples/text_to_image/train_text_to_image.py
index b152cf4c..4dc3caa1 100644
--- a/examples/text_to_image/train_text_to_image.py
+++ b/examples/text_to_image/train_text_to_image.py
@@ -45,7 +45,7 @@ from transformers import CLIPTextModel, CLIPTokenizer

 # Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.12.0")
+check_min_version("0.13.0.dev0")

 logger = get_logger(__name__, log_level="INFO")
diff --git a/examples/text_to_image/train_text_to_image_flax.py b/examples/text_to_image/train_text_to_image_flax.py
index 088753f1..901b212d 100644
--- a/examples/text_to_image/train_text_to_image_flax.py
+++ b/examples/text_to_image/train_text_to_image_flax.py
@@ -34,7 +34,7 @@ from transformers import CLIPFeatureExtractor, CLIPTokenizer, FlaxCLIPTextModel,

 # Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.12.0")
+check_min_version("0.13.0.dev0")

 logger = logging.getLogger(__name__)
diff --git a/examples/text_to_image/train_text_to_image_lora.py b/examples/text_to_image/train_text_to_image_lora.py
index 32616016..324d40c2 100644
--- a/examples/text_to_image/train_text_to_image_lora.py
+++ b/examples/text_to_image/train_text_to_image_lora.py
@@ -47,7 +47,7 @@ from transformers import CLIPTextModel, CLIPTokenizer

 # Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.12.0")
+check_min_version("0.13.0.dev0")

 logger = get_logger(__name__, log_level="INFO")
diff --git a/examples/textual_inversion/textual_inversion.py b/examples/textual_inversion/textual_inversion.py
index 6ce85884..591ef208 100644
--- a/examples/textual_inversion/textual_inversion.py
+++ b/examples/textual_inversion/textual_inversion.py
@@ -68,7 +68,7 @@ else:

 # Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.12.0")
+check_min_version("0.13.0.dev0")

 logger = get_logger(__name__)
diff --git a/examples/textual_inversion/textual_inversion_flax.py b/examples/textual_inversion/textual_inversion_flax.py
index b4baf408..bf00d430 100644
--- a/examples/textual_inversion/textual_inversion_flax.py
+++ b/examples/textual_inversion/textual_inversion_flax.py
@@ -57,7 +57,7 @@ else:
 # ------------------------------------------------------------------------------

 # Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.12.0")
+check_min_version("0.13.0.dev0")

 logger = logging.getLogger(__name__)
diff --git a/examples/unconditional_image_generation/train_unconditional.py b/examples/unconditional_image_generation/train_unconditional.py
index 5eb53752..3b017740 100644
--- a/examples/unconditional_image_generation/train_unconditional.py
+++ b/examples/unconditional_image_generation/train_unconditional.py
@@ -33,7 +33,7 @@ from tqdm.auto import tqdm

 # Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.12.0")
+check_min_version("0.13.0.dev0")

 logger = get_logger(__name__, log_level="INFO")
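# Illustrative note (not part of the diff): each example script above and below now pins its minimum to
# the development version, so running them requires an install from source, e.g.
# `pip install git+https://github.com/huggingface/diffusers`. The guard they all share is simply:
from diffusers.utils import check_min_version

# Errors at import time if the installed `diffusers` is older than what the examples expect.
check_min_version("0.13.0.dev0")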
diff --git a/examples/unconditional_image_generation/train_unconditional_ort.py b/examples/unconditional_image_generation/train_unconditional_ort.py
index c7f4cb5e..532a1e87 100644
--- a/examples/unconditional_image_generation/train_unconditional_ort.py
+++ b/examples/unconditional_image_generation/train_unconditional_ort.py
@@ -30,7 +30,7 @@ from tqdm.auto import tqdm

 # Will error if the minimal version of diffusers is not installed. Remove at your own risks.
-check_min_version("0.12.0")
+check_min_version("0.13.0.dev0")

 logger = get_logger(__name__)
diff --git a/setup.py b/setup.py
index fd399050..80acc539 100644
--- a/setup.py
+++ b/setup.py
@@ -219,7 +219,7 @@ install_requires = [

 setup(
     name="diffusers",
-    version="0.12.0",  # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots)
+    version="0.13.0.dev0",  # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots)
     description="Diffusers",
     long_description=open("README.md", "r", encoding="utf-8").read(),
     long_description_content_type="text/markdown",
diff --git a/src/diffusers/__init__.py b/src/diffusers/__init__.py
index b3afb475..f9803380 100644
--- a/src/diffusers/__init__.py
+++ b/src/diffusers/__init__.py
@@ -1,4 +1,4 @@
-__version__ = "0.12.0"
+__version__ = "0.13.0.dev0"

 from .configuration_utils import ConfigMixin
 from .utils import (
diff --git a/src/diffusers/pipelines/alt_diffusion/pipeline_alt_diffusion_img2img.py b/src/diffusers/pipelines/alt_diffusion/pipeline_alt_diffusion_img2img.py
index 6b64edad..39c314c4 100644
--- a/src/diffusers/pipelines/alt_diffusion/pipeline_alt_diffusion_img2img.py
+++ b/src/diffusers/pipelines/alt_diffusion/pipeline_alt_diffusion_img2img.py
@@ -606,7 +606,7 @@ class AltDiffusionImg2ImgPipeline(DiffusionPipeline):
             (nsfw) content, according to the `safety_checker`.
         """
         message = "Please use `image` instead of `init_image`."
-        init_image = deprecate("init_image", "0.13.0", message, take_from=kwargs)
+        init_image = deprecate("init_image", "0.14.0", message, take_from=kwargs)
         image = init_image or image

         # 1. Check inputs. Raise error if not correct
diff --git a/src/diffusers/pipelines/ddpm/pipeline_ddpm.py b/src/diffusers/pipelines/ddpm/pipeline_ddpm.py
index 32b42cc6..3525c156 100644
--- a/src/diffusers/pipelines/ddpm/pipeline_ddpm.py
+++ b/src/diffusers/pipelines/ddpm/pipeline_ddpm.py
@@ -17,8 +17,7 @@ from typing import List, Optional, Tuple, Union

 import torch

-from ...configuration_utils import FrozenDict
-from ...utils import deprecate, randn_tensor
+from ...utils import randn_tensor
 from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
@@ -46,7 +45,6 @@ class DDPMPipeline(DiffusionPipeline):
         num_inference_steps: int = 1000,
         output_type: Optional[str] = "pil",
         return_dict: bool = True,
-        **kwargs,
     ) -> Union[ImagePipelineOutput, Tuple]:
         r"""
         Args:
@@ -68,30 +66,6 @@
             [`~pipelines.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if `return_dict` is
             True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images.
         """
-        message = (
-            "Please make sure to instantiate your scheduler with `prediction_type` instead. E.g. `scheduler ="
-            " DDPMScheduler.from_pretrained(<model_id>, prediction_type='epsilon')`."
-        )
-        predict_epsilon = deprecate("predict_epsilon", "0.13.0", message, take_from=kwargs)
-
-        if predict_epsilon is not None:
-            new_config = dict(self.scheduler.config)
-            new_config["prediction_type"] = "epsilon" if predict_epsilon else "sample"
-            self.scheduler._internal_dict = FrozenDict(new_config)
-
-        if generator is not None and generator.device.type != self.device.type and self.device.type != "mps":
-            message = (
-                f"The `generator` device is `{generator.device}` and does not match the pipeline "
-                f"device `{self.device}`, so the `generator` will be ignored. "
-                f'Please use `torch.Generator(device="{self.device}")` instead.'
-            )
-            deprecate(
-                "generator.device == 'cpu'",
-                "0.13.0",
-                message,
-            )
-            generator = None
-
         # Sample gaussian noise to begin loop
         if isinstance(self.unet.sample_size, int):
             image_shape = (batch_size, self.unet.in_channels, self.unet.sample_size, self.unet.sample_size)
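# Migration sketch (illustrative, not part of the diff): with the shims above removed,
# `DDPMPipeline.__call__` no longer accepts `predict_epsilon`, and a `generator` on a mismatched device
# is no longer silently dropped. Configure the scheduler instead and create the generator on the
# pipeline's device. Assumptions: a CUDA device and the example checkpoint id used in the tests below.
import torch

from diffusers import DDPMPipeline, DDPMScheduler

scheduler = DDPMScheduler.from_pretrained("google/ddpm-celebahq-256", prediction_type="epsilon")
pipe = DDPMPipeline.from_pretrained("google/ddpm-celebahq-256", scheduler=scheduler).to("cuda")
generator = torch.Generator(device="cuda").manual_seed(0)  # match the pipeline device
image = pipe(generator=generator, num_inference_steps=1000).images[0]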
diff --git a/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion_superresolution.py b/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion_superresolution.py
index fa0b143f..1e9bea27 100644
--- a/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion_superresolution.py
+++ b/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion_superresolution.py
@@ -102,7 +102,7 @@ class LDMSuperResolutionPipeline(DiffusionPipeline):
             True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images.
         """
         message = "Please use `image` instead of `init_image`."
-        init_image = deprecate("init_image", "0.13.0", message, take_from=kwargs)
+        init_image = deprecate("init_image", "0.14.0", message, take_from=kwargs)
         image = init_image or image

         if isinstance(image, PIL.Image.Image):
diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_cycle_diffusion.py b/src/diffusers/pipelines/stable_diffusion/pipeline_cycle_diffusion.py
index d242a9f9..7300abbf 100644
--- a/src/diffusers/pipelines/stable_diffusion/pipeline_cycle_diffusion.py
+++ b/src/diffusers/pipelines/stable_diffusion/pipeline_cycle_diffusion.py
@@ -623,7 +623,7 @@ class CycleDiffusionPipeline(DiffusionPipeline):
             (nsfw) content, according to the `safety_checker`.
         """
         message = "Please use `image` instead of `init_image`."
-        init_image = deprecate("init_image", "0.13.0", message, take_from=kwargs)
+        init_image = deprecate("init_image", "0.14.0", message, take_from=kwargs)
         image = init_image or image

         # 1. Check inputs
diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_img2img.py b/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_img2img.py
index 5fccbe71..473d532c 100644
--- a/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_img2img.py
+++ b/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_img2img.py
@@ -311,7 +311,7 @@ class OnnxStableDiffusionImg2ImgPipeline(DiffusionPipeline):
             (nsfw) content, according to the `safety_checker`.
         """
         message = "Please use `image` instead of `init_image`."
-        init_image = deprecate("init_image", "0.13.0", message, take_from=kwargs)
+        init_image = deprecate("init_image", "0.14.0", message, take_from=kwargs)
         image = init_image or image

         if isinstance(prompt, str):
diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint_legacy.py b/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint_legacy.py
index 25b86e8e..2110171b 100644
--- a/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint_legacy.py
+++ b/src/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_inpaint_legacy.py
@@ -303,7 +303,7 @@ class OnnxStableDiffusionInpaintPipelineLegacy(DiffusionPipeline):
             (nsfw) content, according to the `safety_checker`.
         """
         message = "Please use `image` instead of `init_image`."
-        init_image = deprecate("init_image", "0.13.0", message, take_from=kwargs)
+        init_image = deprecate("init_image", "0.14.0", message, take_from=kwargs)
         image = init_image or image

         if isinstance(prompt, str):
diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py
index 5f103b36..a5e56b2c 100644
--- a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py
+++ b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_img2img.py
@@ -616,7 +616,7 @@ class StableDiffusionImg2ImgPipeline(DiffusionPipeline):
             (nsfw) content, according to the `safety_checker`.
         """
         message = "Please use `image` instead of `init_image`."
-        init_image = deprecate("init_image", "0.13.0", message, take_from=kwargs)
+        init_image = deprecate("init_image", "0.14.0", message, take_from=kwargs)
         image = init_image or image

         # 1. Check inputs. Raise error if not correct
diff --git a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint_legacy.py b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint_legacy.py
index 9f032daa..975e8240 100644
--- a/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint_legacy.py
+++ b/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_inpaint_legacy.py
@@ -556,7 +556,7 @@ class StableDiffusionInpaintPipelineLegacy(DiffusionPipeline):
             (nsfw) content, according to the `safety_checker`.
         """
         message = "Please use `image` instead of `init_image`."
-        init_image = deprecate("init_image", "0.13.0", message, take_from=kwargs)
+        init_image = deprecate("init_image", "0.14.0", message, take_from=kwargs)
         image = init_image or image

         # 1. Check inputs
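# Migration sketch (illustrative, not part of the diff): the scheduler diffs that follow drop the
# `predict_epsilon` keyword from the constructors and from `step()`. The prediction target is now set
# only through `prediction_type`, which accepts "epsilon", "sample", or "v_prediction".
from diffusers import DDPMScheduler

# Previously: DDPMScheduler(predict_epsilon=False) -- now expressed explicitly as:
scheduler = DDPMScheduler(prediction_type="sample")
# And the old default, predict_epsilon=True, corresponds to:
scheduler = DDPMScheduler(prediction_type="epsilon")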
diff --git a/src/diffusers/schedulers/scheduling_ddim.py b/src/diffusers/schedulers/scheduling_ddim.py
index 632d4e28..95423251 100644
--- a/src/diffusers/schedulers/scheduling_ddim.py
+++ b/src/diffusers/schedulers/scheduling_ddim.py
@@ -23,7 +23,7 @@ import numpy as np
 import torch

 from ..configuration_utils import ConfigMixin, register_to_config
-from ..utils import BaseOutput, deprecate, randn_tensor
+from ..utils import BaseOutput, randn_tensor
 from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
@@ -113,7 +113,6 @@ class DDIMScheduler(SchedulerMixin, ConfigMixin):
     """

     _compatibles = [e.name for e in KarrasDiffusionSchedulers]
-    _deprecated_kwargs = ["predict_epsilon"]
     order = 1

     @register_to_config
@@ -128,16 +127,7 @@ class DDIMScheduler(SchedulerMixin, ConfigMixin):
         set_alpha_to_one: bool = True,
         steps_offset: int = 0,
         prediction_type: str = "epsilon",
-        **kwargs,
     ):
-        message = (
-            "Please make sure to instantiate your scheduler with `prediction_type` instead. E.g. `scheduler ="
-            " DDIMScheduler.from_pretrained(<model_id>, prediction_type='epsilon')`."
-        )
-        predict_epsilon = deprecate("predict_epsilon", "0.13.0", message, take_from=kwargs)
-        if predict_epsilon is not None:
-            self.register_to_config(prediction_type="epsilon" if predict_epsilon else "sample")
-
         if trained_betas is not None:
             self.betas = torch.tensor(trained_betas, dtype=torch.float32)
         elif beta_schedule == "linear":
diff --git a/src/diffusers/schedulers/scheduling_ddim_flax.py b/src/diffusers/schedulers/scheduling_ddim_flax.py
index 52a997fa..565b7ff3 100644
--- a/src/diffusers/schedulers/scheduling_ddim_flax.py
+++ b/src/diffusers/schedulers/scheduling_ddim_flax.py
@@ -22,7 +22,6 @@ import flax
 import jax.numpy as jnp

 from ..configuration_utils import ConfigMixin, register_to_config
-from ..utils import deprecate
 from .scheduling_utils_flax import (
     CommonSchedulerState,
     FlaxKarrasDiffusionSchedulers,
@@ -103,7 +102,6 @@ class FlaxDDIMScheduler(FlaxSchedulerMixin, ConfigMixin):
     """

     _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]
-    _deprecated_kwargs = ["predict_epsilon"]

     dtype: jnp.dtype
@@ -123,16 +121,7 @@ class FlaxDDIMScheduler(FlaxSchedulerMixin, ConfigMixin):
         steps_offset: int = 0,
         prediction_type: str = "epsilon",
         dtype: jnp.dtype = jnp.float32,
-        **kwargs,
     ):
-        message = (
-            "Please make sure to instantiate your scheduler with `prediction_type` instead. E.g. `scheduler ="
-            f" {self.__class__.__name__}.from_pretrained(<model_id>, prediction_type='epsilon')`."
-        )
-        predict_epsilon = deprecate("predict_epsilon", "0.13.0", message, take_from=kwargs)
-        if predict_epsilon is not None:
-            self.register_to_config(prediction_type="epsilon" if predict_epsilon else "sample")
-
         self.dtype = dtype

     def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDIMSchedulerState:
diff --git a/src/diffusers/schedulers/scheduling_ddpm.py b/src/diffusers/schedulers/scheduling_ddpm.py
index b58ed833..2f802ba1 100644
--- a/src/diffusers/schedulers/scheduling_ddpm.py
+++ b/src/diffusers/schedulers/scheduling_ddpm.py
@@ -21,8 +21,8 @@ from typing import List, Optional, Tuple, Union
 import numpy as np
 import torch

-from ..configuration_utils import ConfigMixin, FrozenDict, register_to_config
-from ..utils import BaseOutput, deprecate, randn_tensor
+from ..configuration_utils import ConfigMixin, register_to_config
+from ..utils import BaseOutput, randn_tensor
 from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
@@ -106,7 +106,6 @@ class DDPMScheduler(SchedulerMixin, ConfigMixin):
     """

     _compatibles = [e.name for e in KarrasDiffusionSchedulers]
-    _deprecated_kwargs = ["predict_epsilon"]
     order = 1

     @register_to_config
@@ -120,16 +119,7 @@ class DDPMScheduler(SchedulerMixin, ConfigMixin):
         variance_type: str = "fixed_small",
         clip_sample: bool = True,
         prediction_type: str = "epsilon",
-        **kwargs,
     ):
-        message = (
-            "Please make sure to instantiate your scheduler with `prediction_type` instead. E.g. `scheduler ="
-            " DDPMScheduler.from_pretrained(<model_id>, prediction_type='epsilon')`."
-        )
-        predict_epsilon = deprecate("predict_epsilon", "0.13.0", message, take_from=kwargs)
-        if predict_epsilon is not None:
-            self.register_to_config(prediction_type="epsilon" if predict_epsilon else "sample")
-
         if trained_betas is not None:
             self.betas = torch.tensor(trained_betas, dtype=torch.float32)
         elif beta_schedule == "linear":
@@ -239,7 +229,6 @@ class DDPMScheduler(SchedulerMixin, ConfigMixin):
         sample: torch.FloatTensor,
         generator=None,
         return_dict: bool = True,
-        **kwargs,
     ) -> Union[DDPMSchedulerOutput, Tuple]:
         """
         Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
@@ -259,16 +248,6 @@ class DDPMScheduler(SchedulerMixin, ConfigMixin):
             returning a tuple, the first element is the sample tensor.

         """
-        message = (
-            "Please make sure to instantiate your scheduler with `prediction_type` instead. E.g. `scheduler ="
-            " DDPMScheduler.from_pretrained(<model_id>, prediction_type='epsilon')`."
-        )
-        predict_epsilon = deprecate("predict_epsilon", "0.13.0", message, take_from=kwargs)
-        if predict_epsilon is not None:
-            new_config = dict(self.config)
-            new_config["prediction_type"] = "epsilon" if predict_epsilon else "sample"
-            self._internal_dict = FrozenDict(new_config)
-
         t = timestep

         if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
diff --git a/src/diffusers/schedulers/scheduling_ddpm_flax.py b/src/diffusers/schedulers/scheduling_ddpm_flax.py
index 8223b340..faf59b10 100644
--- a/src/diffusers/schedulers/scheduling_ddpm_flax.py
+++ b/src/diffusers/schedulers/scheduling_ddpm_flax.py
@@ -22,7 +22,6 @@ import jax
 import jax.numpy as jnp

 from ..configuration_utils import ConfigMixin, register_to_config
-from ..utils import deprecate
 from .scheduling_utils_flax import (
     CommonSchedulerState,
     FlaxKarrasDiffusionSchedulers,
@@ -86,7 +85,6 @@ class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
     """

     _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]
-    _deprecated_kwargs = ["predict_epsilon"]

     dtype: jnp.dtype
@@ -106,16 +104,7 @@ class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
         clip_sample: bool = True,
         prediction_type: str = "epsilon",
         dtype: jnp.dtype = jnp.float32,
-        **kwargs,
     ):
-        message = (
-            "Please make sure to instantiate your scheduler with `prediction_type` instead. E.g. `scheduler ="
-            f" {self.__class__.__name__}.from_pretrained(<model_id>, prediction_type='epsilon')`."
-        )
-        predict_epsilon = deprecate("predict_epsilon", "0.13.0", message, take_from=kwargs)
-        if predict_epsilon is not None:
-            self.register_to_config(prediction_type="epsilon" if predict_epsilon else "sample")
-
         self.dtype = dtype

     def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
diff --git a/src/diffusers/schedulers/scheduling_dpmsolver_multistep.py b/src/diffusers/schedulers/scheduling_dpmsolver_multistep.py
index 8acb87d7..0630ea1d 100644
--- a/src/diffusers/schedulers/scheduling_dpmsolver_multistep.py
+++ b/src/diffusers/schedulers/scheduling_dpmsolver_multistep.py
@@ -21,7 +21,6 @@ import numpy as np
 import torch

 from ..configuration_utils import ConfigMixin, register_to_config
-from ..utils import deprecate
 from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
@@ -118,7 +117,6 @@ class DPMSolverMultistepScheduler(SchedulerMixin, ConfigMixin):
     """

     _compatibles = [e.name for e in KarrasDiffusionSchedulers]
-    _deprecated_kwargs = ["predict_epsilon"]
     order = 1

     @register_to_config
@@ -137,16 +135,7 @@ class DPMSolverMultistepScheduler(SchedulerMixin, ConfigMixin):
         algorithm_type: str = "dpmsolver++",
         solver_type: str = "midpoint",
         lower_order_final: bool = True,
-        **kwargs,
     ):
-        message = (
-            "Please make sure to instantiate your scheduler with `prediction_type` instead. E.g. `scheduler ="
-            " DPMSolverMultistepScheduler.from_pretrained(<model_id>, prediction_type='epsilon')`."
-        )
-        predict_epsilon = deprecate("predict_epsilon", "0.13.0", message, take_from=kwargs)
-        if predict_epsilon is not None:
-            self.register_to_config(prediction_type="epsilon" if predict_epsilon else "sample")
-
         if trained_betas is not None:
             self.betas = torch.tensor(trained_betas, dtype=torch.float32)
         elif beta_schedule == "linear":
diff --git a/src/diffusers/schedulers/scheduling_dpmsolver_multistep_flax.py b/src/diffusers/schedulers/scheduling_dpmsolver_multistep_flax.py
index ed2ed5f5..cadf782f 100644
--- a/src/diffusers/schedulers/scheduling_dpmsolver_multistep_flax.py
+++ b/src/diffusers/schedulers/scheduling_dpmsolver_multistep_flax.py
@@ -22,7 +22,6 @@ import jax
 import jax.numpy as jnp

 from ..configuration_utils import ConfigMixin, register_to_config
-from ..utils import deprecate
 from .scheduling_utils_flax import (
     CommonSchedulerState,
     FlaxKarrasDiffusionSchedulers,
@@ -141,7 +140,6 @@ class FlaxDPMSolverMultistepScheduler(FlaxSchedulerMixin, ConfigMixin):
     """

     _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]
-    _deprecated_kwargs = ["predict_epsilon"]

     dtype: jnp.dtype
@@ -166,16 +164,7 @@ class FlaxDPMSolverMultistepScheduler(FlaxSchedulerMixin, ConfigMixin):
         solver_type: str = "midpoint",
         lower_order_final: bool = True,
         dtype: jnp.dtype = jnp.float32,
-        **kwargs,
     ):
-        message = (
-            "Please make sure to instantiate your scheduler with `prediction_type` instead. E.g. `scheduler ="
-            f" {self.__class__.__name__}.from_pretrained(<model_id>, prediction_type='epsilon')`."
-        )
-        predict_epsilon = deprecate("predict_epsilon", "0.13.0", message, take_from=kwargs)
-        if predict_epsilon is not None:
-            self.register_to_config(prediction_type="epsilon" if predict_epsilon else "sample")
-
         self.dtype = dtype

     def create_state(self, common: Optional[CommonSchedulerState] = None) -> DPMSolverMultistepSchedulerState:
diff --git a/tests/pipelines/ddpm/test_ddpm.py b/tests/pipelines/ddpm/test_ddpm.py
index 4b6b7a55..8287212a 100644
--- a/tests/pipelines/ddpm/test_ddpm.py
+++ b/tests/pipelines/ddpm/test_ddpm.py
@@ -19,7 +19,6 @@ import numpy as np
 import torch

 from diffusers import DDPMPipeline, DDPMScheduler, UNet2DModel
-from diffusers.utils import deprecate
 from diffusers.utils.testing_utils import require_torch_gpu, slow, torch_device
@@ -67,32 +66,6 @@ class DDPMPipelineFastTests(unittest.TestCase):
         assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
         assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

-    def test_inference_deprecated_predict_epsilon(self):
-        deprecate("remove this test", "0.13.0", "remove")
-        unet = self.dummy_uncond_unet
-        scheduler = DDPMScheduler(predict_epsilon=False)
-
-        ddpm = DDPMPipeline(unet=unet, scheduler=scheduler)
-        ddpm.to(torch_device)
-        ddpm.set_progress_bar_config(disable=None)
-
-        # Warmup pass when using mps (see #372)
-        if torch_device == "mps":
-            _ = ddpm(num_inference_steps=1)
-
-        generator = torch.manual_seed(0)
-        image = ddpm(generator=generator, num_inference_steps=2, output_type="numpy").images
-
-        generator = torch.manual_seed(0)
-        image_eps = ddpm(generator=generator, num_inference_steps=2, output_type="numpy", predict_epsilon=False)[0]
-
-        image_slice = image[0, -3:, -3:, -1]
-        image_eps_slice = image_eps[0, -3:, -3:, -1]
-
-        assert image.shape == (1, 32, 32, 3)
-        tolerance = 1e-2 if torch_device != "mps" else 3e-2
-        assert np.abs(image_slice.flatten() - image_eps_slice.flatten()).max() < tolerance
-
     def test_inference_predict_sample(self):
         unet = self.dummy_uncond_unet
         scheduler = DDPMScheduler(prediction_type="sample")
diff --git a/tests/test_config.py b/tests/test_config.py
index 1f8ef55a..e5ae467e 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -26,7 +26,6 @@ from diffusers import (
     logging,
 )
 from diffusers.configuration_utils import ConfigMixin, register_to_config
-from diffusers.utils import deprecate
 from diffusers.utils.testing_utils import CaptureLogger
@@ -202,20 +201,10 @@ class ConfigTester(unittest.TestCase):
         with CaptureLogger(logger) as cap_logger_2:
             ddpm_2 = DDPMScheduler.from_pretrained("google/ddpm-celebahq-256", beta_start=88)

-        with CaptureLogger(logger) as cap_logger:
-            deprecate("remove this case", "0.13.0", "remove")
-            ddpm_3 = DDPMScheduler.from_pretrained(
-                "hf-internal-testing/tiny-stable-diffusion-torch",
-                subfolder="scheduler",
-                predict_epsilon=False,
-                beta_end=8,
-            )
-
         assert ddpm.__class__ == DDPMScheduler
         assert ddpm.config.prediction_type == "sample"
         assert ddpm.config.beta_end == 8
         assert ddpm_2.config.beta_start == 88
-        assert ddpm_3.config.prediction_type == "sample"

         # no warning should be thrown
         assert cap_logger.out == ""
diff --git a/tests/test_scheduler.py b/tests/test_scheduler.py
index 0d38447a..d49d599c 100755
--- a/tests/test_scheduler.py
+++ b/tests/test_scheduler.py
@@ -45,7 +45,7 @@ from diffusers import (
 )
 from diffusers.configuration_utils import ConfigMixin, register_to_config
 from diffusers.schedulers.scheduling_utils import SchedulerMixin
-from diffusers.utils import deprecate, torch_device
+from diffusers.utils import torch_device
 from diffusers.utils.testing_utils import CaptureLogger
@@ -645,35 +645,6 @@ class DDPMSchedulerTest(SchedulerCommonTest):
         for prediction_type in ["epsilon", "sample", "v_prediction"]:
             self.check_over_configs(prediction_type=prediction_type)

-    def test_deprecated_predict_epsilon(self):
-        deprecate("remove this test", "0.13.0", "remove")
-        for predict_epsilon in [True, False]:
-            self.check_over_configs(predict_epsilon=predict_epsilon)
-
-    def test_deprecated_epsilon(self):
-        deprecate("remove this test", "0.13.0", "remove")
-        scheduler_class = self.scheduler_classes[0]
-        scheduler_config = self.get_scheduler_config()
-
-        sample = self.dummy_sample_deter
-        residual = 0.1 * self.dummy_sample_deter
-        time_step = 4
-
-        scheduler = scheduler_class(**scheduler_config)
-        scheduler_eps = scheduler_class(predict_epsilon=False, **scheduler_config)
-
-        kwargs = {}
-        if "generator" in set(inspect.signature(scheduler.step).parameters.keys()):
-            kwargs["generator"] = torch.manual_seed(0)
-        output = scheduler.step(residual, time_step, sample, predict_epsilon=False, **kwargs).prev_sample
-
-        kwargs = {}
-        if "generator" in set(inspect.signature(scheduler.step).parameters.keys()):
-            kwargs["generator"] = torch.manual_seed(0)
-        output_eps = scheduler_eps.step(residual, time_step, sample, predict_epsilon=False, **kwargs).prev_sample
-
-        assert (output - output_eps).abs().sum() < 1e-5
-
     def test_time_indices(self):
         for t in [0, 500, 999]:
             self.check_over_forward(time_step=t)
diff --git a/tests/test_scheduler_flax.py b/tests/test_scheduler_flax.py
index 1da75f05..1c6de2ec 100644
--- a/tests/test_scheduler_flax.py
+++ b/tests/test_scheduler_flax.py
@@ -18,7 +18,7 @@ import unittest
 from typing import Dict, List, Tuple

 from diffusers import FlaxDDIMScheduler, FlaxDDPMScheduler, FlaxPNDMScheduler
-from diffusers.utils import deprecate, is_flax_available
+from diffusers.utils import is_flax_available
 from diffusers.utils.testing_utils import require_flax
@@ -626,22 +626,6 @@ class FlaxDDIMSchedulerTest(FlaxSchedulerCommonTest):
         for prediction_type in ["epsilon", "sample", "v_prediction"]:
             self.check_over_configs(prediction_type=prediction_type)

-    def test_deprecated_predict_epsilon(self):
-        deprecate("remove this test", "0.13.0", "remove")
-        for predict_epsilon in [True, False]:
-            self.check_over_configs(predict_epsilon=predict_epsilon)
-
-    def test_deprecated_predict_epsilon_to_prediction_type(self):
-        deprecate("remove this test", "0.13.0", "remove")
-        for scheduler_class in self.scheduler_classes:
-            scheduler_config = self.get_scheduler_config(predict_epsilon=True)
-            scheduler = scheduler_class.from_config(scheduler_config)
-            assert scheduler.prediction_type == "epsilon"
-
-            scheduler_config = self.get_scheduler_config(predict_epsilon=False)
-            scheduler = scheduler_class.from_config(scheduler_config)
-            assert scheduler.prediction_type == "sample"
-

 @require_flax
 class FlaxPNDMSchedulerTest(FlaxSchedulerCommonTest):
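# Closing sketch (illustrative, not part of the diff): after this patch, passing `predict_epsilon` directly
# to a scheduler constructor, to `scheduler.step(...)`, or to `DDPMPipeline.__call__` fails as an
# unexpected keyword instead of emitting a deprecation warning. A quick way to audit an environment;
# the checkpoint id is only an example.
import diffusers
from diffusers import DDPMScheduler

print(diffusers.__version__)  # "0.13.0.dev0" or newer once installed from source

scheduler = DDPMScheduler.from_pretrained("google/ddpm-celebahq-256")
# The prediction target now lives exclusively in the scheduler config.
print(scheduler.config.prediction_type)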