From ed22b4fd07f9384f4dd45b8de83102cddc536967 Mon Sep 17 00:00:00 2001
From: Anton Lozhkov
Date: Wed, 17 Aug 2022 15:22:04 +0200
Subject: [PATCH] Revive `make quality` (#203)

* Revive Make utils

* Add datasets for training too
---
 Makefile                                       |   5 -
 setup.py                                       |  17 +--
 src/diffusers/dependency_versions_table.py     |   4 +-
 src/diffusers/optimization.py                  |   1 -
 src/diffusers/pipelines/__init__.py            |   6 +-
 src/diffusers/pipelines/ddim/__init__.py       |   1 +
 src/diffusers/pipelines/ddpm/__init__.py       |   1 +
 .../pipelines/latent_diffusion/__init__.py     |   1 +
 .../latent_diffusion_uncond/__init__.py        |   1 +
 src/diffusers/pipelines/pndm/__init__.py       |   1 +
 .../pipelines/score_sde_ve/__init__.py         |   1 +
 .../pipelines/stable_diffusion/__init__.py     |   1 +
 .../pipelines/stochatic_karras_ve/__init__.py  |   1 +
 .../schedulers/scheduling_lms_discrete.py      |   2 +-
 src/diffusers/utils/dummy_scipy_objects.py     |  14 ---
 tests/test_layers_utils.py                     | 110 +-----------------
 16 files changed, 27 insertions(+), 140 deletions(-)

diff --git a/Makefile b/Makefile
index ec8237e1..fa346868 100644
--- a/Makefile
+++ b/Makefile
@@ -79,11 +79,6 @@ test:
 test-examples:
 	python -m pytest -n auto --dist=loadfile -s -v ./examples/pytorch/
 
-# Run tests for SageMaker DLC release
-
-test-sagemaker: # install sagemaker dependencies in advance with pip install .[sagemaker]
-	TEST_SAGEMAKER=True python -m pytest -n auto -s -v ./tests/sagemaker
-
 # Release stuff
diff --git a/setup.py b/setup.py
index 19684043..93a5dd76 100644
--- a/setup.py
+++ b/setup.py
@@ -77,19 +77,22 @@ from setuptools import find_packages, setup
 # 2. once modified, run: `make deps_table_update` to update src/diffusers/dependency_versions_table.py
 _deps = [
     "Pillow",
+    "accelerate>=0.11.0",
     "black~=22.0,>=22.3",
+    "datasets",
     "filelock",
     "flake8>=3.8.3",
+    "hf-doc-builder>=0.3.0",
     "huggingface-hub",
     "importlib_metadata",
     "isort>=5.5.4",
+    "modelcards==0.1.4",
     "numpy",
     "pytest",
     "regex!=2019.12.17",
     "requests",
-    "torch>=1.4",
     "tensorboard",
-    "modelcards==0.1.4"
+    "torch>=1.4",
 ]
 
 # this is a lookup table with items like:
@@ -161,12 +164,10 @@ extras = {}
 extras = {}
 extras["quality"] = ["black ~= 22.0", "isort >= 5.5.4", "flake8 >= 3.8.3"]
-extras["docs"] = []
-extras["training"] = ["tensorboard", "modelcards"]
-extras["test"] = [
-    "pytest",
-]
-extras["dev"] = extras["quality"] + extras["test"] + extras["training"]
+extras["docs"] = ["hf-doc-builder"]
+extras["training"] = ["accelerate", "datasets", "tensorboard", "modelcards"]
+extras["test"] = ["pytest"]
+extras["dev"] = extras["quality"] + extras["test"] + extras["training"] + extras["docs"]
 
 install_requires = [
     deps["importlib_metadata"],
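The `_deps` list in setup.py is the single source of truth for version pins; `make deps_table_update` regenerates the lookup table in src/diffusers/dependency_versions_table.py (patched next) from it. A minimal sketch of that derivation, assuming a simple split on the first version operator (illustrative only; the real logic lives in setup.py):

# Sketch: derive the name -> full-specifier table stored in
# dependency_versions_table.py from the `_deps` pin list above.
# Illustrative only -- the actual implementation is in setup.py.
import re

_deps = ["accelerate>=0.11.0", "black~=22.0,>=22.3", "modelcards==0.1.4", "torch>=1.4"]

# The bare package name is everything before the first version operator.
deps = {re.split(r"[!=<>~]", spec)[0]: spec for spec in _deps}

assert deps["torch"] == "torch>=1.4"
assert deps["black"] == "black~=22.0,>=22.3"

Keeping the pins in one place is what lets the extras and the generated table stay consistent.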
diff --git a/src/diffusers/dependency_versions_table.py b/src/diffusers/dependency_versions_table.py
index 56ee4b78..9874686d 100644
--- a/src/diffusers/dependency_versions_table.py
+++ b/src/diffusers/dependency_versions_table.py
@@ -3,17 +3,19 @@
 # 2. run `make deps_table_update``
 deps = {
     "Pillow": "Pillow",
+    "accelerate": "accelerate>=0.11.0",
     "black": "black~=22.0,>=22.3",
     "filelock": "filelock",
     "flake8": "flake8>=3.8.3",
+    "hf-doc-builder": "hf-doc-builder>=0.3.0",
     "huggingface-hub": "huggingface-hub",
     "importlib_metadata": "importlib_metadata",
     "isort": "isort>=5.5.4",
+    "modelcards": "modelcards==0.1.4",
     "numpy": "numpy",
     "pytest": "pytest",
     "regex": "regex!=2019.12.17",
     "requests": "requests",
     "torch": "torch>=1.4",
     "tensorboard": "tensorboard",
-    "modelcards": "modelcards==0.1.4",
 }
diff --git a/src/diffusers/optimization.py b/src/diffusers/optimization.py
index 84712bf8..e7b836b4 100644
--- a/src/diffusers/optimization.py
+++ b/src/diffusers/optimization.py
@@ -18,7 +18,6 @@ import math
 from enum import Enum
 from typing import Optional, Union
 
-import torch
 from torch.optim import Optimizer
 from torch.optim.lr_scheduler import LambdaLR
diff --git a/src/diffusers/pipelines/__init__.py b/src/diffusers/pipelines/__init__.py
index 19dafb47..e4e1fffa 100644
--- a/src/diffusers/pipelines/__init__.py
+++ b/src/diffusers/pipelines/__init__.py
@@ -1,4 +1,8 @@
-from ..utils import is_inflect_available, is_transformers_available, is_unidecode_available
+# flake8: noqa
+# There's no way to ignore "F401 '...' imported but unused" warnings in this
+# module, but to preserve other warnings. So, don't check this module at all.
+
+from ..utils import is_transformers_available
 from .ddim import DDIMPipeline
 from .ddpm import DDPMPipeline
 from .latent_diffusion_uncond import LDMPipeline
diff --git a/src/diffusers/pipelines/ddim/__init__.py b/src/diffusers/pipelines/ddim/__init__.py
index 85e8118e..8fd31868 100644
--- a/src/diffusers/pipelines/ddim/__init__.py
+++ b/src/diffusers/pipelines/ddim/__init__.py
@@ -1 +1,2 @@
+# flake8: noqa
 from .pipeline_ddim import DDIMPipeline
diff --git a/src/diffusers/pipelines/ddpm/__init__.py b/src/diffusers/pipelines/ddpm/__init__.py
index bb228ee0..8889bdae 100644
--- a/src/diffusers/pipelines/ddpm/__init__.py
+++ b/src/diffusers/pipelines/ddpm/__init__.py
@@ -1 +1,2 @@
+# flake8: noqa
 from .pipeline_ddpm import DDPMPipeline
diff --git a/src/diffusers/pipelines/latent_diffusion/__init__.py b/src/diffusers/pipelines/latent_diffusion/__init__.py
index ae3ab390..c481b38c 100644
--- a/src/diffusers/pipelines/latent_diffusion/__init__.py
+++ b/src/diffusers/pipelines/latent_diffusion/__init__.py
@@ -1,3 +1,4 @@
+# flake8: noqa
 from ...utils import is_transformers_available
 
 
diff --git a/src/diffusers/pipelines/latent_diffusion_uncond/__init__.py b/src/diffusers/pipelines/latent_diffusion_uncond/__init__.py
index 1b9fc527..0826ca75 100644
--- a/src/diffusers/pipelines/latent_diffusion_uncond/__init__.py
+++ b/src/diffusers/pipelines/latent_diffusion_uncond/__init__.py
@@ -1 +1,2 @@
+# flake8: noqa
 from .pipeline_latent_diffusion_uncond import LDMPipeline
diff --git a/src/diffusers/pipelines/pndm/__init__.py b/src/diffusers/pipelines/pndm/__init__.py
index 488eb4f5..6fc46aaa 100644
--- a/src/diffusers/pipelines/pndm/__init__.py
+++ b/src/diffusers/pipelines/pndm/__init__.py
@@ -1 +1,2 @@
+# flake8: noqa
 from .pipeline_pndm import PNDMPipeline
diff --git a/src/diffusers/pipelines/score_sde_ve/__init__.py b/src/diffusers/pipelines/score_sde_ve/__init__.py
index c7c2a85c..000d61f6 100644
--- a/src/diffusers/pipelines/score_sde_ve/__init__.py
+++ b/src/diffusers/pipelines/score_sde_ve/__init__.py
@@ -1 +1,2 @@
+# flake8: noqa
 from .pipeline_score_sde_ve import ScoreSdeVePipeline
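The `# flake8: noqa` lines added across these pipeline `__init__` modules exist because the files only re-export public classes, so every import would otherwise be flagged F401 ("imported but unused"). The per-line alternative silences just F401 and keeps other checks alive, but would have to be repeated on every export (illustrative snippet, not part of the patch):

# Per-line alternative to a module-wide `# flake8: noqa`:
from .pipeline_ddim import DDIMPipeline  # noqa: F401

The module-level directive chosen here is terser, at the cost the added comment spells out: flake8 skips the whole file, other warnings included.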
diff --git a/src/diffusers/pipelines/stable_diffusion/__init__.py b/src/diffusers/pipelines/stable_diffusion/__init__.py
index 718ae587..5e48f6f5 100644
--- a/src/diffusers/pipelines/stable_diffusion/__init__.py
+++ b/src/diffusers/pipelines/stable_diffusion/__init__.py
@@ -1,3 +1,4 @@
+# flake8: noqa
 from ...utils import is_transformers_available
 
 
diff --git a/src/diffusers/pipelines/stochatic_karras_ve/__init__.py b/src/diffusers/pipelines/stochatic_karras_ve/__init__.py
index 5a63c1d2..db258204 100644
--- a/src/diffusers/pipelines/stochatic_karras_ve/__init__.py
+++ b/src/diffusers/pipelines/stochatic_karras_ve/__init__.py
@@ -1 +1,2 @@
+# flake8: noqa
 from .pipeline_stochastic_karras_ve import KarrasVePipeline
diff --git a/src/diffusers/schedulers/scheduling_lms_discrete.py b/src/diffusers/schedulers/scheduling_lms_discrete.py
index f13c9b9b..55dd3dbe 100644
--- a/src/diffusers/schedulers/scheduling_lms_discrete.py
+++ b/src/diffusers/schedulers/scheduling_lms_discrete.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import List, Union
+from typing import Union
 
 import numpy as np
 import torch
diff --git a/src/diffusers/utils/dummy_scipy_objects.py b/src/diffusers/utils/dummy_scipy_objects.py
index 8e0b8cb3..889baf67 100644
--- a/src/diffusers/utils/dummy_scipy_objects.py
+++ b/src/diffusers/utils/dummy_scipy_objects.py
@@ -8,17 +8,3 @@ class LMSDiscreteScheduler(metaclass=DummyObject):
 
     def __init__(self, *args, **kwargs):
         requires_backends(self, ["scipy"])
-
-
-class LDMTextToImagePipeline(metaclass=DummyObject):
-    _backends = ["scipy"]
-
-    def __init__(self, *args, **kwargs):
-        requires_backends(self, ["scipy"])
-
-
-class StableDiffusionPipeline(metaclass=DummyObject):
-    _backends = ["scipy"]
-
-    def __init__(self, *args, **kwargs):
-        requires_backends(self, ["scipy"])
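The dummies trimmed above implement diffusers' optional-dependency pattern: when a backend such as scipy is absent, a placeholder class with the right name is still importable but raises the moment it is touched. After this patch only `LMSDiscreteScheduler`, the one object that actually needs scipy, keeps a dummy. A simplified sketch of the mechanism, assuming a hypothetical `requires_backends` built on `importlib` (the real helpers live in `diffusers.utils` and differ in detail):

import importlib.util


def requires_backends(obj, backends):
    # Raise if any required backend is not installed.
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        name = obj if isinstance(obj, str) else type(obj).__name__
        raise ImportError(f"{name} requires the missing backend(s): {missing}")


class DummyObject(type):
    # Metaclass: any attribute access on the placeholder class re-checks the
    # backends, so failures happen at use time rather than import time.
    def __getattr__(cls, key):
        requires_backends(cls.__name__, cls._backends)


class LMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["scipy"])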
diff --git a/tests/test_layers_utils.py b/tests/test_layers_utils.py
index 5e9adb82..a94ecd58 100755
--- a/tests/test_layers_utils.py
+++ b/tests/test_layers_utils.py
@@ -14,16 +14,13 @@
 # limitations under the License.
 
 
-import inspect
-import tempfile
 import unittest
 
 import numpy as np
 import torch
 
 from diffusers.models.embeddings import get_timestep_embedding
-from diffusers.models.resnet import Downsample1D, Downsample2D, Upsample1D, Upsample2D
-from diffusers.testing_utils import floats_tensor, slow, torch_device
+from diffusers.models.resnet import Downsample2D, Upsample2D
 
 
 torch.backends.cuda.matmul.allow_tf32 = False
@@ -219,108 +216,3 @@ class Downsample2DBlockTests(unittest.TestCase):
         output_slice = downsampled[0, -1, -3:, -3:]
         expected_slice = torch.tensor([-0.6586, 0.5985, 0.0721, 0.1256, -0.1492, 0.4436, -0.2544, 0.5021, 1.1522])
         assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3)
-
-
-class Upsample1DBlockTests(unittest.TestCase):
-    def test_upsample_default(self):
-        torch.manual_seed(0)
-        sample = torch.randn(1, 32, 32)
-        upsample = Upsample1D(channels=32, use_conv=False)
-        with torch.no_grad():
-            upsampled = upsample(sample)
-
-        assert upsampled.shape == (1, 32, 64)
-        output_slice = upsampled[0, -1, -8:]
-        expected_slice = torch.tensor([-1.6340, -1.6340, 0.5374, 0.5374, 1.0826, 1.0826, -1.7105, -1.7105])
-        assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3)
-
-    def test_upsample_with_conv(self):
-        torch.manual_seed(0)
-        sample = torch.randn(1, 32, 32)
-        upsample = Upsample1D(channels=32, use_conv=True)
-        with torch.no_grad():
-            upsampled = upsample(sample)
-
-        assert upsampled.shape == (1, 32, 64)
-        output_slice = upsampled[0, -1, -8:]
-        expected_slice = torch.tensor([-0.4546, -0.5010, -0.2996, 0.2844, 0.4040, -0.7772, -0.6862, 0.3612])
-        assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3)
-
-    def test_upsample_with_conv_out_dim(self):
-        torch.manual_seed(0)
-        sample = torch.randn(1, 32, 32)
-        upsample = Upsample1D(channels=32, use_conv=True, out_channels=64)
-        with torch.no_grad():
-            upsampled = upsample(sample)
-
-        assert upsampled.shape == (1, 64, 64)
-        output_slice = upsampled[0, -1, -8:]
-        expected_slice = torch.tensor([-0.0516, -0.0972, 0.9740, 1.1883, 0.4539, -0.5285, -0.5851, 0.1152])
-        assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3)
-
-    def test_upsample_with_transpose(self):
-        torch.manual_seed(0)
-        sample = torch.randn(1, 32, 32)
-        upsample = Upsample1D(channels=32, use_conv=False, use_conv_transpose=True)
-        with torch.no_grad():
-            upsampled = upsample(sample)
-
-        assert upsampled.shape == (1, 32, 64)
-        output_slice = upsampled[0, -1, -8:]
-        expected_slice = torch.tensor([-0.2238, -0.5842, -0.7165, 0.6699, 0.1033, -0.4269, -0.8974, -0.3716])
-        assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3)
-
-
-class Downsample1DBlockTests(unittest.TestCase):
-    def test_downsample_default(self):
-        torch.manual_seed(0)
-        sample = torch.randn(1, 32, 64)
-        downsample = Downsample1D(channels=32, use_conv=False)
-        with torch.no_grad():
-            downsampled = downsample(sample)
-
-        assert downsampled.shape == (1, 32, 32)
-        output_slice = downsampled[0, -1, -8:]
-        expected_slice = torch.tensor([-0.8796, 1.0945, -0.3434, 0.2910, 0.3391, -0.4488, -0.9568, -0.2909])
-        max_diff = (output_slice.flatten() - expected_slice).abs().sum().item()
-        assert max_diff <= 1e-3
-        # assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-1)
-
-    def test_downsample_with_conv(self):
-        torch.manual_seed(0)
-        sample = torch.randn(1, 32, 64)
-        downsample = Downsample1D(channels=32, use_conv=True)
-        with torch.no_grad():
-            downsampled = downsample(sample)
-
-        assert downsampled.shape == (1, 32, 32)
-        output_slice = downsampled[0, -1, -8:]
-
-        expected_slice = torch.tensor(
-            [0.1723, 0.0811, -0.6205, -0.3045, 0.0666, -0.2381, -0.0238, 0.2834],
-        )
-        assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3)
-
-    def test_downsample_with_conv_pad1(self):
-        torch.manual_seed(0)
-        sample = torch.randn(1, 32, 64)
-        downsample = Downsample1D(channels=32, use_conv=True, padding=1)
-        with torch.no_grad():
-            downsampled = downsample(sample)
-
-        assert downsampled.shape == (1, 32, 32)
-        output_slice = downsampled[0, -1, -8:]
-        expected_slice = torch.tensor([0.1723, 0.0811, -0.6205, -0.3045, 0.0666, -0.2381, -0.0238, 0.2834])
-        assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3)
-
-    def test_downsample_with_conv_out_dim(self):
-        torch.manual_seed(0)
-        sample = torch.randn(1, 32, 64)
-        downsample = Downsample1D(channels=32, use_conv=True, out_channels=16)
-        with torch.no_grad():
-            downsampled = downsample(sample)
-
-        assert downsampled.shape == (1, 16, 32)
-        output_slice = downsampled[0, -1, -8:]
-        expected_slice = torch.tensor([1.1067, -0.5255, -0.4451, 0.0487, -0.3664, -0.7945, -0.4495, -0.3129])
-        assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3)
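Both the deleted 1D tests and the surviving 2D tests use the same seeded golden-slice pattern: fix the RNG, run the block once under `torch.no_grad()`, and compare a small output slice to hard-coded references with a loose tolerance (the removed `test_downsample_default` even compared a summed absolute difference, despite its `max_diff` name). A sketch of the pattern against the `Upsample2D` block that stays in the suite; reference values are deliberately left out because they must be captured from a trusted run:

import unittest

import torch

from diffusers.models.resnet import Upsample2D


class Upsample2DPatternTest(unittest.TestCase):
    def test_upsample_default(self):
        torch.manual_seed(0)  # fixed seed -> reproducible random input
        sample = torch.randn(1, 32, 32, 32)
        upsample = Upsample2D(channels=32, use_conv=False)
        with torch.no_grad():
            upsampled = upsample(sample)

        # Nearest-neighbour upsampling doubles both spatial dimensions.
        assert upsampled.shape == (1, 32, 64, 64)

        # A full golden-slice test would now pin a few values, e.g.:
        #   expected_slice = torch.tensor([...])  # captured from a trusted run
        #   assert torch.allclose(
        #       upsampled[0, -1, -3:, -3:].flatten(), expected_slice, atol=1e-3
        #   )

The `atol=1e-3` used throughout absorbs small numerical drift across devices and PyTorch versions.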