Revive `make quality` (#203)

* Revive Make utils
* Add datasets for training too
parent f9522d825c
commit ed22b4fd07

Makefile (5 changed lines)
@@ -79,11 +79,6 @@ test:
 test-examples:
 	python -m pytest -n auto --dist=loadfile -s -v ./examples/pytorch/
 
-# Run tests for SageMaker DLC release
-
-test-sagemaker: # install sagemaker dependencies in advance with pip install .[sagemaker]
-	TEST_SAGEMAKER=True python -m pytest -n auto -s -v ./tests/sagemaker
-
 
 # Release stuff
 
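The revived `quality` target itself is outside this excerpt; as a rough sketch (an assumption based on the `extras["quality"]` pins in setup.py below, not copied from the Makefile), such a check typically amounts to running black, isort and flake8 in check-only mode:

    import subprocess

    # Hypothetical stand-in for `make quality`; the directories and flags are
    # assumptions, not taken from this commit.
    CHECK_DIRS = ["examples", "src", "tests", "utils"]

    for cmd in (
        ["black", "--check", *CHECK_DIRS],
        ["isort", "--check-only", *CHECK_DIRS],
        ["flake8", *CHECK_DIRS],
    ):
        subprocess.run(cmd, check=True)  # raises CalledProcessError if a check fails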
setup.py (17 changed lines)
@@ -77,19 +77,22 @@ from setuptools import find_packages, setup
 # 2. once modified, run: `make deps_table_update` to update src/diffusers/dependency_versions_table.py
 _deps = [
     "Pillow",
+    "accelerate>=0.11.0",
     "black~=22.0,>=22.3",
+    "datasets",
     "filelock",
     "flake8>=3.8.3",
+    "hf-doc-builder>=0.3.0",
     "huggingface-hub",
     "importlib_metadata",
     "isort>=5.5.4",
+    "modelcards==0.1.4",
     "numpy",
     "pytest",
     "regex!=2019.12.17",
     "requests",
-    "torch>=1.4",
     "tensorboard",
-    "modelcards==0.1.4"
+    "torch>=1.4",
 ]
 
 # this is a lookup table with items like:
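For reference, a minimal sketch (not the repository's exact code) of how a name-to-pin lookup table like the one in src/diffusers/dependency_versions_table.py can be derived from `_deps`, which is what `make deps_table_update` regenerates:

    import re

    _deps = ["Pillow", "accelerate>=0.11.0", "black~=22.0,>=22.3", "torch>=1.4"]

    # Map each requirement string to its bare package name, e.g.
    # {"accelerate": "accelerate>=0.11.0", "torch": "torch>=1.4", ...}
    deps = {re.split(r"[!=<>~]", spec)[0]: spec for spec in _deps}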
@@ -161,12 +164,10 @@ extras = {}
 
 extras = {}
 extras["quality"] = ["black ~= 22.0", "isort >= 5.5.4", "flake8 >= 3.8.3"]
-extras["docs"] = []
-extras["training"] = ["tensorboard", "modelcards"]
-extras["test"] = [
-    "pytest",
-]
-extras["dev"] = extras["quality"] + extras["test"] + extras["training"]
+extras["docs"] = ["hf-doc-builder"]
+extras["training"] = ["accelerate", "datasets", "tensorboard", "modelcards"]
+extras["test"] = ["pytest"]
+extras["dev"] = extras["quality"] + extras["test"] + extras["training"] + extras["docs"]
 
 install_requires = [
     deps["importlib_metadata"],
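With extras["docs"] and extras["training"] expanded and extras["dev"] now including the docs group, a development install (for example `pip install -e ".[dev]"`) also pulls in hf-doc-builder, accelerate and datasets.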
@@ -3,17 +3,19 @@
 # 2. run `make deps_table_update``
 deps = {
     "Pillow": "Pillow",
+    "accelerate": "accelerate>=0.11.0",
     "black": "black~=22.0,>=22.3",
     "filelock": "filelock",
     "flake8": "flake8>=3.8.3",
+    "hf-doc-builder": "hf-doc-builder>=0.3.0",
     "huggingface-hub": "huggingface-hub",
     "importlib_metadata": "importlib_metadata",
     "isort": "isort>=5.5.4",
+    "modelcards": "modelcards==0.1.4",
     "numpy": "numpy",
     "pytest": "pytest",
     "regex": "regex!=2019.12.17",
     "requests": "requests",
     "torch": "torch>=1.4",
     "tensorboard": "tensorboard",
-    "modelcards": "modelcards==0.1.4",
 }
@@ -18,7 +18,6 @@ import math
 from enum import Enum
 from typing import Optional, Union
 
-import torch
 from torch.optim import Optimizer
 from torch.optim.lr_scheduler import LambdaLR
 
@@ -1,4 +1,8 @@
-from ..utils import is_inflect_available, is_transformers_available, is_unidecode_available
+# flake8: noqa
+# There's no way to ignore "F401 '...' imported but unused" warnings in this
+# module, but to preserve other warnings. So, don't check this module at all.
+
+from ..utils import is_transformers_available
 from .ddim import DDIMPipeline
 from .ddpm import DDPMPipeline
 from .latent_diffusion_uncond import LDMPipeline
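The comment block added here states the motivation: these `__init__` modules exist only to re-export names, which flake8 would otherwise report as F401 ("imported but unused"). A self-contained illustration (not taken from the repository):

    # flake8: noqa
    # File-level opt-out: with this marker flake8 skips the whole module, so
    # re-export style imports like the one below are not reported as F401.
    from os.path import join as path_join  # only re-exported, never used here

    # Alternative without the blanket noqa: list the names in __all__, which
    # pyflakes also counts as a "use" of the import.
    __all__ = ["path_join"]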
@@ -1 +1,2 @@
+# flake8: noqa
 from .pipeline_ddim import DDIMPipeline
@@ -1 +1,2 @@
+# flake8: noqa
 from .pipeline_ddpm import DDPMPipeline
@@ -1,3 +1,4 @@
+# flake8: noqa
 from ...utils import is_transformers_available
 
 
@@ -1 +1,2 @@
+# flake8: noqa
 from .pipeline_latent_diffusion_uncond import LDMPipeline
@@ -1 +1,2 @@
+# flake8: noqa
 from .pipeline_pndm import PNDMPipeline
@@ -1 +1,2 @@
+# flake8: noqa
 from .pipeline_score_sde_ve import ScoreSdeVePipeline
@@ -1,3 +1,4 @@
+# flake8: noqa
 from ...utils import is_transformers_available
 
 
@@ -1 +1,2 @@
+# flake8: noqa
 from .pipeline_stochastic_karras_ve import KarrasVePipeline
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import List, Union
+from typing import Union
 
 import numpy as np
 import torch
@@ -8,17 +8,3 @@ class LMSDiscreteScheduler(metaclass=DummyObject):
 
     def __init__(self, *args, **kwargs):
         requires_backends(self, ["scipy"])
-
-
-class LDMTextToImagePipeline(metaclass=DummyObject):
-    _backends = ["scipy"]
-
-    def __init__(self, *args, **kwargs):
-        requires_backends(self, ["scipy"])
-
-
-class StableDiffusionPipeline(metaclass=DummyObject):
-    _backends = ["scipy"]
-
-    def __init__(self, *args, **kwargs):
-        requires_backends(self, ["scipy"])
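The two removed classes follow the file's dummy-object pattern. A minimal, self-contained sketch (an approximation, not the library's exact implementation) of how such placeholders behave when scipy is not installed:

    import importlib.util


    def requires_backends(obj, backends):
        """Raise an informative error if any required backend is not installed."""
        missing = [b for b in backends if importlib.util.find_spec(b) is None]
        if missing:
            name = getattr(obj, "__name__", type(obj).__name__)
            raise ImportError(f"{name} requires the missing backend(s): {', '.join(missing)}")


    class DummyObject(type):
        """Metaclass: even class-level attribute access triggers the backend check."""

        def __getattr__(cls, key):
            requires_backends(cls, cls._backends)


    class LMSDiscreteScheduler(metaclass=DummyObject):
        _backends = ["scipy"]

        def __init__(self, *args, **kwargs):
            requires_backends(self, ["scipy"])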
@@ -14,16 +14,13 @@
 # limitations under the License.
 
 
-import inspect
-import tempfile
 import unittest
 
 import numpy as np
 import torch
 
 from diffusers.models.embeddings import get_timestep_embedding
-from diffusers.models.resnet import Downsample1D, Downsample2D, Upsample1D, Upsample2D
-from diffusers.testing_utils import floats_tensor, slow, torch_device
+from diffusers.models.resnet import Downsample2D, Upsample2D
 
 
 torch.backends.cuda.matmul.allow_tf32 = False
@@ -219,108 +216,3 @@ class Downsample2DBlockTests(unittest.TestCase):
         output_slice = downsampled[0, -1, -3:, -3:]
         expected_slice = torch.tensor([-0.6586, 0.5985, 0.0721, 0.1256, -0.1492, 0.4436, -0.2544, 0.5021, 1.1522])
         assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3)
-
-
-class Upsample1DBlockTests(unittest.TestCase):
-    def test_upsample_default(self):
-        torch.manual_seed(0)
-        sample = torch.randn(1, 32, 32)
-        upsample = Upsample1D(channels=32, use_conv=False)
-        with torch.no_grad():
-            upsampled = upsample(sample)
-
-        assert upsampled.shape == (1, 32, 64)
-        output_slice = upsampled[0, -1, -8:]
-        expected_slice = torch.tensor([-1.6340, -1.6340, 0.5374, 0.5374, 1.0826, 1.0826, -1.7105, -1.7105])
-        assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3)
-
-    def test_upsample_with_conv(self):
-        torch.manual_seed(0)
-        sample = torch.randn(1, 32, 32)
-        upsample = Upsample1D(channels=32, use_conv=True)
-        with torch.no_grad():
-            upsampled = upsample(sample)
-
-        assert upsampled.shape == (1, 32, 64)
-        output_slice = upsampled[0, -1, -8:]
-        expected_slice = torch.tensor([-0.4546, -0.5010, -0.2996, 0.2844, 0.4040, -0.7772, -0.6862, 0.3612])
-        assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3)
-
-    def test_upsample_with_conv_out_dim(self):
-        torch.manual_seed(0)
-        sample = torch.randn(1, 32, 32)
-        upsample = Upsample1D(channels=32, use_conv=True, out_channels=64)
-        with torch.no_grad():
-            upsampled = upsample(sample)
-
-        assert upsampled.shape == (1, 64, 64)
-        output_slice = upsampled[0, -1, -8:]
-        expected_slice = torch.tensor([-0.0516, -0.0972, 0.9740, 1.1883, 0.4539, -0.5285, -0.5851, 0.1152])
-        assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3)
-
-    def test_upsample_with_transpose(self):
-        torch.manual_seed(0)
-        sample = torch.randn(1, 32, 32)
-        upsample = Upsample1D(channels=32, use_conv=False, use_conv_transpose=True)
-        with torch.no_grad():
-            upsampled = upsample(sample)
-
-        assert upsampled.shape == (1, 32, 64)
-        output_slice = upsampled[0, -1, -8:]
-        expected_slice = torch.tensor([-0.2238, -0.5842, -0.7165, 0.6699, 0.1033, -0.4269, -0.8974, -0.3716])
-        assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3)
-
-
-class Downsample1DBlockTests(unittest.TestCase):
-    def test_downsample_default(self):
-        torch.manual_seed(0)
-        sample = torch.randn(1, 32, 64)
-        downsample = Downsample1D(channels=32, use_conv=False)
-        with torch.no_grad():
-            downsampled = downsample(sample)
-
-        assert downsampled.shape == (1, 32, 32)
-        output_slice = downsampled[0, -1, -8:]
-        expected_slice = torch.tensor([-0.8796, 1.0945, -0.3434, 0.2910, 0.3391, -0.4488, -0.9568, -0.2909])
-        max_diff = (output_slice.flatten() - expected_slice).abs().sum().item()
-        assert max_diff <= 1e-3
-        # assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-1)
-
-    def test_downsample_with_conv(self):
-        torch.manual_seed(0)
-        sample = torch.randn(1, 32, 64)
-        downsample = Downsample1D(channels=32, use_conv=True)
-        with torch.no_grad():
-            downsampled = downsample(sample)
-
-        assert downsampled.shape == (1, 32, 32)
-        output_slice = downsampled[0, -1, -8:]
-
-        expected_slice = torch.tensor(
-            [0.1723, 0.0811, -0.6205, -0.3045, 0.0666, -0.2381, -0.0238, 0.2834],
-        )
-        assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3)
-
-    def test_downsample_with_conv_pad1(self):
-        torch.manual_seed(0)
-        sample = torch.randn(1, 32, 64)
-        downsample = Downsample1D(channels=32, use_conv=True, padding=1)
-        with torch.no_grad():
-            downsampled = downsample(sample)
-
-        assert downsampled.shape == (1, 32, 32)
-        output_slice = downsampled[0, -1, -8:]
-        expected_slice = torch.tensor([0.1723, 0.0811, -0.6205, -0.3045, 0.0666, -0.2381, -0.0238, 0.2834])
-        assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3)
-
-    def test_downsample_with_conv_out_dim(self):
-        torch.manual_seed(0)
-        sample = torch.randn(1, 32, 64)
-        downsample = Downsample1D(channels=32, use_conv=True, out_channels=16)
-        with torch.no_grad():
-            downsampled = downsample(sample)
-
-        assert downsampled.shape == (1, 16, 32)
-        output_slice = downsampled[0, -1, -8:]
-        expected_slice = torch.tensor([1.1067, -0.5255, -0.4451, 0.0487, -0.3664, -0.7945, -0.4495, -0.3129])
-        assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3)