diff --git a/tests/models/test_models_unet_3d_condition.py b/tests/models/test_models_unet_3d_condition.py
index a92b8edd..ea71ae4a 100644
--- a/tests/models/test_models_unet_3d_condition.py
+++ b/tests/models/test_models_unet_3d_condition.py
@@ -23,6 +23,7 @@ from diffusers.models.attention_processor import LoRAAttnProcessor
 from diffusers.utils import (
     floats_tensor,
     logging,
+    skip_mps,
     torch_device,
 )
 from diffusers.utils.import_utils import is_xformers_available
@@ -60,6 +61,7 @@ def create_lora_layers(model):
     return lora_attn_procs
 
 
+@skip_mps
 class UNet3DConditionModelTests(ModelTesterMixin, unittest.TestCase):
     model_class = UNet3DConditionModel
 
diff --git a/tests/pipelines/text_to_video/test_text_to_video.py b/tests/pipelines/text_to_video/test_text_to_video.py
index eb43a360..e4331fda 100644
--- a/tests/pipelines/text_to_video/test_text_to_video.py
+++ b/tests/pipelines/text_to_video/test_text_to_video.py
@@ -35,6 +35,7 @@ from ...test_pipelines_common import PipelineTesterMixin
 torch.backends.cuda.matmul.allow_tf32 = False
 
 
+@skip_mps
 class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
     pipeline_class = TextToVideoSDPipeline
     params = TEXT_TO_IMAGE_PARAMS
@@ -155,12 +156,12 @@ class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
     def test_num_images_per_prompt(self):
         pass
 
-    @skip_mps
     def test_progress_bar(self):
         return super().test_progress_bar()
 
 
 @slow
+@skip_mps
 class TextToVideoSDPipelineSlowTests(unittest.TestCase):
     def test_full_model(self):
         expected_video = load_numpy(