No more use_auth_token=True (#733)
* up
* uP
* uP
* make style
* Apply suggestions from code review
* up
* finish
parent: 3dcc75cbd4
commit: 78744b6a8f
README.md (10 changed lines)
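After this change, the README examples assume you are already authenticated on the Hub (for example via `huggingface-cli login`) instead of passing `use_auth_token=True` to every call. A minimal sketch of the updated text-to-image usage, mirroring the README diff below (the final `image = ...` line is an illustrative assumption, not part of this diff):

```python
# make sure you're logged in with `huggingface-cli login`
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
pipe = pipe.to("cuda")

prompt = "a photo of an astronaut riding a horse on mars"
image = pipe(prompt).images[0]  # assumes the pipeline output exposes `.images`
```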
@@ -78,7 +78,7 @@ You need to accept the model license before downloading or using the Stable Diff
 # make sure you're logged in with `huggingface-cli login`
 from diffusers import StableDiffusionPipeline

-pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", use_auth_token=True)
+pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
 pipe = pipe.to("cuda")

 prompt = "a photo of an astronaut riding a horse on mars"

@@ -114,7 +114,6 @@ pipe = StableDiffusionPipeline.from_pretrained(
     "CompVis/stable-diffusion-v1-4",
     revision="fp16",
     torch_dtype=torch.float16,
-    use_auth_token=True
 )
 pipe = pipe.to("cuda")

@@ -140,7 +139,6 @@ pipe = StableDiffusionPipeline.from_pretrained(
     revision="fp16",
     torch_dtype=torch.float16,
     scheduler=lms,
-    use_auth_token=True
 )
 pipe = pipe.to("cuda")

@@ -169,10 +167,9 @@ pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
     model_id_or_path,
     revision="fp16",
     torch_dtype=torch.float16,
-    use_auth_token=True
 )
 # or download via git clone https://huggingface.co/CompVis/stable-diffusion-v1-4
-# and pass `model_id_or_path="./stable-diffusion-v1-4"` without having to use `use_auth_token=True`.
+# and pass `model_id_or_path="./stable-diffusion-v1-4"`.
 pipe = pipe.to(device)

 # let's download an initial image

@@ -219,10 +216,9 @@ pipe = StableDiffusionInpaintPipeline.from_pretrained(
     model_id_or_path,
     revision="fp16",
     torch_dtype=torch.float16,
-    use_auth_token=True
 )
 # or download via git clone https://huggingface.co/CompVis/stable-diffusion-v1-4
-# and pass `model_id_or_path="./stable-diffusion-v1-4"` without having to use `use_auth_token=True`.
+# and pass `model_id_or_path="./stable-diffusion-v1-4"`.
 pipe = pipe.to(device)

 prompt = "a cat sitting on a bench"
@@ -101,7 +101,7 @@ logic including pre-processing, an unrolled diffusion loop, and post-processing
 from torch import autocast
 from diffusers import StableDiffusionPipeline, LMSDiscreteScheduler

-pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", use_auth_token=True)
+pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
 pipe = pipe.to("cuda")

 prompt = "a photo of an astronaut riding a horse on mars"

@@ -126,7 +126,7 @@ from diffusers import StableDiffusionImg2ImgPipeline
 # load the pipeline
 device = "cuda"
 pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
-    "CompVis/stable-diffusion-v1-4", revision="fp16", torch_dtype=torch.float16, use_auth_token=True
+    "CompVis/stable-diffusion-v1-4", revision="fp16", torch_dtype=torch.float16
 ).to(device)

 # let's download an initial image

@@ -177,7 +177,7 @@ mask_image = download_image(mask_url).resize((512, 512))

 device = "cuda"
 pipe = StableDiffusionInpaintPipeline.from_pretrained(
-    "CompVis/stable-diffusion-v1-4", revision="fp16", torch_dtype=torch.float16, use_auth_token=True
+    "CompVis/stable-diffusion-v1-4", revision="fp16", torch_dtype=torch.float16
 ).to(device)

 prompt = "a cat sitting on a bench"
@@ -56,7 +56,7 @@ If you use a CUDA GPU, you can take advantage of `torch.autocast` to perform inf
 from torch import autocast
 from diffusers import StableDiffusionPipeline

-pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", use_auth_token=True)
+pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
 pipe = pipe.to("cuda")

 prompt = "a photo of an astronaut riding a horse on mars"

@@ -75,7 +75,6 @@ pipe = StableDiffusionPipeline.from_pretrained(
     "CompVis/stable-diffusion-v1-4",
     revision="fp16",
     torch_dtype=torch.float16,
-    use_auth_token=True
 )
 ```

@@ -97,7 +96,6 @@ pipe = StableDiffusionPipeline.from_pretrained(
     "CompVis/stable-diffusion-v1-4",
     revision="fp16",
     torch_dtype=torch.float16,
-    use_auth_token=True
 )
 pipe = pipe.to("cuda")

@@ -152,8 +150,6 @@ def generate_inputs():

 pipe = StableDiffusionPipeline.from_pretrained(
     "CompVis/stable-diffusion-v1-4",
-    # scheduler=scheduler,
-    use_auth_token=True,
     revision="fp16",
     torch_dtype=torch.float16,
 ).to("cuda")

@@ -218,8 +214,6 @@ class UNet2DConditionOutput:

 pipe = StableDiffusionPipeline.from_pretrained(
     "CompVis/stable-diffusion-v1-4",
-    # scheduler=scheduler,
-    use_auth_token=True,
     revision="fp16",
     torch_dtype=torch.float16,
 ).to("cuda")
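For reference, a minimal sketch of the half-precision load as it reads after the hunks above, with the `use_auth_token` argument dropped; it assumes a CUDA device and that the `fp16` weights revision is available:

```python
import torch
from diffusers import StableDiffusionPipeline

# assumes you are already logged in via `huggingface-cli login`
pipe = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    revision="fp16",
    torch_dtype=torch.float16,
)
pipe = pipe.to("cuda")
```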
@@ -31,7 +31,7 @@ We recommend to "prime" the pipeline using an additional one-time pass through i
 # make sure you're logged in with `huggingface-cli login`
 from diffusers import StableDiffusionPipeline

-pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", use_auth_token=True)
+pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
 pipe = pipe.to("mps")

 prompt = "a photo of an astronaut riding a horse on mars"
@@ -31,7 +31,6 @@ pipe = StableDiffusionOnnxPipeline.from_pretrained(
     "CompVis/stable-diffusion-v1-4",
     revision="onnx",
     provider="CUDAExecutionProvider",
-    use_auth_token=True,
 )

 prompt = "a photo of an astronaut riding a horse on mars"
@@ -25,7 +25,7 @@ from diffusers import StableDiffusionImg2ImgPipeline
 # load the pipeline
 device = "cuda"
 pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
-    "CompVis/stable-diffusion-v1-4", revision="fp16", torch_dtype=torch.float16, use_auth_token=True
+    "CompVis/stable-diffusion-v1-4", revision="fp16", torch_dtype=torch.float16
 ).to(device)

 # let's download an initial image
@@ -37,7 +37,7 @@ mask_image = download_image(mask_url).resize((512, 512))

 device = "cuda"
 pipe = StableDiffusionInpaintPipeline.from_pretrained(
-    "CompVis/stable-diffusion-v1-4", revision="fp16", torch_dtype=torch.float16, use_auth_token=True
+    "CompVis/stable-diffusion-v1-4", revision="fp16", torch_dtype=torch.float16
 ).to(device)

 prompt = "a cat sitting on a bench"
@@ -83,7 +83,6 @@ def main(args):
             args.dataset_name,
             args.dataset_config_name,
             cache_dir=args.cache_dir,
-            use_auth_token=True if args.use_auth_token else None,
             split="train",
         )
     else:

@@ -222,7 +221,6 @@ if __name__ == "__main__":
     parser.add_argument("--ema_power", type=float, default=3 / 4)
     parser.add_argument("--ema_max_decay", type=float, default=0.9999)
     parser.add_argument("--push_to_hub", action="store_true")
-    parser.add_argument("--use_auth_token", action="store_true")
    parser.add_argument("--hub_token", type=str, default=None)
    parser.add_argument("--hub_model_id", type=str, default=None)
    parser.add_argument("--hub_private_repo", action="store_true")
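With the `--use_auth_token` flag removed from the training example, the script relies on the token cached by `huggingface-cli login` for any private or gated dataset. A minimal sketch of the simplified `load_dataset` call under that assumption (the dataset name below is a hypothetical placeholder, not taken from this diff):

```python
from datasets import load_dataset

# assumes `huggingface-cli login` has already stored a token if the dataset is gated/private
dataset = load_dataset(
    "huggan/smithsonian_butterflies_subset",  # hypothetical example dataset
    None,                                     # dataset config name, as in the script
    cache_dir=None,
    split="train",
)
```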
@@ -70,7 +70,7 @@ def onnx_export(

 @torch.no_grad()
 def convert_models(model_path: str, output_path: str, opset: int):
-    pipeline = StableDiffusionPipeline.from_pretrained(model_path, use_auth_token=True)
+    pipeline = StableDiffusionPipeline.from_pretrained(model_path)
     output_path = Path(output_path)

     # TEXT ENCODER
setup.py (2 changed lines)
@@ -86,7 +86,7 @@ _deps = [
     "flake8>=3.8.3",
     "flax>=0.4.1",
     "hf-doc-builder>=0.3.0",
-    "huggingface-hub>=0.9.1",
+    "huggingface-hub>=0.10.0",
     "importlib_metadata",
     "isort>=5.5.4",
     "jax>=0.2.8,!=0.3.2,<=0.3.6",
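The dependency floor moves to `huggingface-hub>=0.10.0`; with a token cached by `huggingface-cli login`, `from_pretrained` can download gated models without an explicit `use_auth_token=True`. A hedged sketch of authenticating once programmatically, assuming your installed `huggingface_hub` exposes `login` (otherwise run `huggingface-cli login` in a shell):

```python
from huggingface_hub import login
from diffusers import StableDiffusionPipeline

# store the token locally once; later downloads reuse it automatically
login(token="hf_xxx")  # hypothetical placeholder token

pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
```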
@@ -145,7 +145,8 @@ class ConfigMixin:

         <Tip>

-        Passing `use_auth_token=True`` is required when you want to use a private model.
+        It is required to be logged in (`huggingface-cli login`) when you want to use private or [gated
+        models](https://huggingface.co/docs/hub/models-gated#gated-models).

         </Tip>

@@ -238,7 +239,7 @@ class ConfigMixin:
                 f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier"
                 " listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a"
                 " token having permission to this repo with `use_auth_token` or log in with `huggingface-cli"
-                " login` and pass `use_auth_token=True`."
+                " login`."
             )
         except RevisionNotFoundError:
             raise EnvironmentError(
@@ -10,7 +10,7 @@ deps = {
     "flake8": "flake8>=3.8.3",
     "flax": "flax>=0.4.1",
     "hf-doc-builder": "hf-doc-builder>=0.3.0",
-    "huggingface-hub": "huggingface-hub>=0.9.1",
+    "huggingface-hub": "huggingface-hub>=0.10.0",
     "importlib_metadata": "importlib_metadata",
     "isort": "isort>=5.5.4",
     "jax": "jax>=0.2.8,!=0.3.2,<=0.3.6",
@@ -198,7 +198,7 @@ def get_cached_module_file(

     <Tip>

-    Passing `use_auth_token=True` is required when you want to use a private model.
+    Passing `` is required when you want to use a private model.

     </Tip>

@@ -306,7 +306,7 @@ def get_class_from_dynamic_module(

     <Tip>

-    Passing `use_auth_token=True` is required when you want to use a private model.
+    Passing `` is required when you want to use a private model.

     </Tip>
@@ -357,7 +357,7 @@ class FlaxModelMixin:
                 f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
                 "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
                 "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
-                "login` and pass `use_auth_token=True`."
+                "login`."
             )
         except RevisionNotFoundError:
             raise EnvironmentError(
@@ -270,7 +270,8 @@ class ModelMixin(torch.nn.Module):

         <Tip>

-        Passing `use_auth_token=True`` is required when you want to use a private model.
+        It is required to be logged in (`huggingface-cli login`) when you want to use private or [gated
+        models](https://huggingface.co/docs/hub/models-gated#gated-models).

         </Tip>

@@ -338,7 +339,7 @@ class ModelMixin(torch.nn.Module):
                 f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
                 "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
                 "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
-                "login` and pass `use_auth_token=True`."
+                "login`."
             )
         except RevisionNotFoundError:
             raise EnvironmentError(
@@ -249,8 +249,8 @@ class FlaxDiffusionPipeline(ConfigMixin):

         <Tip>

-        Passing `use_auth_token=True`` is required when you want to use a private model, *e.g.*
-        `"CompVis/stable-diffusion-v1-4"`
+        It is required to be logged in (`huggingface-cli login`) when you want to use private or [gated
+        models](https://huggingface.co/docs/hub/models-gated#gated-models), *e.g.* `"CompVis/stable-diffusion-v1-4"`

         </Tip>

@@ -272,15 +272,13 @@ class FlaxDiffusionPipeline(ConfigMixin):
         >>> # Download pipeline that requires an authorization token
         >>> # For more information on access tokens, please refer to this section
         >>> # of the documentation](https://huggingface.co/docs/hub/security-tokens)
-        >>> pipeline = FlaxDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", use_auth_token=True)
+        >>> pipeline = FlaxDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")

         >>> # Download pipeline, but overwrite scheduler
         >>> from diffusers import LMSDiscreteScheduler

         >>> scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear")
-        >>> pipeline = FlaxDiffusionPipeline.from_pretrained(
-        ...     "CompVis/stable-diffusion-v1-4", scheduler=scheduler, use_auth_token=True
-        ... )
+        >>> pipeline = FlaxDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", scheduler=scheduler)
         ```
         """
         cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE)
@@ -240,8 +240,8 @@ class DiffusionPipeline(ConfigMixin):

         <Tip>

-        Passing `use_auth_token=True`` is required when you want to use a private model, *e.g.*
-        `"CompVis/stable-diffusion-v1-4"`
+        It is required to be logged in (`huggingface-cli login`) when you want to use private or [gated
+        models](https://huggingface.co/docs/hub/models-gated#gated-models), *e.g.* `"CompVis/stable-diffusion-v1-4"`

         </Tip>

@@ -263,15 +263,13 @@ class DiffusionPipeline(ConfigMixin):
         >>> # Download pipeline that requires an authorization token
         >>> # For more information on access tokens, please refer to this section
         >>> # of the documentation](https://huggingface.co/docs/hub/security-tokens)
-        >>> pipeline = DiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", use_auth_token=True)
+        >>> pipeline = DiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")

         >>> # Download pipeline, but overwrite scheduler
         >>> from diffusers import LMSDiscreteScheduler

         >>> scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear")
-        >>> pipeline = DiffusionPipeline.from_pretrained(
-        ...     "CompVis/stable-diffusion-v1-4", scheduler=scheduler, use_auth_token=True
-        ... )
+        >>> pipeline = DiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", scheduler=scheduler)
         ```
         """
         cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE)
@@ -88,7 +88,7 @@ logic including pre-processing, an unrolled diffusion loop, and post-processing
 # make sure you're logged in with `huggingface-cli login`
 from diffusers import StableDiffusionPipeline, LMSDiscreteScheduler

-pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", use_auth_token=True)
+pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
 pipe = pipe.to("cuda")

 prompt = "a photo of an astronaut riding a horse on mars"

@@ -114,7 +114,6 @@ pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
     "CompVis/stable-diffusion-v1-4",
     revision="fp16",
     torch_dtype=torch.float16,
-    use_auth_token=True
 ).to(device)

 # let's download an initial image

@@ -164,7 +163,6 @@ pipe = StableDiffusionInpaintPipeline.from_pretrained(
     "CompVis/stable-diffusion-v1-4",
     revision="fp16",
     torch_dtype=torch.float16,
-    use_auth_token=True
 ).to(device)

 prompt = "a cat sitting on a bench"
@@ -61,7 +61,7 @@ pipe = StableDiffusionPipeline.from_pretrained("./stable-diffusion-v1-4")
 # make sure you're logged in with `huggingface-cli login`
 from diffusers import StableDiffusionPipeline

-pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", use_auth_token=True)
+pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
 pipe = pipe.to("cuda")

 prompt = "a photo of an astronaut riding a horse on mars"

@@ -81,7 +81,6 @@ scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="sca
 pipe = StableDiffusionPipeline.from_pretrained(
     "CompVis/stable-diffusion-v1-4",
     scheduler=scheduler,
-    use_auth_token=True
 ).to("cuda")

 prompt = "a photo of an astronaut riding a horse on mars"

@@ -105,7 +104,6 @@ lms = LMSDiscreteScheduler(
 pipe = StableDiffusionPipeline.from_pretrained(
     "CompVis/stable-diffusion-v1-4",
     scheduler=lms,
-    use_auth_token=True
 ).to("cuda")

 prompt = "a photo of an astronaut riding a horse on mars"
@@ -1001,7 +1001,7 @@ class PipelineTesterMixin(unittest.TestCase):
     @unittest.skipIf(torch_device == "cpu", "Stable diffusion is supposed to run on GPU")
     def test_stable_diffusion(self):
         # make sure here that pndm scheduler skips prk
-        sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-1", use_auth_token=True)
+        sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-1")
         sd_pipe = sd_pipe.to(torch_device)
         sd_pipe.set_progress_bar_config(disable=None)

@@ -1023,7 +1023,7 @@ class PipelineTesterMixin(unittest.TestCase):
     @slow
     @unittest.skipIf(torch_device == "cpu", "Stable diffusion is supposed to run on GPU")
     def test_stable_diffusion_fast_ddim(self):
-        sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-1", use_auth_token=True)
+        sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-1")
         sd_pipe = sd_pipe.to(torch_device)
         sd_pipe.set_progress_bar_config(disable=None)

@@ -1158,9 +1158,9 @@ class PipelineTesterMixin(unittest.TestCase):
     @unittest.skipIf(torch_device == "cpu", "Stable diffusion is supposed to run on GPU")
     def test_lms_stable_diffusion_pipeline(self):
         model_id = "CompVis/stable-diffusion-v1-1"
-        pipe = StableDiffusionPipeline.from_pretrained(model_id, use_auth_token=True).to(torch_device)
+        pipe = StableDiffusionPipeline.from_pretrained(model_id).to(torch_device)
         pipe.set_progress_bar_config(disable=None)
-        scheduler = LMSDiscreteScheduler.from_config(model_id, subfolder="scheduler", use_auth_token=True)
+        scheduler = LMSDiscreteScheduler.from_config(model_id, subfolder="scheduler")
         pipe.scheduler = scheduler

         prompt = "a photograph of an astronaut riding a horse"

@@ -1179,9 +1179,9 @@ class PipelineTesterMixin(unittest.TestCase):
     def test_stable_diffusion_memory_chunking(self):
         torch.cuda.reset_peak_memory_stats()
         model_id = "CompVis/stable-diffusion-v1-4"
-        pipe = StableDiffusionPipeline.from_pretrained(
-            model_id, revision="fp16", torch_dtype=torch.float16, use_auth_token=True
-        ).to(torch_device)
+        pipe = StableDiffusionPipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16).to(
+            torch_device
+        )
         pipe.set_progress_bar_config(disable=None)

         prompt = "a photograph of an astronaut riding a horse"

@@ -1219,9 +1219,9 @@ class PipelineTesterMixin(unittest.TestCase):
     def test_stable_diffusion_text2img_pipeline_fp16(self):
         torch.cuda.reset_peak_memory_stats()
         model_id = "CompVis/stable-diffusion-v1-4"
-        pipe = StableDiffusionPipeline.from_pretrained(
-            model_id, revision="fp16", torch_dtype=torch.float16, use_auth_token=True
-        ).to(torch_device)
+        pipe = StableDiffusionPipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16).to(
+            torch_device
+        )
         pipe.set_progress_bar_config(disable=None)

         prompt = "a photograph of an astronaut riding a horse"

@@ -1258,7 +1258,6 @@ class PipelineTesterMixin(unittest.TestCase):
         pipe = StableDiffusionPipeline.from_pretrained(
             model_id,
             safety_checker=self.dummy_safety_checker,
-            use_auth_token=True,
         )
         pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)

@@ -1291,7 +1290,6 @@ class PipelineTesterMixin(unittest.TestCase):
         pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
             model_id,
             safety_checker=self.dummy_safety_checker,
-            use_auth_token=True,
         )
         pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)

@@ -1335,7 +1333,6 @@ class PipelineTesterMixin(unittest.TestCase):
             model_id,
             scheduler=lms,
             safety_checker=self.dummy_safety_checker,
-            use_auth_token=True,
         )
         pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)

@@ -1379,7 +1376,6 @@ class PipelineTesterMixin(unittest.TestCase):
         pipe = StableDiffusionInpaintPipeline.from_pretrained(
             model_id,
             safety_checker=self.dummy_safety_checker,
-            use_auth_token=True,
         )
         pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)

@@ -1426,7 +1422,6 @@ class PipelineTesterMixin(unittest.TestCase):
             model_id,
             scheduler=lms,
             safety_checker=self.dummy_safety_checker,
-            use_auth_token=True,
         )
         pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)

@@ -1452,7 +1447,7 @@ class PipelineTesterMixin(unittest.TestCase):
     @slow
     def test_stable_diffusion_onnx(self):
         sd_pipe = StableDiffusionOnnxPipeline.from_pretrained(
-            "CompVis/stable-diffusion-v1-4", revision="onnx", provider="CPUExecutionProvider", use_auth_token=True
+            "CompVis/stable-diffusion-v1-4", revision="onnx", provider="CPUExecutionProvider"
         )

         prompt = "A painting of a squirrel eating a burger"

@@ -1487,7 +1482,7 @@ class PipelineTesterMixin(unittest.TestCase):
         test_callback_fn.has_been_called = False

         pipe = StableDiffusionPipeline.from_pretrained(
-            "CompVis/stable-diffusion-v1-4", use_auth_token=True, revision="fp16", torch_dtype=torch.float16
+            "CompVis/stable-diffusion-v1-4", revision="fp16", torch_dtype=torch.float16
         )
         pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)

@@ -1533,7 +1528,7 @@ class PipelineTesterMixin(unittest.TestCase):
         init_image = init_image.resize((768, 512))

         pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
-            "CompVis/stable-diffusion-v1-4", use_auth_token=True, revision="fp16", torch_dtype=torch.float16
+            "CompVis/stable-diffusion-v1-4", revision="fp16", torch_dtype=torch.float16
         )
         pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)

@@ -1586,7 +1581,7 @@ class PipelineTesterMixin(unittest.TestCase):
         )

         pipe = StableDiffusionInpaintPipeline.from_pretrained(
-            "CompVis/stable-diffusion-v1-4", use_auth_token=True, revision="fp16", torch_dtype=torch.float16
+            "CompVis/stable-diffusion-v1-4", revision="fp16", torch_dtype=torch.float16
         )
         pipe.to(torch_device)
         pipe.set_progress_bar_config(disable=None)

@@ -1629,7 +1624,7 @@ class PipelineTesterMixin(unittest.TestCase):
         test_callback_fn.has_been_called = False

         pipe = StableDiffusionOnnxPipeline.from_pretrained(
-            "CompVis/stable-diffusion-v1-4", use_auth_token=True, revision="onnx", provider="CPUExecutionProvider"
+            "CompVis/stable-diffusion-v1-4", revision="onnx", provider="CPUExecutionProvider"
         )
         pipe.set_progress_bar_config(disable=None)