parent 5493524b71
commit 4deb16e830

README.md
@@ -74,11 +74,14 @@ You need to accept the model license before downloading or using the Stable Diff
 ### Text-to-Image generation with Stable Diffusion
 
+We recommend using the model in [half-precision (`fp16`)](https://pytorch.org/blog/accelerating-training-on-nvidia-gpus-with-pytorch-automatic-mixed-precision/) as it gives almost always the same results as full
+precision while being roughly twice as fast and requiring half the amount of GPU RAM.
+
 ```python
 # make sure you're logged in with `huggingface-cli login`
+import torch
 from diffusers import StableDiffusionPipeline
 
-pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
+pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16, revision="fp16")
 pipe = pipe.to("cuda")
 
 prompt = "a photo of an astronaut riding a horse on mars"
@@ -105,8 +108,8 @@ prompt = "a photo of an astronaut riding a horse on mars"
 image = pipe(prompt).images[0]
 ```
 
-If you are limited by GPU memory, you might want to consider using the model in `fp16` as
-well as chunking the attention computation.
+If you are limited by GPU memory, you might want to consider chunking the attention computation in addition
+to using `fp16`.
 The following snippet should result in less than 4GB VRAM.
 
 ```python
@@ -122,7 +125,7 @@ pipe.enable_attention_slicing()
 image = pipe(prompt).images[0]
 ```
 
-Finally, if you wish to use a different scheduler, you can simply instantiate
+If you wish to use a different scheduler, you can simply instantiate
 it before the pipeline and pass it to `from_pretrained`.
 
 ```python
@@ -148,6 +151,24 @@ image = pipe(prompt).images[0]
 image.save("astronaut_rides_horse.png")
 ```
 
+If you want to run Stable Diffusion on CPU or you want to have maximum precision on GPU,
+please run the model in the default *full-precision* setting:
+
+```python
+# make sure you're logged in with `huggingface-cli login`
+from diffusers import StableDiffusionPipeline
+
+pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
+
+# disable the following line if you run on CPU
+pipe = pipe.to("cuda")
+
+prompt = "a photo of an astronaut riding a horse on mars"
+image = pipe(prompt).images[0]
+
+image.save("astronaut_rides_horse.png")
+```
+
 ### Image-to-Image text-guided generation with Stable Diffusion
 
 The `StableDiffusionImg2ImgPipeline` lets you pass a text prompt and an initial image to condition the generation of new images.
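The two hunks above truncate the low-memory snippet that the README text points at. A minimal sketch of what that snippet plausibly looks like, assembled from the `fp16` loading and the `pipe.enable_attention_slicing()` call that appear elsewhere in this diff; the exact layout of the elided lines is an assumption:

```python
import torch
from diffusers import StableDiffusionPipeline

# Load the float16 weights from the `fp16` branch (roughly half the GPU RAM of fp32).
pipe = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    revision="fp16",
    torch_dtype=torch.float16,
)
pipe = pipe.to("cuda")

prompt = "a photo of an astronaut riding a horse on mars"

# Chunk the attention computation so peak usage stays under roughly 4GB VRAM.
pipe.enable_attention_slicing()
image = pipe(prompt).images[0]
```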
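The scheduler swap mentioned in the third hunk is likewise cut off. A hedged sketch of the described pattern, assuming the `scheduler` keyword of `from_pretrained` and `LMSDiscreteScheduler` (which a snippet further down in this diff imports); the beta values are typical Stable Diffusion settings, not taken from this diff:

```python
from diffusers import StableDiffusionPipeline, LMSDiscreteScheduler

# Instantiate the scheduler first, then hand it to the pipeline.
lms = LMSDiscreteScheduler(
    beta_start=0.00085,  # assumed values, adjust to your model's config
    beta_end=0.012,
    beta_schedule="scaled_linear",
)

pipe = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    scheduler=lms,
)
pipe = pipe.to("cuda")
```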
@@ -98,14 +98,12 @@ logic including pre-processing, an unrolled diffusion loop, and post-processing
 ```python
 # make sure you're logged in with `huggingface-cli login`
-from torch import autocast
 from diffusers import StableDiffusionPipeline, LMSDiscreteScheduler
 
 pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
 pipe = pipe.to("cuda")
 
 prompt = "a photo of an astronaut riding a horse on mars"
-with autocast("cuda"):
-    image = pipe(prompt).images[0]
+image = pipe(prompt).images[0]
 
 image.save("astronaut_rides_horse.png")
@@ -116,7 +114,6 @@ image.save("astronaut_rides_horse.png")
 The `StableDiffusionImg2ImgPipeline` lets you pass a text prompt and an initial image to condition the generation of new images.
 
 ```python
-from torch import autocast
 import requests
 from PIL import Image
 from io import BytesIO
@@ -138,7 +135,6 @@ init_image = init_image.resize((768, 512))
 
 prompt = "A fantasy landscape, trending on artstation"
 
-with autocast("cuda"):
-    images = pipe(prompt=prompt, init_image=init_image, strength=0.75, guidance_scale=7.5).images
+images = pipe(prompt=prompt, init_image=init_image, strength=0.75, guidance_scale=7.5).images
 
 images[0].save("fantasy_landscape.png")
@@ -157,7 +153,6 @@ The `StableDiffusionInpaintPipeline` lets you edit specific parts of an image by
 ```python
 from io import BytesIO
 
-from torch import autocast
 import requests
 import PIL
 
@@ -181,7 +176,6 @@ pipe = StableDiffusionInpaintPipeline.from_pretrained(
 ).to(device)
 
 prompt = "a cat sitting on a bench"
-with autocast("cuda"):
-    images = pipe(prompt=prompt, init_image=init_image, mask_image=mask_image, strength=0.75).images
+images = pipe(prompt=prompt, init_image=init_image, mask_image=mask_image, strength=0.75).images
 
 images[0].save("cat_on_bench.png")
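Assembled into one piece, the image-to-image snippet reads roughly as follows once `autocast` is gone. The checkpoint id and the call arguments come from this diff; the `from_pretrained` call for `StableDiffusionImg2ImgPipeline` and the input-image URL sit in elided lines, so both are assumptions here:

```python
from io import BytesIO

import requests
import torch
from PIL import Image
from diffusers import StableDiffusionImg2ImgPipeline

# Assumed loading call; the elided lines of the document define the actual one.
pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    revision="fp16",
    torch_dtype=torch.float16,
)
pipe = pipe.to("cuda")

# Placeholder URL; the real input-image URL is elided in the diff.
url = "https://example.com/sketch-mountains-input.jpg"
response = requests.get(url)
init_image = Image.open(BytesIO(response.content)).convert("RGB")
init_image = init_image.resize((768, 512))

prompt = "A fantasy landscape, trending on artstation"

# No autocast context manager: the pipeline runs in the dtype it was loaded with.
images = pipe(prompt=prompt, init_image=init_image, strength=0.75, guidance_scale=7.5).images
images[0].save("fantasy_landscape.png")
```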
@@ -68,7 +68,7 @@ Despite the precision loss, in our experience the final image results look the s
 
 ## Half precision weights
 
-To save more GPU memory, you can load the model weights directly in half precision. This involves loading the float16 version of the weights, which was saved to a branch named `fp16`, and telling PyTorch to use the `float16` type when loading them:
+To save more GPU memory and get even more speed, you can load and run the model weights directly in half precision. This involves loading the float16 version of the weights, which was saved to a branch named `fp16`, and telling PyTorch to use the `float16` type when loading them:
 
 ```Python
 pipe = StableDiffusionPipeline.from_pretrained(
@@ -76,6 +76,10 @@ pipe = StableDiffusionPipeline.from_pretrained(
     revision="fp16",
     torch_dtype=torch.float16,
 )
+pipe = pipe.to("cuda")
+
+prompt = "a photo of an astronaut riding a horse on mars"
+image = pipe(prompt).images[0]
 ```
 
 ## Sliced attention for additional memory savings
@@ -101,7 +105,6 @@ pipe = pipe.to("cuda")
 
 prompt = "a photo of an astronaut riding a horse on mars"
 pipe.enable_attention_slicing()
-with torch.autocast("cuda"):
-    image = pipe(prompt).images[0]
+image = pipe(prompt).images[0]
 ```
 
@@ -109,7 +109,6 @@ A full training run takes ~1 hour on one V100 GPU.
 Once you have trained a model using above command, the inference can be done simply using the `StableDiffusionPipeline`. Make sure to include the `placeholder_token` in your prompt.
 
 ```python
-from torch import autocast
 from diffusers import StableDiffusionPipeline
 
 model_id = "path-to-your-trained-model"
@@ -117,7 +116,6 @@ pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float
 
 prompt = "A <cat-toy> backpack"
 
-with autocast("cuda"):
-    image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
+image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
 
 image.save("cat-backpack.png")
@@ -15,7 +15,6 @@ specific language governing permissions and limitations under the License.
 The [`StableDiffusionImg2ImgPipeline`] lets you pass a text prompt and an initial image to condition the generation of new images.
 
 ```python
-from torch import autocast
 import requests
 from PIL import Image
 from io import BytesIO
@@ -37,7 +36,6 @@ init_image = init_image.resize((768, 512))
 
 prompt = "A fantasy landscape, trending on artstation"
 
-with autocast("cuda"):
-    images = pipe(prompt=prompt, init_image=init_image, strength=0.75, guidance_scale=7.5).images
+images = pipe(prompt=prompt, init_image=init_image, strength=0.75, guidance_scale=7.5).images
 
 images[0].save("fantasy_landscape.png")
@@ -17,7 +17,6 @@ The [`StableDiffusionInpaintPipeline`] lets you edit specific parts of an image
 ```python
 from io import BytesIO
 
-from torch import autocast
 import requests
 import PIL
 
@@ -41,7 +40,6 @@ pipe = StableDiffusionInpaintPipeline.from_pretrained(
 ).to(device)
 
 prompt = "a cat sitting on a bench"
-with autocast("cuda"):
-    images = pipe(prompt=prompt, init_image=init_image, mask_image=mask_image, strength=0.75).images
+images = pipe(prompt=prompt, init_image=init_image, mask_image=mask_image, strength=0.75).images
 
 images[0].save("cat_on_bench.png")
@@ -125,8 +125,6 @@ accelerate launch train_dreambooth.py \
 Once you have trained a model using above command, the inference can be done simply using the `StableDiffusionPipeline`. Make sure to include the `identifier`(e.g. sks in above example) in your prompt.
 
 ```python
-
-from torch import autocast
 from diffusers import StableDiffusionPipeline
 import torch
 
@@ -134,8 +132,6 @@ model_id = "path-to-your-trained-model"
 pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
 
 prompt = "A photo of sks dog in a bucket"
-
-with autocast("cuda"):
-    image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
+image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
 
 image.save("dog-bucket.png")
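Assembled from the context and changed lines above, the DreamBooth inference snippet after this commit reads roughly like this; nothing below goes beyond what the hunks show:

```python
import torch
from diffusers import StableDiffusionPipeline

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A photo of sks dog in a bucket"
# The pipeline already runs in float16, so no autocast context manager is needed.
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save("dog-bucket.png")
```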
@@ -1,7 +1,6 @@
 import argparse
 import math
 import os
-from contextlib import nullcontext
 from pathlib import Path
 from typing import Optional
 
@@ -346,11 +345,9 @@ def main():
            sample_dataloader = accelerator.prepare(sample_dataloader)
            pipeline.to(accelerator.device)
 
-            context = torch.autocast("cuda") if accelerator.device.type == "cuda" else nullcontext
            for example in tqdm(
                sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process
            ):
-                with context:
-                    images = pipeline(example["prompt"]).images
+                images = pipeline(example["prompt"]).images
 
                for i, image in enumerate(images):
@@ -74,8 +74,6 @@ A full training run takes ~1 hour on one V100 GPU.
 Once you have trained a model using above command, the inference can be done simply using the `StableDiffusionPipeline`. Make sure to include the `placeholder_token` in your prompt.
 
 ```python
-
-from torch import autocast
 from diffusers import StableDiffusionPipeline
 
 model_id = "path-to-your-trained-model"
@@ -83,7 +81,6 @@ pipe = StableDiffusionPipeline.from_pretrained(model_id,torch_dtype=torch.float1
 
 prompt = "A <cat-toy> backpack"
 
-with autocast("cuda"):
-    image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
+image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
 
 image.save("cat-backpack.png")