Attention slicing (#407)

uup
Patrick von Platen 2022-09-07 22:48:13 +02:00 committed by GitHub
parent 1a431ae886
commit 8ff777d3c1
1 changed file with 2 additions and 2 deletions

@@ -87,8 +87,8 @@ class StableDiffusionPipeline(DiffusionPipeline):
         Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go
         back to computing attention in one step.
         """
-        # set slice_size = `None` to disable `set_attention_slice`
-        self.enable_attention_slice(None)
+        # set slice_size = `None` to disable `attention slicing`
+        self.enable_attention_slicing(None)
 
     @torch.no_grad()
     def __call__(
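
For context, attention slicing computes the attention maps in sequential chunks rather than all at once, trading a little speed for a lower peak memory footprint; the commit fixes disable_attention_slicing so it calls the real enable_attention_slicing method instead of a nonexistent one. A minimal usage sketch of the two methods, assuming a CUDA device; the model id and prompt are placeholders, and everything outside the *_attention_slicing calls is ordinary diffusers usage rather than part of this commit:

import torch
from diffusers import StableDiffusionPipeline

# Load the pipeline in half precision to keep memory modest (placeholder model id).
pipe = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
).to("cuda")

# Compute attention in slices to reduce peak VRAM during generation.
pipe.enable_attention_slicing()
image = pipe("a photo of an astronaut riding a horse").images[0]

# Go back to computing attention in one step (the method fixed above).
pipe.disable_attention_slicing()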