Merge branch 'master' into dev/deepdanbooru
commit 0ec80f0125
@@ -127,7 +127,7 @@ if not is_installed("gfpgan"):
 if not is_installed("clip"):
     run_pip(f"install {clip_package}", "clip")
 
-if not is_installed("xformers") and xformers:
+if not is_installed("xformers") and xformers and platform.python_version().startswith("3.10"):
     if platform.system() == "Windows":
         run_pip("install https://github.com/C43H66N12O12S2/stable-diffusion-webui/releases/download/a/xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl", "xformers")
     elif platform.system() == "Linux":
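The guard added above only attempts the prebuilt xformers wheel on Python 3.10, matching the cp310 tag in the wheel filename. A standalone sketch of the same version check (illustrative only, not part of the launcher):

```python
import platform

# Sketch of the added guard: the bundled wheel is built for CPython 3.10 (cp310),
# so any other interpreter version should skip the prebuilt-wheel install path.
if platform.python_version().startswith("3.10"):
    print("Python 3.10 detected: the cp310 xformers wheel applies.")
else:
    print("Not Python 3.10: skip the prebuilt xformers wheel.")
```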
@@ -13,13 +13,14 @@ import lark
 
 schedule_parser = lark.Lark(r"""
 !start: (prompt | /[][():]/+)*
-prompt: (emphasized | scheduled | plain | WHITESPACE)*
+prompt: (emphasized | scheduled | alternate | plain | WHITESPACE)*
 !emphasized: "(" prompt ")"
         | "(" prompt ":" prompt ")"
         | "[" prompt "]"
 scheduled: "[" [prompt ":"] prompt ":" [WHITESPACE] NUMBER "]"
+alternate: "[" prompt ("|" prompt)+ "]"
 WHITESPACE: /\s+/
-plain: /([^\\\[\]():]|\\.)+/
+plain: /([^\\\[\]():|]|\\.)+/
 %import common.SIGNED_NUMBER -> NUMBER
 """)
 
@@ -59,6 +60,8 @@ def get_learned_conditioning_prompt_schedules(prompts, steps):
                     tree.children[-1] *= steps
                 tree.children[-1] = min(steps, int(tree.children[-1]))
                 l.append(tree.children[-1])
+            def alternate(self, tree):
+                l.extend(range(1, steps+1))
         CollectSteps().visit(tree)
         return sorted(set(l))
 
@@ -67,6 +70,8 @@ def get_learned_conditioning_prompt_schedules(prompts, steps):
             def scheduled(self, args):
                 before, after, _, when = args
                 yield before or () if step <= when else after
+            def alternate(self, args):
+                yield next(args[(step - 1)%len(args)])
             def start(self, args):
                 def flatten(x):
                     if type(x) == str:
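The three hunks above add prompt alternation: `[a|b|c]` now parses as an `alternate` node, every sampling step becomes a schedule boundary, and at each step one option is substituted in round-robin order. A standalone sketch of the cycling rule used by the new `alternate` transformer (illustrative, without lark):

```python
# Sketch of the round-robin selection performed by the new alternate() transformer:
# steps are 1-based and the options wrap around via modulo.
def resolve_alternate(options, step):
    return options[(step - 1) % len(options)]

# "[cat|dog]" over four steps resolves to cat, dog, cat, dog.
print([resolve_alternate(["cat", "dog"], step) for step in (1, 2, 3, 4)])
```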
@@ -22,12 +22,16 @@ def apply_optimizations():
     undo_optimizations()
 
     ldm.modules.diffusionmodules.model.nonlinearity = silu
-    if cmd_opts.xformers and shared.xformers_available and not torch.version.hip:
+
+    if cmd_opts.force_enable_xformers or (cmd_opts.xformers and shared.xformers_available and torch.version.cuda and torch.cuda.get_device_capability(shared.device) == (8, 6)):
+        print("Applying xformers cross attention optimization.")
         ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.xformers_attention_forward
         ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.xformers_attnblock_forward
     elif cmd_opts.opt_split_attention_v1:
+        print("Applying v1 cross attention optimization.")
         ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_v1
     elif not cmd_opts.disable_opt_split_attention and (cmd_opts.opt_split_attention or torch.cuda.is_available()):
+        print("Applying cross attention optimization.")
         ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward
         ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.cross_attention_attnblock_forward
 
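The rewritten condition turns xformers on either when `--force-enable-xformers` is passed, or when xformers is available, torch is a CUDA build, and the GPU reports compute capability 8.6. A hedged sketch of the same decision as a free-standing helper (the function name is illustrative, not webui API):

```python
import torch

# Illustrative restatement of the gating logic above.
def should_apply_xformers(force_enable, want_xformers, xformers_available, device):
    if force_enable:
        return True  # --force-enable-xformers bypasses every check
    return (
        want_xformers
        and xformers_available
        and torch.version.cuda is not None                      # CUDA build of torch
        and torch.cuda.get_device_capability(device) == (8, 6)  # e.g. RTX 30xx GPUs
    )
```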
@@ -211,6 +211,7 @@ def cross_attention_attnblock_forward(self, x):
     return h3
 
 def xformers_attnblock_forward(self, x):
+    try:
         h_ = x
         h_ = self.norm(h_)
         q1 = self.q(h_).contiguous()
@@ -218,4 +219,6 @@ def xformers_attnblock_forward(self, x):
         v = self.v(h_).contiguous()
         out = xformers.ops.memory_efficient_attention(q1, k1, v)
         out = self.proj_out(out)
-        return x+out
+        return x + out
+    except NotImplementedError:
+        return cross_attention_attnblock_forward(self, x)
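These two hunks wrap the xformers attention block in a try/except, so that a `NotImplementedError` from `memory_efficient_attention` (unsupported shape, dtype, or build) falls back to the plain attention-block forward. The general pattern, sketched standalone:

```python
# Sketch of the fallback pattern added above: try the optimized path first,
# and fall back to the reference implementation if it is not supported.
def forward_with_fallback(fast_forward, reference_forward, x):
    try:
        return fast_forward(x)
    except NotImplementedError:
        return reference_forward(x)
```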
@@ -44,6 +44,7 @@ parser.add_argument("--scunet-models-path", type=str, help="Path to directory wi
 parser.add_argument("--swinir-models-path", type=str, help="Path to directory with SwinIR model file(s).", default=os.path.join(models_path, 'SwinIR'))
 parser.add_argument("--ldsr-models-path", type=str, help="Path to directory with LDSR model file(s).", default=os.path.join(models_path, 'LDSR'))
 parser.add_argument("--xformers", action='store_true', help="enable xformers for cross attention layers")
+parser.add_argument("--force-enable-xformers", action='store_true', help="enable xformers for cross attention layers regardless of whether the checking code thinks you can run it; do not make bug reports if this fails to work")
 parser.add_argument("--deepdanbooru", action='store_true', help="enable deepdanbooru interrogator")
 parser.add_argument("--opt-split-attention", action='store_true', help="force-enables cross-attention layer optimization. By default, it's on for torch.cuda and off for other torch devices.")
 parser.add_argument("--disable-opt-split-attention", action='store_true', help="force-disables cross-attention layer optimization")
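The new flag parses into `opts.force_enable_xformers` (argparse converts dashes to underscores), which is the attribute the optimization code above checks. A small standalone argparse sketch (help strings shortened):

```python
import argparse

# Standalone sketch of how the two xformers flags parse.
parser = argparse.ArgumentParser()
parser.add_argument("--xformers", action='store_true', help="enable xformers for cross attention layers")
parser.add_argument("--force-enable-xformers", action='store_true', help="enable xformers even if the capability check would refuse it")
opts = parser.parse_args(["--force-enable-xformers"])
assert opts.force_enable_xformers and not opts.xformers
```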
webui.py (6 changes)
@@ -5,6 +5,8 @@ import importlib
 import signal
 import threading
 
+from fastapi.middleware.gzip import GZipMiddleware
+
 from modules.paths import script_path
 
 from modules import devices, sd_samplers
@@ -93,7 +95,7 @@ def webui():
 
         demo = modules.ui.create_ui(wrap_gradio_gpu_call=wrap_gradio_gpu_call)
 
-        demo.launch(
+        app,local_url,share_url = demo.launch(
             share=cmd_opts.share,
             server_name="0.0.0.0" if cmd_opts.listen else None,
             server_port=cmd_opts.port,
@@ -103,6 +105,8 @@ def webui():
             prevent_thread_lock=True
         )
 
+        app.add_middleware(GZipMiddleware,minimum_size=1000)
+
         while 1:
             time.sleep(0.5)
             if getattr(demo, 'do_restart', False):
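The webui.py changes capture the FastAPI app returned by `demo.launch(...)` and register gzip compression on it. A minimal standalone sketch of the same middleware setup, using a bare FastAPI app instead of the Gradio-provided one:

```python
from fastapi import FastAPI
from fastapi.middleware.gzip import GZipMiddleware

# Responses larger than minimum_size bytes are gzip-compressed when the client
# advertises Accept-Encoding: gzip; smaller responses are passed through as-is.
app = FastAPI()
app.add_middleware(GZipMiddleware, minimum_size=1000)
```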