Merge branch 'dev' into test-fp8
commit 3d341ebc7d
@@ -20,7 +20,7 @@ jobs:
        # not to have GHA download an (at the time of writing) 4 GB cache
        # of PyTorch and other dependencies.
      - name: Install Ruff
-       run: pip install ruff==0.0.272
+       run: pip install ruff==0.1.6
      - name: Run Ruff
        run: ruff .
  lint-js:
@@ -121,7 +121,9 @@ Alternatively, use online services (like Google Colab):
 # Debian-based:
 sudo apt install wget git python3 python3-venv libgl1 libglib2.0-0
 # Red Hat-based:
-sudo dnf install wget git python3
+sudo dnf install wget git python3 gperftools-libs libglvnd-glx
+# openSUSE-based:
+sudo zypper install wget git python3 libtcmalloc4 libglvnd
 # Arch-based:
 sudo pacman -S wget git python3
 ```
@@ -174,5 +176,6 @@ Licenses for borrowed code can be found in `Settings -> Licenses` screen, and al
 - TAESD - Ollin Boer Bohan - https://github.com/madebyollin/taesd
 - LyCORIS - KohakuBlueleaf
 - Restart sampling - lambertae - https://github.com/Newbeeer/diffusion_restart_sampling
+- Hypertile - tfernd - https://github.com/tfernd/HyperTile
 - Initial Gradio script - posted on 4chan by an Anonymous user. Thank you Anonymous user.
 - (You)
@@ -0,0 +1,345 @@
"""
Hypertile module for splitting attention layers in SD-1.5 U-Net and SD-1.5 VAE
Warning: the patch works well only if the input image has a width and height that are multiples of 128
Original author: @tfernd Github: https://github.com/tfernd/HyperTile
"""

from __future__ import annotations

import functools
from dataclasses import dataclass
from typing import Callable

from functools import wraps, cache

import math
import torch.nn as nn
import random

from einops import rearrange

@dataclass
class HypertileParams:
    depth = 0
    layer_name = ""
    tile_size: int = 0
    swap_size: int = 0
    aspect_ratio: float = 1.0
    forward = None
    enabled = False


# TODO add SD-XL layers
DEPTH_LAYERS = {
    0: [
        # SD 1.5 U-Net (diffusers)
        "down_blocks.0.attentions.0.transformer_blocks.0.attn1",
        "down_blocks.0.attentions.1.transformer_blocks.0.attn1",
        "up_blocks.3.attentions.0.transformer_blocks.0.attn1",
        "up_blocks.3.attentions.1.transformer_blocks.0.attn1",
        "up_blocks.3.attentions.2.transformer_blocks.0.attn1",
        # SD 1.5 U-Net (ldm)
        "input_blocks.1.1.transformer_blocks.0.attn1",
        "input_blocks.2.1.transformer_blocks.0.attn1",
        "output_blocks.9.1.transformer_blocks.0.attn1",
        "output_blocks.10.1.transformer_blocks.0.attn1",
        "output_blocks.11.1.transformer_blocks.0.attn1",
        # SD 1.5 VAE
        "decoder.mid_block.attentions.0",
        "decoder.mid.attn_1",
    ],
    1: [
        # SD 1.5 U-Net (diffusers)
        "down_blocks.1.attentions.0.transformer_blocks.0.attn1",
        "down_blocks.1.attentions.1.transformer_blocks.0.attn1",
        "up_blocks.2.attentions.0.transformer_blocks.0.attn1",
        "up_blocks.2.attentions.1.transformer_blocks.0.attn1",
        "up_blocks.2.attentions.2.transformer_blocks.0.attn1",
        # SD 1.5 U-Net (ldm)
        "input_blocks.4.1.transformer_blocks.0.attn1",
        "input_blocks.5.1.transformer_blocks.0.attn1",
        "output_blocks.6.1.transformer_blocks.0.attn1",
        "output_blocks.7.1.transformer_blocks.0.attn1",
        "output_blocks.8.1.transformer_blocks.0.attn1",
    ],
    2: [
        # SD 1.5 U-Net (diffusers)
        "down_blocks.2.attentions.0.transformer_blocks.0.attn1",
        "down_blocks.2.attentions.1.transformer_blocks.0.attn1",
        "up_blocks.1.attentions.0.transformer_blocks.0.attn1",
        "up_blocks.1.attentions.1.transformer_blocks.0.attn1",
        "up_blocks.1.attentions.2.transformer_blocks.0.attn1",
        # SD 1.5 U-Net (ldm)
        "input_blocks.7.1.transformer_blocks.0.attn1",
        "input_blocks.8.1.transformer_blocks.0.attn1",
        "output_blocks.3.1.transformer_blocks.0.attn1",
        "output_blocks.4.1.transformer_blocks.0.attn1",
        "output_blocks.5.1.transformer_blocks.0.attn1",
    ],
    3: [
        # SD 1.5 U-Net (diffusers)
        "mid_block.attentions.0.transformer_blocks.0.attn1",
        # SD 1.5 U-Net (ldm)
        "middle_block.1.transformer_blocks.0.attn1",
    ],
}
# XL layers, thanks for GitHub@gel-crabs for the help
DEPTH_LAYERS_XL = {
    0: [
        # SD 1.5 U-Net (diffusers)
        "down_blocks.0.attentions.0.transformer_blocks.0.attn1",
        "down_blocks.0.attentions.1.transformer_blocks.0.attn1",
        "up_blocks.3.attentions.0.transformer_blocks.0.attn1",
        "up_blocks.3.attentions.1.transformer_blocks.0.attn1",
        "up_blocks.3.attentions.2.transformer_blocks.0.attn1",
        # SD 1.5 U-Net (ldm)
        "input_blocks.4.1.transformer_blocks.0.attn1",
        "input_blocks.5.1.transformer_blocks.0.attn1",
        "output_blocks.3.1.transformer_blocks.0.attn1",
        "output_blocks.4.1.transformer_blocks.0.attn1",
        "output_blocks.5.1.transformer_blocks.0.attn1",
        # SD 1.5 VAE
        "decoder.mid_block.attentions.0",
        "decoder.mid.attn_1",
    ],
    1: [
        # SD 1.5 U-Net (diffusers)
        #"down_blocks.1.attentions.0.transformer_blocks.0.attn1",
        #"down_blocks.1.attentions.1.transformer_blocks.0.attn1",
        #"up_blocks.2.attentions.0.transformer_blocks.0.attn1",
        #"up_blocks.2.attentions.1.transformer_blocks.0.attn1",
        #"up_blocks.2.attentions.2.transformer_blocks.0.attn1",
        # SD 1.5 U-Net (ldm)
        "input_blocks.4.1.transformer_blocks.1.attn1",
        "input_blocks.5.1.transformer_blocks.1.attn1",
        "output_blocks.3.1.transformer_blocks.1.attn1",
        "output_blocks.4.1.transformer_blocks.1.attn1",
        "output_blocks.5.1.transformer_blocks.1.attn1",
        "input_blocks.7.1.transformer_blocks.0.attn1",
        "input_blocks.8.1.transformer_blocks.0.attn1",
        "output_blocks.0.1.transformer_blocks.0.attn1",
        "output_blocks.1.1.transformer_blocks.0.attn1",
        "output_blocks.2.1.transformer_blocks.0.attn1",
        "input_blocks.7.1.transformer_blocks.1.attn1",
        "input_blocks.8.1.transformer_blocks.1.attn1",
        "output_blocks.0.1.transformer_blocks.1.attn1",
        "output_blocks.1.1.transformer_blocks.1.attn1",
        "output_blocks.2.1.transformer_blocks.1.attn1",
        "input_blocks.7.1.transformer_blocks.2.attn1",
        "input_blocks.8.1.transformer_blocks.2.attn1",
        "output_blocks.0.1.transformer_blocks.2.attn1",
        "output_blocks.1.1.transformer_blocks.2.attn1",
        "output_blocks.2.1.transformer_blocks.2.attn1",
        "input_blocks.7.1.transformer_blocks.3.attn1",
        "input_blocks.8.1.transformer_blocks.3.attn1",
        "output_blocks.0.1.transformer_blocks.3.attn1",
        "output_blocks.1.1.transformer_blocks.3.attn1",
        "output_blocks.2.1.transformer_blocks.3.attn1",
        "input_blocks.7.1.transformer_blocks.4.attn1",
        "input_blocks.8.1.transformer_blocks.4.attn1",
        "output_blocks.0.1.transformer_blocks.4.attn1",
        "output_blocks.1.1.transformer_blocks.4.attn1",
        "output_blocks.2.1.transformer_blocks.4.attn1",
        "input_blocks.7.1.transformer_blocks.5.attn1",
        "input_blocks.8.1.transformer_blocks.5.attn1",
        "output_blocks.0.1.transformer_blocks.5.attn1",
        "output_blocks.1.1.transformer_blocks.5.attn1",
        "output_blocks.2.1.transformer_blocks.5.attn1",
        "input_blocks.7.1.transformer_blocks.6.attn1",
        "input_blocks.8.1.transformer_blocks.6.attn1",
        "output_blocks.0.1.transformer_blocks.6.attn1",
        "output_blocks.1.1.transformer_blocks.6.attn1",
        "output_blocks.2.1.transformer_blocks.6.attn1",
        "input_blocks.7.1.transformer_blocks.7.attn1",
        "input_blocks.8.1.transformer_blocks.7.attn1",
        "output_blocks.0.1.transformer_blocks.7.attn1",
        "output_blocks.1.1.transformer_blocks.7.attn1",
        "output_blocks.2.1.transformer_blocks.7.attn1",
        "input_blocks.7.1.transformer_blocks.8.attn1",
        "input_blocks.8.1.transformer_blocks.8.attn1",
        "output_blocks.0.1.transformer_blocks.8.attn1",
        "output_blocks.1.1.transformer_blocks.8.attn1",
        "output_blocks.2.1.transformer_blocks.8.attn1",
        "input_blocks.7.1.transformer_blocks.9.attn1",
        "input_blocks.8.1.transformer_blocks.9.attn1",
        "output_blocks.0.1.transformer_blocks.9.attn1",
        "output_blocks.1.1.transformer_blocks.9.attn1",
        "output_blocks.2.1.transformer_blocks.9.attn1",
    ],
    2: [
        # SD 1.5 U-Net (diffusers)
        "mid_block.attentions.0.transformer_blocks.0.attn1",
        # SD 1.5 U-Net (ldm)
        "middle_block.1.transformer_blocks.0.attn1",
        "middle_block.1.transformer_blocks.1.attn1",
        "middle_block.1.transformer_blocks.2.attn1",
        "middle_block.1.transformer_blocks.3.attn1",
        "middle_block.1.transformer_blocks.4.attn1",
        "middle_block.1.transformer_blocks.5.attn1",
        "middle_block.1.transformer_blocks.6.attn1",
        "middle_block.1.transformer_blocks.7.attn1",
        "middle_block.1.transformer_blocks.8.attn1",
        "middle_block.1.transformer_blocks.9.attn1",
    ],
    3 : [] # TODO - separate layers for SD-XL
}

RNG_INSTANCE = random.Random()

def random_divisor(value: int, min_value: int, /, max_options: int = 1) -> int:
    """
    Returns a random divisor of value that
        x * min_value <= value
    if max_options is 1, the behavior is deterministic
    """
    min_value = min(min_value, value)

    # All big divisors of value (inclusive)
    divisors = [i for i in range(min_value, value + 1) if value % i == 0]  # divisors in small -> big order

    ns = [value // i for i in divisors[:max_options]]  # has at least 1 element  # big -> small order

    idx = RNG_INSTANCE.randint(0, len(ns) - 1)

    return ns[idx]
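# Worked example: a 512-pixel image dimension maps to a 64-pixel latent dimension;
# random_divisor(64, 16) looks at the divisors of 64 that are >= 16 ([16, 32, 64]) and,
# with max_options=1, deterministically returns 64 // 16 == 4 tiles, while
# random_divisor(64, 16, 2) picks at random between 4 and 2 tiles.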

def set_hypertile_seed(seed: int) -> None:
    RNG_INSTANCE.seed(seed)


@functools.cache
def largest_tile_size_available(width: int, height: int) -> int:
    """
    Calculates the largest tile size available for a given width and height
    Tile size is always a power of 2
    """
    gcd = math.gcd(width, height)
    largest_tile_size_available = 1
    while gcd % (largest_tile_size_available * 2) == 0:
        largest_tile_size_available *= 2
    return largest_tile_size_available
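# Worked example: largest_tile_size_available(1280, 768) takes gcd(1280, 768) == 256 and
# returns 256, the largest power of two that divides it; for a 1152x896 image the gcd is
# 128, so the result is 128.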

def iterative_closest_divisors(hw: int, aspect_ratio: float) -> tuple[int, int]:
    """
    Finds h and w such that h*w = hw and h/w = aspect_ratio
    We check all possible divisors of hw and return the closest to the aspect ratio
    """
    divisors = [i for i in range(2, hw + 1) if hw % i == 0]  # all divisors of hw
    pairs = [(i, hw // i) for i in divisors]  # all pairs of divisors of hw
    ratios = [w / h for h, w in pairs]  # all ratios of pairs of divisors of hw
    closest_ratio = min(ratios, key=lambda x: abs(x - aspect_ratio))  # closest ratio to aspect_ratio
    closest_pair = pairs[ratios.index(closest_ratio)]  # closest pair of divisors to aspect_ratio
    return closest_pair

@cache
def find_hw_candidates(hw: int, aspect_ratio: float) -> tuple[int, int]:
    """
    Finds h and w such that h*w = hw and h/w = aspect_ratio
    """
    h, w = round(math.sqrt(hw * aspect_ratio)), round(math.sqrt(hw / aspect_ratio))
    # find h and w such that h*w = hw and h/w = aspect_ratio
    if h * w != hw:
        w_candidate = hw / h
        # check if w is an integer
        if not w_candidate.is_integer():
            h_candidate = hw / w
            # check if h is an integer
            if not h_candidate.is_integer():
                return iterative_closest_divisors(hw, aspect_ratio)
            else:
                h = int(h_candidate)
        else:
            w = int(w_candidate)
    return h, w
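# Worked example: a 768x512 image gives a 96x64 latent, i.e. hw = 96 * 64 = 6144 attention
# tokens and aspect_ratio = 1.5; round(sqrt(6144 * 1.5)) == 96 and round(sqrt(6144 / 1.5)) == 64,
# so find_hw_candidates(6144, 1.5) returns (96, 64) without falling back to
# iterative_closest_divisors.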

def self_attn_forward(params: HypertileParams, scale_depth=True) -> Callable:

    @wraps(params.forward)
    def wrapper(*args, **kwargs):
        if not params.enabled:
            return params.forward(*args, **kwargs)

        latent_tile_size = max(128, params.tile_size) // 8
        x = args[0]

        # VAE
        if x.ndim == 4:
            b, c, h, w = x.shape

            nh = random_divisor(h, latent_tile_size, params.swap_size)
            nw = random_divisor(w, latent_tile_size, params.swap_size)

            if nh * nw > 1:
                x = rearrange(x, "b c (nh h) (nw w) -> (b nh nw) c h w", nh=nh, nw=nw)  # split into nh * nw tiles

            out = params.forward(x, *args[1:], **kwargs)

            if nh * nw > 1:
                out = rearrange(out, "(b nh nw) c h w -> b c (nh h) (nw w)", nh=nh, nw=nw)

        # U-Net
        else:
            hw: int = x.size(1)
            h, w = find_hw_candidates(hw, params.aspect_ratio)
            assert h * w == hw, f"Invalid aspect ratio {params.aspect_ratio} for input of shape {x.shape}, hw={hw}, h={h}, w={w}"

            factor = 2 ** params.depth if scale_depth else 1
            nh = random_divisor(h, latent_tile_size * factor, params.swap_size)
            nw = random_divisor(w, latent_tile_size * factor, params.swap_size)

            if nh * nw > 1:
                x = rearrange(x, "b (nh h nw w) c -> (b nh nw) (h w) c", h=h // nh, w=w // nw, nh=nh, nw=nw)

            out = params.forward(x, *args[1:], **kwargs)

            if nh * nw > 1:
                out = rearrange(out, "(b nh nw) hw c -> b nh nw hw c", nh=nh, nw=nw)
                out = rearrange(out, "b nh nw (h w) c -> b (nh h nw w) c", h=h // nh, w=w // nw)

        return out

    return wrapper
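# Worked example: for a 1024x1024 generation, the depth-0 U-Net self-attention input has
# hw = 128 * 128 = 16384 tokens; with tile_size=256 the latent tile size is 256 // 8 == 32,
# so nh and nw are divisors of 128 no larger than 128 // 32 == 4, and attention runs on up
# to 16 tiles of 32x32 = 1024 tokens each instead of one 16384-token sequence.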

def hypertile_hook_model(model: nn.Module, width, height, *, enable=False, tile_size_max=128, swap_size=1, max_depth=3, is_sdxl=False):
    hypertile_layers = getattr(model, "__webui_hypertile_layers", None)
    if hypertile_layers is None:
        if not enable:
            return

        hypertile_layers = {}
        layers = DEPTH_LAYERS_XL if is_sdxl else DEPTH_LAYERS

        for depth in range(4):
            for layer_name, module in model.named_modules():
                if any(layer_name.endswith(try_name) for try_name in layers[depth]):
                    params = HypertileParams()
                    module.__webui_hypertile_params = params
                    params.forward = module.forward
                    params.depth = depth
                    params.layer_name = layer_name
                    module.forward = self_attn_forward(params)

                    hypertile_layers[layer_name] = 1

        model.__webui_hypertile_layers = hypertile_layers

    aspect_ratio = width / height
    tile_size = min(largest_tile_size_available(width, height), tile_size_max)

    for layer_name, module in model.named_modules():
        if layer_name in hypertile_layers:
            params = module.__webui_hypertile_params

            params.tile_size = tile_size
            params.swap_size = swap_size
            params.aspect_ratio = aspect_ratio
            params.enabled = enable and params.depth <= max_depth
@@ -0,0 +1,73 @@
import hypertile
from modules import scripts, script_callbacks, shared


class ScriptHypertile(scripts.Script):
    name = "Hypertile"

    def title(self):
        return self.name

    def show(self, is_img2img):
        return scripts.AlwaysVisible

    def process(self, p, *args):
        hypertile.set_hypertile_seed(p.all_seeds[0])

        configure_hypertile(p.width, p.height, enable_unet=shared.opts.hypertile_enable_unet)

    def before_hr(self, p, *args):
        configure_hypertile(p.hr_upscale_to_x, p.hr_upscale_to_y, enable_unet=shared.opts.hypertile_enable_unet_secondpass or shared.opts.hypertile_enable_unet)


def configure_hypertile(width, height, enable_unet=True):
    hypertile.hypertile_hook_model(
        shared.sd_model.first_stage_model,
        width,
        height,
        swap_size=shared.opts.hypertile_swap_size_vae,
        max_depth=shared.opts.hypertile_max_depth_vae,
        tile_size_max=shared.opts.hypertile_max_tile_vae,
        enable=shared.opts.hypertile_enable_vae,
    )

    hypertile.hypertile_hook_model(
        shared.sd_model.model,
        width,
        height,
        swap_size=shared.opts.hypertile_swap_size_unet,
        max_depth=shared.opts.hypertile_max_depth_unet,
        tile_size_max=shared.opts.hypertile_max_tile_unet,
        enable=enable_unet,
        is_sdxl=shared.sd_model.is_sdxl
    )


def on_ui_settings():
    import gradio as gr

    options = {
        "hypertile_explanation": shared.OptionHTML("""
            <a href='https://github.com/tfernd/HyperTile'>Hypertile</a> optimizes the self-attention layer within U-Net and VAE models,
            resulting in a reduction in computation time ranging from 1 to 4 times. The larger the generated image is, the greater the
            benefit.
        """),

        "hypertile_enable_unet": shared.OptionInfo(False, "Enable Hypertile U-Net").info("noticeable change in details of the generated picture; if enabled, overrides the setting below"),
        "hypertile_enable_unet_secondpass": shared.OptionInfo(False, "Enable Hypertile U-Net for hires fix second pass"),
        "hypertile_max_depth_unet": shared.OptionInfo(3, "Hypertile U-Net max depth", gr.Slider, {"minimum": 0, "maximum": 3, "step": 1}),
        "hypertile_max_tile_unet": shared.OptionInfo(256, "Hypertile U-net max tile size", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}),
        "hypertile_swap_size_unet": shared.OptionInfo(3, "Hypertile U-net swap size", gr.Slider, {"minimum": 0, "maximum": 6, "step": 1}),

        "hypertile_enable_vae": shared.OptionInfo(False, "Enable Hypertile VAE").info("minimal change in the generated picture"),
        "hypertile_max_depth_vae": shared.OptionInfo(3, "Hypertile VAE max depth", gr.Slider, {"minimum": 0, "maximum": 3, "step": 1}),
        "hypertile_max_tile_vae": shared.OptionInfo(128, "Hypertile VAE max tile size", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}),
        "hypertile_swap_size_vae": shared.OptionInfo(3, "Hypertile VAE swap size", gr.Slider, {"minimum": 0, "maximum": 6, "step": 1}),
    }

    for name, opt in options.items():
        opt.section = ('hypertile', "Hypertile")
        shared.opts.add_option(name, opt)


script_callbacks.on_ui_settings(on_ui_settings)
@@ -6,6 +6,21 @@ import traceback
exception_records = []


+def format_traceback(tb):
+    return [[f"{x.filename}, line {x.lineno}, {x.name}", x.line] for x in traceback.extract_tb(tb)]
+
+
+def format_exception(e, tb):
+    return {"exception": str(e), "traceback": format_traceback(tb)}
+
+
+def get_exceptions():
+    try:
+        return list(reversed(exception_records))
+    except Exception as e:
+        return str(e)
+
+
def record_exception():
    _, e, tb = sys.exc_info()
    if e is None:
@@ -14,8 +29,7 @@ def record_exception():
    if exception_records and exception_records[-1] == e:
        return

-    from modules import sysinfo
-    exception_records.append(sysinfo.format_exception(e, tb))
+    exception_records.append(format_exception(e, tb))

    if len(exception_records) > 5:
        exception_records.pop(0)
@@ -1,11 +1,14 @@
from __future__ import annotations

+import configparser
import os
import threading
+import re

from modules import shared, errors, cache, scripts
from modules.gitpython_hack import Repo
from modules.paths_internal import extensions_dir, extensions_builtin_dir, script_path  # noqa: F401

extensions = []

os.makedirs(extensions_dir, exist_ok=True)
@@ -19,11 +22,55 @@ def active():
    return [x for x in extensions if x.enabled]


class ExtensionMetadata:
    filename = "metadata.ini"
    config: configparser.ConfigParser
    canonical_name: str
    requires: list

    def __init__(self, path, canonical_name):
        self.config = configparser.ConfigParser()

        filepath = os.path.join(path, self.filename)
        if os.path.isfile(filepath):
            try:
                self.config.read(filepath)
            except Exception:
                errors.report(f"Error reading {self.filename} for extension {canonical_name}.", exc_info=True)

        self.canonical_name = self.config.get("Extension", "Name", fallback=canonical_name)
        self.canonical_name = canonical_name.lower().strip()

        self.requires = self.get_script_requirements("Requires", "Extension")

    def get_script_requirements(self, field, section, extra_section=None):
        """reads a list of requirements from the config; field is the name of the field in the ini file,
        like Requires or Before, and section is the name of the [section] in the ini file; additionally,
        reads more requirements from [extra_section] if specified."""

        x = self.config.get(section, field, fallback='')

        if extra_section:
            x = x + ', ' + self.config.get(extra_section, field, fallback='')

        return self.parse_list(x.lower())

    def parse_list(self, text):
        """converts a line from config ("ext1 ext2, ext3 ") into a python list (["ext1", "ext2", "ext3"])"""

        if not text:
            return []

        # both "," and " " are accepted as separator
        return [x for x in re.split(r"[,\s]+", text.strip()) if x]
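# Illustrative metadata.ini that ExtensionMetadata would parse (section and field names are
# taken from the calls above; the extension names themselves are made up):
#
#   [Extension]
#   Name = my-extension
#   Requires = some-other-extension, another-extension
#
# get_script_requirements("Requires", "Extension") runs that Requires line through
# parse_list, which accepts both commas and whitespace as separators and yields
# ["some-other-extension", "another-extension"].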


class Extension:
    lock = threading.Lock()
    cached_fields = ['remote', 'commit_date', 'branch', 'commit_hash', 'version']
+    metadata: ExtensionMetadata

-    def __init__(self, name, path, enabled=True, is_builtin=False):
+    def __init__(self, name, path, enabled=True, is_builtin=False, metadata=None):
        self.name = name
        self.path = path
        self.enabled = enabled
@@ -36,6 +83,8 @@ class Extension:
        self.branch = None
        self.remote = None
        self.have_info_from_repo = False
+        self.metadata = metadata if metadata else ExtensionMetadata(self.path, name.lower())
+        self.canonical_name = metadata.canonical_name

    def to_dict(self):
        return {x: getattr(self, x) for x in self.cached_fields}
@@ -56,6 +105,7 @@ class Extension:
            self.do_read_info_from_repo()

            return self.to_dict()

        try:
            d = cache.cached_data_for_file('extensions-git', self.name, os.path.join(self.path, ".git"), read_from_repo)
            self.from_dict(d)
@@ -136,9 +186,6 @@ class Extension:
def list_extensions():
    extensions.clear()

-    if not os.path.isdir(extensions_dir):
-        return
-
    if shared.cmd_opts.disable_all_extensions:
        print("*** \"--disable-all-extensions\" arg was used, will not load any extensions ***")
    elif shared.opts.disable_all_extensions == "all":
@@ -148,18 +195,43 @@ def list_extensions():
    elif shared.opts.disable_all_extensions == "extra":
        print("*** \"Disable all extensions\" option was set, will only load built-in extensions ***")

-    extension_paths = []
-    for dirname in [extensions_dir, extensions_builtin_dir]:
+    loaded_extensions = {}
+
+    # scan through extensions directory and load metadata
+    for dirname in [extensions_builtin_dir, extensions_dir]:
        if not os.path.isdir(dirname):
-            return
+            continue

        for extension_dirname in sorted(os.listdir(dirname)):
            path = os.path.join(dirname, extension_dirname)
            if not os.path.isdir(path):
                continue

-            extension_paths.append((extension_dirname, path, dirname == extensions_builtin_dir))
-
-    for dirname, path, is_builtin in extension_paths:
-        extension = Extension(name=dirname, path=path, enabled=dirname not in shared.opts.disabled_extensions, is_builtin=is_builtin)
-        extensions.append(extension)
+            canonical_name = extension_dirname
+            metadata = ExtensionMetadata(path, canonical_name)
+
+            # check for duplicated canonical names
+            already_loaded_extension = loaded_extensions.get(metadata.canonical_name)
+            if already_loaded_extension is not None:
+                errors.report(f'Duplicate canonical name "{canonical_name}" found in extensions "{extension_dirname}" and "{already_loaded_extension.name}". Former will be discarded.', exc_info=False)
+                continue
+
+            is_builtin = dirname == extensions_builtin_dir
+            extension = Extension(name=extension_dirname, path=path, enabled=extension_dirname not in shared.opts.disabled_extensions, is_builtin=is_builtin, metadata=metadata)
+            extensions.append(extension)
+            loaded_extensions[canonical_name] = extension
+
+    # check for requirements
+    for extension in extensions:
+        for req in extension.metadata.requires:
+            required_extension = loaded_extensions.get(req)
+            if required_extension is None:
+                errors.report(f'Extension "{extension.name}" requires "{req}" which is not installed.', exc_info=False)
+                continue
+
+            if not extension.enabled:
+                errors.report(f'Extension "{extension.name}" requires "{required_extension.name}" which is disabled.', exc_info=False)
+                continue


extensions: list[Extension] = []
@@ -44,6 +44,8 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=Fal
    steps = p.steps
    override_settings = p.override_settings
    sd_model_checkpoint_override = get_closet_checkpoint_match(override_settings.get("sd_model_checkpoint", None))
+    batch_results = None
+    discard_further_results = False
    for i, image in enumerate(images):
        state.job = f"{i+1} out of {len(images)}"
        if state.skipped:
@@ -127,7 +129,21 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=Fal

        if proc is None:
            p.override_settings.pop('save_images_replace_action', None)
-            process_images(p)
+            proc = process_images(p)
+
+        if not discard_further_results and proc:
+            if batch_results:
+                batch_results.images.extend(proc.images)
+                batch_results.infotexts.extend(proc.infotexts)
+            else:
+                batch_results = proc
+
+            if 0 <= shared.opts.img2img_batch_show_results_limit < len(batch_results.images):
+                discard_further_results = True
+                batch_results.images = batch_results.images[:int(shared.opts.img2img_batch_show_results_limit)]
+                batch_results.infotexts = batch_results.infotexts[:int(shared.opts.img2img_batch_show_results_limit)]
+
+    return batch_results
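# Note on the limit check above: with img2img_batch_show_results_limit set to -1 the
# condition 0 <= -1 is never true, so every result is kept ("show all images"); with 0 the
# kept lists are truncated to nothing ("disable"); any positive N keeps only the first N
# batch results and discards the rest.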


def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_name: str, mask_blur: int, mask_alpha: float, inpainting_fill: int, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, img2img_batch_use_png_info: bool, img2img_batch_png_info_props: list, img2img_batch_png_info_dir: str, request: gr.Request, *args):
@@ -212,10 +228,10 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s
    with closing(p):
        if is_batch:
            assert not shared.cmd_opts.hide_ui_dir_config, "Launched with --hide-ui-dir-config, batch img2img disabled"
-            process_batch(p, img2img_batch_input_dir, img2img_batch_output_dir, img2img_batch_inpaint_mask_dir, args, to_scale=selected_scale_tab == 1, scale_by=scale_by, use_png_info=img2img_batch_use_png_info, png_info_props=img2img_batch_png_info_props, png_info_dir=img2img_batch_png_info_dir)
-
-            processed = Processed(p, [], p.seed, "")
+            processed = process_batch(p, img2img_batch_input_dir, img2img_batch_output_dir, img2img_batch_inpaint_mask_dir, args, to_scale=selected_scale_tab == 1, scale_by=scale_by, use_png_info=img2img_batch_use_png_info, png_info_props=img2img_batch_png_info_props, png_info_dir=img2img_batch_png_info_dir)
+
+            if processed is None:
+                processed = Processed(p, [], p.seed, "")
        else:
            processed = modules.scripts.scripts_img2img.run(p, *args)
            if processed is None:
@@ -441,7 +441,7 @@ def dump_sysinfo():
    import datetime

    text = sysinfo.get()
-    filename = f"sysinfo-{datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M')}.txt"
+    filename = f"sysinfo-{datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M')}.json"

    with open(filename, "w", encoding="utf8") as file:
        file.write(text)
@@ -76,7 +76,7 @@ class Options:

    def __init__(self, data_labels: dict[str, OptionInfo], restricted_opts):
        self.data_labels = data_labels
-        self.data = {k: v.default for k, v in self.data_labels.items()}
+        self.data = {k: v.default for k, v in self.data_labels.items() if not v.do_not_save}
        self.restricted_opts = restricted_opts

    def __setattr__(self, key, value):
@@ -210,7 +210,7 @@ class Options:

    def add_option(self, key, info):
        self.data_labels[key] = info
-        if key not in self.data:
+        if key not in self.data and not info.do_not_save:
            self.data[key] = info.default

    def reorder(self):
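# With these two changes, options flagged do_not_save never get an entry in Options.data and
# so are left out of the saved config; presumably this is what keeps display-only entries
# such as the hypertile_explanation OptionHTML above from being persisted.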

@@ -799,7 +799,6 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:

    infotexts = []
    output_images = []
-
    with torch.no_grad(), p.sd_model.ema_scope():
        with devices.autocast():
            p.init(p.all_prompts, p.all_seeds, p.all_subseeds)
@@ -873,7 +872,6 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
            else:
                if opts.sd_vae_decode_method != 'Full':
                    p.extra_generation_params['VAE Decoder'] = opts.sd_vae_decode_method
-
                x_samples_ddim = decode_latent_batch(p.sd_model, samples_ddim, target_device=devices.cpu, check_for_nans=True)

            x_samples_ddim = torch.stack(x_samples_ddim).float()
@@ -1147,6 +1145,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):

        if not self.enable_hr:
            return samples
+        devices.torch_gc()

        if self.latent_scale_mode is None:
            decoded_samples = torch.stack(decode_latent_batch(self.sd_model, samples, target_device=devices.cpu, check_for_nans=True)).to(dtype=torch.float32)
@@ -1156,8 +1155,6 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
            with sd_models.SkipWritingToConfig():
                sd_models.reload_model_weights(info=self.hr_checkpoint_info)

-            devices.torch_gc()
-
        return self.sample_hr_pass(samples, decoded_samples, seeds, subseeds, subseed_strength, prompts)

    def sample_hr_pass(self, samples, decoded_samples, seeds, subseeds, subseed_strength, prompts):
@@ -1165,7 +1162,6 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
            return samples

        self.is_hr_pass = True
-
        target_width = self.hr_upscale_to_x
        target_height = self.hr_upscale_to_y
@@ -1254,7 +1250,6 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
        decoded_samples = decode_latent_batch(self.sd_model, samples, target_device=devices.cpu, check_for_nans=True)

        self.is_hr_pass = False
-
        return decoded_samples

    def close(self):
@@ -311,20 +311,113 @@ scripts_data = []
postprocessing_scripts_data = []
ScriptClassData = namedtuple("ScriptClassData", ["script_class", "path", "basedir", "module"])


def topological_sort(dependencies):
    """Accepts a dictionary mapping name to its dependencies, returns a list of names ordered according to dependencies.
    Ignores errors relating to missing dependencies or circular dependencies
    """

    visited = {}
    result = []

    def inner(name):
        visited[name] = True

        for dep in dependencies.get(name, []):
            if dep in dependencies and dep not in visited:
                inner(dep)

        result.append(name)

    for depname in dependencies:
        if depname not in visited:
            inner(depname)

    return result
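# Worked example: topological_sort({"a": ["b"], "b": ["c"], "c": [], "d": ["missing"]})
# returns ["c", "b", "a", "d"]: every name appears after the names it depends on, and the
# unknown dependency "missing" is silently ignored, as the docstring promises.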


@dataclass
class ScriptWithDependencies:
    script_canonical_name: str
    file: ScriptFile
    requires: list
    load_before: list
    load_after: list


def list_scripts(scriptdirname, extension, *, include_extensions=True):
-    scripts_list = []
+    scripts = {}

-    basedir = os.path.join(paths.script_path, scriptdirname)
-    if os.path.exists(basedir):
-        for filename in sorted(os.listdir(basedir)):
-            scripts_list.append(ScriptFile(paths.script_path, filename, os.path.join(basedir, filename)))
+    loaded_extensions = {ext.canonical_name: ext for ext in extensions.active()}
+    loaded_extensions_scripts = {ext.canonical_name: [] for ext in extensions.active()}
+
+    # build script dependency map
+    root_script_basedir = os.path.join(paths.script_path, scriptdirname)
+    if os.path.exists(root_script_basedir):
+        for filename in sorted(os.listdir(root_script_basedir)):
+            if not os.path.isfile(os.path.join(root_script_basedir, filename)):
+                continue
+
+            if os.path.splitext(filename)[1].lower() != extension:
+                continue
+
+            script_file = ScriptFile(paths.script_path, filename, os.path.join(root_script_basedir, filename))
+            scripts[filename] = ScriptWithDependencies(filename, script_file, [], [], [])

    if include_extensions:
        for ext in extensions.active():
-            scripts_list += ext.list_files(scriptdirname, extension)
-
-    scripts_list = [x for x in scripts_list if os.path.splitext(x.path)[1].lower() == extension and os.path.isfile(x.path)]
+            extension_scripts_list = ext.list_files(scriptdirname, extension)
+            for extension_script in extension_scripts_list:
+                if not os.path.isfile(extension_script.path):
+                    continue
+
+                script_canonical_name = ("builtin/" if ext.is_builtin else "") + ext.canonical_name + "/" + extension_script.filename
+                relative_path = scriptdirname + "/" + extension_script.filename
+
+                script = ScriptWithDependencies(
+                    script_canonical_name=script_canonical_name,
+                    file=extension_script,
+                    requires=ext.metadata.get_script_requirements("Requires", relative_path, scriptdirname),
+                    load_before=ext.metadata.get_script_requirements("Before", relative_path, scriptdirname),
+                    load_after=ext.metadata.get_script_requirements("After", relative_path, scriptdirname),
+                )
+
+                scripts[script_canonical_name] = script
+                loaded_extensions_scripts[ext.canonical_name].append(script)
+
+    for script_canonical_name, script in scripts.items():
+        # load before requires inverse dependency
+        # in this case, append the script name into the load_after list of the specified script
+        for load_before in script.load_before:
+            # if this requires an individual script to be loaded before
+            other_script = scripts.get(load_before)
+            if other_script:
+                other_script.load_after.append(script_canonical_name)
+
+            # if this requires an extension
+            other_extension_scripts = loaded_extensions_scripts.get(load_before)
+            if other_extension_scripts:
+                for other_script in other_extension_scripts:
+                    other_script.load_after.append(script_canonical_name)
+
+        # if After mentions an extension, remove it and instead add all of its scripts
+        for load_after in list(script.load_after):
+            if load_after not in scripts and load_after in loaded_extensions_scripts:
+                script.load_after.remove(load_after)
+
+                for other_script in loaded_extensions_scripts.get(load_after, []):
+                    script.load_after.append(other_script.script_canonical_name)
+
+    dependencies = {}
+
+    for script_canonical_name, script in scripts.items():
+        for required_script in script.requires:
+            if required_script not in scripts and required_script not in loaded_extensions:
+                errors.report(f'Script "{script_canonical_name}" requires "{required_script}" to be loaded, but it is not.', exc_info=False)
+
+        dependencies[script_canonical_name] = script.load_after
+
+    ordered_scripts = topological_sort(dependencies)
+    scripts_list = [scripts[script_canonical_name].file for script_canonical_name in ordered_scripts]

    return scripts_list
@@ -365,15 +458,9 @@ def load_scripts():
        elif issubclass(script_class, scripts_postprocessing.ScriptPostprocessing):
            postprocessing_scripts_data.append(ScriptClassData(script_class, scriptfile.path, scriptfile.basedir, module))

-    def orderby(basedir):
-        # 1st webui, 2nd extensions-builtin, 3rd extensions
-        priority = {os.path.join(paths.script_path, "extensions-builtin"):1, paths.script_path:0}
-        for key in priority:
-            if basedir.startswith(key):
-                return priority[key]
-        return 9999
-
-    for scriptfile in sorted(scripts_list, key=lambda x: [orderby(x.basedir), x]):
+    # here the scripts_list is already ordered
+    # processing_script is not considered though
+    for scriptfile in scripts_list:
        try:
            if scriptfile.basedir != paths.script_path:
                sys.path = [scriptfile.basedir] + sys.path
@@ -60,7 +60,7 @@ def restart_sampler(model, x, sigmas, extra_args=None, callback=None, disable=No
            sigma_restart = get_sigmas_karras(restart_steps, sigmas[min_idx].item(), sigmas[max_idx].item(), device=sigmas.device)[:-1]
            while restart_times > 0:
                restart_times -= 1
-                step_list.extend([(old_sigma, new_sigma) for (old_sigma, new_sigma) in zip(sigma_restart[:-1], sigma_restart[1:])])
+                step_list.extend(zip(sigma_restart[:-1], sigma_restart[1:]))

    last_sigma = None
    for old_sigma, new_sigma in tqdm.tqdm(step_list, disable=disable):
@@ -189,6 +189,7 @@ options_templates.update(options_section(('img2img', "img2img"), {
    "img2img_inpaint_sketch_default_brush_color": OptionInfo("#ffffff", "Inpaint sketch initial brush color", ui_components.FormColorPicker, {}).info("default brush color of img2img inpaint sketch").needs_reload_ui(),
    "return_mask": OptionInfo(False, "For inpainting, include the greyscale mask in results for web"),
    "return_mask_composite": OptionInfo(False, "For inpainting, include masked composite in results for web"),
+    "img2img_batch_show_results_limit": OptionInfo(32, "Show the first N batch img2img results in UI", gr.Slider, {"minimum": -1, "maximum": 1000, "step": 1}).info('0: disable, -1: show all images. Too many images can cause lag'),
}))

options_templates.update(options_section(('optimizations', "Optimizations"), {
@@ -1,7 +1,6 @@
import json
import os
import sys
-import traceback

import platform
import hashlib
@@ -84,7 +83,7 @@ def get_dict():
        "Checksum": checksum_token,
        "Commandline": get_argv(),
        "Torch env info": get_torch_sysinfo(),
-        "Exceptions": get_exceptions(),
+        "Exceptions": errors.get_exceptions(),
        "CPU": {
            "model": platform.processor(),
            "count logical": psutil.cpu_count(logical=True),
@@ -104,21 +103,6 @@ def get_dict():
    return res


-def format_traceback(tb):
-    return [[f"{x.filename}, line {x.lineno}, {x.name}", x.line] for x in traceback.extract_tb(tb)]
-
-
-def format_exception(e, tb):
-    return {"exception": str(e), "traceback": format_traceback(tb)}
-
-
-def get_exceptions():
-    try:
-        return list(reversed(errors.exception_records))
-    except Exception as e:
-        return str(e)
-
-
def get_environment():
    return {k: os.environ[k] for k in sorted(os.environ) if k in environment_whitelist}
@@ -635,12 +635,6 @@ def create_ui():
            scale_by.release(**on_change_args)
            button_update_resize_to.click(**on_change_args)

-            # the code below is meant to update the resolution label after the image in the image selection UI has changed.
-            # as it is now the event keeps firing continuously for inpaint edits, which ruins the page with constant requests.
-            # I assume this must be a gradio bug and for now we'll just do it for non-inpaint inputs.
-            for component in [init_img, sketch]:
-                component.change(fn=lambda: None, _js="updateImg2imgResizeToTextAfterChangingImage", inputs=[], outputs=[], show_progress=False)
-
            tab_scale_to.select(fn=lambda: 0, inputs=[], outputs=[selected_scale_tab])
            tab_scale_by.select(fn=lambda: 1, inputs=[], outputs=[selected_scale_tab])
@@ -701,6 +695,12 @@ def create_ui():
            if category not in {"accordions"}:
                scripts.scripts_img2img.setup_ui_for_section(category)

+        # the code below is meant to update the resolution label after the image in the image selection UI has changed.
+        # as it is now the event keeps firing continuously for inpaint edits, which ruins the page with constant requests.
+        # I assume this must be a gradio bug and for now we'll just do it for non-inpaint inputs.
+        for component in [init_img, sketch]:
+            component.change(fn=lambda: None, _js="updateImg2imgResizeToTextAfterChangingImage", inputs=[], outputs=[], show_progress=False)
+
        def select_img2img_tab(tab):
            return gr.update(visible=tab in [2, 3, 4]), gr.update(visible=tab == 3),
@@ -1308,7 +1308,7 @@ def setup_ui_api(app):
        from fastapi.responses import PlainTextResponse

        text = sysinfo.get()
-        filename = f"sysinfo-{datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M')}.txt"
+        filename = f"sysinfo-{datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M')}.json"

        return PlainTextResponse(text, headers={'Content-Disposition': f'{"attachment" if attachment else "inline"}; filename="{filename}"'})
@@ -16,6 +16,7 @@ exclude = [

ignore = [
    "E501", # Line too long
+    "E721", # Do not compare types, use `isinstance`
    "E731", # Do not assign a `lambda` expression, use a `def`

    "I001", # Import block is un-sorted or un-formatted
webui.sh
@@ -89,7 +89,7 @@ delimiter="################################################################"

printf "\n%s\n" "${delimiter}"
printf "\e[1m\e[32mInstall script for stable-diffusion + Web UI\n"
-printf "\e[1m\e[34mTested on Debian 11 (Bullseye)\e[0m"
+printf "\e[1m\e[34mTested on Debian 11 (Bullseye), Fedora 34+ and openSUSE Leap 15.4 or newer.\e[0m"
printf "\n%s\n" "${delimiter}"

# Do not run as root
@@ -223,7 +223,7 @@ fi
# Try using TCMalloc on Linux
prepare_tcmalloc() {
    if [[ "${OSTYPE}" == "linux"* ]] && [[ -z "${NO_TCMALLOC}" ]] && [[ -z "${LD_PRELOAD}" ]]; then
-        TCMALLOC="$(PATH=/usr/sbin:$PATH ldconfig -p | grep -Po "libtcmalloc(_minimal|)\.so\.\d" | head -n 1)"
+        TCMALLOC="$(PATH=/sbin:$PATH ldconfig -p | grep -Po "libtcmalloc(_minimal|)\.so\.\d" | head -n 1)"
        if [[ ! -z "${TCMALLOC}" ]]; then
            echo "Using TCMalloc: ${TCMALLOC}"
            export LD_PRELOAD="${TCMALLOC}"
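# The PATH prefix for the ldconfig lookup now points at /sbin rather than /usr/sbin;
# presumably this is so ldconfig is still found on distributions (such as openSUSE, newly
# listed in the install banner above) where it is not available under /usr/sbin.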