commit 5abecea34c

@@ -18,22 +18,29 @@ jobs:
     steps:
     - name: Checkout Code
       uses: actions/checkout@v3
-    - name: Set up Python 3.10
-      uses: actions/setup-python@v4
+    - uses: actions/setup-python@v4
       with:
-        python-version: 3.10.6
-        cache: pip
-        cache-dependency-path: |
-          **/requirements*txt
-    - name: Install PyLint
-      run: |
-        python -m pip install --upgrade pip
-        pip install pylint
-    # This lets PyLint check to see if it can resolve imports
-    - name: Install dependencies
-      run: |
-        export COMMANDLINE_ARGS="--skip-torch-cuda-test --exit"
-        python launch.py
-    - name: Analysing the code with pylint
-      run: |
-        pylint $(git ls-files '*.py')
+        python-version: 3.11
+        # NB: there's no cache: pip here since we're not installing anything
+        # from the requirements.txt file(s) in the repository; it's faster
+        # not to have GHA download an (at the time of writing) 4 GB cache
+        # of PyTorch and other dependencies.
+    - name: Install Ruff
+      run: pip install ruff==0.0.265
+    - name: Run Ruff
+      run: ruff .
+
+    # The rest are currently disabled pending fixing of e.g. installing the torch dependency.
+
+    # - name: Install PyLint
+    #   run: |
+    #     python -m pip install --upgrade pip
+    #     pip install pylint
+    # # This lets PyLint check to see if it can resolve imports
+    # - name: Install dependencies
+    #   run: |
+    #     export COMMANDLINE_ARGS="--skip-torch-cuda-test --exit"
+    #     python launch.py
+    # - name: Analysing the code with pylint
+    #   run: |
+    #     pylint $(git ls-files '*.py')
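The workflow change above swaps the import-resolving PyLint job for Ruff, which lints source text without importing it, so CI no longer has to install the full PyTorch dependency chain first. A minimal local stand-in for the new step might look like this (assuming ruff==0.0.265 is installed; this helper is illustrative, not part of the commit):

    # Local equivalent of the "Run Ruff" CI step; assumes `pip install ruff==0.0.265`.
    import subprocess
    import sys

    def run_ruff(path: str = ".") -> int:
        # Ruff parses files directly instead of importing them, which is why the
        # job above no longer needs `python launch.py` to set up torch first.
        return subprocess.call([sys.executable, "-m", "ruff", path])

    if __name__ == "__main__":
        raise SystemExit(run_ruff())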
@@ -88,7 +88,7 @@ class LDSR:

         x_t = None
         logs = None
-        for n in range(n_runs):
+        for _ in range(n_runs):
             if custom_shape is not None:
                 x_t = torch.randn(1, custom_shape[1], custom_shape[2], custom_shape[3]).to(model.device)
                 x_t = repeat(x_t, '1 c h w -> b c h w', b=custom_shape[0])
@@ -110,7 +110,6 @@ class LDSR:
         diffusion_steps = int(steps)
         eta = 1.0

-        down_sample_method = 'Lanczos'

         gc.collect()
         if torch.cuda.is_available:
@@ -158,7 +157,7 @@ class LDSR:


 def get_cond(selected_path):
-    example = dict()
+    example = {}
     up_f = 4
     c = selected_path.convert('RGB')
     c = torch.unsqueeze(torchvision.transforms.ToTensor()(c), 0)
@@ -196,7 +195,7 @@ def convsample_ddim(model, cond, steps, shape, eta=1.0, callback=None, normals_s
 @torch.no_grad()
 def make_convolutional_sample(batch, model, custom_steps=None, eta=1.0, quantize_x0=False, custom_shape=None, temperature=1., noise_dropout=0., corrector=None,
                               corrector_kwargs=None, x_T=None, ddim_use_x0_pred=False):
-    log = dict()
+    log = {}

     z, c, x, xrec, xc = model.get_input(batch, model.first_stage_key,
                                         return_first_stage_outputs=True,
@@ -244,7 +243,7 @@ def make_convolutional_sample(batch, model, custom_steps=None, eta=1.0, quantize
         x_sample_noquant = model.decode_first_stage(sample, force_not_quantize=True)
         log["sample_noquant"] = x_sample_noquant
         log["sample_diff"] = torch.abs(x_sample_noquant - x_sample)
-    except:
+    except Exception:
         pass

     log["sample"] = x_sample
@@ -7,7 +7,8 @@ from basicsr.utils.download_util import load_file_from_url
 from modules.upscaler import Upscaler, UpscalerData
 from ldsr_model_arch import LDSR
 from modules import shared, script_callbacks
-import sd_hijack_autoencoder, sd_hijack_ddpm_v1
+import sd_hijack_autoencoder  # noqa: F401
+import sd_hijack_ddpm_v1  # noqa: F401


 class UpscalerLDSR(Upscaler):
@@ -1,16 +1,21 @@
 # The content of this file comes from the ldm/models/autoencoder.py file of the compvis/stable-diffusion repo
 # The VQModel & VQModelInterface were subsequently removed from ldm/models/autoencoder.py when we moved to the stability-ai/stablediffusion repo
 # As the LDSR upscaler relies on VQModel & VQModelInterface, the hijack aims to put them back into the ldm.models.autoencoder
-
+import numpy as np
 import torch
 import pytorch_lightning as pl
 import torch.nn.functional as F
 from contextlib import contextmanager
+
+from torch.optim.lr_scheduler import LambdaLR
+
+from ldm.modules.ema import LitEma
 from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer
 from ldm.modules.diffusionmodules.model import Encoder, Decoder
 from ldm.util import instantiate_from_config

 import ldm.models.autoencoder
+from packaging import version

 class VQModel(pl.LightningModule):
     def __init__(self,
@@ -19,7 +24,7 @@ class VQModel(pl.LightningModule):
                  n_embed,
                  embed_dim,
                  ckpt_path=None,
-                 ignore_keys=[],
+                 ignore_keys=None,
                  image_key="image",
                  colorize_nlabels=None,
                  monitor=None,
@@ -57,7 +62,7 @@ class VQModel(pl.LightningModule):
             print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")

         if ckpt_path is not None:
-            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
+            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys or [])
         self.scheduler_config = scheduler_config
         self.lr_g_factor = lr_g_factor

@@ -76,11 +81,11 @@ class VQModel(pl.LightningModule):
         if context is not None:
             print(f"{context}: Restored training weights")

-    def init_from_ckpt(self, path, ignore_keys=list()):
+    def init_from_ckpt(self, path, ignore_keys=None):
         sd = torch.load(path, map_location="cpu")["state_dict"]
         keys = list(sd.keys())
         for k in keys:
-            for ik in ignore_keys:
+            for ik in ignore_keys or []:
                 if k.startswith(ik):
                     print("Deleting key {} from state_dict.".format(k))
                     del sd[k]
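A pattern repeated throughout this commit: defaults like `ignore_keys=[]` become `ignore_keys=None`, with `ignore_keys or []` at the use site. A Python default is evaluated once, at function definition, so a mutable default is shared by every call. A small illustrative sketch (not from the commit itself):

    def bad(key, seen=[]):
        # the [] above is created once and reused across all calls
        seen.append(key)
        return seen

    def good(key, seen=None):
        seen = seen or []   # fresh list per call, the same idiom the patch uses
        seen.append(key)
        return seen

    assert bad("a") == ["a"]
    assert bad("b") == ["a", "b"]   # state leaked from the previous call
    assert good("a") == ["a"]
    assert good("b") == ["b"]       # calls stay independent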
@@ -165,7 +170,7 @@ class VQModel(pl.LightningModule):
     def validation_step(self, batch, batch_idx):
         log_dict = self._validation_step(batch, batch_idx)
         with self.ema_scope():
-            log_dict_ema = self._validation_step(batch, batch_idx, suffix="_ema")
+            self._validation_step(batch, batch_idx, suffix="_ema")
         return log_dict

     def _validation_step(self, batch, batch_idx, suffix=""):
@@ -232,7 +237,7 @@ class VQModel(pl.LightningModule):
         return self.decoder.conv_out.weight

     def log_images(self, batch, only_inputs=False, plot_ema=False, **kwargs):
-        log = dict()
+        log = {}
         x = self.get_input(batch, self.image_key)
         x = x.to(self.device)
         if only_inputs:
@@ -249,7 +254,8 @@ class VQModel(pl.LightningModule):
         if plot_ema:
             with self.ema_scope():
                 xrec_ema, _ = self(x)
-                if x.shape[1] > 3: xrec_ema = self.to_rgb(xrec_ema)
+                if x.shape[1] > 3:
+                    xrec_ema = self.to_rgb(xrec_ema)
                 log["reconstructions_ema"] = xrec_ema
         return log

@@ -264,7 +270,7 @@ class VQModel(pl.LightningModule):

 class VQModelInterface(VQModel):
     def __init__(self, embed_dim, *args, **kwargs):
-        super().__init__(embed_dim=embed_dim, *args, **kwargs)
+        super().__init__(*args, embed_dim=embed_dim, **kwargs)
         self.embed_dim = embed_dim

     def encode(self, x):
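The `super().__init__(embed_dim=embed_dim, *args, **kwargs)` to `super().__init__(*args, embed_dim=embed_dim, **kwargs)` rewrite is Ruff's B026 fix. The two call forms are evaluated identically — Python always binds positional `*args` before keyword arguments — but the old spelling reads as if the keyword were passed first. A quick demonstration (illustrative only):

    def show(*args, **kwargs):
        return args, kwargs

    old = show(embed_dim=3, *("a", "b"))   # keyword written lexically first
    new = show(*("a", "b"), embed_dim=3)   # what the patch writes
    assert old == new == (("a", "b"), {"embed_dim": 3})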
@@ -282,5 +288,5 @@ class VQModelInterface(VQModel):
         dec = self.decoder(quant)
         return dec

-setattr(ldm.models.autoencoder, "VQModel", VQModel)
-setattr(ldm.models.autoencoder, "VQModelInterface", VQModelInterface)
+ldm.models.autoencoder.VQModel = VQModel
+ldm.models.autoencoder.VQModelInterface = VQModelInterface
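The `setattr(module, "Name", value)` calls above become plain attribute assignments (Ruff B010): when the attribute name is a string literal the two forms are equivalent, and the direct form is easier to read and visible to static analysis. Sketch:

    import types

    mod = types.SimpleNamespace()
    setattr(mod, "VQModel", object)   # old form, flagged because "VQModel" is a constant
    mod.VQModel = object              # new form, identical effect
    assert mod.VQModel is object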
@@ -48,7 +48,7 @@ class DDPMV1(pl.LightningModule):
                  beta_schedule="linear",
                  loss_type="l2",
                  ckpt_path=None,
-                 ignore_keys=[],
+                 ignore_keys=None,
                  load_only_unet=False,
                  monitor="val/loss",
                  use_ema=True,
@@ -100,7 +100,7 @@ class DDPMV1(pl.LightningModule):
         if monitor is not None:
             self.monitor = monitor
         if ckpt_path is not None:
-            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet)
+            self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys or [], only_model=load_only_unet)

         self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps,
                                linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s)
@@ -182,13 +182,13 @@ class DDPMV1(pl.LightningModule):
         if context is not None:
             print(f"{context}: Restored training weights")

-    def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):
+    def init_from_ckpt(self, path, ignore_keys=None, only_model=False):
         sd = torch.load(path, map_location="cpu")
         if "state_dict" in list(sd.keys()):
             sd = sd["state_dict"]
         keys = list(sd.keys())
         for k in keys:
-            for ik in ignore_keys:
+            for ik in ignore_keys or []:
                 if k.startswith(ik):
                     print("Deleting key {} from state_dict.".format(k))
                     del sd[k]
@@ -375,7 +375,7 @@ class DDPMV1(pl.LightningModule):

     @torch.no_grad()
     def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs):
-        log = dict()
+        log = {}
         x = self.get_input(batch, self.first_stage_key)
         N = min(x.shape[0], N)
         n_row = min(x.shape[0], n_row)
@@ -383,7 +383,7 @@ class DDPMV1(pl.LightningModule):
         log["inputs"] = x

         # get diffusion row
-        diffusion_row = list()
+        diffusion_row = []
         x_start = x[:n_row]

         for t in range(self.num_timesteps):
@@ -444,13 +444,13 @@ class LatentDiffusionV1(DDPMV1):
             conditioning_key = None
         ckpt_path = kwargs.pop("ckpt_path", None)
         ignore_keys = kwargs.pop("ignore_keys", [])
-        super().__init__(conditioning_key=conditioning_key, *args, **kwargs)
+        super().__init__(*args, conditioning_key=conditioning_key, **kwargs)
         self.concat_mode = concat_mode
         self.cond_stage_trainable = cond_stage_trainable
         self.cond_stage_key = cond_stage_key
         try:
             self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1
-        except:
+        except Exception:
             self.num_downs = 0
         if not scale_by_std:
             self.scale_factor = scale_factor
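The `except:` to `except Exception:` substitutions (Ruff E722) are more than style: a bare `except` catches `BaseException`, which includes `KeyboardInterrupt` and `SystemExit`, so a Ctrl-C arriving inside the guarded block would be silently swallowed. An illustrative sketch:

    def swallow_everything():
        try:
            raise KeyboardInterrupt
        except:                # bare except also traps BaseException subclasses
            return "Ctrl-C disappeared"

    def swallow_errors_only():
        try:
            raise KeyboardInterrupt
        except Exception:      # KeyboardInterrupt is not an Exception
            return "unreachable"

    print(swallow_everything())
    try:
        swallow_errors_only()
    except KeyboardInterrupt:
        print("interrupt propagated as it should")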
@@ -877,16 +877,6 @@ class LatentDiffusionV1(DDPMV1):
             c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float()))
         return self.p_losses(x, c, t, *args, **kwargs)

-    def _rescale_annotations(self, bboxes, crop_coordinates):  # TODO: move to dataset
-        def rescale_bbox(bbox):
-            x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2])
-            y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3])
-            w = min(bbox[2] / crop_coordinates[2], 1 - x0)
-            h = min(bbox[3] / crop_coordinates[3], 1 - y0)
-            return x0, y0, w, h
-
-        return [rescale_bbox(b) for b in bboxes]
-
     def apply_model(self, x_noisy, t, cond, return_ids=False):

         if isinstance(cond, dict):
@@ -1126,7 +1116,7 @@ class LatentDiffusionV1(DDPMV1):
         if cond is not None:
             if isinstance(cond, dict):
                 cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
-                list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
+                [x[:batch_size] for x in cond[key]] for key in cond}
             else:
                 cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]

@@ -1157,8 +1147,10 @@ class LatentDiffusionV1(DDPMV1):

             if i % log_every_t == 0 or i == timesteps - 1:
                 intermediates.append(x0_partial)
-            if callback: callback(i)
-            if img_callback: img_callback(img, i)
+            if callback:
+                callback(i)
+            if img_callback:
+                img_callback(img, i)
         return img, intermediates

     @torch.no_grad()
@@ -1205,8 +1197,10 @@ class LatentDiffusionV1(DDPMV1):

             if i % log_every_t == 0 or i == timesteps - 1:
                 intermediates.append(img)
-            if callback: callback(i)
-            if img_callback: img_callback(img, i)
+            if callback:
+                callback(i)
+            if img_callback:
+                img_callback(img, i)

         if return_intermediates:
             return img, intermediates
@@ -1221,7 +1215,7 @@ class LatentDiffusionV1(DDPMV1):
         if cond is not None:
             if isinstance(cond, dict):
                 cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
-                list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
+                [x[:batch_size] for x in cond[key]] for key in cond}
             else:
                 cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
         return self.p_sample_loop(cond,
@@ -1253,7 +1247,7 @@ class LatentDiffusionV1(DDPMV1):

         use_ddim = ddim_steps is not None

-        log = dict()
+        log = {}
         z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key,
                                            return_first_stage_outputs=True,
                                            force_c_encode=True,
@@ -1280,7 +1274,7 @@ class LatentDiffusionV1(DDPMV1):

         if plot_diffusion_rows:
             # get diffusion row
-            diffusion_row = list()
+            diffusion_row = []
             z_start = z[:n_row]
             for t in range(self.num_timesteps):
                 if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
@@ -1322,7 +1316,7 @@ class LatentDiffusionV1(DDPMV1):

         if inpaint:
             # make a simple center square
-            b, h, w = z.shape[0], z.shape[2], z.shape[3]
+            h, w = z.shape[2], z.shape[3]
             mask = torch.ones(N, h, w).to(self.device)
             # zeros will be filled in
             mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.
@@ -1424,10 +1418,10 @@ class Layout2ImgDiffusionV1(LatentDiffusionV1):
     # TODO: move all layout-specific hacks to this class
     def __init__(self, cond_stage_key, *args, **kwargs):
         assert cond_stage_key == 'coordinates_bbox', 'Layout2ImgDiffusion only for cond_stage_key="coordinates_bbox"'
-        super().__init__(cond_stage_key=cond_stage_key, *args, **kwargs)
+        super().__init__(*args, cond_stage_key=cond_stage_key, **kwargs)

     def log_images(self, batch, N=8, *args, **kwargs):
-        logs = super().log_images(batch=batch, N=N, *args, **kwargs)
+        logs = super().log_images(*args, batch=batch, N=N, **kwargs)

         key = 'train' if self.training else 'validation'
         dset = self.trainer.datamodule.datasets[key]
@@ -1443,7 +1437,7 @@ class Layout2ImgDiffusionV1(LatentDiffusionV1):
         logs['bbox_image'] = cond_img
         return logs

-setattr(ldm.models.diffusion.ddpm, "DDPMV1", DDPMV1)
-setattr(ldm.models.diffusion.ddpm, "LatentDiffusionV1", LatentDiffusionV1)
-setattr(ldm.models.diffusion.ddpm, "DiffusionWrapperV1", DiffusionWrapperV1)
-setattr(ldm.models.diffusion.ddpm, "Layout2ImgDiffusionV1", Layout2ImgDiffusionV1)
+ldm.models.diffusion.ddpm.DDPMV1 = DDPMV1
+ldm.models.diffusion.ddpm.LatentDiffusionV1 = LatentDiffusionV1
+ldm.models.diffusion.ddpm.DiffusionWrapperV1 = DiffusionWrapperV1
+ldm.models.diffusion.ddpm.Layout2ImgDiffusionV1 = Layout2ImgDiffusionV1
@@ -1,4 +1,3 @@
-import glob
 import os
 import re
 import torch
@@ -173,7 +172,7 @@ def load_lora(name, filename):
         else:
             print(f'Lora layer {key_diffusers} matched a layer with unsupported type: {type(sd_module).__name__}')
             continue
-            assert False, f'Lora layer {key_diffusers} matched a layer with unsupported type: {type(sd_module).__name__}'
+            raise AssertionError(f"Lora layer {key_diffusers} matched a layer with unsupported type: {type(sd_module).__name__}")

         with torch.no_grad():
             module.weight.copy_(weight)
@@ -185,7 +184,7 @@ def load_lora(name, filename):
         elif lora_key == "lora_down.weight":
             lora_module.down = module
         else:
-            assert False, f'Bad Lora layer name: {key_diffusers} - must end in lora_up.weight, lora_down.weight or alpha'
+            raise AssertionError(f"Bad Lora layer name: {key_diffusers} - must end in lora_up.weight, lora_down.weight or alpha")

     if len(keys_failed_to_match) > 0:
         print(f"Failed to match keys when loading Lora {filename}: {keys_failed_to_match}")
@@ -203,7 +202,7 @@ def load_loras(names, multipliers=None):
         loaded_loras.clear()

     loras_on_disk = [available_lora_aliases.get(name, None) for name in names]
-    if any([x is None for x in loras_on_disk]):
+    if any(x is None for x in loras_on_disk):
         list_available_loras()

         loras_on_disk = [available_lora_aliases.get(name, None) for name in names]
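`any([x is None for x in ...])` becoming `any(x is None for x in ...)` (Ruff C419) matters beyond style: with a generator, `any()` returns as soon as it sees a truthy value instead of first building the whole list. Illustrative:

    def is_missing(x, log):
        log.append(x)
        return x is None

    items = [None, "a", "b", "c"]
    log_list, log_gen = [], []

    any([is_missing(x, log_list) for x in items])  # the list evaluates all 4
    any(is_missing(x, log_gen) for x in items)     # the generator stops after 1

    assert len(log_list) == 4 and len(log_gen) == 1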
@@ -310,7 +309,7 @@ def lora_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.Mu

         print(f'failed to calculate lora weights for layer {lora_layer_name}')

-    setattr(self, "lora_current_names", wanted_names)
+    self.lora_current_names = wanted_names


 def lora_forward(module, input, original_forward):
@@ -344,8 +343,8 @@ def lora_forward(module, input, original_forward):


 def lora_reset_cached_weight(self: Union[torch.nn.Conv2d, torch.nn.Linear]):
-    setattr(self, "lora_current_names", ())
-    setattr(self, "lora_weights_backup", None)
+    self.lora_current_names = ()
+    self.lora_weights_backup = None


 def lora_Linear_forward(self, input):
@@ -419,7 +418,7 @@ def infotext_pasted(infotext, params):

     added = []

-    for k, v in params.items():
+    for k in params:
         if not k.startswith("AddNet Model "):
             continue

@@ -53,7 +53,7 @@ script_callbacks.on_infotext_pasted(lora.infotext_pasted)


 shared.options_templates.update(shared.options_section(('extra_networks', "Extra Networks"), {
-    "sd_lora": shared.OptionInfo("None", "Add Lora to prompt", gr.Dropdown, lambda: {"choices": ["None"] + [x for x in lora.available_loras]}, refresh=lora.list_available_loras),
+    "sd_lora": shared.OptionInfo("None", "Add Lora to prompt", gr.Dropdown, lambda: {"choices": ["None", *lora.available_loras]}, refresh=lora.list_available_loras),
 }))


@@ -13,7 +13,6 @@ import modules.upscaler
 from modules import devices, modelloader
 from scunet_model_arch import SCUNet as net
 from modules.shared import opts
-from modules import images


 class UpscalerScuNET(modules.upscaler.Upscaler):
@@ -133,7 +132,7 @@ class UpscalerScuNET(modules.upscaler.Upscaler):
         model = net(in_nc=3, config=[4, 4, 4, 4, 4, 4, 4], dim=64)
         model.load_state_dict(torch.load(filename), strict=True)
         model.eval()
-        for k, v in model.named_parameters():
+        for _, v in model.named_parameters():
             v.requires_grad = False
         model = model.to(device)

@@ -61,7 +61,9 @@ class WMSA(nn.Module):
         Returns:
             output: tensor shape [b h w c]
         """
-        if self.type != 'W': x = torch.roll(x, shifts=(-(self.window_size // 2), -(self.window_size // 2)), dims=(1, 2))
+        if self.type != 'W':
+            x = torch.roll(x, shifts=(-(self.window_size // 2), -(self.window_size // 2)), dims=(1, 2))
+
         x = rearrange(x, 'b (w1 p1) (w2 p2) c -> b w1 w2 p1 p2 c', p1=self.window_size, p2=self.window_size)
         h_windows = x.size(1)
         w_windows = x.size(2)
@@ -85,8 +87,9 @@ class WMSA(nn.Module):
         output = self.linear(output)
         output = rearrange(output, 'b (w1 w2) (p1 p2) c -> b (w1 p1) (w2 p2) c', w1=h_windows, p1=self.window_size)

-        if self.type != 'W': output = torch.roll(output, shifts=(self.window_size // 2, self.window_size // 2),
-                                                 dims=(1, 2))
+        if self.type != 'W':
+            output = torch.roll(output, shifts=(self.window_size // 2, self.window_size // 2), dims=(1, 2))
+
         return output

     def relative_embedding(self):
@@ -1,4 +1,3 @@
-import contextlib
 import os

 import numpy as np
@@ -8,7 +7,7 @@ from basicsr.utils.download_util import load_file_from_url
 from tqdm import tqdm

 from modules import modelloader, devices, script_callbacks, shared
-from modules.shared import cmd_opts, opts, state
+from modules.shared import opts, state
 from swinir_model_arch import SwinIR as net
 from swinir_model_arch_v2 import Swin2SR as net2
 from modules.upscaler import Upscaler, UpscalerData
@@ -45,7 +44,7 @@ class UpscalerSwinIR(Upscaler):
             img = upscale(img, model)
             try:
                 torch.cuda.empty_cache()
-            except:
+            except Exception:
                 pass
             return img

@@ -644,7 +644,7 @@ class SwinIR(nn.Module):
     """

     def __init__(self, img_size=64, patch_size=1, in_chans=3,
-                 embed_dim=96, depths=[6, 6, 6, 6], num_heads=[6, 6, 6, 6],
+                 embed_dim=96, depths=(6, 6, 6, 6), num_heads=(6, 6, 6, 6),
                  window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None,
                  drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
                  norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
@@ -844,7 +844,7 @@ class SwinIR(nn.Module):
         H, W = self.patches_resolution
         flops += H * W * 3 * self.embed_dim * 9
         flops += self.patch_embed.flops()
-        for i, layer in enumerate(self.layers):
+        for layer in self.layers:
             flops += layer.flops()
         flops += H * W * 3 * self.embed_dim * self.embed_dim
         flops += self.upsample.flops()
@@ -74,7 +74,7 @@ class WindowAttention(nn.Module):
     """

     def __init__(self, dim, window_size, num_heads, qkv_bias=True, attn_drop=0., proj_drop=0.,
-                 pretrained_window_size=[0, 0]):
+                 pretrained_window_size=(0, 0)):

         super().__init__()
         self.dim = dim
@@ -698,7 +698,7 @@ class Swin2SR(nn.Module):
     """

     def __init__(self, img_size=64, patch_size=1, in_chans=3,
-                 embed_dim=96, depths=[6, 6, 6, 6], num_heads=[6, 6, 6, 6],
+                 embed_dim=96, depths=(6, 6, 6, 6), num_heads=(6, 6, 6, 6),
                  window_size=7, mlp_ratio=4., qkv_bias=True,
                  drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
                  norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
@@ -994,7 +994,7 @@ class Swin2SR(nn.Module):
         H, W = self.patches_resolution
         flops += H * W * 3 * self.embed_dim * 9
         flops += self.patch_embed.flops()
-        for i, layer in enumerate(self.layers):
+        for layer in self.layers:
             flops += layer.flops()
         flops += H * W * 3 * self.embed_dim * self.embed_dim
         flops += self.upsample.flops()
@@ -15,7 +15,8 @@ from secrets import compare_digest

 import modules.shared as shared
 from modules import sd_samplers, deepbooru, sd_hijack, images, scripts, ui, postprocessing
-from modules.api.models import *
+from modules.api import models
 from modules.shared import opts
 from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images
 from modules.textual_inversion.textual_inversion import create_embedding, train_embedding
 from modules.textual_inversion.preprocess import preprocess
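Dropping `from modules.api.models import *` in favour of `from modules.api import models` is what forces every response class below to gain a `models.` prefix. The win is traceability: with a star import, neither a reader nor the linter can tell where a name came from. A small self-contained contrast using the stdlib (purely illustrative):

    import json                        # explicit: every use names its origin
    data = json.loads('{"a": 1}')

    from json import *                 # star import: loads() appears unbidden
    data2 = loads('{"a": 1}')          # origin invisible; F401/F405 go blind
    assert data == data2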
@@ -25,21 +26,24 @@ from modules.sd_models import checkpoints_list, unload_model_weights, reload_mod
 from modules.sd_models_config import find_checkpoint_config_near_filename
 from modules.realesrgan_model import get_realesrgan_models
 from modules import devices
-from typing import List
+from typing import Dict, List, Any
 import piexif
 import piexif.helper

+
 def upscaler_to_index(name: str):
     try:
         return [x.name.lower() for x in shared.sd_upscalers].index(name.lower())
-    except:
-        raise HTTPException(status_code=400, detail=f"Invalid upscaler, needs to be one of these: {' , '.join([x.name for x in sd_upscalers])}")
+    except Exception as e:
+        raise HTTPException(status_code=400, detail=f"Invalid upscaler, needs to be one of these: {' , '.join([x.name for x in shared.sd_upscalers])}") from e

+
 def script_name_to_index(name, scripts):
     try:
         return [script.title().lower() for script in scripts].index(name.lower())
-    except:
-        raise HTTPException(status_code=422, detail=f"Script '{name}' not found")
+    except Exception as e:
+        raise HTTPException(status_code=422, detail=f"Script '{name}' not found") from e

+
 def validate_sampler_name(name):
     config = sd_samplers.all_samplers_map.get(name, None)
@@ -48,20 +52,23 @@ def validate_sampler_name(name):

     return name

+
 def setUpscalers(req: dict):
     reqDict = vars(req)
     reqDict['extras_upscaler_1'] = reqDict.pop('upscaler_1', None)
     reqDict['extras_upscaler_2'] = reqDict.pop('upscaler_2', None)
     return reqDict

+
 def decode_base64_to_image(encoding):
     if encoding.startswith("data:image/"):
         encoding = encoding.split(";")[1].split(",")[1]
     try:
         image = Image.open(BytesIO(base64.b64decode(encoding)))
         return image
-    except Exception as err:
-        raise HTTPException(status_code=500, detail="Invalid encoded image")
+    except Exception as e:
+        raise HTTPException(status_code=500, detail="Invalid encoded image") from e

+
 def encode_pil_to_base64(image):
     with io.BytesIO() as output_bytes:
@@ -92,6 +99,7 @@ def encode_pil_to_base64(image):

     return base64.b64encode(bytes_data)

+
 def api_middleware(app: FastAPI):
     rich_available = True
     try:
@@ -99,7 +107,7 @@ def api_middleware(app: FastAPI):
         import starlette  # importing just so it can be placed on silent list
         from rich.console import Console
         console = Console()
-    except:
+    except Exception:
         import traceback
         rich_available = False

@@ -157,7 +165,7 @@
 class Api:
     def __init__(self, app: FastAPI, queue_lock: Lock):
         if shared.cmd_opts.api_auth:
-            self.credentials = dict()
+            self.credentials = {}
             for auth in shared.cmd_opts.api_auth.split(","):
                 user, password = auth.split(":")
                 self.credentials[user] = password
@@ -166,36 +174,36 @@ class Api:
         self.app = app
         self.queue_lock = queue_lock
         api_middleware(self.app)
-        self.add_api_route("/sdapi/v1/txt2img", self.text2imgapi, methods=["POST"], response_model=TextToImageResponse)
-        self.add_api_route("/sdapi/v1/img2img", self.img2imgapi, methods=["POST"], response_model=ImageToImageResponse)
-        self.add_api_route("/sdapi/v1/extra-single-image", self.extras_single_image_api, methods=["POST"], response_model=ExtrasSingleImageResponse)
-        self.add_api_route("/sdapi/v1/extra-batch-images", self.extras_batch_images_api, methods=["POST"], response_model=ExtrasBatchImagesResponse)
-        self.add_api_route("/sdapi/v1/png-info", self.pnginfoapi, methods=["POST"], response_model=PNGInfoResponse)
-        self.add_api_route("/sdapi/v1/progress", self.progressapi, methods=["GET"], response_model=ProgressResponse)
+        self.add_api_route("/sdapi/v1/txt2img", self.text2imgapi, methods=["POST"], response_model=models.TextToImageResponse)
+        self.add_api_route("/sdapi/v1/img2img", self.img2imgapi, methods=["POST"], response_model=models.ImageToImageResponse)
+        self.add_api_route("/sdapi/v1/extra-single-image", self.extras_single_image_api, methods=["POST"], response_model=models.ExtrasSingleImageResponse)
+        self.add_api_route("/sdapi/v1/extra-batch-images", self.extras_batch_images_api, methods=["POST"], response_model=models.ExtrasBatchImagesResponse)
+        self.add_api_route("/sdapi/v1/png-info", self.pnginfoapi, methods=["POST"], response_model=models.PNGInfoResponse)
+        self.add_api_route("/sdapi/v1/progress", self.progressapi, methods=["GET"], response_model=models.ProgressResponse)
         self.add_api_route("/sdapi/v1/interrogate", self.interrogateapi, methods=["POST"])
         self.add_api_route("/sdapi/v1/interrupt", self.interruptapi, methods=["POST"])
         self.add_api_route("/sdapi/v1/skip", self.skip, methods=["POST"])
-        self.add_api_route("/sdapi/v1/options", self.get_config, methods=["GET"], response_model=OptionsModel)
+        self.add_api_route("/sdapi/v1/options", self.get_config, methods=["GET"], response_model=models.OptionsModel)
         self.add_api_route("/sdapi/v1/options", self.set_config, methods=["POST"])
-        self.add_api_route("/sdapi/v1/cmd-flags", self.get_cmd_flags, methods=["GET"], response_model=FlagsModel)
-        self.add_api_route("/sdapi/v1/samplers", self.get_samplers, methods=["GET"], response_model=List[SamplerItem])
-        self.add_api_route("/sdapi/v1/upscalers", self.get_upscalers, methods=["GET"], response_model=List[UpscalerItem])
-        self.add_api_route("/sdapi/v1/sd-models", self.get_sd_models, methods=["GET"], response_model=List[SDModelItem])
-        self.add_api_route("/sdapi/v1/hypernetworks", self.get_hypernetworks, methods=["GET"], response_model=List[HypernetworkItem])
-        self.add_api_route("/sdapi/v1/face-restorers", self.get_face_restorers, methods=["GET"], response_model=List[FaceRestorerItem])
-        self.add_api_route("/sdapi/v1/realesrgan-models", self.get_realesrgan_models, methods=["GET"], response_model=List[RealesrganItem])
-        self.add_api_route("/sdapi/v1/prompt-styles", self.get_prompt_styles, methods=["GET"], response_model=List[PromptStyleItem])
-        self.add_api_route("/sdapi/v1/embeddings", self.get_embeddings, methods=["GET"], response_model=EmbeddingsResponse)
+        self.add_api_route("/sdapi/v1/cmd-flags", self.get_cmd_flags, methods=["GET"], response_model=models.FlagsModel)
+        self.add_api_route("/sdapi/v1/samplers", self.get_samplers, methods=["GET"], response_model=List[models.SamplerItem])
+        self.add_api_route("/sdapi/v1/upscalers", self.get_upscalers, methods=["GET"], response_model=List[models.UpscalerItem])
+        self.add_api_route("/sdapi/v1/sd-models", self.get_sd_models, methods=["GET"], response_model=List[models.SDModelItem])
+        self.add_api_route("/sdapi/v1/hypernetworks", self.get_hypernetworks, methods=["GET"], response_model=List[models.HypernetworkItem])
+        self.add_api_route("/sdapi/v1/face-restorers", self.get_face_restorers, methods=["GET"], response_model=List[models.FaceRestorerItem])
+        self.add_api_route("/sdapi/v1/realesrgan-models", self.get_realesrgan_models, methods=["GET"], response_model=List[models.RealesrganItem])
+        self.add_api_route("/sdapi/v1/prompt-styles", self.get_prompt_styles, methods=["GET"], response_model=List[models.PromptStyleItem])
+        self.add_api_route("/sdapi/v1/embeddings", self.get_embeddings, methods=["GET"], response_model=models.EmbeddingsResponse)
         self.add_api_route("/sdapi/v1/refresh-checkpoints", self.refresh_checkpoints, methods=["POST"])
-        self.add_api_route("/sdapi/v1/create/embedding", self.create_embedding, methods=["POST"], response_model=CreateResponse)
-        self.add_api_route("/sdapi/v1/create/hypernetwork", self.create_hypernetwork, methods=["POST"], response_model=CreateResponse)
-        self.add_api_route("/sdapi/v1/preprocess", self.preprocess, methods=["POST"], response_model=PreprocessResponse)
-        self.add_api_route("/sdapi/v1/train/embedding", self.train_embedding, methods=["POST"], response_model=TrainResponse)
-        self.add_api_route("/sdapi/v1/train/hypernetwork", self.train_hypernetwork, methods=["POST"], response_model=TrainResponse)
-        self.add_api_route("/sdapi/v1/memory", self.get_memory, methods=["GET"], response_model=MemoryResponse)
+        self.add_api_route("/sdapi/v1/create/embedding", self.create_embedding, methods=["POST"], response_model=models.CreateResponse)
+        self.add_api_route("/sdapi/v1/create/hypernetwork", self.create_hypernetwork, methods=["POST"], response_model=models.CreateResponse)
+        self.add_api_route("/sdapi/v1/preprocess", self.preprocess, methods=["POST"], response_model=models.PreprocessResponse)
+        self.add_api_route("/sdapi/v1/train/embedding", self.train_embedding, methods=["POST"], response_model=models.TrainResponse)
+        self.add_api_route("/sdapi/v1/train/hypernetwork", self.train_hypernetwork, methods=["POST"], response_model=models.TrainResponse)
+        self.add_api_route("/sdapi/v1/memory", self.get_memory, methods=["GET"], response_model=models.MemoryResponse)
         self.add_api_route("/sdapi/v1/unload-checkpoint", self.unloadapi, methods=["POST"])
         self.add_api_route("/sdapi/v1/reload-checkpoint", self.reloadapi, methods=["POST"])
-        self.add_api_route("/sdapi/v1/scripts", self.get_scripts_list, methods=["GET"], response_model=ScriptsList)
+        self.add_api_route("/sdapi/v1/scripts", self.get_scripts_list, methods=["GET"], response_model=models.ScriptsList)

         self.default_script_arg_txt2img = []
         self.default_script_arg_img2img = []
@@ -224,7 +232,7 @@
         t2ilist = [str(title.lower()) for title in scripts.scripts_txt2img.titles]
         i2ilist = [str(title.lower()) for title in scripts.scripts_img2img.titles]

-        return ScriptsList(txt2img = t2ilist, img2img = i2ilist)
+        return models.ScriptsList(txt2img=t2ilist, img2img=i2ilist)

     def get_script(self, script_name, script_runner):
         if script_name is None or script_name == "":
@@ -264,11 +272,11 @@
         if request.alwayson_scripts and (len(request.alwayson_scripts) > 0):
             for alwayson_script_name in request.alwayson_scripts.keys():
                 alwayson_script = self.get_script(alwayson_script_name, script_runner)
-                if alwayson_script == None:
+                if alwayson_script is None:
                     raise HTTPException(status_code=422, detail=f"always on script {alwayson_script_name} not found")
                 # Selectable script in always on script param check
-                if alwayson_script.alwayson == False:
-                    raise HTTPException(status_code=422, detail=f"Cannot have a selectable script in the always on scripts params")
+                if alwayson_script.alwayson is False:
+                    raise HTTPException(status_code=422, detail="Cannot have a selectable script in the always on scripts params")
                 # always on script with no arg should always run so you don't really need to add them to the requests
                 if "args" in request.alwayson_scripts[alwayson_script_name]:
                     # min between arg length in scriptrunner and arg length in the request
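`if alwayson_script == None:` to `is None` (and `== False` to `is False`) is Ruff's E711/E712: `==` dispatches to `__eq__`, which a class may override to return anything, while `is` compares identity with the unique `None` singleton. Illustrative:

    class AlwaysEqual:
        def __eq__(self, other):
            return True            # pathological but perfectly legal

    obj = AlwaysEqual()
    assert (obj == None) is True   # the equality check lies
    assert (obj is None) is False  # identity tells the truth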
@@ -276,7 +284,7 @@
                     script_args[alwayson_script.args_from + idx] = request.alwayson_scripts[alwayson_script_name]["args"][idx]
         return script_args

-    def text2imgapi(self, txt2imgreq: StableDiffusionTxt2ImgProcessingAPI):
+    def text2imgapi(self, txt2imgreq: models.StableDiffusionTxt2ImgProcessingAPI):
         script_runner = scripts.scripts_txt2img
         if not script_runner.scripts:
             script_runner.initialize_scripts(False)
@@ -310,7 +318,7 @@
             p.outpath_samples = opts.outdir_txt2img_samples

             shared.state.begin()
-            if selectable_scripts != None:
+            if selectable_scripts is not None:
                 p.script_args = script_args
                 processed = scripts.scripts_txt2img.run(p, *p.script_args)  # Need to pass args as list here
             else:
@@ -320,9 +328,9 @@

         b64images = list(map(encode_pil_to_base64, processed.images)) if send_images else []

-        return TextToImageResponse(images=b64images, parameters=vars(txt2imgreq), info=processed.js())
+        return models.TextToImageResponse(images=b64images, parameters=vars(txt2imgreq), info=processed.js())

-    def img2imgapi(self, img2imgreq: StableDiffusionImg2ImgProcessingAPI):
+    def img2imgapi(self, img2imgreq: models.StableDiffusionImg2ImgProcessingAPI):
         init_images = img2imgreq.init_images
         if init_images is None:
             raise HTTPException(status_code=404, detail="Init image not found")
@@ -367,7 +375,7 @@
             p.outpath_samples = opts.outdir_img2img_samples

             shared.state.begin()
-            if selectable_scripts != None:
+            if selectable_scripts is not None:
                 p.script_args = script_args
                 processed = scripts.scripts_img2img.run(p, *p.script_args)  # Need to pass args as list here
             else:
@@ -381,9 +389,9 @@
         img2imgreq.init_images = None
         img2imgreq.mask = None

-        return ImageToImageResponse(images=b64images, parameters=vars(img2imgreq), info=processed.js())
+        return models.ImageToImageResponse(images=b64images, parameters=vars(img2imgreq), info=processed.js())

-    def extras_single_image_api(self, req: ExtrasSingleImageRequest):
+    def extras_single_image_api(self, req: models.ExtrasSingleImageRequest):
         reqDict = setUpscalers(req)

         reqDict['image'] = decode_base64_to_image(reqDict['image'])
@@ -391,9 +399,9 @@
         with self.queue_lock:
             result = postprocessing.run_extras(extras_mode=0, image_folder="", input_dir="", output_dir="", save_output=False, **reqDict)

-        return ExtrasSingleImageResponse(image=encode_pil_to_base64(result[0][0]), html_info=result[1])
+        return models.ExtrasSingleImageResponse(image=encode_pil_to_base64(result[0][0]), html_info=result[1])

-    def extras_batch_images_api(self, req: ExtrasBatchImagesRequest):
+    def extras_batch_images_api(self, req: models.ExtrasBatchImagesRequest):
         reqDict = setUpscalers(req)

         image_list = reqDict.pop('imageList', [])
@@ -402,15 +410,15 @@
         with self.queue_lock:
             result = postprocessing.run_extras(extras_mode=1, image_folder=image_folder, image="", input_dir="", output_dir="", save_output=False, **reqDict)

-        return ExtrasBatchImagesResponse(images=list(map(encode_pil_to_base64, result[0])), html_info=result[1])
+        return models.ExtrasBatchImagesResponse(images=list(map(encode_pil_to_base64, result[0])), html_info=result[1])

-    def pnginfoapi(self, req: PNGInfoRequest):
+    def pnginfoapi(self, req: models.PNGInfoRequest):
         if(not req.image.strip()):
-            return PNGInfoResponse(info="")
+            return models.PNGInfoResponse(info="")

         image = decode_base64_to_image(req.image.strip())
         if image is None:
-            return PNGInfoResponse(info="")
+            return models.PNGInfoResponse(info="")

         geninfo, items = images.read_info_from_image(image)
         if geninfo is None:
@@ -418,13 +426,13 @@

         items = {**{'parameters': geninfo}, **items}

-        return PNGInfoResponse(info=geninfo, items=items)
+        return models.PNGInfoResponse(info=geninfo, items=items)

-    def progressapi(self, req: ProgressRequest = Depends()):
+    def progressapi(self, req: models.ProgressRequest = Depends()):
         # copy from check_progress_call of ui.py

         if shared.state.job_count == 0:
-            return ProgressResponse(progress=0, eta_relative=0, state=shared.state.dict(), textinfo=shared.state.textinfo)
+            return models.ProgressResponse(progress=0, eta_relative=0, state=shared.state.dict(), textinfo=shared.state.textinfo)

         # avoid dividing zero
         progress = 0.01
@@ -446,9 +454,9 @@
         if shared.state.current_image and not req.skip_current_image:
             current_image = encode_pil_to_base64(shared.state.current_image)

-        return ProgressResponse(progress=progress, eta_relative=eta_relative, state=shared.state.dict(), current_image=current_image, textinfo=shared.state.textinfo)
+        return models.ProgressResponse(progress=progress, eta_relative=eta_relative, state=shared.state.dict(), current_image=current_image, textinfo=shared.state.textinfo)

-    def interrogateapi(self, interrogatereq: InterrogateRequest):
+    def interrogateapi(self, interrogatereq: models.InterrogateRequest):
         image_b64 = interrogatereq.image
         if image_b64 is None:
             raise HTTPException(status_code=404, detail="Image not found")
@@ -465,7 +473,7 @@
         else:
             raise HTTPException(status_code=404, detail="Model not found")

-        return InterrogateResponse(caption=processed)
+        return models.InterrogateResponse(caption=processed)

     def interruptapi(self):
         shared.state.interrupt()
@@ -570,36 +578,36 @@
             filename = create_embedding(**args)  # create empty embedding
             sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings()  # reload embeddings so new one can be immediately used
             shared.state.end()
-            return CreateResponse(info=f"create embedding filename: {filename}")
+            return models.CreateResponse(info=f"create embedding filename: {filename}")
         except AssertionError as e:
             shared.state.end()
-            return TrainResponse(info=f"create embedding error: {e}")
+            return models.TrainResponse(info=f"create embedding error: {e}")

     def create_hypernetwork(self, args: dict):
         try:
             shared.state.begin()
             filename = create_hypernetwork(**args)  # create empty embedding
             shared.state.end()
-            return CreateResponse(info=f"create hypernetwork filename: {filename}")
+            return models.CreateResponse(info=f"create hypernetwork filename: {filename}")
         except AssertionError as e:
             shared.state.end()
-            return TrainResponse(info=f"create hypernetwork error: {e}")
+            return models.TrainResponse(info=f"create hypernetwork error: {e}")

     def preprocess(self, args: dict):
         try:
             shared.state.begin()
             preprocess(**args)  # quick operation unless blip/booru interrogation is enabled
             shared.state.end()
-            return PreprocessResponse(info = 'preprocess complete')
+            return models.PreprocessResponse(info = 'preprocess complete')
         except KeyError as e:
             shared.state.end()
-            return PreprocessResponse(info=f"preprocess error: invalid token: {e}")
+            return models.PreprocessResponse(info=f"preprocess error: invalid token: {e}")
         except AssertionError as e:
             shared.state.end()
-            return PreprocessResponse(info=f"preprocess error: {e}")
+            return models.PreprocessResponse(info=f"preprocess error: {e}")
         except FileNotFoundError as e:
             shared.state.end()
-            return PreprocessResponse(info=f'preprocess error: {e}')
+            return models.PreprocessResponse(info=f'preprocess error: {e}')

     def train_embedding(self, args: dict):
         try:
@@ -617,10 +625,10 @@
                 if not apply_optimizations:
                     sd_hijack.apply_optimizations()
                 shared.state.end()
-                return TrainResponse(info=f"train embedding complete: filename: {filename} error: {error}")
+                return models.TrainResponse(info=f"train embedding complete: filename: {filename} error: {error}")
             except AssertionError as msg:
                 shared.state.end()
-                return TrainResponse(info=f"train embedding error: {msg}")
+                return models.TrainResponse(info=f"train embedding error: {msg}")

     def train_hypernetwork(self, args: dict):
         try:
@@ -641,14 +649,15 @@
                 if not apply_optimizations:
                     sd_hijack.apply_optimizations()
                 shared.state.end()
-                return TrainResponse(info=f"train embedding complete: filename: {filename} error: {error}")
-            except AssertionError as msg:
+                return models.TrainResponse(info=f"train embedding complete: filename: {filename} error: {error}")
+            except AssertionError:
                 shared.state.end()
-                return TrainResponse(info=f"train embedding error: {error}")
+                return models.TrainResponse(info=f"train embedding error: {error}")

     def get_memory(self):
         try:
-            import os, psutil
+            import os
+            import psutil
             process = psutil.Process(os.getpid())
             res = process.memory_info()  # only rss is cross-platform guaranteed so we dont rely on other values
             ram_total = 100 * res.rss / process.memory_percent()  # and total memory is calculated as actual value is not cross-platform safe
@@ -675,10 +684,10 @@
                     'events': warnings,
                 }
             else:
-                cuda = { 'error': 'unavailable' }
+                cuda = {'error': 'unavailable'}
         except Exception as err:
-            cuda = { 'error': f'{err}' }
-        return MemoryResponse(ram = ram, cuda = cuda)
+            cuda = {'error': f'{err}'}
+        return models.MemoryResponse(ram=ram, cuda=cuda)

     def launch(self, server_name, port):
         self.app.include_router(self.router)
@@ -223,8 +223,9 @@ for key in _options:
     if(_options[key].dest != 'help'):
         flag = _options[key]
         _type = str
-        if _options[key].default is not None: _type = type(_options[key].default)
-        flags.update({flag.dest: (_type,Field(default=flag.default, description=flag.help))})
+        if _options[key].default is not None:
+            _type = type(_options[key].default)
+        flags.update({flag.dest: (_type, Field(default=flag.default, description=flag.help))})

 FlagsModel = create_model("Flags", **flags)

@@ -1,6 +1,6 @@
 import argparse
 import os
-from modules.paths_internal import models_path, script_path, data_path, extensions_dir, extensions_builtin_dir, sd_default_config, sd_model_file
+from modules.paths_internal import models_path, script_path, data_path, extensions_dir, extensions_builtin_dir, sd_default_config, sd_model_file  # noqa: F401

 parser = argparse.ArgumentParser()

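The `# noqa: F401` marker appears wherever a module imports names only to re-export them (other modules import these paths via this file's namespace). F401 flags imports that the importing module itself never uses; the comment opts that one line out of exactly that rule. A minimal illustration (hypothetical module, not from the commit):

    # In a module of its own, this import is "unused" locally, but callers may
    # still rely on `from thismodule import join`; the noqa keeps Ruff from
    # treating the deliberate re-export as dead code.
    from os.path import join  # noqa: F401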
@@ -1,14 +1,12 @@
 # this file is copied from CodeFormer repository. Please see comment in modules/codeformer_model.py

 import math
-import numpy as np
 import torch
 from torch import nn, Tensor
 import torch.nn.functional as F
-from typing import Optional, List
+from typing import Optional

-from modules.codeformer.vqgan_arch import *
-from basicsr.utils import get_root_logger
+from modules.codeformer.vqgan_arch import VQAutoEncoder, ResBlock
 from basicsr.utils.registry import ARCH_REGISTRY

 def calc_mean_std(feat, eps=1e-5):
@@ -163,8 +161,8 @@ class Fuse_sft_block(nn.Module):
 class CodeFormer(VQAutoEncoder):
     def __init__(self, dim_embd=512, n_head=8, n_layers=9,
                  codebook_size=1024, latent_size=256,
-                 connect_list=['32', '64', '128', '256'],
-                 fix_modules=['quantize','generator']):
+                 connect_list=('32', '64', '128', '256'),
+                 fix_modules=('quantize', 'generator')):
         super(CodeFormer, self).__init__(512, 64, [1, 2, 2, 4, 4, 8], 'nearest',2, [16], codebook_size)

         if fix_modules is not None:
@@ -5,11 +5,9 @@ VQGAN code, adapted from the original created by the Unleashing Transformers aut
 https://github.com/samb-t/unleashing-transformers/blob/master/models/vqgan.py

 '''
-import numpy as np
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
-import copy
 from basicsr.utils import get_root_logger
 from basicsr.utils.registry import ARCH_REGISTRY

@@ -328,7 +326,7 @@ class Generator(nn.Module):

 @ARCH_REGISTRY.register()
 class VQAutoEncoder(nn.Module):
-    def __init__(self, img_size, nf, ch_mult, quantizer="nearest", res_blocks=2, attn_resolutions=[16], codebook_size=1024, emb_dim=256,
+    def __init__(self, img_size, nf, ch_mult, quantizer="nearest", res_blocks=2, attn_resolutions=None, codebook_size=1024, emb_dim=256,
                  beta=0.25, gumbel_straight_through=False, gumbel_kl_weight=1e-8, model_path=None):
         super().__init__()
         logger = get_root_logger()
@@ -339,7 +337,7 @@ class VQAutoEncoder(nn.Module):
         self.embed_dim = emb_dim
         self.ch_mult = ch_mult
         self.resolution = img_size
-        self.attn_resolutions = attn_resolutions
+        self.attn_resolutions = attn_resolutions or [16]
         self.quantizer_type = quantizer
         self.encoder = Encoder(
             self.in_channels,
@@ -33,11 +33,9 @@ def setup_model(dirname):
     try:
         from torchvision.transforms.functional import normalize
         from modules.codeformer.codeformer_arch import CodeFormer
         from basicsr.utils.download_util import load_file_from_url
-        from basicsr.utils import imwrite, img2tensor, tensor2img
+        from basicsr.utils import img2tensor, tensor2img
         from facelib.utils.face_restoration_helper import FaceRestoreHelper
         from facelib.detection.retinaface import retinaface
-        from modules.shared import cmd_opts

         net_class = CodeFormer

@@ -96,7 +94,7 @@ def setup_model(dirname):
             self.face_helper.get_face_landmarks_5(only_center_face=False, resize=640, eye_dist_threshold=5)
             self.face_helper.align_warp_face()

-            for idx, cropped_face in enumerate(self.face_helper.cropped_faces):
+            for cropped_face in self.face_helper.cropped_faces:
                 cropped_face_t = img2tensor(cropped_face / 255., bgr2rgb=True, float32=True)
                 normalize(cropped_face_t, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True)
                 cropped_face_t = cropped_face_t.unsqueeze(0).to(devices.device_codeformer)
@@ -14,7 +14,7 @@ from collections import OrderedDict
 import git

 from modules import shared, extensions
-from modules.paths_internal import extensions_dir, extensions_builtin_dir, script_path, config_states_dir
+from modules.paths_internal import script_path, config_states_dir


 all_config_states = OrderedDict()
@@ -35,7 +35,7 @@ def list_config_states():
         j["filepath"] = path
         config_states.append(j)

-    config_states = list(sorted(config_states, key=lambda cs: cs["created_at"], reverse=True))
+    config_states = sorted(config_states, key=lambda cs: cs["created_at"], reverse=True)

     for cs in config_states:
         timestamp = time.asctime(time.gmtime(cs["created_at"]))
@@ -2,7 +2,6 @@ import os
 import re

 import torch
-from PIL import Image
 import numpy as np

 from modules import modelloader, paths, deepbooru_model, devices, images, shared
@@ -79,7 +78,7 @@ class DeepDanbooru:

         res = []

-        filtertags = set([x.strip().replace(' ', '_') for x in shared.opts.deepbooru_filter_tags.split(",")])
+        filtertags = {x.strip().replace(' ', '_') for x in shared.opts.deepbooru_filter_tags.split(",")}

         for tag in [x for x in tags if x not in filtertags]:
             probability = probability_dict[tag]
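`set([... for ...])` to `{... for ...}` (Ruff C403): the set comprehension builds the set directly instead of materializing a temporary list first. Same result, one fewer allocation:

    raw = " anime , long_hair,  smile "
    old = set([t.strip().replace(' ', '_') for t in raw.split(",")])
    new = {t.strip().replace(' ', '_') for t in raw.split(",")}
    assert old == new == {"anime", "long_hair", "smile"}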
@@ -65,7 +65,7 @@ def enable_tf32():

         # enabling benchmark option seems to enable a range of cards to do fp16 when they otherwise can't
         # see https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/4407
-        if any([torch.cuda.get_device_capability(devid) == (7, 5) for devid in range(0, torch.cuda.device_count())]):
+        if any(torch.cuda.get_device_capability(devid) == (7, 5) for devid in range(0, torch.cuda.device_count())):
            torch.backends.cudnn.benchmark = True

         torch.backends.cuda.matmul.allow_tf32 = True
@@ -6,7 +6,7 @@ from PIL import Image
 from basicsr.utils.download_util import load_file_from_url

 import modules.esrgan_model_arch as arch
-from modules import shared, modelloader, images, devices
+from modules import modelloader, images, devices
 from modules.upscaler import Upscaler, UpscalerData
 from modules.shared import opts

@@ -16,9 +16,7 @@ def mod2normal(state_dict):
     # this code is copied from https://github.com/victorca25/iNNfer
     if 'conv_first.weight' in state_dict:
         crt_net = {}
-        items = []
-        for k, v in state_dict.items():
-            items.append(k)
+        items = list(state_dict)

         crt_net['model.0.weight'] = state_dict['conv_first.weight']
         crt_net['model.0.bias'] = state_dict['conv_first.bias']
@@ -52,9 +50,7 @@ def resrgan2normal(state_dict, nb=23):
     if "conv_first.weight" in state_dict and "body.0.rdb1.conv1.weight" in state_dict:
         re8x = 0
         crt_net = {}
-        items = []
-        for k, v in state_dict.items():
-            items.append(k)
+        items = list(state_dict)

         crt_net['model.0.weight'] = state_dict['conv_first.weight']
         crt_net['model.0.bias'] = state_dict['conv_first.bias']
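Both key-collection loops above collapse to `list(state_dict)`: iterating a dict yields its keys, so `list()` snapshots them in one expression. A sketch of the equivalence:

    state_dict = {"conv_first.weight": 1, "conv_first.bias": 2}

    items = []
    for k, v in state_dict.items():   # the shape of the old code (v unused)
        items.append(k)

    assert items == list(state_dict)  # one expression, same keys, same order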
@@ -2,7 +2,6 @@

 from collections import OrderedDict
-import math
 import functools
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
@@ -438,9 +437,11 @@ def conv_block(in_nc, out_nc, kernel_size, stride=1, dilation=1, groups=1, bias=
     padding = padding if pad_type == 'zero' else 0

     if convtype=='PartialConv2D':
+        from torchvision.ops import PartialConv2d  # this is definitely not going to work, but PartialConv2d doesn't work anyway and this shuts up static analyzer
         c = PartialConv2d(in_nc, out_nc, kernel_size=kernel_size, stride=stride, padding=padding,
                       dilation=dilation, bias=bias, groups=groups)
     elif convtype=='DeformConv2D':
+        from torchvision.ops import DeformConv2d  # not tested
         c = DeformConv2d(in_nc, out_nc, kernel_size=kernel_size, stride=stride, padding=padding,
                       dilation=dilation, bias=bias, groups=groups)
     elif convtype=='Conv3D':
@@ -3,11 +3,10 @@ import sys
 import traceback

 import time
-from datetime import datetime
 import git

 from modules import shared
-from modules.paths_internal import extensions_dir, extensions_builtin_dir, script_path
+from modules.paths_internal import extensions_dir, extensions_builtin_dir, script_path  # noqa: F401

 extensions = []

|
@ -91,7 +91,7 @@ def deactivate(p, extra_network_data):
|
|||
"""call deactivate for extra networks in extra_network_data in specified order, then call
|
||||
deactivate for all remaining registered networks"""
|
||||
|
||||
for extra_network_name, extra_network_args in extra_network_data.items():
|
||||
for extra_network_name in extra_network_data:
|
||||
extra_network = extra_network_registry.get(extra_network_name, None)
|
||||
if extra_network is None:
|
||||
continue
|
||||
|
|
|
@@ -1,4 +1,4 @@
-from modules import extra_networks, shared, extra_networks
+from modules import extra_networks, shared
 from modules.hypernetworks import hypernetwork


@@ -136,14 +136,14 @@ def run_modelmerger(id_task, primary_model_name, secondary_model_name, tertiary_
     result_is_instruct_pix2pix_model = False

     if theta_func2:
-        shared.state.textinfo = f"Loading B"
+        shared.state.textinfo = "Loading B"
         print(f"Loading {secondary_model_info.filename}...")
         theta_1 = sd_models.read_state_dict(secondary_model_info.filename, map_location='cpu')
     else:
         theta_1 = None

     if theta_func1:
-        shared.state.textinfo = f"Loading C"
+        shared.state.textinfo = "Loading C"
         print(f"Loading {tertiary_model_info.filename}...")
         theta_2 = sd_models.read_state_dict(tertiary_model_info.filename, map_location='cpu')

@ -1,15 +1,11 @@
|
|||
import base64
|
||||
import html
|
||||
import io
|
||||
import math
|
||||
import os
|
||||
import re
|
||||
from pathlib import Path
|
||||
|
||||
import gradio as gr
|
||||
from modules.paths import data_path
|
||||
from modules import shared, ui_tempdir, script_callbacks
|
||||
import tempfile
|
||||
from PIL import Image
|
||||
|
||||
re_param_code = r'\s*([\w ]+):\s*("(?:\\"[^,]|\\"|\\|[^\"])+"|[^,]*)(?:,|$)'
|
||||
|
@ -23,14 +19,14 @@ registered_param_bindings = []
|
|||
|
||||
|
||||
class ParamBinding:
|
||||
def __init__(self, paste_button, tabname, source_text_component=None, source_image_component=None, source_tabname=None, override_settings_component=None, paste_field_names=[]):
|
||||
def __init__(self, paste_button, tabname, source_text_component=None, source_image_component=None, source_tabname=None, override_settings_component=None, paste_field_names=None):
|
||||
self.paste_button = paste_button
|
||||
self.tabname = tabname
|
||||
self.source_text_component = source_text_component
|
||||
self.source_image_component = source_image_component
|
||||
self.source_tabname = source_tabname
|
||||
self.override_settings_component = override_settings_component
|
||||
self.paste_field_names = paste_field_names
|
||||
self.paste_field_names = paste_field_names or []
|
||||
|
||||
|
||||
def reset():
|
||||
|
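The ParamBinding change above is the standard fix for a mutable default argument (Ruff's B006). A standalone sketch of the pattern; the function names are illustrative, not from this codebase:

# A default like bucket=[] is evaluated once, at definition time, so
# every call that omits the argument mutates the same shared list.
def leaky(item, bucket=[]):
    bucket.append(item)
    return bucket

def fixed(item, bucket=None):
    bucket = bucket or []  # same None-sentinel idiom as ParamBinding
    bucket.append(item)
    return bucket

assert leaky(1) == [1] and leaky(2) == [1, 2]  # state leaks across calls
assert fixed(1) == [1] and fixed(2) == [2]     # fresh list per call

Note that `x or []` also replaces an explicitly passed empty list with a new one, which is harmless here but worth keeping in mind.
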
@ -251,7 +247,7 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model
lines.append(lastline)
lastline = ''

for i, line in enumerate(lines):
for line in lines:
line = line.strip()
if line.startswith("Negative prompt:"):
done_with_prompt = True

@ -78,7 +78,7 @@ def setup_model(dirname):

try:
from gfpgan import GFPGANer
from facexlib import detection, parsing
from facexlib import detection, parsing # noqa: F401
global user_path
global have_gfpgan
global gfpgan_constructor

@ -1,4 +1,3 @@
import csv
import datetime
import glob
import html

@ -18,7 +17,7 @@ from modules.textual_inversion.learn_schedule import LearnRateScheduler
from torch import einsum
from torch.nn.init import normal_, xavier_normal_, xavier_uniform_, kaiming_normal_, kaiming_uniform_, zeros_

from collections import defaultdict, deque
from collections import deque
from statistics import stdev, mean

@ -178,34 +177,34 @@ class Hypernetwork:

def weights(self):
res = []
for k, layers in self.layers.items():
for layers in self.layers.values():
for layer in layers:
res += layer.parameters()
return res

def train(self, mode=True):
for k, layers in self.layers.items():
for layers in self.layers.values():
for layer in layers:
layer.train(mode=mode)
for param in layer.parameters():
param.requires_grad = mode

def to(self, device):
for k, layers in self.layers.items():
for layers in self.layers.values():
for layer in layers:
layer.to(device)

return self

def set_multiplier(self, multiplier):
for k, layers in self.layers.items():
for layers in self.layers.values():
for layer in layers:
layer.multiplier = multiplier

return self

def eval(self):
for k, layers in self.layers.items():
for layers in self.layers.values():
for layer in layers:
layer.eval()
for param in layer.parameters():

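Every Hypernetwork method above switches to iterating `.values()` because the key was never used (Ruff's B007). A minimal sketch with an illustrative mapping:

layers = {"320": ["a", "b"], "768": ["c"]}  # illustrative
flat = []
# "for k, group in layers.items()" with an unused k is what B007 flags;
# iterating .values() states the intent directly.
for group in layers.values():
    flat.extend(group)
assert flat == ["a", "b", "c"]
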
@ -404,7 +403,7 @@ def attention_CrossAttention_forward(self, x, context=None, mask=None):
k = self.to_k(context_k)
v = self.to_v(context_v)

q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
q, k, v = (rearrange(t, 'b n (h d) -> (b h) n d', h=h) for t in (q, k, v))

sim = einsum('b i d, b j d -> b i j', q, k) * self.scale

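The q/k/v rewrite above is Ruff's C417 fix: a generator expression replaces `map` with a `lambda`. The two forms unpack identically, as this small sketch shows:

vals = (1, 2, 3)
a1, b1, c1 = map(lambda t: t * 2, vals)  # old form
a2, b2, c2 = (t * 2 for t in vals)       # generator expression
assert (a1, b1, c1) == (a2, b2, c2) == (2, 4, 6)
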
@ -620,7 +619,7 @@ def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradi
try:
sd_hijack_checkpoint.add()

for i in range((steps-initial_step) * gradient_step):
for _ in range((steps-initial_step) * gradient_step):
if scheduler.finished:
break
if shared.state.interrupted:

@ -1,19 +1,17 @@
import html
import os
import re

import gradio as gr
import modules.hypernetworks.hypernetwork
from modules import devices, sd_hijack, shared

not_available = ["hardswish", "multiheadattention"]
keys = list(x for x in modules.hypernetworks.hypernetwork.HypernetworkModule.activation_dict.keys() if x not in not_available)
keys = [x for x in modules.hypernetworks.hypernetwork.HypernetworkModule.activation_dict if x not in not_available]


def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False, dropout_structure=None):
filename = modules.hypernetworks.hypernetwork.create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure, activation_func, weight_init, add_layer_norm, use_dropout, dropout_structure)

return gr.Dropdown.update(choices=sorted([x for x in shared.hypernetworks.keys()])), f"Created: {filename}", ""
return gr.Dropdown.update(choices=sorted(shared.hypernetworks)), f"Created: {filename}", ""


def train_hypernetwork(*args):

@ -19,7 +19,7 @@ import json
import hashlib

from modules import sd_samplers, shared, script_callbacks, errors
from modules.shared import opts, cmd_opts
from modules.shared import opts

LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS)

@ -149,7 +149,7 @@ def draw_grid_annotations(im, width, height, hor_texts, ver_texts, margin=0):
return ImageFont.truetype(Roboto, fontsize)

def draw_texts(drawing, draw_x, draw_y, lines, initial_fnt, initial_fontsize):
for i, line in enumerate(lines):
for line in lines:
fnt = initial_fnt
fontsize = initial_fontsize
while drawing.multiline_textsize(line.text, font=fnt)[0] > line.allowed_width and fontsize > 0:

@ -409,13 +409,13 @@ class FilenameGenerator:
time_format = args[0] if len(args) > 0 and args[0] != "" else self.default_time_format
try:
time_zone = pytz.timezone(args[1]) if len(args) > 1 else None
except pytz.exceptions.UnknownTimeZoneError as _:
except pytz.exceptions.UnknownTimeZoneError:
time_zone = None

time_zone_time = time_datetime.astimezone(time_zone)
try:
formatted_time = time_zone_time.strftime(time_format)
except (ValueError, TypeError) as _:
except (ValueError, TypeError):
formatted_time = time_zone_time.strftime(self.default_time_format)

return sanitize_filename_part(formatted_time, replace_spaces=False)

@ -472,9 +472,9 @@ def get_next_sequence_number(path, basename):
prefix_length = len(basename)
for p in os.listdir(path):
if p.startswith(basename):
l = os.path.splitext(p[prefix_length:])[0].split('-') # splits the filename (removing the basename first if one is defined, so the sequence number is always the first element)
parts = os.path.splitext(p[prefix_length:])[0].split('-') # splits the filename (removing the basename first if one is defined, so the sequence number is always the first element)
try:
result = max(int(l[0]), result)
result = max(int(parts[0]), result)
except ValueError:
pass

@ -1,19 +1,15 @@
import math
import os
import sys
import traceback

import numpy as np
from PIL import Image, ImageOps, ImageFilter, ImageEnhance, ImageChops, UnidentifiedImageError

from modules import devices, sd_samplers
from modules import sd_samplers
from modules.generation_parameters_copypaste import create_override_settings_dict
from modules.processing import Processed, StableDiffusionProcessingImg2Img, process_images
from modules.shared import opts, state
import modules.shared as shared
import modules.processing as processing
from modules.ui import plaintext_to_html
import modules.images as images
import modules.scripts

@ -59,7 +55,7 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args):
# try to find corresponding mask for an image using simple filename matching
mask_image_path = os.path.join(inpaint_mask_dir, os.path.basename(image))
# if not found use first one ("same mask for all images" use-case)
if not mask_image_path in inpaint_masks:
if mask_image_path not in inpaint_masks:
mask_image_path = inpaint_masks[0]
mask_image = Image.open(mask_image_path)
p.image_mask = mask_image

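The membership test above is Ruff's E713 fix; both spellings parse to the same comparison, but `not in` reads as a single operator. A sketch with illustrative values:

inpaint_masks = ["mask.png"]  # illustrative
assert "other.png" not in inpaint_masks
# The old spelling negates the whole expression and is equivalent,
# just harder to read at a glance.
assert ("other.png" not in inpaint_masks) == (not "other.png" in inpaint_masks)
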
@ -11,7 +11,6 @@ import torch.hub
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode

import modules.shared as shared
from modules import devices, paths, shared, lowvram, modelloader, errors

blip_image_eval_size = 384

@ -160,7 +159,7 @@ class InterrogateModels:
text_array = text_array[0:int(shared.opts.interrogate_clip_dict_limit)]

top_count = min(top_count, len(text_array))
text_tokens = clip.tokenize([text for text in text_array], truncate=True).to(devices.device_interrogate)
text_tokens = clip.tokenize(list(text_array), truncate=True).to(devices.device_interrogate)
text_features = self.clip_model.encode_text(text_tokens).type(self.dtype)
text_features /= text_features.norm(dim=-1, keepdim=True)

@ -208,8 +207,8 @@ class InterrogateModels:

image_features /= image_features.norm(dim=-1, keepdim=True)

for name, topn, items in self.categories():
matches = self.rank(image_features, items, top_count=topn)
for cat in self.categories():
matches = self.rank(image_features, cat.items, top_count=cat.topn)
for match, score in matches:
if shared.opts.interrogate_return_ranks:
res += f", ({match}:{score/100:.3f})"

@ -1,6 +1,5 @@
import torch
import platform
from modules import paths
from modules.sd_hijack_utils import CondFunc
from packaging import version

@ -1,4 +1,3 @@
import glob
import os
import shutil
import importlib

@ -40,7 +39,7 @@ def load_models(model_path: str, model_url: str = None, command_path: str = None
if os.path.islink(full_path) and not os.path.exists(full_path):
print(f"Skipping broken symlink: {full_path}")
continue
if ext_blacklist is not None and any([full_path.endswith(x) for x in ext_blacklist]):
if ext_blacklist is not None and any(full_path.endswith(x) for x in ext_blacklist):
continue
if full_path not in output:
output.append(full_path)

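Dropping the brackets inside `any(...)` above (Ruff's C419) lets the check short-circuit instead of building the full list first. A sketch under illustrative names:

full_path = "model.ckpt"  # illustrative
ext_blacklist = [".ckpt", ".bin"]
# With a generator, any() stops at the first match; the list
# comprehension form evaluates endswith() for every element first.
assert any(full_path.endswith(x) for x in ext_blacklist)
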
@ -108,12 +107,12 @@ def move_files(src_path: str, dest_path: str, ext_filter: str = None):
print(f"Moving {file} from {src_path} to {dest_path}.")
try:
shutil.move(fullpath, dest_path)
except:
except Exception:
pass
if len(os.listdir(src_path)) == 0:
print(f"Removing empty folder: {src_path}")
shutil.rmtree(src_path, True)
except:
except Exception:
pass


@ -141,7 +140,7 @@ def load_upscalers():
full_model = f"modules.{model_name}_model"
try:
importlib.import_module(full_model)
except:
except Exception:
pass

datas = []

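The `except:` to `except Exception:` changes above recur throughout this commit (Ruff's E722). The distinction matters because a bare except also swallows interpreter exits; a self-contained sketch:

def flaky():  # illustrative stand-in for the real call
    raise OSError("illustrative failure")

try:
    flaky()
except Exception:
    # Unlike a bare "except:", this does not trap KeyboardInterrupt or
    # SystemExit, which inherit from BaseException rather than Exception.
    pass
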
@ -52,7 +52,7 @@ class DDPM(pl.LightningModule):
beta_schedule="linear",
loss_type="l2",
ckpt_path=None,
ignore_keys=[],
ignore_keys=None,
load_only_unet=False,
monitor="val/loss",
use_ema=True,

@ -107,7 +107,7 @@ class DDPM(pl.LightningModule):
print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")

if ckpt_path is not None:
self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet)
self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys or [], only_model=load_only_unet)

# If initialing from EMA-only checkpoint, create EMA model after loading.
if self.use_ema and not load_ema:

@ -194,7 +194,9 @@ class DDPM(pl.LightningModule):
if context is not None:
print(f"{context}: Restored training weights")

def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):
def init_from_ckpt(self, path, ignore_keys=None, only_model=False):
ignore_keys = ignore_keys or []

sd = torch.load(path, map_location="cpu")
if "state_dict" in list(sd.keys()):
sd = sd["state_dict"]

@ -403,7 +405,7 @@ class DDPM(pl.LightningModule):

@torch.no_grad()
def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs):
log = dict()
log = {}
x = self.get_input(batch, self.first_stage_key)
N = min(x.shape[0], N)
n_row = min(x.shape[0], n_row)

@ -411,7 +413,7 @@ class DDPM(pl.LightningModule):
log["inputs"] = x

# get diffusion row
diffusion_row = list()
diffusion_row = []
x_start = x[:n_row]

for t in range(self.num_timesteps):

@ -473,13 +475,13 @@ class LatentDiffusion(DDPM):
conditioning_key = None
ckpt_path = kwargs.pop("ckpt_path", None)
ignore_keys = kwargs.pop("ignore_keys", [])
super().__init__(conditioning_key=conditioning_key, *args, load_ema=load_ema, **kwargs)
super().__init__(*args, conditioning_key=conditioning_key, load_ema=load_ema, **kwargs)
self.concat_mode = concat_mode
self.cond_stage_trainable = cond_stage_trainable
self.cond_stage_key = cond_stage_key
try:
self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1
except:
except Exception:
self.num_downs = 0
if not scale_by_std:
self.scale_factor = scale_factor

@ -891,16 +893,6 @@ class LatentDiffusion(DDPM):
c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float()))
return self.p_losses(x, c, t, *args, **kwargs)

def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset
def rescale_bbox(bbox):
x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2])
y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3])
w = min(bbox[2] / crop_coordinates[2], 1 - x0)
h = min(bbox[3] / crop_coordinates[3], 1 - y0)
return x0, y0, w, h

return [rescale_bbox(b) for b in bboxes]

def apply_model(self, x_noisy, t, cond, return_ids=False):

if isinstance(cond, dict):

@ -1140,7 +1132,7 @@ class LatentDiffusion(DDPM):
if cond is not None:
if isinstance(cond, dict):
cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
[x[:batch_size] for x in cond[key]] for key in cond}
else:
cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]

@ -1171,8 +1163,10 @@ class LatentDiffusion(DDPM):

if i % log_every_t == 0 or i == timesteps - 1:
intermediates.append(x0_partial)
if callback: callback(i)
if img_callback: img_callback(img, i)
if callback:
callback(i)
if img_callback:
img_callback(img, i)
return img, intermediates

@torch.no_grad()

@ -1219,8 +1213,10 @@ class LatentDiffusion(DDPM):

if i % log_every_t == 0 or i == timesteps - 1:
intermediates.append(img)
if callback: callback(i)
if img_callback: img_callback(img, i)
if callback:
callback(i)
if img_callback:
img_callback(img, i)

if return_intermediates:
return img, intermediates

@ -1235,7 +1231,7 @@ class LatentDiffusion(DDPM):
if cond is not None:
if isinstance(cond, dict):
cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
[x[:batch_size] for x in cond[key]] for key in cond}
else:
cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
return self.p_sample_loop(cond,

@ -1267,7 +1263,7 @@ class LatentDiffusion(DDPM):

use_ddim = False

log = dict()
log = {}
z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key,
return_first_stage_outputs=True,
force_c_encode=True,

@ -1295,7 +1291,7 @@ class LatentDiffusion(DDPM):

if plot_diffusion_rows:
# get diffusion row
diffusion_row = list()
diffusion_row = []
z_start = z[:n_row]
for t in range(self.num_timesteps):
if t % self.log_every_t == 0 or t == self.num_timesteps - 1:

@ -1337,7 +1333,7 @@ class LatentDiffusion(DDPM):

if inpaint:
# make a simple center square
b, h, w = z.shape[0], z.shape[2], z.shape[3]
h, w = z.shape[2], z.shape[3]
mask = torch.ones(N, h, w).to(self.device)
# zeros will be filled in
mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.

@ -1439,10 +1435,10 @@ class Layout2ImgDiffusion(LatentDiffusion):
# TODO: move all layout-specific hacks to this class
def __init__(self, cond_stage_key, *args, **kwargs):
assert cond_stage_key == 'coordinates_bbox', 'Layout2ImgDiffusion only for cond_stage_key="coordinates_bbox"'
super().__init__(cond_stage_key=cond_stage_key, *args, **kwargs)
super().__init__(*args, cond_stage_key=cond_stage_key, **kwargs)

def log_images(self, batch, N=8, *args, **kwargs):
logs = super().log_images(batch=batch, N=N, *args, **kwargs)
logs = super().log_images(*args, batch=batch, N=N, **kwargs)

key = 'train' if self.training else 'validation'
dset = self.trainer.datamodule.datasets[key]

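The `super().__init__(...)` reorderings above address Ruff's B026: unpacking `*args` after a keyword argument is legal but confusing, since positionals are bound first regardless of where the star appears. A sketch showing both call orders agree:

def show(*args, **kwargs):  # illustrative helper
    return args, kwargs

extra = (1, 2)
# The second spelling puts the star-unpacking before the keyword,
# matching the order in which Python actually binds the arguments.
assert show(kw="v", *extra) == show(*extra, kw="v") == ((1, 2), {"kw": "v"})
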
@ -1 +1 @@
from .sampler import UniPCSampler
from .sampler import UniPCSampler # noqa: F401

@ -54,7 +54,8 @@ class UniPCSampler(object):
if conditioning is not None:
if isinstance(conditioning, dict):
ctmp = conditioning[list(conditioning.keys())[0]]
while isinstance(ctmp, list): ctmp = ctmp[0]
while isinstance(ctmp, list):
ctmp = ctmp[0]
cbs = ctmp.shape[0]
if cbs != batch_size:
print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")

@ -1,5 +1,4 @@
import torch
import torch.nn.functional as F
import math
from tqdm.auto import trange

@ -179,13 +178,13 @@ def model_wrapper(
model,
noise_schedule,
model_type="noise",
model_kwargs={},
model_kwargs=None,
guidance_type="uncond",
#condition=None,
#unconditional_condition=None,
guidance_scale=1.,
classifier_fn=None,
classifier_kwargs={},
classifier_kwargs=None,
):
"""Create a wrapper function for the noise prediction model.

@ -276,6 +275,9 @@ def model_wrapper(
A noise prediction model that accepts the noised data and the continuous time as the inputs.
"""

model_kwargs = model_kwargs or {}
classifier_kwargs = classifier_kwargs or {}

def get_model_input_time(t_continuous):
"""
Convert the continuous-time `t_continuous` (in [epsilon, T]) to the model input time.

@ -342,7 +344,7 @@ def model_wrapper(
t_in = torch.cat([t_continuous] * 2)
if isinstance(condition, dict):
assert isinstance(unconditional_condition, dict)
c_in = dict()
c_in = {}
for k in condition:
if isinstance(condition[k], list):
c_in[k] = [torch.cat([

@ -353,7 +355,7 @@ def model_wrapper(
unconditional_condition[k],
condition[k]])
elif isinstance(condition, list):
c_in = list()
c_in = []
assert isinstance(unconditional_condition, list)
for i in range(len(condition)):
c_in.append(torch.cat([unconditional_condition[i], condition[i]]))

@ -1,8 +1,8 @@
import os
import sys
from modules.paths_internal import models_path, script_path, data_path, extensions_dir, extensions_builtin_dir
from modules.paths_internal import models_path, script_path, data_path, extensions_dir, extensions_builtin_dir # noqa: F401

import modules.safe
import modules.safe # noqa: F401


# data_path = cmd_opts_pre.data

@ -2,7 +2,6 @@ import json
import math
import os
import sys
import warnings
import hashlib

import torch

@ -11,10 +10,10 @@ from PIL import Image, ImageFilter, ImageOps
import random
import cv2
from skimage import exposure
from typing import Any, Dict, List, Optional
from typing import Any, Dict, List

import modules.sd_hijack
from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, script_callbacks, extra_networks, sd_vae_approx, scripts
from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, extra_networks, sd_vae_approx, scripts
from modules.sd_hijack import model_hijack
from modules.shared import opts, cmd_opts, state
import modules.shared as shared

@ -664,7 +663,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
if not shared.opts.dont_fix_second_order_samplers_schedule:
try:
step_multiplier = 2 if sd_samplers.all_samplers_map.get(p.sampler_name).aliases[0] in ['k_dpmpp_2s_a', 'k_dpmpp_2s_a_ka', 'k_dpmpp_sde', 'k_dpmpp_sde_ka', 'k_dpm_2', 'k_dpm_2_a', 'k_heun'] else 1
except:
except Exception:
pass
uc = get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, p.steps * step_multiplier, cached_uc)
c = get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, p.steps * step_multiplier, cached_c)

@ -54,18 +54,21 @@ def get_learned_conditioning_prompt_schedules(prompts, steps):
"""

def collect_steps(steps, tree):
l = [steps]
res = [steps]

class CollectSteps(lark.Visitor):
def scheduled(self, tree):
tree.children[-1] = float(tree.children[-1])
if tree.children[-1] < 1:
tree.children[-1] *= steps
tree.children[-1] = min(steps, int(tree.children[-1]))
l.append(tree.children[-1])
res.append(tree.children[-1])

def alternate(self, tree):
l.extend(range(1, steps+1))
res.extend(range(1, steps+1))

CollectSteps().visit(tree)
return sorted(set(l))
return sorted(set(res))

def at_step(step, tree):
class AtStep(lark.Transformer):

@ -92,7 +95,7 @@ def get_learned_conditioning_prompt_schedules(prompts, steps):
def get_schedule(prompt):
try:
tree = schedule_parser.parse(prompt)
except lark.exceptions.LarkError as e:
except lark.exceptions.LarkError:
if 0:
import traceback
traceback.print_exc()

@ -140,7 +143,7 @@ def get_learned_conditioning(model, prompts, steps):
conds = model.get_learned_conditioning(texts)

cond_schedule = []
for i, (end_at_step, text) in enumerate(prompt_schedule):
for i, (end_at_step, _) in enumerate(prompt_schedule):
cond_schedule.append(ScheduledPromptConditioning(end_at_step, conds[i]))

cache[prompt] = cond_schedule

@ -216,8 +219,8 @@ def reconstruct_cond_batch(c: List[List[ScheduledPromptConditioning]], current_s
res = torch.zeros((len(c),) + param.shape, device=param.device, dtype=param.dtype)
for i, cond_schedule in enumerate(c):
target_index = 0
for current, (end_at, cond) in enumerate(cond_schedule):
if current_step <= end_at:
for current, entry in enumerate(cond_schedule):
if current_step <= entry.end_at_step:
target_index = current
break
res[i] = cond_schedule[target_index].cond

@ -231,13 +234,13 @@ def reconstruct_multicond_batch(c: MulticondLearnedConditioning, current_step):
tensors = []
conds_list = []

for batch_no, composable_prompts in enumerate(c.batch):
for composable_prompts in c.batch:
conds_for_batch = []

for cond_index, composable_prompt in enumerate(composable_prompts):
for composable_prompt in composable_prompts:
target_index = 0
for current, (end_at, cond) in enumerate(composable_prompt.schedules):
if current_step <= end_at:
for current, entry in enumerate(composable_prompt.schedules):
if current_step <= entry.end_at_step:
target_index = current
break

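The reconstruct_* loops above stop unpacking each schedule entry and read `entry.end_at_step` instead, which works because ScheduledPromptConditioning is a named tuple; the unused second field ("cond") no longer needs a throwaway name. A self-contained sketch, with the tuple re-declared here purely for illustration:

from collections import namedtuple

ScheduledPromptConditioning = namedtuple("ScheduledPromptConditioning", ["end_at_step", "cond"])
schedule = [ScheduledPromptConditioning(5, "early"), ScheduledPromptConditioning(20, "late")]

# Field access by name documents intent and skips the unused value.
target_index = 0
for current, entry in enumerate(schedule):
    if entry.end_at_step >= 7:
        target_index = current
        break
assert schedule[target_index].cond == "late"
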
@ -17,9 +17,9 @@ class UpscalerRealESRGAN(Upscaler):
self.user_path = path
super().__init__()
try:
from basicsr.archs.rrdbnet_arch import RRDBNet
from realesrgan import RealESRGANer
from realesrgan.archs.srvgg_arch import SRVGGNetCompact
from basicsr.archs.rrdbnet_arch import RRDBNet # noqa: F401
from realesrgan import RealESRGANer # noqa: F401
from realesrgan.archs.srvgg_arch import SRVGGNetCompact # noqa: F401
self.enable = True
self.scalers = []
scalers = self.load_models(path)

@ -134,6 +134,6 @@ def get_realesrgan_models(scaler):
),
]
return models
except Exception as e:
except Exception:
print("Error making Real-ESRGAN models list:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)

@ -95,16 +95,16 @@ def check_pt(filename, extra_handler):

except zipfile.BadZipfile:

# if it's not a zip file, it's an olf pytorch format, with five objects written to pickle
# if it's not a zip file, it's an old pytorch format, with five objects written to pickle
with open(filename, "rb") as file:
unpickler = RestrictedUnpickler(file)
unpickler.extra_handler = extra_handler
for i in range(5):
for _ in range(5):
unpickler.load()


def load(filename, *args, **kwargs):
return load_with_extra(filename, extra_handler=global_extra_handler, *args, **kwargs)
return load_with_extra(filename, *args, extra_handler=global_extra_handler, **kwargs)


def load_with_extra(filename, extra_handler=None, *args, **kwargs):

@ -2,7 +2,6 @@ import os
import sys
import traceback
import importlib.util
from types import ModuleType


def load_module(path):

@ -231,7 +231,7 @@ def load_scripts():
syspath = sys.path

def register_scripts_from_module(module):
for key, script_class in module.__dict__.items():
for script_class in module.__dict__.values():
if type(script_class) != type:
continue

@ -295,9 +295,9 @@ class ScriptRunner:

auto_processing_scripts = scripts_auto_postprocessing.create_auto_preprocessing_script_data()

for script_class, path, basedir, script_module in auto_processing_scripts + scripts_data:
script = script_class()
script.filename = path
for script_data in auto_processing_scripts + scripts_data:
script = script_data.script_class()
script.filename = script_data.path
script.is_txt2img = not is_img2img
script.is_img2img = is_img2img

@ -492,7 +492,7 @@ class ScriptRunner:
module = script_loading.load_module(script.filename)
cache[filename] = module

for key, script_class in module.__dict__.items():
for script_class in module.__dict__.values():
if type(script_class) == type and issubclass(script_class, Script):
self.scripts[si] = script_class()
self.scripts[si].filename = filename

@ -17,7 +17,7 @@ class ScriptPostprocessingForMainUI(scripts.Script):
return self.postprocessing_controls.values()

def postprocess_image(self, p, script_pp, *args):
args_dict = {k: v for k, v in zip(self.postprocessing_controls, args)}
args_dict = dict(zip(self.postprocessing_controls, args))

pp = scripts_postprocessing.PostprocessedImage(script_pp.image)
pp.info = {}

@ -66,9 +66,9 @@ class ScriptPostprocessingRunner:
def initialize_scripts(self, scripts_data):
self.scripts = []

for script_class, path, basedir, script_module in scripts_data:
script: ScriptPostprocessing = script_class()
script.filename = path
for script_data in scripts_data:
script: ScriptPostprocessing = script_data.script_class()
script.filename = script_data.path

if script.name == "Simple Upscale":
continue

@ -124,7 +124,7 @@ class ScriptPostprocessingRunner:
script_args = args[script.args_from:script.args_to]

process_args = {}
for (name, component), value in zip(script.controls.items(), script_args):
for (name, _component), value in zip(script.controls.items(), script_args):
process_args[name] = value

script.process(pp, **process_args)

@ -61,7 +61,7 @@ class DisableInitialization:
if res is None:
res = original(url, *args, local_files_only=False, **kwargs)
return res
except Exception as e:
except Exception:
return original(url, *args, local_files_only=False, **kwargs)

def transformers_utils_hub_get_from_cache(url, *args, local_files_only=False, **kwargs):

@ -3,7 +3,7 @@ from torch.nn.functional import silu
from types import MethodType

import modules.textual_inversion.textual_inversion
from modules import devices, sd_hijack_optimizations, shared, sd_hijack_checkpoint
from modules import devices, sd_hijack_optimizations, shared
from modules.hypernetworks import hypernetwork
from modules.shared import cmd_opts
from modules import sd_hijack_clip, sd_hijack_open_clip, sd_hijack_unet, sd_hijack_xlmr, xlmr

@ -37,7 +37,7 @@ def apply_optimizations():

optimization_method = None

can_use_sdp = hasattr(torch.nn.functional, "scaled_dot_product_attention") and callable(getattr(torch.nn.functional, "scaled_dot_product_attention")) # not everyone has torch 2.x to use sdp
can_use_sdp = hasattr(torch.nn.functional, "scaled_dot_product_attention") and callable(torch.nn.functional.scaled_dot_product_attention) # not everyone has torch 2.x to use sdp

if cmd_opts.force_enable_xformers or (cmd_opts.xformers and shared.xformers_available and torch.version.cuda and (6, 0) <= torch.cuda.get_device_capability(shared.device) <= (9, 0)):
print("Applying xformers cross attention optimization.")

@ -118,7 +118,7 @@ def weighted_forward(sd_model, x, c, w, *args, **kwargs):
try:
#Delete temporary weights if appended
del sd_model._custom_loss_weight
except AttributeError as e:
except AttributeError:
pass

#If we have an old loss function, reset the loss function to the original one

@ -133,7 +133,7 @@ def apply_weighted_forward(sd_model):
def undo_weighted_forward(sd_model):
try:
del sd_model.weighted_forward
except AttributeError as e:
except AttributeError:
pass

@ -223,7 +223,7 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module):
self.hijack.fixes = [x.fixes for x in batch_chunk]

for fixes in self.hijack.fixes:
for position, embedding in fixes:
for _position, embedding in fixes:
used_embeddings[embedding.name] = embedding

z = self.process_tokens(tokens, multipliers)

@ -1,16 +1,10 @@
import os
import torch

from einops import repeat
from omegaconf import ListConfig

import ldm.models.diffusion.ddpm
import ldm.models.diffusion.ddim
import ldm.models.diffusion.plms

from ldm.models.diffusion.ddpm import LatentDiffusion
from ldm.models.diffusion.plms import PLMSSampler
from ldm.models.diffusion.ddim import DDIMSampler, noise_like
from ldm.models.diffusion.ddim import noise_like
from ldm.models.diffusion.sampling_util import norm_thresholding

@ -29,7 +23,7 @@ def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=F

if isinstance(c, dict):
assert isinstance(unconditional_conditioning, dict)
c_in = dict()
c_in = {}
for k in c:
if isinstance(c[k], list):
c_in[k] = [

@ -1,8 +1,5 @@
import collections
import os.path
import sys
import gc
import time

def should_hijack_ip2p(checkpoint_info):
from modules import sd_models_config

@ -10,4 +7,4 @@ def should_hijack_ip2p(checkpoint_info):
ckpt_basename = os.path.basename(checkpoint_info.filename).lower()
cfg_basename = os.path.basename(sd_models_config.find_checkpoint_config_near_filename(checkpoint_info)).lower()

return "pix2pix" in ckpt_basename and not "pix2pix" in cfg_basename
return "pix2pix" in ckpt_basename and "pix2pix" not in cfg_basename

@ -49,7 +49,7 @@ def split_cross_attention_forward_v1(self, x, context=None, mask=None):
v_in = self.to_v(context_v)
del context, context_k, context_v, x

q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q_in, k_in, v_in))
q, k, v = (rearrange(t, 'b n (h d) -> (b h) n d', h=h) for t in (q_in, k_in, v_in))
del q_in, k_in, v_in

dtype = q.dtype

@ -98,7 +98,7 @@ def split_cross_attention_forward(self, x, context=None, mask=None):

del context, x

q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q_in, k_in, v_in))
q, k, v = (rearrange(t, 'b n (h d) -> (b h) n d', h=h) for t in (q_in, k_in, v_in))
del q_in, k_in, v_in

r1 = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype)

@ -229,7 +229,7 @@ def split_cross_attention_forward_invokeAI(self, x, context=None, mask=None):
with devices.without_autocast(disable=not shared.opts.upcast_attn):
k = k * self.scale

q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
q, k, v = (rearrange(t, 'b n (h d) -> (b h) n d', h=h) for t in (q, k, v))
r = einsum_op(q, k, v)
r = r.to(dtype)
return self.to_out(rearrange(r, '(b h) n d -> b n (h d)', h=h))

@ -296,7 +296,6 @@ def sub_quad_attention(q, k, v, q_chunk_size=1024, kv_chunk_size=None, kv_chunk_
if chunk_threshold_bytes is not None and qk_matmul_size_bytes <= chunk_threshold_bytes:
# the big matmul fits into our memory limit; do everything in 1 chunk,
# i.e. send it down the unchunked fast-path
query_chunk_size = q_tokens
kv_chunk_size = k_tokens

with devices.without_autocast(disable=q.dtype == v.dtype):

@ -335,7 +334,7 @@ def xformers_attention_forward(self, x, context=None, mask=None):
k_in = self.to_k(context_k)
v_in = self.to_v(context_v)

q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b n h d', h=h), (q_in, k_in, v_in))
q, k, v = (rearrange(t, 'b n (h d) -> b n h d', h=h) for t in (q_in, k_in, v_in))
del q_in, k_in, v_in

dtype = q.dtype

@ -461,7 +460,7 @@ def xformers_attnblock_forward(self, x):
k = self.k(h_)
v = self.v(h_)
b, c, h, w = q.shape
q, k, v = map(lambda t: rearrange(t, 'b c h w -> b (h w) c'), (q, k, v))
q, k, v = (rearrange(t, 'b c h w -> b (h w) c') for t in (q, k, v))
dtype = q.dtype
if shared.opts.upcast_attn:
q, k = q.float(), k.float()

@ -483,7 +482,7 @@ def sdp_attnblock_forward(self, x):
k = self.k(h_)
v = self.v(h_)
b, c, h, w = q.shape
q, k, v = map(lambda t: rearrange(t, 'b c h w -> b (h w) c'), (q, k, v))
q, k, v = (rearrange(t, 'b c h w -> b (h w) c') for t in (q, k, v))
dtype = q.dtype
if shared.opts.upcast_attn:
q, k = q.float(), k.float()

@ -507,7 +506,7 @@ def sub_quad_attnblock_forward(self, x):
k = self.k(h_)
v = self.v(h_)
b, c, h, w = q.shape
q, k, v = map(lambda t: rearrange(t, 'b c h w -> b (h w) c'), (q, k, v))
q, k, v = (rearrange(t, 'b c h w -> b (h w) c') for t in (q, k, v))
q = q.contiguous()
k = k.contiguous()
v = v.contiguous()

@ -1,8 +1,6 @@
import open_clip.tokenizer
import torch

from modules import sd_hijack_clip, devices
from modules.shared import opts


class FrozenXLMREmbedderWithCustomWords(sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords):

@ -15,7 +15,6 @@ import ldm.modules.midas as midas
from ldm.util import instantiate_from_config

from modules import paths, shared, modelloader, devices, script_callbacks, sd_vae, sd_disable_initialization, errors, hashes, sd_models_config
from modules.paths import models_path
from modules.sd_hijack_inpainting import do_inpainting_hijack
from modules.timer import Timer

@ -87,8 +86,7 @@ class CheckpointInfo:

try:
# this silences the annoying "Some weights of the model checkpoint were not used when initializing..." message at start.

from transformers import logging, CLIPModel
from transformers import logging, CLIPModel # noqa: F401

logging.set_verbosity_error()
except Exception:

@ -239,7 +237,7 @@ def read_metadata_from_safetensors(filename):
if isinstance(v, str) and v[0:1] == '{':
try:
res[k] = json.loads(v)
except Exception as e:
except Exception:
pass

return res

@ -467,7 +465,7 @@ def load_model(checkpoint_info=None, already_loaded_state_dict=None):
try:
with sd_disable_initialization.DisableInitialization(disable_clip=clip_is_included_into_sd):
sd_model = instantiate_from_config(sd_config.model)
except Exception as e:
except Exception:
pass

if sd_model is None:

@ -544,7 +542,7 @@ def reload_model_weights(sd_model=None, info=None):

try:
load_model_weights(sd_model, checkpoint_info, state_dict, timer)
except Exception as e:
except Exception:
print("Failed to load checkpoint, restoring previous")
load_model_weights(sd_model, current_checkpoint_info, None, timer)
raise

@ -565,7 +563,7 @@ def reload_model_weights(sd_model=None, info=None):


def unload_model_weights(sd_model=None, info=None):
from modules import lowvram, devices, sd_hijack
from modules import devices, sd_hijack
timer = Timer()

if model_data.sd_model:

@ -1,4 +1,3 @@
import re
import os

import torch

@ -1,7 +1,7 @@
from modules import sd_samplers_compvis, sd_samplers_kdiffusion, shared

# imports for functions that previously were here and are used by other modules
from modules.sd_samplers_common import samples_to_image_grid, sample_to_image
from modules.sd_samplers_common import samples_to_image_grid, sample_to_image # noqa: F401

all_samplers = [
*sd_samplers_kdiffusion.samplers_data_k_diffusion,

@ -55,7 +55,7 @@ class VanillaStableDiffusionSampler:
def p_sample_ddim_hook(self, x_dec, cond, ts, unconditional_conditioning, *args, **kwargs):
x_dec, ts, cond, unconditional_conditioning = self.before_sample(x_dec, ts, cond, unconditional_conditioning)

res = self.orig_p_sample_ddim(x_dec, cond, ts, unconditional_conditioning=unconditional_conditioning, *args, **kwargs)
res = self.orig_p_sample_ddim(x_dec, cond, ts, *args, unconditional_conditioning=unconditional_conditioning, **kwargs)

x_dec, ts, cond, unconditional_conditioning, res = self.after_sample(x_dec, ts, cond, unconditional_conditioning, res)

@ -83,7 +83,7 @@ class VanillaStableDiffusionSampler:
conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step)
unconditional_conditioning = prompt_parser.reconstruct_cond_batch(unconditional_conditioning, self.step)

assert all([len(conds) == 1 for conds in conds_list]), 'composition via AND is not supported for DDIM/PLMS samplers'
assert all(len(conds) == 1 for conds in conds_list), 'composition via AND is not supported for DDIM/PLMS samplers'
cond = tensor

# for DDIM, shapes must match, we can't just process cond and uncond independently;

@ -1,7 +1,6 @@
from collections import deque
import torch
import inspect
import einops
import k_diffusion.sampling
from modules import prompt_parser, devices, sd_samplers_common

@ -87,7 +86,7 @@ class CFGDenoiser(torch.nn.Module):
conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step)
uncond = prompt_parser.reconstruct_cond_batch(uncond, self.step)

assert not is_edit_model or all([len(conds) == 1 for conds in conds_list]), "AND is not supported for InstructPix2Pix checkpoint (unless using Image CFG scale = 1.0)"
assert not is_edit_model or all(len(conds) == 1 for conds in conds_list), "AND is not supported for InstructPix2Pix checkpoint (unless using Image CFG scale = 1.0)"

batch_size = len(conds_list)
repeats = [len(conds_list[i]) for i in range(batch_size)]

@ -1,8 +1,5 @@
import torch
import safetensors.torch
import os
import collections
from collections import namedtuple
from modules import paths, shared, devices, script_callbacks, sd_models
import glob
from copy import deepcopy

@ -1,12 +1,9 @@
import argparse
import datetime
import json
import os
import sys
import time
import requests

from PIL import Image
import gradio as gr
import tqdm

@ -15,7 +12,7 @@ import modules.memmon
import modules.styles
import modules.devices as devices
from modules import localization, script_loading, errors, ui_components, shared_items, cmd_args
from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir
from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir # noqa: F401
from ldm.models.diffusion.ddpm import LatentDiffusion

demo = None

@ -214,7 +211,7 @@ class OptionInfo:


def options_section(section_identifier, options_dict):
for k, v in options_dict.items():
for v in options_dict.values():
v.section = section_identifier

return options_dict

@ -384,7 +381,7 @@ options_templates.update(options_section(('extra_networks', "Extra Networks"), {
"extra_networks_card_width": OptionInfo(0, "Card width for Extra Networks (px)"),
"extra_networks_card_height": OptionInfo(0, "Card height for Extra Networks (px)"),
"extra_networks_add_text_separator": OptionInfo(" ", "Extra text to add before <...> when adding extra network to prompt"),
"sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None"] + [x for x in hypernetworks.keys()]}, refresh=reload_hypernetworks),
"sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None", *hypernetworks]}, refresh=reload_hypernetworks),
}))

options_templates.update(options_section(('ui', "User interface"), {

@ -406,7 +403,7 @@ options_templates.update(options_section(('ui', "User interface"), {
"keyedit_precision_extra": OptionInfo(0.05, "Ctrl+up/down precision when editing <extra networks:0.9>", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}),
"keyedit_delimiters": OptionInfo(".,\\/!?%^*;:{}=`~()", "Ctrl+up/down word delimiters"),
"quicksettings_list": OptionInfo(["sd_model_checkpoint"], "Quicksettings list", ui_components.DropdownMulti, lambda: {"choices": list(opts.data_labels.keys())}),
"hidden_tabs": OptionInfo([], "Hidden UI tabs (requires restart)", ui_components.DropdownMulti, lambda: {"choices": [x for x in tab_names]}),
"hidden_tabs": OptionInfo([], "Hidden UI tabs (requires restart)", ui_components.DropdownMulti, lambda: {"choices": list(tab_names)}),
"ui_reorder": OptionInfo(", ".join(ui_reorder_categories), "txt2img/img2img UI item order"),
"ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order"),
"localization": OptionInfo("None", "Localization (requires restart)", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)),

@ -582,11 +579,11 @@ class Options:

section_ids = {}
settings_items = self.data_labels.items()
for k, item in settings_items:
for _, item in settings_items:
if item.section not in section_ids:
section_ids[item.section] = len(section_ids)

self.data_labels = {k: v for k, v in sorted(settings_items, key=lambda x: section_ids[x[1].section])}
self.data_labels = dict(sorted(settings_items, key=lambda x: section_ids[x[1].section]))

def cast_value(self, key, value):
"""casts an arbitrary to the same type as this setting's value with key

@ -743,7 +740,7 @@ def walk_files(path, allowed_extensions=None):
if allowed_extensions is not None:
allowed_extensions = set(allowed_extensions)

for root, dirs, files in os.walk(path):
for root, _, files in os.walk(path):
for filename in files:
if allowed_extensions is not None:
_, ext = os.path.splitext(filename)

@ -1,18 +1,9 @@
# We need this so Python doesn't complain about the unknown StableDiffusionProcessing-typehint at runtime
from __future__ import annotations

import csv
import os
import os.path
import typing
import collections.abc as abc
import tempfile
import shutil

if typing.TYPE_CHECKING:
# Only import this when code is being type-checked, it doesn't have any effect at runtime
from .processing import StableDiffusionProcessing


class PromptStyle(typing.NamedTuple):
name: str

@ -1,10 +1,8 @@
import cv2
import requests
import os
from collections import defaultdict
from math import log, sqrt
import numpy as np
from PIL import Image, ImageDraw
from PIL import ImageDraw

GREEN = "#0F0"
BLUE = "#00F"

@ -185,7 +183,7 @@ def image_face_points(im, settings):
try:
faces = classifier.detectMultiScale(gray, scaleFactor=1.1,
minNeighbors=7, minSize=(minsize, minsize), flags=cv2.CASCADE_SCALE_IMAGE)
except:
except Exception:
continue

if len(faces) > 0:

@ -2,7 +2,7 @@ import base64
import json
import numpy as np
import zlib
from PIL import Image, PngImagePlugin, ImageDraw, ImageFont
from PIL import Image, ImageDraw, ImageFont
from fonts.ttf import Roboto
import torch
from modules.shared import opts

@ -17,7 +17,7 @@ class EmbeddingEncoder(json.JSONEncoder):

class EmbeddingDecoder(json.JSONDecoder):
def __init__(self, *args, **kwargs):
json.JSONDecoder.__init__(self, object_hook=self.object_hook, *args, **kwargs)
json.JSONDecoder.__init__(self, *args, object_hook=self.object_hook, **kwargs)

def object_hook(self, d):
if 'TORCHTENSOR' in d:

@ -12,7 +12,7 @@ class LearnScheduleIterator:
self.it = 0
self.maxit = 0
try:
for i, pair in enumerate(pairs):
for pair in pairs:
if not pair.strip():
continue
tmp = pair.split(':')

@ -32,8 +32,8 @@ class LearnScheduleIterator:
self.maxit += 1
return
assert self.rates
except (ValueError, AssertionError):
raise Exception('Invalid learning rate schedule. It should be a number or, for example, like "0.001:100, 0.00001:1000, 1e-5:10000" to have lr of 0.001 until step 100, 0.00001 until 1000, and 1e-5 until 10000.')
except (ValueError, AssertionError) as e:
raise Exception('Invalid learning rate schedule. It should be a number or, for example, like "0.001:100, 0.00001:1000, 1e-5:10000" to have lr of 0.001 until step 100, 0.00001 until 1000, and 1e-5 until 10000.') from e


def __iter__(self):

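Adding `as e` and `from e` above (Ruff's B904) chains the new exception to the original one, so tracebacks report the real cause instead of the misleading "during handling of the above exception, another exception occurred". A sketch with a hypothetical helper, not from the codebase:

def parse_rate(text):  # hypothetical helper for illustration
    try:
        return float(text)
    except ValueError as e:
        # "from e" records the ValueError as __cause__ on the new exception.
        raise Exception(f"Invalid learning rate schedule: {text!r}") from e

assert parse_rate("1e-5") == 1e-5
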
@ -1,13 +1,9 @@
import os
from PIL import Image, ImageOps
import math
import platform
import sys
import tqdm
import time

from modules import paths, shared, images, deepbooru
from modules.shared import opts, cmd_opts
from modules.textual_inversion import autocrop

@ -1,7 +1,6 @@
import os
import sys
import traceback
import inspect
from collections import namedtuple

import torch

@ -30,7 +29,7 @@ textual_inversion_templates = {}
def list_textual_inversion_templates():
textual_inversion_templates.clear()

for root, dirs, fns in os.walk(shared.cmd_opts.textual_inversion_templates_dir):
for root, _, fns in os.walk(shared.cmd_opts.textual_inversion_templates_dir):
for fn in fns:
path = os.path.join(root, fn)

@ -167,8 +166,7 @@ class EmbeddingDatabase:
# textual inversion embeddings
if 'string_to_param' in data:
param_dict = data['string_to_param']
if hasattr(param_dict, '_parameters'):
param_dict = getattr(param_dict, '_parameters') # fix for torch 1.12.1 loading saved file from torch 1.11
param_dict = getattr(param_dict, '_parameters', param_dict) # fix for torch 1.12.1 loading saved file from torch 1.11
assert len(param_dict) == 1, 'embedding file has multiple terms in it'
emb = next(iter(param_dict.items()))[1]
# diffuser concepts

@ -199,7 +197,7 @@ class EmbeddingDatabase:
if not os.path.isdir(embdir.path):
return

for root, dirs, fns in os.walk(embdir.path, followlinks=True):
for root, _, fns in os.walk(embdir.path, followlinks=True):
for fn in fns:
try:
fullfn = os.path.join(root, fn)

@ -216,7 +214,7 @@ class EmbeddingDatabase:
def load_textual_inversion_embeddings(self, force_reload=False):
if not force_reload:
need_reload = False
for path, embdir in self.embedding_dirs.items():
for embdir in self.embedding_dirs.values():
if embdir.has_changed():
need_reload = True
break

@ -229,7 +227,7 @@ class EmbeddingDatabase:
self.skipped_embeddings.clear()
self.expected_shape = self.get_expected_shape()

for path, embdir in self.embedding_dirs.items():
for embdir in self.embedding_dirs.values():
self.load_from_dir(embdir)
embdir.update()

@ -470,7 +468,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st
try:
sd_hijack_checkpoint.add()

for i in range((steps-initial_step) * gradient_step):
for _ in range((steps-initial_step) * gradient_step):
if scheduler.finished:
break
if shared.state.interrupted:

@ -603,7 +601,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st

try:
vectorSize = list(data['string_to_param'].values())[0].shape[0]
except Exception as e:
except Exception:
vectorSize = '?'

checkpoint = sd_models.select_checkpoint()

@ -1,18 +1,15 @@
|
|||
import modules.scripts
|
||||
from modules import sd_samplers
|
||||
from modules import sd_samplers, processing
|
||||
from modules.generation_parameters_copypaste import create_override_settings_dict
|
||||
from modules.processing import StableDiffusionProcessing, Processed, StableDiffusionProcessingTxt2Img, \
|
||||
StableDiffusionProcessingImg2Img, process_images
|
||||
from modules.shared import opts, cmd_opts
|
||||
import modules.shared as shared
|
||||
import modules.processing as processing
|
||||
from modules.ui import plaintext_to_html
|
||||
|
||||
|
||||
def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, override_settings_texts, *args):
|
||||
override_settings = create_override_settings_dict(override_settings_texts)
|
||||
|
||||
p = StableDiffusionProcessingTxt2Img(
|
||||
p = processing.StableDiffusionProcessingTxt2Img(
|
||||
sd_model=shared.sd_model,
|
||||
outpath_samples=opts.outdir_samples or opts.outdir_txt2img_samples,
|
||||
outpath_grids=opts.outdir_grids or opts.outdir_txt2img_grids,
|
||||
|
@ -53,7 +50,7 @@ def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, step
|
|||
processed = modules.scripts.scripts_txt2img.run(p, *args)
|
||||
|
||||
if processed is None:
|
||||
processed = process_images(p)
|
||||
processed = processing.process_images(p)
|
||||
|
||||
p.close()
|
||||
|
||||
|
|
|
@@ -1,29 +1,23 @@
import html
import json
import math
import mimetypes
import os
import platform
import random
import sys
import tempfile
import time
import traceback
from functools import partial, reduce
from functools import reduce
import warnings

import gradio as gr
import gradio.routes
import gradio.utils
import numpy as np
from PIL import Image, PngImagePlugin
from PIL import Image, PngImagePlugin  # noqa: F401
from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call

from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, postprocessing, ui_components, ui_common, ui_postprocessing, progress
from modules.ui_components import FormRow, FormColumn, FormGroup, ToolButton, FormHTML
from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, ui_common, ui_postprocessing, progress
from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML
from modules.paths import script_path, data_path

from modules.shared import opts, cmd_opts, restricted_opts
from modules.shared import opts, cmd_opts

import modules.codeformer_model
import modules.generation_parameters_copypaste as parameters_copypaste

@@ -34,7 +28,6 @@ import modules.shared as shared
import modules.styles
import modules.textual_inversion.ui
from modules import prompt_parser
from modules.images import save_image
from modules.sd_hijack import model_hijack
from modules.sd_samplers import samplers, samplers_for_img2img
from modules.textual_inversion import textual_inversion
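The `# noqa: F401` marker above keeps an import that the linter would otherwise report as unused in this module; it is the standard escape hatch for imports retained for their side effects or for re-export. A self-contained sketch:

    # Sketch: an import kept only for its side effect, exempted from F401.
    import encodings.idna  # noqa: F401  (registers the 'idna' codec on import)

    print("example.org".encode("idna"))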
@@ -246,7 +239,7 @@ def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info:
            all_seeds = gen_info.get('all_seeds', [-1])
            res = all_seeds[index if 0 <= index < len(all_seeds) else 0]

        except json.decoder.JSONDecodeError as e:
        except json.decoder.JSONDecodeError:
            if gen_info_string != '':
                print("Error parsing JSON generation info:", file=sys.stderr)
                print(gen_info_string, file=sys.stderr)

@@ -423,7 +416,7 @@ def create_sampler_and_steps_selection(choices, tabname):
def ordered_ui_categories():
    user_order = {x.strip(): i * 2 + 1 for i, x in enumerate(shared.opts.ui_reorder.split(","))}

    for i, category in sorted(enumerate(shared.ui_reorder_categories), key=lambda x: user_order.get(x[1], x[0] * 2 + 0)):
    for _, category in sorted(enumerate(shared.ui_reorder_categories), key=lambda x: user_order.get(x[1], x[0] * 2 + 0)):
        yield category
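`ordered_ui_categories` needs `enumerate` only so the sort key can fall back to the original position; the index is never used in the loop body, so it is renamed to `_` (Ruff B007). A runnable sketch of the same shape, with illustrative data:

    # Sketch: keep enumerate for the sort key, discard the index in the body.
    categories = ["sampler", "dimensions", "cfg"]
    user_order = {"cfg": 0}

    for _, category in sorted(enumerate(categories), key=lambda x: user_order.get(x[1], x[0] + 1)):
        print(category)  # cfg, sampler, dimensions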
@@ -736,8 +729,8 @@ def create_ui():
                with gr.TabItem('Batch', id='batch', elem_id="img2img_batch_tab") as tab_batch:
                    hidden = '<br>Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else ''
                    gr.HTML(
                        f"<p style='padding-bottom: 1em;' class=\"text-gray-500\">Process images in a directory on the same machine where the server is running." +
                        f"<br>Use an empty output directory to save pictures normally instead of writing to the output directory." +
                        "<p style='padding-bottom: 1em;' class=\"text-gray-500\">Process images in a directory on the same machine where the server is running." +
                        "<br>Use an empty output directory to save pictures normally instead of writing to the output directory." +
                        f"<br>Add inpaint batch mask directory to enable inpaint batch processing."
                        f"{hidden}</p>"
                    )
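The two dropped `f` prefixes fix Ruff F541 (f-string without any placeholders); the `f"{hidden}</p>"` line keeps its prefix because it actually interpolates. Sketch:

    # Sketch of F541: only strings that interpolate something need the prefix.
    hidden = "<br>Disabled."
    html = (
        "Process images in a directory."  # plain string, no placeholders
        + f"{hidden}"                     # genuine f-string
    )
    print(html)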
@@ -746,7 +739,6 @@ def create_ui():
                        img2img_batch_inpaint_mask_dir = gr.Textbox(label="Inpaint batch mask directory (required for inpaint batch processing only)", **shared.hide_dirs, elem_id="img2img_batch_inpaint_mask_dir")

            img2img_tabs = [tab_img2img, tab_sketch, tab_inpaint, tab_inpaint_color, tab_inpaint_upload, tab_batch]
            img2img_image_inputs = [init_img, sketch, init_img_with_mask, inpaint_color_sketch]

            for i, tab in enumerate(img2img_tabs):
                tab.select(fn=lambda tabnum=i: tabnum, inputs=[], outputs=[img2img_selected_tab])

@@ -1230,7 +1222,7 @@ def create_ui():
                )

        def get_textual_inversion_template_names():
            return sorted([x for x in textual_inversion.textual_inversion_templates])
            return sorted(textual_inversion.textual_inversion_templates)

        with gr.Tab(label="Train", id="train"):
            gr.HTML(value="<p style='margin-bottom: 0.7em'>Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images <a href=\"https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Textual-Inversion\" style=\"font-weight:bold;\">[wiki]</a></p>")

@@ -1238,8 +1230,8 @@ def create_ui():
                    train_embedding_name = gr.Dropdown(label='Embedding', elem_id="train_embedding", choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys()))
                    create_refresh_button(train_embedding_name, sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings, lambda: {"choices": sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())}, "refresh_train_embedding_name")

                    train_hypernetwork_name = gr.Dropdown(label='Hypernetwork', elem_id="train_hypernetwork", choices=[x for x in shared.hypernetworks.keys()])
                    create_refresh_button(train_hypernetwork_name, shared.reload_hypernetworks, lambda: {"choices": sorted([x for x in shared.hypernetworks.keys()])}, "refresh_train_hypernetwork_name")
                    train_hypernetwork_name = gr.Dropdown(label='Hypernetwork', elem_id="train_hypernetwork", choices=sorted(shared.hypernetworks))
                    create_refresh_button(train_hypernetwork_name, shared.reload_hypernetworks, lambda: {"choices": sorted(shared.hypernetworks)}, "refresh_train_hypernetwork_name")

                with FormRow():
                    embedding_learn_rate = gr.Textbox(label='Embedding Learning rate', placeholder="Embedding Learning rate", value="0.005", elem_id="train_embedding_learn_rate")
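Both hunks above rely on the fact that iterating a dict yields its keys and that `sorted()` accepts any iterable, so `sorted([x for x in d.keys()])` collapses to `sorted(d)` (Ruff's comprehension rules). Sketch:

    hypernetworks = {"photo": object(), "anime": object()}

    # was: sorted([x for x in hypernetworks.keys()])
    choices = sorted(hypernetworks)
    assert choices == ["anime", "photo"]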
@@ -1290,8 +1282,8 @@ def create_ui():

                with gr.Column(elem_id='ti_gallery_container'):
                    ti_output = gr.Text(elem_id="ti_output", value="", show_label=False)
                    ti_gallery = gr.Gallery(label='Output', show_label=False, elem_id='ti_gallery').style(columns=4)
                    ti_progress = gr.HTML(elem_id="ti_progress", value="")
                    gr.Gallery(label='Output', show_label=False, elem_id='ti_gallery').style(columns=4)
                    gr.HTML(elem_id="ti_progress", value="")
                    ti_outcome = gr.HTML(elem_id="ti_error", value="")

        create_embedding.click(

@@ -1654,7 +1646,7 @@ def create_ui():
    with gr.Blocks(theme=shared.gradio_theme, analytics_enabled=False, title="Stable Diffusion") as demo:
        with gr.Row(elem_id="quicksettings", variant="compact"):
            for i, k, item in sorted(quicksettings_list, key=lambda x: quicksettings_names.get(x[1], x[0])):
            for _i, k, _item in sorted(quicksettings_list, key=lambda x: quicksettings_names.get(x[1], x[0])):
                component = create_setting_component(k, is_quicksettings=True)
                component_dict[k] = component

@@ -1668,7 +1660,7 @@ def create_ui():
            interface.render()

    if os.path.exists(os.path.join(script_path, "notification.mp3")):
        audio_notification = gr.Audio(interactive=False, value=os.path.join(script_path, "notification.mp3"), elem_id="audio_notification", visible=False)
        gr.Audio(interactive=False, value=os.path.join(script_path, "notification.mp3"), elem_id="audio_notification", visible=False)

    footer = shared.html("footer.html")
    footer = footer.format(versions=versions_html())

@@ -1681,7 +1673,7 @@ def create_ui():
        outputs=[text_settings, result],
    )

    for i, k, item in quicksettings_list:
    for _i, k, _item in quicksettings_list:
        component = component_dict[k]
        info = opts.data_labels[k]

@@ -1816,7 +1808,7 @@ def create_ui():
        if type(x) == gr.Dropdown:
            def check_dropdown(val):
                if getattr(x, 'multiselect', False):
                    return all([value in x.choices for value in val])
                    return all(value in x.choices for value in val)
                else:
                    return val in x.choices
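The `all(...)`/`any(...)` rewrites here and in the hunks below (Ruff C419) drop the square brackets so the call consumes a generator: no intermediate list is built, and evaluation short-circuits at the first decisive element. Sketch:

    choices = ["a", "b", "c"]
    values = ["a", "z", "b"]

    # Stops at "z"; a list comprehension would test every element first.
    assert all(value in choices for value in values) is False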
@@ -490,7 +490,7 @@ def create_ui():
    config_states.list_config_states()

    with gr.Blocks(analytics_enabled=False) as ui:
        with gr.Tabs(elem_id="tabs_extensions") as tabs:
        with gr.Tabs(elem_id="tabs_extensions"):
            with gr.TabItem("Installed", id="installed"):

                with gr.Row(elem_id="extensions_installed_top"):

@@ -1,4 +1,3 @@
import glob
import os.path
import urllib.parse
from pathlib import Path

@@ -27,7 +26,7 @@ def register_page(page):
def fetch_file(filename: str = ""):
    from starlette.responses import FileResponse

    if not any([Path(x).absolute() in Path(filename).absolute().parents for x in allowed_dirs]):
    if not any(Path(x).absolute() in Path(filename).absolute().parents for x in allowed_dirs):
        raise ValueError(f"File cannot be fetched: {filename}. Must be in one of directories registered by extra pages.")

    ext = os.path.splitext(filename)[1].lower()
@@ -91,7 +90,7 @@ class ExtraNetworksPage:

        subdirs = {}
        for parentdir in [os.path.abspath(x) for x in self.allowed_directories_for_previews()]:
            for root, dirs, files in os.walk(parentdir):
            for root, dirs, _ in os.walk(parentdir):
                for dirname in dirs:
                    x = os.path.join(root, dirname)

@@ -263,7 +262,7 @@ def create_ui(container, button, tabname):
    ui.stored_extra_pages = pages_in_preferred_order(extra_pages.copy())
    ui.tabname = tabname

    with gr.Tabs(elem_id=tabname+"_extra_tabs") as tabs:
    with gr.Tabs(elem_id=tabname+"_extra_tabs"):
        for page in ui.stored_extra_pages:
            page_id = page.title.lower().replace(" ", "_")

@@ -327,7 +326,7 @@ def setup_ui(ui, gallery):

        is_allowed = False
        for extra_page in ui.stored_extra_pages:
            if any([path_is_parent(x, filename) for x in extra_page.allowed_directories_for_previews()]):
            if any(path_is_parent(x, filename) for x in extra_page.allowed_directories_for_previews()):
                is_allowed = True
                break

@@ -1,5 +1,5 @@
import gradio as gr
from modules import scripts_postprocessing, scripts, shared, gfpgan_model, codeformer_model, ui_common, postprocessing, call_queue
from modules import scripts, shared, ui_common, postprocessing, call_queue
import modules.generation_parameters_copypaste as parameters_copypaste
@@ -23,7 +23,7 @@ def register_tmp_file(gradio, filename):

def check_tmp_file(gradio, filename):
    if hasattr(gradio, 'temp_file_sets'):
        return any([filename in fileset for fileset in gradio.temp_file_sets])
        return any(filename in fileset for fileset in gradio.temp_file_sets)

    if hasattr(gradio, 'temp_dirs'):
        return any(Path(temp_dir).resolve() in Path(filename).resolve().parents for temp_dir in gradio.temp_dirs)

@@ -72,7 +72,7 @@ def cleanup_tmpdr():
    if temp_dir == "" or not os.path.isdir(temp_dir):
        return

    for root, dirs, files in os.walk(temp_dir, topdown=False):
    for root, _, files in os.walk(temp_dir, topdown=False):
        for name in files:
            _, extension = os.path.splitext(name)
            if extension != ".png":
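`os.walk()` yields `(dirpath, dirnames, filenames)` triples; when a component is not needed, binding it to `_` or an underscore-prefixed name records that it is deliberately ignored. A runnable sketch:

    import os

    # Count .png files under the working directory; dirnames are unused.
    count = 0
    for _root, _, files in os.walk("."):
        count += sum(1 for name in files if os.path.splitext(name)[1] == ".png")
    print(count)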
@@ -2,8 +2,6 @@ import os
from abc import abstractmethod

import PIL
import numpy as np
import torch
from PIL import Image

import modules.shared

@@ -43,9 +41,9 @@ class Upscaler:
            os.makedirs(self.model_path, exist_ok=True)

        try:
            import cv2
            import cv2  # noqa: F401
            self.can_tile = True
        except:
        except Exception:
            pass

    @abstractmethod
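The `cv2` block above is a capability probe: the import succeeding is the whole test, hence the `# noqa: F401`, and narrowing the bare `except:` to `except Exception:` keeps `KeyboardInterrupt` and `SystemExit` from being swallowed. A sketch of the probe as a standalone helper (names illustrative):

    def has_tiling_support() -> bool:
        try:
            import cv2  # noqa: F401  (only importability matters here)
            return True
        except Exception:  # unlike bare "except:", Ctrl-C still propagates
            return False

    print(has_tiling_support())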
@@ -57,7 +55,7 @@ class Upscaler:
        dest_w = int(img.width * scale)
        dest_h = int(img.height * scale)

        for i in range(3):
        for _ in range(3):
            shape = (img.width, img.height)

            img = self.do_upscale(img, selected_model)

@@ -1,4 +1,4 @@
from transformers import BertPreTrainedModel,BertModel,BertConfig
from transformers import BertPreTrainedModel, BertConfig
import torch.nn as nn
import torch
from transformers.models.xlm_roberta.configuration_xlm_roberta import XLMRobertaConfig
@@ -0,0 +1,31 @@
[tool.ruff]

target-version = "py39"

extend-select = [
    "B",
    "C",
    "I",
]

exclude = [
    "extensions",
    "extensions-disabled",
]

ignore = [
    "E501",  # Line too long
    "E731",  # Do not assign a `lambda` expression, use a `def`

    "I001",  # Import block is un-sorted or un-formatted
    "C901",  # Function is too complex
    "C408",  # Rewrite as a literal

]

[tool.ruff.per-file-ignores]
"webui.py" = ["E402"]  # Module level import not at top of file

[tool.ruff.flake8-bugbear]
# Allow default arguments like, e.g., `data: List[str] = fastapi.Query(None)`.
extend-immutable-calls = ["fastapi.Depends", "fastapi.security.HTTPBasic"]
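The `extend-immutable-calls` setting feeds flake8-bugbear's B008 check (function call in a default argument): FastAPI's dependency-injection style deliberately calls `Depends(...)` in defaults, evaluated once at definition time, so those calls are whitelisted. A sketch of the pattern the config is protecting:

    # Sketch of the FastAPI idiom that B008 would otherwise flag.
    import fastapi

    app = fastapi.FastAPI()
    security = fastapi.security.HTTPBasic()

    @app.get("/whoami")
    def whoami(credentials=fastapi.Depends(security)):
        # The Depends(...) call in the default is intentional and safe.
        return {"user": credentials.username}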
@@ -4,7 +4,7 @@ import ast
import copy

from modules.processing import Processed
from modules.shared import opts, cmd_opts, state
from modules.shared import cmd_opts


def convertExpr2Expression(expr):

@@ -7,9 +7,9 @@ import modules.scripts as scripts
import gradio as gr
from PIL import Image, ImageDraw

from modules import images, processing, devices
from modules import images
from modules.processing import Processed, process_images
from modules.shared import opts, cmd_opts, state
from modules.shared import opts, state


# this function is taken from https://github.com/parlance-zz/g-diffuser-bot

@@ -72,7 +72,7 @@ def get_matched_noise(_np_src_image, np_mask_rgb, noise_q=1, color_variation=0.0
    height = _np_src_image.shape[1]
    num_channels = _np_src_image.shape[2]

    np_src_image = _np_src_image[:] * (1. - np_mask_rgb)
    _np_src_image[:] * (1. - np_mask_rgb)
    np_mask_grey = (np.sum(np_mask_rgb, axis=2) / 3.)
    img_mask = np_mask_grey > 1e-6
    ref_mask = np_mask_grey < 1e-3
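One caveat in the hunk above: the automated F841 fix removed only the unused name `np_src_image`, leaving a bare expression whose result is discarded, so the array multiplication still executes for nothing. The complete cleanup would delete the statement outright; a sketch with illustrative arrays:

    import numpy as np

    src = np.ones((4, 4, 3))
    mask = np.zeros((4, 4, 3))

    # before fix:     np_src_image = src[:] * (1. - mask)   (name unused)
    # after autofix:  src[:] * (1. - mask)                  (result dropped)
    # complete fix: delete the statement entirely.
    np_mask_grey = np.sum(mask, axis=2) / 3.
    print(np_mask_grey.shape)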
@@ -4,9 +4,9 @@ import modules.scripts as scripts
import gradio as gr
from PIL import Image, ImageDraw

from modules import images, processing, devices
from modules import images, devices
from modules.processing import Processed, process_images
from modules.shared import opts, cmd_opts, state
from modules.shared import opts, state


class Script(scripts.Script):

@@ -98,13 +98,13 @@ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing):
        assert upscaler2 or (upscaler_2_name is None), f'could not find upscaler named {upscaler_2_name}'

        upscaled_image = self.upscale(pp.image, pp.info, upscaler1, upscale_mode, upscale_by, upscale_to_width, upscale_to_height, upscale_crop)
        pp.info[f"Postprocess upscaler"] = upscaler1.name
        pp.info["Postprocess upscaler"] = upscaler1.name

        if upscaler2 and upscaler_2_visibility > 0:
            second_upscale = self.upscale(pp.image, pp.info, upscaler2, upscale_mode, upscale_by, upscale_to_width, upscale_to_height, upscale_crop)
            upscaled_image = Image.blend(upscaled_image, second_upscale, upscaler_2_visibility)

            pp.info[f"Postprocess upscaler 2"] = upscaler2.name
            pp.info["Postprocess upscaler 2"] = upscaler2.name

        pp.image = upscaled_image

@@ -134,4 +134,4 @@ class ScriptPostprocessingUpscaleSimple(ScriptPostprocessingUpscale):
        assert upscaler1, f'could not find upscaler named {upscaler_name}'

        pp.image = self.upscale(pp.image, pp.info, upscaler1, 0, upscale_by, 0, 0, False)
        pp.info[f"Postprocess upscaler"] = upscaler1.name
        pp.info["Postprocess upscaler"] = upscaler1.name
@@ -1,14 +1,11 @@
import math
from collections import namedtuple
from copy import copy
import random

import modules.scripts as scripts
import gradio as gr

from modules import images
from modules.processing import process_images, Processed
from modules.shared import opts, cmd_opts, state
from modules.processing import process_images
from modules.shared import opts, state
import modules.sd_samplers

@@ -1,6 +1,4 @@
import copy
import math
import os
import random
import sys
import traceback

@@ -11,8 +9,7 @@ import gradio as gr

from modules import sd_samplers
from modules.processing import Processed, process_images
from PIL import Image
from modules.shared import opts, cmd_opts, state
from modules.shared import state


def process_string_tag(tag):
@@ -159,7 +156,7 @@ class Script(scripts.Script):
        images = []
        all_prompts = []
        infotexts = []
        for n, args in enumerate(jobs):
        for args in jobs:
            state.job = f"{state.job_no + 1} out of {state.job_count}"

            copy_p = copy.copy(p)

@@ -4,9 +4,9 @@ import modules.scripts as scripts
import gradio as gr
from PIL import Image

from modules import processing, shared, sd_samplers, images, devices
from modules import processing, shared, images, devices
from modules.processing import Processed
from modules.shared import opts, cmd_opts, state
from modules.shared import opts, state


class Script(scripts.Script):

@@ -56,7 +56,7 @@ class Script(scripts.Script):

        work = []

        for y, h, row in grid.tiles:
        for _y, _h, row in grid.tiles:
            for tiledata in row:
                work.append(tiledata[2])

@@ -85,7 +85,7 @@ class Script(scripts.Script):
            work_results += processed.images

        image_index = 0
        for y, h, row in grid.tiles:
        for _y, _h, row in grid.tiles:
            for tiledata in row:
                tiledata[2] = work_results[image_index] if image_index < len(work_results) else Image.new("RGB", (p.width, p.height))
                image_index += 1
@@ -10,15 +10,13 @@ import numpy as np
import modules.scripts as scripts
import gradio as gr

from modules import images, paths, sd_samplers, processing, sd_models, sd_vae
from modules import images, sd_samplers, processing, sd_models, sd_vae
from modules.processing import process_images, Processed, StableDiffusionProcessingTxt2Img
from modules.shared import opts, cmd_opts, state
from modules.shared import opts, state
import modules.shared as shared
import modules.sd_samplers
import modules.sd_models
import modules.sd_vae
import glob
import os
import re

from modules.ui_components import ToolButton

@@ -316,7 +314,7 @@ def draw_xyz_grid(p, xs, ys, zs, x_labels, y_labels, z_labels, cell, draw_legend
        return Processed(p, [])

    z_count = len(zs)
    sub_grids = [None] * z_count

    for i in range(z_count):
        start_index = (i * len(xs) * len(ys)) + i
        end_index = start_index + len(xs) * len(ys)

@@ -706,7 +704,7 @@ class Script(scripts.Script):

        if not include_sub_grids:
            # Done with sub-grids, drop all related information:
            for sg in range(z_count):
            for _ in range(z_count):
                del processed.images[1]
                del processed.all_prompts[1]
                del processed.all_seeds[1]
webui.py
@@ -16,12 +16,12 @@ from packaging import version
import logging
logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage())

from modules import paths, timer, import_hook, errors
from modules import paths, timer, import_hook, errors  # noqa: F401

startup_timer = timer.Timer()

import torch
import pytorch_lightning  # pytorch_lightning should be imported after torch, but it re-enables warnings on import so import once to disable them
import pytorch_lightning  # noqa: F401 # pytorch_lightning should be imported after torch, but it re-enables warnings on import so import once to disable them
warnings.filterwarnings(action="ignore", category=DeprecationWarning, module="pytorch_lightning")
warnings.filterwarnings(action="ignore", category=UserWarning, module="torchvision")

@@ -31,19 +31,19 @@ startup_timer.record("import torch")
import gradio
startup_timer.record("import gradio")

import ldm.modules.encoders.modules
import ldm.modules.encoders.modules  # noqa: F401
startup_timer.record("import ldm")

from modules import extra_networks, ui_extra_networks_checkpoints
from modules import extra_networks_hypernet, ui_extra_networks_hypernets, ui_extra_networks_textual_inversion
from modules.call_queue import wrap_queued_call, queue_lock, wrap_gradio_gpu_call
from modules.call_queue import wrap_queued_call, queue_lock

# Truncate version number of nightly/local build of PyTorch to not cause exceptions with CodeFormer or Safetensors
if ".dev" in torch.__version__ or "+git" in torch.__version__:
    torch.__long_version__ = torch.__version__
    torch.__version__ = re.search(r'[\d.]+[\d]', torch.__version__).group(0)

from modules import shared, devices, sd_samplers, upscaler, extensions, localization, ui_tempdir, ui_extra_networks, config_states
from modules import shared, sd_samplers, upscaler, extensions, localization, ui_tempdir, ui_extra_networks, config_states
import modules.codeformer_model as codeformer
import modules.face_restoration
import modules.gfpgan_model as gfpgan

@@ -360,7 +360,7 @@ def webui():
        if cmd_opts.subpath:
            redirector = FastAPI()
            redirector.get("/")
            mounted_app = gradio.mount_gradio_app(redirector, shared.demo, path=f"/{cmd_opts.subpath}")
            gradio.mount_gradio_app(redirector, shared.demo, path=f"/{cmd_opts.subpath}")

        wait_on_server(shared.demo)
        print('Restarting UI...')
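webui.py interleaves imports with timing and warning-filter setup on purpose, which is why the pyproject section above exempts it from E402 (module-level import not at top of file) and why order-sensitive, side-effect imports such as `pytorch_lightning` carry `# noqa: F401`. A compressed sketch of the shape being permitted:

    # Sketch: timed, order-sensitive module-level imports (E402 territory).
    import time

    start = time.time()
    import json  # noqa: F401  (stand-in for a heavy side-effect import)
    print(f"import took {time.time() - start:.3f}s")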