Merge branch 'dev' into refiner

commit 70a01cd444

@ -195,6 +195,15 @@ def load_network(name, network_on_disk):

    return net


def purge_networks_from_memory():
    while len(networks_in_memory) > shared.opts.lora_in_memory_limit and len(networks_in_memory) > 0:
        name = next(iter(networks_in_memory))
        networks_in_memory.pop(name, None)

    devices.torch_gc()


def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=None):
    already_loaded = {}

@ -212,15 +221,19 @@ def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=No

    failed_to_load_networks = []

    for i, name in enumerate(names):
    for i, (network_on_disk, name) in enumerate(zip(networks_on_disk, names)):
        net = already_loaded.get(name, None)

        network_on_disk = networks_on_disk[i]

        if network_on_disk is not None:
            if net is None:
                net = networks_in_memory.get(name)

            if net is None or os.path.getmtime(network_on_disk.filename) > net.mtime:
                try:
                    net = load_network(name, network_on_disk)

                    networks_in_memory.pop(name, None)
                    networks_in_memory[name] = net
                except Exception as e:
                    errors.display(e, f"loading network {network_on_disk.filename}")
                    continue

@ -242,6 +255,8 @@ def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=No
    if failed_to_load_networks:
        sd_hijack.model_hijack.comments.append("Failed to find networks: " + ", ".join(failed_to_load_networks))

    purge_networks_from_memory()


def network_restore_weights_from_backup(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.MultiheadAttention]):
    weights_backup = getattr(self, "network_weights_backup", None)

@ -462,6 +477,7 @@ def infotext_pasted(infotext, params):
available_networks = {}
available_network_aliases = {}
loaded_networks = []
networks_in_memory = {}
available_network_hash_lookup = {}
forbidden_network_aliases = {}

@ -65,6 +65,7 @@ shared.options_templates.update(shared.options_section(('extra_networks', "Extra
    "lora_add_hashes_to_infotext": shared.OptionInfo(True, "Add Lora hashes to infotext"),
    "lora_show_all": shared.OptionInfo(False, "Always show all networks on the Lora page").info("otherwise, networks detected as being for an incompatible version of Stable Diffusion will be hidden"),
    "lora_hide_unknown_for_versions": shared.OptionInfo([], "Hide networks of unknown versions for model versions", gr.CheckboxGroup, {"choices": ["SD1", "SD2", "SDXL"]}),
    "lora_in_memory_limit": shared.OptionInfo(0, "Number of Lora networks to keep cached in memory", gr.Number, {"precision": 0}),
}))

@ -121,3 +122,5 @@ def infotext_pasted(infotext, d):


script_callbacks.on_infotext_pasted(infotext_pasted)

shared.opts.onchange("lora_in_memory_limit", networks.purge_networks_from_memory)
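
For context: networks_in_memory acts as an insertion-ordered cache, so purge_networks_from_memory evicts the oldest entries first whenever the configured limit is exceeded. A minimal standalone sketch of the same eviction policy, relying only on Python 3.7+ dict insertion order (the limit of 3 is an arbitrary illustration, not a webui default):

    memory_limit = 3  # hypothetical stand-in for shared.opts.lora_in_memory_limit
    cache = {}

    def remember(name, net):
        cache.pop(name, None)  # re-inserting moves the entry to the newest position
        cache[name] = net
        while len(cache) > memory_limit and len(cache) > 0:
            oldest = next(iter(cache))  # dicts iterate in insertion order
            cache.pop(oldest, None)

    for i in range(5):
        remember(f"lora_{i}", object())

    print(list(cache))  # ['lora_2', 'lora_3', 'lora_4']
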
@ -42,6 +42,11 @@ onUiLoaded(async() => {
        }
    }

    // Detect whether the element has a horizontal scroll bar
    function hasHorizontalScrollbar(element) {
        return element.scrollWidth > element.clientWidth;
    }

    // Function for defining the "Ctrl", "Shift" and "Alt" keys
    function isModifierKey(event, key) {
        switch (key) {

@ -201,7 +206,8 @@ onUiLoaded(async() => {
        canvas_hotkey_overlap: "KeyO",
        canvas_disabled_functions: [],
        canvas_show_tooltip: true,
        canvas_blur_prompt: false
        canvas_auto_expand: true,
        canvas_blur_prompt: false,
    };

    const functionMap = {

@ -648,8 +654,32 @@ onUiLoaded(async() => {
            mouseY = e.offsetY;
        }

        // Simulates fitting a long image to the screen:
        // if the image overflows its element (i.e. has a scroll bar), briefly go fullscreen to reveal it, then shrink it to fit the element.
        // The image is hidden while this happens and shown again once it is ready.
        function autoExpand(e) {
            const canvas = document.querySelector(`${elemId} canvas[key="interface"]`);
            const isMainTab = activeElement === elementIDs.inpaint || activeElement === elementIDs.inpaintSketch || activeElement === elementIDs.sketch;

            if (canvas && isMainTab) {
                if (hasHorizontalScrollbar(targetElement)) {
                    targetElement.style.visibility = "hidden";
                    setTimeout(() => {
                        fitToScreen();
                        resetZoom();
                        targetElement.style.visibility = "visible";
                    }, 10);
                }
            }
        }

        targetElement.addEventListener("mousemove", getMousePosition);

        // Apply auto expand if enabled
        if (hotkeysConfig.canvas_auto_expand) {
            targetElement.addEventListener("mousemove", autoExpand);
        }

        // Handle events only inside the targetElement
        let isKeyDownHandlerAttached = false;

@ -9,6 +9,7 @@ shared.options_templates.update(shared.options_section(('canvas_hotkey', "Canvas
    "canvas_hotkey_reset": shared.OptionInfo("R", "Reset zoom and canvas position"),
    "canvas_hotkey_overlap": shared.OptionInfo("O", "Toggle overlap").info("Technical button, needed for testing"),
    "canvas_show_tooltip": shared.OptionInfo(True, "Enable tooltip on the canvas"),
    "canvas_auto_expand": shared.OptionInfo(True, "Automatically expands an image that does not fit completely in the canvas area, similar to manually pressing the S and R buttons"),
    "canvas_blur_prompt": shared.OptionInfo(False, "Take the focus off the prompt when working with a canvas"),
    "canvas_disabled_functions": shared.OptionInfo(["Overlap"], "Disable functions that you don't use", gr.CheckboxGroup, {"choices": ["Zoom","Adjust brush size", "Moving canvas","Fullscreen","Reset Zoom","Overlap"]}),
}))

@ -0,0 +1,37 @@
var observerAccordionOpen = new MutationObserver(function(mutations) {
    mutations.forEach(function(mutationRecord) {
        var elem = mutationRecord.target;
        var open = elem.classList.contains('open');

        var accordion = elem.parentNode;
        accordion.classList.toggle('input-accordion-open', open);

        var checkbox = gradioApp().querySelector('#' + accordion.id + "-checkbox input");
        checkbox.checked = open;
        updateInput(checkbox);

        var extra = gradioApp().querySelector('#' + accordion.id + "-extra");
        if (extra) {
            extra.style.display = open ? "" : "none";
        }
    });
});

function inputAccordionChecked(id, checked) {
    var label = gradioApp().querySelector('#' + id + " .label-wrap");
    if (label.classList.contains('open') != checked) {
        label.click();
    }
}

onUiLoaded(function() {
    for (var accordion of gradioApp().querySelectorAll('.input-accordion')) {
        var labelWrap = accordion.querySelector('.label-wrap');
        observerAccordionOpen.observe(labelWrap, {attributes: true, attributeFilter: ['class']});

        var extra = gradioApp().querySelector('#' + accordion.id + "-extra");
        if (extra) {
            labelWrap.insertBefore(extra, labelWrap.lastElementChild);
        }
    }
});

@ -16,6 +16,7 @@ parser.add_argument("--test-server", action='store_true', help="launch.py argume
parser.add_argument("--log-startup", action='store_true', help="launch.py argument: print a detailed log of what's happening at startup")
parser.add_argument("--skip-prepare-environment", action='store_true', help="launch.py argument: skip all environment preparation")
parser.add_argument("--skip-install", action='store_true', help="launch.py argument: skip installation of packages")
parser.add_argument("--loglevel", type=str, help="log level; one of: CRITICAL, ERROR, WARNING, INFO, DEBUG", default=None)
parser.add_argument("--do-not-download-clip", action='store_true', help="do not download CLIP model even if it's not included in the checkpoint")
parser.add_argument("--data-dir", type=str, default=os.path.dirname(os.path.dirname(os.path.realpath(__file__))), help="base path where all user data is stored")
parser.add_argument("--config", type=str, default=sd_default_config, help="path to config which constructs model",)

@ -3,7 +3,7 @@ import contextlib
from functools import lru_cache

import torch
from modules import errors, rng_philox
from modules import errors, shared

if sys.platform == "darwin":
    from modules import mac_specific

@ -17,8 +17,6 @@ def has_mps() -> bool:


def get_cuda_device_string():
    from modules import shared

    if shared.cmd_opts.device_id is not None:
        return f"cuda:{shared.cmd_opts.device_id}"

@ -40,8 +38,6 @@ def get_optimal_device():


def get_device_for(task):
    from modules import shared

    if task in shared.cmd_opts.use_cpu:
        return cpu

@ -96,87 +92,7 @@ def cond_cast_float(input):
nv_rng = None


def randn(seed, shape):
    """Generate a tensor with random numbers from a normal distribution using seed.

    Uses the seed parameter to set the global torch seed; to generate more with that seed, use randn_like/randn_without_seed."""

    from modules.shared import opts

    manual_seed(seed)

    if opts.randn_source == "NV":
        return torch.asarray(nv_rng.randn(shape), device=device)

    if opts.randn_source == "CPU" or device.type == 'mps':
        return torch.randn(shape, device=cpu).to(device)

    return torch.randn(shape, device=device)


def randn_local(seed, shape):
    """Generate a tensor with random numbers from a normal distribution using seed.

    Does not change the global random number generator. You can only generate the seed's first tensor using this function."""

    from modules.shared import opts

    if opts.randn_source == "NV":
        rng = rng_philox.Generator(seed)
        return torch.asarray(rng.randn(shape), device=device)

    local_device = cpu if opts.randn_source == "CPU" or device.type == 'mps' else device
    local_generator = torch.Generator(local_device).manual_seed(int(seed))
    return torch.randn(shape, device=local_device, generator=local_generator).to(device)
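
As context for randn_local above (this hunk removes it from devices.py; the commit moves the RNG helpers into modules/rng.py): a dedicated torch.Generator keeps the draw reproducible without touching torch's global RNG state. A minimal standalone sketch of that idea:

    import torch

    def randn_local_sketch(seed, shape, device=torch.device("cpu")):
        # A private generator: the same seed always yields the same tensor,
        # and the global RNG state is left untouched.
        generator = torch.Generator(device).manual_seed(int(seed))
        return torch.randn(shape, device=device, generator=generator)

    a = randn_local_sketch(42, (2, 3))
    b = randn_local_sketch(42, (2, 3))
    assert torch.equal(a, b)
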


def randn_like(x):
    """Generate a tensor with random numbers from a normal distribution using the previously initialized generator.

    Use either randn() or manual_seed() to initialize the generator."""

    from modules.shared import opts

    if opts.randn_source == "NV":
        return torch.asarray(nv_rng.randn(x.shape), device=x.device, dtype=x.dtype)

    if opts.randn_source == "CPU" or x.device.type == 'mps':
        return torch.randn_like(x, device=cpu).to(x.device)

    return torch.randn_like(x)


def randn_without_seed(shape):
    """Generate a tensor with random numbers from a normal distribution using the previously initialized generator.

    Use either randn() or manual_seed() to initialize the generator."""

    from modules.shared import opts

    if opts.randn_source == "NV":
        return torch.asarray(nv_rng.randn(shape), device=device)

    if opts.randn_source == "CPU" or device.type == 'mps':
        return torch.randn(shape, device=cpu).to(device)

    return torch.randn(shape, device=device)


def manual_seed(seed):
    """Set up a global random number generator using the specified seed."""
    from modules.shared import opts

    if opts.randn_source == "NV":
        global nv_rng
        nv_rng = rng_philox.Generator(seed)
        return

    torch.manual_seed(seed)


def autocast(disable=False):
    from modules import shared

    if disable:
        return contextlib.nullcontext()

@ -195,8 +111,6 @@ class NansException(Exception):


def test_for_nans(x, where):
    from modules import shared

    if shared.cmd_opts.disable_nan_check:
        return

@ -236,3 +150,4 @@ def first_time_calculation():
    x = torch.zeros((1, 1, 3, 3)).to(device, dtype)
    conv2d = torch.nn.Conv2d(1, 1, (3, 3)).to(device, dtype)
    conv2d(x)

@ -1,7 +1,7 @@
import os
import threading

from modules import shared, errors, cache
from modules import shared, errors, cache, scripts
from modules.gitpython_hack import Repo
from modules.paths_internal import extensions_dir, extensions_builtin_dir, script_path  # noqa: F401

@ -90,8 +90,6 @@ class Extension:
        self.have_info_from_repo = True

    def list_files(self, subdir, extension):
        from modules import scripts

        dirpath = os.path.join(self.path, subdir)
        if not os.path.isdir(dirpath):
            return []

@ -6,7 +6,7 @@ import re

import gradio as gr
from modules.paths import data_path
from modules import shared, ui_tempdir, script_callbacks
from modules import shared, ui_tempdir, script_callbacks, processing
from PIL import Image

re_param_code = r'\s*([\w ]+):\s*("(?:\\"[^,]|\\"|\\|[^\"])+"|[^,]*)(?:,|$)'

@ -198,7 +198,6 @@ def restore_old_hires_fix_params(res):
    height = int(res.get("Size-2", 512))

    if firstpass_width == 0 or firstpass_height == 0:
        from modules import processing
        firstpass_width, firstpass_height = processing.old_hires_fix_first_pass_dimensions(width, height)

    res['Size-1'] = firstpass_width

@ -317,36 +316,18 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model


infotext_to_setting_name_mapping = [
    ('Clip skip', 'CLIP_stop_at_last_layers', ),

]
"""Mapping of infotext labels to setting names. Only left for backwards compatibility - use OptionInfo(..., infotext='...') instead.
Example content:

infotext_to_setting_name_mapping = [
    ('Conditional mask weight', 'inpainting_mask_weight'),
    ('Model hash', 'sd_model_checkpoint'),
    ('ENSD', 'eta_noise_seed_delta'),
    ('Schedule type', 'k_sched_type'),
    ('Schedule max sigma', 'sigma_max'),
    ('Schedule min sigma', 'sigma_min'),
    ('Schedule rho', 'rho'),
    ('Noise multiplier', 'initial_noise_multiplier'),
    ('Eta', 'eta_ancestral'),
    ('Eta DDIM', 'eta_ddim'),
    ('Sigma churn', 's_churn'),
    ('Sigma tmin', 's_tmin'),
    ('Sigma tmax', 's_tmax'),
    ('Sigma noise', 's_noise'),
    ('Discard penultimate sigma', 'always_discard_next_to_last_sigma'),
    ('UniPC variant', 'uni_pc_variant'),
    ('UniPC skip type', 'uni_pc_skip_type'),
    ('UniPC order', 'uni_pc_order'),
    ('UniPC lower order final', 'uni_pc_lower_order_final'),
    ('Token merging ratio', 'token_merging_ratio'),
    ('Token merging ratio hr', 'token_merging_ratio_hr'),
    ('RNG', 'randn_source'),
    ('NGMS', 's_min_uncond'),
    ('Pad conds', 'pad_cond_uncond'),
    ('VAE Encoder', 'sd_vae_encode_method'),
    ('VAE Decoder', 'sd_vae_decode_method'),
    ('Refiner', 'sd_refiner_checkpoint'),
    ('Refiner switch at', 'sd_refiner_switch_at'),
]
"""


def create_override_settings_dict(text_pairs):

@ -367,7 +348,8 @@ def create_override_settings_dict(text_pairs):

        params[k] = v.strip()

    for param_name, setting_name in infotext_to_setting_name_mapping:
    mapping = [(info.infotext, k) for k, info in shared.opts.data_labels.items() if info.infotext]
    for param_name, setting_name in mapping + infotext_to_setting_name_mapping:
        value = params.get(param_name, None)

        if value is None:

@ -421,7 +403,8 @@ def connect_paste(button, paste_fields, input_comp, override_settings_component,
    def paste_settings(params):
        vals = {}

        for param_name, setting_name in infotext_to_setting_name_mapping:
        mapping = [(info.infotext, k) for k, info in shared.opts.data_labels.items() if info.infotext]
        for param_name, setting_name in mapping + infotext_to_setting_name_mapping:
            if param_name in already_handled_fields:
                continue

@ -1,6 +1,6 @@
import gradio as gr

from modules import scripts
from modules import scripts, ui_tempdir

def add_classes_to_gradio_component(comp):
    """

@ -58,3 +58,5 @@ original_BlockContext_init = gr.blocks.BlockContext.__init__
gr.components.IOComponent.__init__ = IOComponent_init
gr.blocks.Block.get_config = Block_get_config
gr.blocks.BlockContext.__init__ = BlockContext_init

ui_tempdir.install_ui_tempdir_override()

@ -21,8 +21,6 @@ from modules import sd_samplers, shared, script_callbacks, errors
from modules.paths_internal import roboto_ttf_file
from modules.shared import opts

import modules.sd_vae as sd_vae

LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS)

@ -342,16 +340,6 @@ def sanitize_filename_part(text, replace_spaces=True):


class FilenameGenerator:
    def get_vae_filename(self):  # get the name of the VAE file.
        if sd_vae.loaded_vae_file is None:
            return "NoneType"
        file_name = os.path.basename(sd_vae.loaded_vae_file)
        split_file_name = file_name.split('.')
        if len(split_file_name) > 1 and split_file_name[0] == '':
            return split_file_name[1]  # if the filename starts with ".", return the part after it
        else:
            return split_file_name[0]

    replacements = {
        'seed': lambda self: self.seed if self.seed is not None else '',
        'seed_first': lambda self: self.seed if self.p.batch_size == 1 else self.p.all_seeds[0],

@ -391,6 +379,22 @@ class FilenameGenerator:
        self.image = image
        self.zip = zip

    def get_vae_filename(self):
        """Get the name of the VAE file."""

        import modules.sd_vae as sd_vae

        if sd_vae.loaded_vae_file is None:
            return "NoneType"

        file_name = os.path.basename(sd_vae.loaded_vae_file)
        split_file_name = file_name.split('.')
        if len(split_file_name) > 1 and split_file_name[0] == '':
            return split_file_name[1]  # if the filename starts with ".", return the part after it
        else:
            return split_file_name[0]

    def hasprompt(self, *args):
        lower = self.prompt.lower()
        if self.p is None or self.prompt is None:

@ -116,7 +116,7 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=Fal
    process_images(p)


def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_name: str, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, img2img_batch_use_png_info: bool, img2img_batch_png_info_props: list, img2img_batch_png_info_dir: str, request: gr.Request, *args):
def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_name: str, mask_blur: int, mask_alpha: float, inpainting_fill: int, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, img2img_batch_use_png_info: bool, img2img_batch_png_info_props: list, img2img_batch_png_info_dir: str, request: gr.Request, *args):
    override_settings = create_override_settings_dict(override_settings_texts)

    is_batch = mode == 5

@ -179,8 +179,6 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s
        cfg_scale=cfg_scale,
        width=width,
        height=height,
        restore_faces=restore_faces,
        tiling=tiling,
        init_images=[image],
        mask=mask,
        mask_blur=mask_blur,

@ -0,0 +1,168 @@
import importlib
import logging
import sys
import warnings
from threading import Thread

from modules.timer import startup_timer


def imports():
    logging.getLogger("torch.distributed.nn").setLevel(logging.ERROR)  # sshh...
    logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage())

    import torch  # noqa: F401
    startup_timer.record("import torch")
    import pytorch_lightning  # noqa: F401
    startup_timer.record("import torch")
    warnings.filterwarnings(action="ignore", category=DeprecationWarning, module="pytorch_lightning")
    warnings.filterwarnings(action="ignore", category=UserWarning, module="torchvision")

    import gradio  # noqa: F401
    startup_timer.record("import gradio")

    from modules import paths, timer, import_hook, errors  # noqa: F401
    startup_timer.record("setup paths")

    import ldm.modules.encoders.modules  # noqa: F401
    startup_timer.record("import ldm")

    import sgm.modules.encoders.modules  # noqa: F401
    startup_timer.record("import sgm")

    from modules import shared_init
    shared_init.initialize()
    startup_timer.record("initialize shared")

    from modules import processing, gradio_extensons, ui  # noqa: F401
    startup_timer.record("other imports")


def check_versions():
    from modules.shared_cmd_options import cmd_opts

    if not cmd_opts.skip_version_check:
        from modules import errors
        errors.check_versions()


def initialize():
    from modules import initialize_util
    initialize_util.fix_torch_version()
    initialize_util.fix_asyncio_event_loop_policy()
    initialize_util.validate_tls_options()
    initialize_util.configure_sigint_handler()
    initialize_util.configure_opts_onchange()

    from modules import modelloader
    modelloader.cleanup_models()

    from modules import sd_models
    sd_models.setup_model()
    startup_timer.record("setup SD model")

    from modules.shared_cmd_options import cmd_opts

    from modules import codeformer_model
    warnings.filterwarnings(action="ignore", category=UserWarning, module="torchvision.transforms.functional_tensor")
    codeformer_model.setup_model(cmd_opts.codeformer_models_path)
    startup_timer.record("setup codeformer")

    from modules import gfpgan_model
    gfpgan_model.setup_model(cmd_opts.gfpgan_models_path)
    startup_timer.record("setup gfpgan")

    initialize_rest(reload_script_modules=False)


def initialize_rest(*, reload_script_modules=False):
    """
    Called both from initialize() and when reloading the webui.
    """
    from modules.shared_cmd_options import cmd_opts

    from modules import sd_samplers
    sd_samplers.set_samplers()
    startup_timer.record("set samplers")

    from modules import extensions
    extensions.list_extensions()
    startup_timer.record("list extensions")

    from modules import initialize_util
    initialize_util.restore_config_state_file()
    startup_timer.record("restore config state file")

    from modules import shared, upscaler, scripts
    if cmd_opts.ui_debug_mode:
        shared.sd_upscalers = upscaler.UpscalerLanczos().scalers
        scripts.load_scripts()
        return

    from modules import sd_models
    sd_models.list_models()
    startup_timer.record("list SD models")

    from modules import localization
    localization.list_localizations(cmd_opts.localizations_dir)
    startup_timer.record("list localizations")

    with startup_timer.subcategory("load scripts"):
        scripts.load_scripts()

    if reload_script_modules:
        for module in [module for name, module in sys.modules.items() if name.startswith("modules.ui")]:
            importlib.reload(module)
        startup_timer.record("reload script modules")

    from modules import modelloader
    modelloader.load_upscalers()
    startup_timer.record("load upscalers")

    from modules import sd_vae
    sd_vae.refresh_vae_list()
    startup_timer.record("refresh VAE")

    from modules import textual_inversion
    textual_inversion.textual_inversion.list_textual_inversion_templates()
    startup_timer.record("refresh textual inversion templates")

    from modules import script_callbacks, sd_hijack_optimizations, sd_hijack
    script_callbacks.on_list_optimizers(sd_hijack_optimizations.list_optimizers)
    sd_hijack.list_optimizers()
    startup_timer.record("scripts list_optimizers")

    from modules import sd_unet
    sd_unet.list_unets()
    startup_timer.record("scripts list_unets")

    def load_model():
        """
        Accesses shared.sd_model property to load model.
        After it's available, if it has been loaded before this access by some extension,
        its optimization may be None because the list of optimizers has not been filled
        by that time, so we apply optimization again.
        """

        shared.sd_model  # noqa: B018

        if sd_hijack.current_optimizer is None:
            sd_hijack.apply_optimizations()

        from modules import devices
        devices.first_time_calculation()

    Thread(target=load_model).start()

    from modules import shared_items
    shared_items.reload_hypernetworks()
    startup_timer.record("reload hypernetworks")

    from modules import ui_extra_networks
    ui_extra_networks.initialize()
    ui_extra_networks.register_default_pages()

    from modules import extra_networks
    extra_networks.initialize()
    extra_networks.register_default_extra_networks()
    startup_timer.record("initialize extra networks")

@ -0,0 +1,183 @@
import json
import os
import signal
import sys
import re

from modules.timer import startup_timer


def gradio_server_name():
    from modules.shared_cmd_options import cmd_opts

    if cmd_opts.server_name:
        return cmd_opts.server_name
    else:
        return "0.0.0.0" if cmd_opts.listen else None


def fix_torch_version():
    import torch

    # Truncate version number of nightly/local build of PyTorch to not cause exceptions with CodeFormer or Safetensors
    if ".dev" in torch.__version__ or "+git" in torch.__version__:
        torch.__long_version__ = torch.__version__
        torch.__version__ = re.search(r'[\d.]+[\d]', torch.__version__).group(0)
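
The regex keeps only the leading dotted-numeric part of the version string, dropping nightly/local-build suffixes. For example (the version strings are illustrative):

    import re

    for v in ["2.1.0.dev20230614+cu118", "2.0.1+git12f45a8"]:
        print(re.search(r'[\d.]+[\d]', v).group(0))
    # 2.1.0
    # 2.0.1
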


def fix_asyncio_event_loop_policy():
    """
    The default `asyncio` event loop policy only automatically creates
    event loops in the main threads. Other threads must create event
    loops explicitly or `asyncio.get_event_loop` (and therefore
    `.IOLoop.current`) will fail. Installing this policy allows event
    loops to be created automatically on any thread, matching the
    behavior of Tornado versions prior to 5.0 (or 5.0 on Python 2).
    """

    import asyncio

    if sys.platform == "win32" and hasattr(asyncio, "WindowsSelectorEventLoopPolicy"):
        # "Any thread" and "selector" should be orthogonal, but there's not a clean
        # interface for composing policies so pick the right base.
        _BasePolicy = asyncio.WindowsSelectorEventLoopPolicy  # type: ignore
    else:
        _BasePolicy = asyncio.DefaultEventLoopPolicy

    class AnyThreadEventLoopPolicy(_BasePolicy):  # type: ignore
        """Event loop policy that allows loop creation on any thread.
        Usage::

            asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
        """

        def get_event_loop(self) -> asyncio.AbstractEventLoop:
            try:
                return super().get_event_loop()
            except (RuntimeError, AssertionError):
                # This was an AssertionError in python 3.4.2 (which ships with debian jessie)
                # and changed to a RuntimeError in 3.4.3.
                # "There is no current event loop in thread %r"
                loop = self.new_event_loop()
                self.set_event_loop(loop)
                return loop

    asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
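
A minimal reproduction of the failure the policy works around (standalone sketch; under the default policy on recent Python versions, asyncio.get_event_loop() raises in a thread that has no loop set):

    import asyncio
    import threading

    def worker():
        try:
            asyncio.get_event_loop()
            print("got a loop")
        except RuntimeError as e:
            print("failed:", e)  # "There is no current event loop in thread ..."

    t = threading.Thread(target=worker)
    t.start()
    t.join()
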


def restore_config_state_file():
    from modules import shared, config_states

    config_state_file = shared.opts.restore_config_state_file
    if config_state_file == "":
        return

    shared.opts.restore_config_state_file = ""
    shared.opts.save(shared.config_filename)

    if os.path.isfile(config_state_file):
        print(f"*** About to restore extension state from file: {config_state_file}")
        with open(config_state_file, "r", encoding="utf-8") as f:
            config_state = json.load(f)
            config_states.restore_extension_config(config_state)
        startup_timer.record("restore extension config")
    elif config_state_file:
        print(f"!!! Config state backup not found: {config_state_file}")


def validate_tls_options():
    from modules.shared_cmd_options import cmd_opts

    if not (cmd_opts.tls_keyfile and cmd_opts.tls_certfile):
        return

    try:
        if not os.path.exists(cmd_opts.tls_keyfile):
            print("Invalid path to TLS keyfile given")
        if not os.path.exists(cmd_opts.tls_certfile):
            print(f"Invalid path to TLS certfile: '{cmd_opts.tls_certfile}'")
    except TypeError:
        cmd_opts.tls_keyfile = cmd_opts.tls_certfile = None
        print("TLS setup invalid, running webui without TLS")
    else:
        print("Running with TLS")
    startup_timer.record("TLS")


def get_gradio_auth_creds():
    """
    Convert the gradio_auth and gradio_auth_path commandline arguments into
    an iterable of (username, password) tuples.
    """
    from modules.shared_cmd_options import cmd_opts

    def process_credential_line(s):
        s = s.strip()
        if not s:
            return None
        return tuple(s.split(':', 1))

    if cmd_opts.gradio_auth:
        for cred in cmd_opts.gradio_auth.split(','):
            cred = process_credential_line(cred)
            if cred:
                yield cred

    if cmd_opts.gradio_auth_path:
        with open(cmd_opts.gradio_auth_path, 'r', encoding="utf8") as file:
            for line in file.readlines():
                for cred in line.strip().split(','):
                    cred = process_credential_line(cred)
                    if cred:
                        yield cred
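
The generator yields (username, password) tuples from both sources, splitting each entry on the first ':' only, so passwords may themselves contain colons. A quick standalone check of the parsing rule:

    def process_credential_line(s):
        s = s.strip()
        if not s:
            return None
        return tuple(s.split(':', 1))

    line = "alice:secret, bob:pa:ss"
    creds = [c for c in (process_credential_line(x) for x in line.split(',')) if c]
    print(creds)  # [('alice', 'secret'), ('bob', 'pa:ss')]
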


def configure_sigint_handler():
    # make the program just exit at ctrl+c without waiting for anything
    def sigint_handler(sig, frame):
        print(f'Interrupted with signal {sig} in {frame}')
        os._exit(0)

    if not os.environ.get("COVERAGE_RUN"):
        # Don't install the immediate-quit handler when running under coverage,
        # as then the coverage report won't be generated.
        signal.signal(signal.SIGINT, sigint_handler)


def configure_opts_onchange():
    from modules import shared, sd_models, sd_vae, ui_tempdir, sd_hijack
    from modules.call_queue import wrap_queued_call

    shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: sd_models.reload_model_weights()), call=False)
    shared.opts.onchange("sd_vae", wrap_queued_call(lambda: sd_vae.reload_vae_weights()), call=False)
    shared.opts.onchange("sd_vae_overrides_per_model_preferences", wrap_queued_call(lambda: sd_vae.reload_vae_weights()), call=False)
    shared.opts.onchange("temp_dir", ui_tempdir.on_tmpdir_changed)
    shared.opts.onchange("gradio_theme", shared.reload_gradio_theme)
    shared.opts.onchange("cross_attention_optimization", wrap_queued_call(lambda: sd_hijack.model_hijack.redo_hijack(shared.sd_model)), call=False)
    startup_timer.record("opts onchange")


def setup_middleware(app):
    from starlette.middleware.gzip import GZipMiddleware

    app.middleware_stack = None  # reset current middleware to allow modifying user provided list
    app.add_middleware(GZipMiddleware, minimum_size=1000)
    configure_cors_middleware(app)
    app.build_middleware_stack()  # rebuild middleware stack on-the-fly


def configure_cors_middleware(app):
    from starlette.middleware.cors import CORSMiddleware
    from modules.shared_cmd_options import cmd_opts

    cors_options = {
        "allow_methods": ["*"],
        "allow_headers": ["*"],
        "allow_credentials": True,
    }
    if cmd_opts.cors_allow_origins:
        cors_options["allow_origins"] = cmd_opts.cors_allow_origins.split(',')
    if cmd_opts.cors_allow_origins_regex:
        cors_options["allow_origin_regex"] = cmd_opts.cors_allow_origins_regex
    app.add_middleware(CORSMiddleware, **cors_options)

@ -1,4 +1,5 @@
# this script installs necessary requirements and launches main program in webui.py
import logging
import re
import subprocess
import os

@ -11,8 +12,10 @@ from functools import lru_cache
from modules import cmd_args, errors
from modules.paths_internal import script_path, extensions_dir
from modules.timer import startup_timer
from modules import logging_config

args, _ = cmd_args.parser.parse_known_args()
logging_config.setup_logging(args.loglevel)

python = sys.executable
git = os.environ.get('GIT', "git")

@ -249,6 +252,8 @@ def run_extensions_installers(settings_file):

    with startup_timer.subcategory("run extensions installers"):
        for dirname_extension in list_extensions(settings_file):
            logging.debug(f"Installing {dirname_extension}")

            path = os.path.join(extensions_dir, dirname_extension)

            if os.path.isdir(path):

@ -1,7 +1,7 @@
import json
import os

from modules import errors
from modules import errors, scripts

localizations = {}

@ -16,7 +16,6 @@ def list_localizations(dirname):

        localizations[fn] = os.path.join(dirname, file)

    from modules import scripts
    for file in scripts.list_scripts("localizations", ".json"):
        fn, ext = os.path.splitext(file.filename)
        localizations[fn] = file.path

@ -0,0 +1,16 @@
import os
import logging


def setup_logging(loglevel):
    if loglevel is None:
        loglevel = os.environ.get("SD_WEBUI_LOG_LEVEL")

    if loglevel:
        log_level = getattr(logging, loglevel.upper(), None) or logging.INFO
        logging.basicConfig(
            level=log_level,
            format='%(asctime)s %(levelname)s [%(name)s] %(message)s',
            datefmt='%Y-%m-%d %H:%M:%S',
        )
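
A usage sketch, assuming this new module is importable as modules.logging_config: the level comes from --loglevel, falling back to the SD_WEBUI_LOG_LEVEL environment variable, and unrecognized names fall back to INFO:

    import logging
    from modules.logging_config import setup_logging

    setup_logging("debug")  # case-insensitive; "nonsense" would fall back to INFO
    logging.getLogger("demo").debug("now visible")
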
@ -4,6 +4,7 @@ import torch
import platform
from modules.sd_hijack_utils import CondFunc
from packaging import version
from modules import shared

log = logging.getLogger(__name__)

@ -30,8 +31,7 @@ has_mps = check_for_mps()

def torch_mps_gc() -> None:
    try:
        from modules.shared import state
        if state.current_latent is not None:
        if shared.state.current_latent is not None:
            log.debug("`current_latent` is set, skipping MPS garbage collection")
            return
        from torch.mps import empty_cache

@ -0,0 +1,238 @@
import json
import sys

import gradio as gr

from modules import errors
from modules.shared_cmd_options import cmd_opts


class OptionInfo:
    def __init__(self, default=None, label="", component=None, component_args=None, onchange=None, section=None, refresh=None, comment_before='', comment_after='', infotext=None):
        self.default = default
        self.label = label
        self.component = component
        self.component_args = component_args
        self.onchange = onchange
        self.section = section
        self.refresh = refresh
        self.do_not_save = False

        self.comment_before = comment_before
        """HTML text that will be added after label in UI"""

        self.comment_after = comment_after
        """HTML text that will be added before label in UI"""

        self.infotext = infotext

    def link(self, label, url):
        self.comment_before += f"[<a href='{url}' target='_blank'>{label}</a>]"
        return self

    def js(self, label, js_func):
        self.comment_before += f"[<a onclick='{js_func}(); return false'>{label}</a>]"
        return self

    def info(self, info):
        self.comment_after += f"<span class='info'>({info})</span>"
        return self

    def html(self, html):
        self.comment_after += html
        return self

    def needs_restart(self):
        self.comment_after += " <span class='info'>(requires restart)</span>"
        return self

    def needs_reload_ui(self):
        self.comment_after += " <span class='info'>(requires Reload UI)</span>"
        return self


class OptionHTML(OptionInfo):
    def __init__(self, text):
        super().__init__(str(text).strip(), label='', component=lambda **kwargs: gr.HTML(elem_classes="settings-info", **kwargs))

        self.do_not_save = True


def options_section(section_identifier, options_dict):
    for v in options_dict.values():
        v.section = section_identifier

    return options_dict


options_builtin_fields = {"data_labels", "data", "restricted_opts", "typemap"}


class Options:
    typemap = {int: float}

    def __init__(self, data_labels, restricted_opts):
        self.data_labels = data_labels
        self.data = {k: v.default for k, v in self.data_labels.items()}
        self.restricted_opts = restricted_opts

    def __setattr__(self, key, value):
        if key in options_builtin_fields:
            return super(Options, self).__setattr__(key, value)

        if self.data is not None:
            if key in self.data or key in self.data_labels:
                assert not cmd_opts.freeze_settings, "changing settings is disabled"

                info = self.data_labels.get(key, None)
                if info.do_not_save:
                    return

                comp_args = info.component_args if info else None
                if isinstance(comp_args, dict) and comp_args.get('visible', True) is False:
                    raise RuntimeError(f"not possible to set {key} because it is restricted")

                if cmd_opts.hide_ui_dir_config and key in self.restricted_opts:
                    raise RuntimeError(f"not possible to set {key} because it is restricted")

                self.data[key] = value
                return

        return super(Options, self).__setattr__(key, value)

    def __getattr__(self, item):
        if item in options_builtin_fields:
            return super(Options, self).__getattribute__(item)

        if self.data is not None:
            if item in self.data:
                return self.data[item]

        if item in self.data_labels:
            return self.data_labels[item].default

        return super(Options, self).__getattribute__(item)

    def set(self, key, value):
        """sets an option and calls its onchange callback, returning True if the option changed and False otherwise"""

        oldval = self.data.get(key, None)
        if oldval == value:
            return False

        if self.data_labels[key].do_not_save:
            return False

        try:
            setattr(self, key, value)
        except RuntimeError:
            return False

        if self.data_labels[key].onchange is not None:
            try:
                self.data_labels[key].onchange()
            except Exception as e:
                errors.display(e, f"changing setting {key} to {value}")
                setattr(self, key, oldval)
                return False

        return True

    def get_default(self, key):
        """returns the default value for the key"""

        data_label = self.data_labels.get(key)
        if data_label is None:
            return None

        return data_label.default

    def save(self, filename):
        assert not cmd_opts.freeze_settings, "saving settings is disabled"

        with open(filename, "w", encoding="utf8") as file:
            json.dump(self.data, file, indent=4)

    def same_type(self, x, y):
        if x is None or y is None:
            return True

        type_x = self.typemap.get(type(x), type(x))
        type_y = self.typemap.get(type(y), type(y))

        return type_x == type_y

    def load(self, filename):
        with open(filename, "r", encoding="utf8") as file:
            self.data = json.load(file)

        # 1.6.0 VAE defaults
        if self.data.get('sd_vae_as_default') is not None and self.data.get('sd_vae_overrides_per_model_preferences') is None:
            self.data['sd_vae_overrides_per_model_preferences'] = not self.data.get('sd_vae_as_default')

        # 1.1.1 quicksettings list migration
        if self.data.get('quicksettings') is not None and self.data.get('quicksettings_list') is None:
            self.data['quicksettings_list'] = [i.strip() for i in self.data.get('quicksettings').split(',')]

        # 1.4.0 ui_reorder
        if isinstance(self.data.get('ui_reorder'), str) and self.data.get('ui_reorder') and "ui_reorder_list" not in self.data:
            self.data['ui_reorder_list'] = [i.strip() for i in self.data.get('ui_reorder').split(',')]

        bad_settings = 0
        for k, v in self.data.items():
            info = self.data_labels.get(k, None)
            if info is not None and not self.same_type(info.default, v):
                print(f"Warning: bad setting value: {k}: {v} ({type(v).__name__}; expected {type(info.default).__name__})", file=sys.stderr)
                bad_settings += 1

        if bad_settings > 0:
            print(f"The program is likely to not work with bad settings.\nSettings file: {filename}\nEither fix the file, or delete it and restart.", file=sys.stderr)

    def onchange(self, key, func, call=True):
        item = self.data_labels.get(key)
        item.onchange = func

        if call:
            func()

    def dumpjson(self):
        d = {k: self.data.get(k, v.default) for k, v in self.data_labels.items()}
        d["_comments_before"] = {k: v.comment_before for k, v in self.data_labels.items() if v.comment_before is not None}
        d["_comments_after"] = {k: v.comment_after for k, v in self.data_labels.items() if v.comment_after is not None}
        return json.dumps(d)

    def add_option(self, key, info):
        self.data_labels[key] = info

    def reorder(self):
        """reorder settings so that all items related to section always go together"""

        section_ids = {}
        settings_items = self.data_labels.items()
        for _, item in settings_items:
            if item.section not in section_ids:
                section_ids[item.section] = len(section_ids)

        self.data_labels = dict(sorted(settings_items, key=lambda x: section_ids[x[1].section]))

    def cast_value(self, key, value):
        """casts an arbitrary value to the same type as this setting's value with key
        Example: cast_value("eta_noise_seed_delta", "12") -> returns 12 (an int rather than str)
        """

        if value is None:
            return None

        default_value = self.data_labels[key].default
        if default_value is None:
            default_value = getattr(self, key, None)
        if default_value is None:
            return None

        expected_type = type(default_value)
        if expected_type == bool and value == "False":
            value = False
        else:
            value = expected_type(value)

        return value
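
Behavior sketch for cast_value, assuming modules.options is importable (the two option names and defaults here are illustrative): values are coerced to the type of the setting's default, with the string "False" special-cased because bool("False") would otherwise be True.

    from modules.options import Options, OptionInfo

    labels = {
        "eta_noise_seed_delta": OptionInfo(0, "Eta noise seed delta"),
        "tiling": OptionInfo(False, "Tiling"),
    }
    opts = Options(labels, restricted_opts=set())

    print(opts.cast_value("eta_noise_seed_delta", "12"))  # 12 (int, not str)
    print(opts.cast_value("tiling", "False"))             # False, thanks to the special case
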
@ -14,7 +14,8 @@ from skimage import exposure
from typing import Any, Dict, List

import modules.sd_hijack
from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, extra_networks, sd_vae_approx, scripts, sd_samplers_common, sd_unet, errors
from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, extra_networks, sd_vae_approx, scripts, sd_samplers_common, sd_unet, errors, rng
from modules.rng import slerp  # noqa: F401
from modules.sd_hijack import model_hijack
from modules.sd_samplers_common import images_tensor_to_samples, decode_first_stage, approximation_indexes
from modules.shared import opts, cmd_opts, state

@ -110,7 +111,7 @@ class StableDiffusionProcessing:
    cached_uc = [None, None]
    cached_c = [None, None]

    def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = False, tiling: bool = False, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_min_uncond: float = 0.0, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = None, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, script_args: list = None):
    def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = None, tiling: bool = None, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_min_uncond: float = 0.0, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = None, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, script_args: list = None):
        if sampler_index is not None:
            print("sampler_index argument for StableDiffusionProcessing does not do anything; use sampler_name", file=sys.stderr)

@ -172,6 +173,8 @@ class StableDiffusionProcessing:
        self.iteration = 0
        self.is_hr_pass = False
        self.sampler = None
        self.main_prompt = None
        self.main_negative_prompt = None

        self.prompts = None
        self.negative_prompts = None

@ -184,6 +187,7 @@ class StableDiffusionProcessing:
        self.cached_c = StableDiffusionProcessing.cached_c
        self.uc = None
        self.c = None
        self.rng: rng.ImageRNG = None

        self.user = None

@ -319,6 +323,9 @@ class StableDiffusionProcessing:
        self.all_prompts = [shared.prompt_styles.apply_styles_to_prompt(x, self.styles) for x in self.all_prompts]
        self.all_negative_prompts = [shared.prompt_styles.apply_negative_styles_to_prompt(x, self.styles) for x in self.all_negative_prompts]

        self.main_prompt = self.all_prompts[0]
        self.main_negative_prompt = self.all_negative_prompts[0]

    def cached_params(self, required_prompts, steps, extra_network_data):
        """Returns parameters that invalidate the cond cache if changed"""

@ -473,82 +480,9 @@ class Processed:
        return self.token_merging_ratio_hr if for_hr else self.token_merging_ratio


# from https://discuss.pytorch.org/t/help-regarding-slerp-function-for-generative-model-sampling/32475/3
def slerp(val, low, high):
    low_norm = low/torch.norm(low, dim=1, keepdim=True)
    high_norm = high/torch.norm(high, dim=1, keepdim=True)
    dot = (low_norm*high_norm).sum(1)

    if dot.mean() > 0.9995:
        return low * val + high * (1 - val)

    omega = torch.acos(dot)
    so = torch.sin(omega)
    res = (torch.sin((1.0-val)*omega)/so).unsqueeze(1)*low + (torch.sin(val*omega)/so).unsqueeze(1) * high
    return res


def create_random_tensors(shape, seeds, subseeds=None, subseed_strength=0.0, seed_resize_from_h=0, seed_resize_from_w=0, p=None):
    eta_noise_seed_delta = opts.eta_noise_seed_delta or 0
    xs = []

    # if we have multiple seeds, this means we are working with batch size>1; this then
    # enables the generation of additional tensors with noise that the sampler will use during its processing.
    # Using those pre-generated tensors instead of simple torch.randn allows a batch with seeds [100, 101] to
    # produce the same images as with two batches [100], [101].
    if p is not None and p.sampler is not None and (len(seeds) > 1 and opts.enable_batch_seeds or eta_noise_seed_delta > 0):
        sampler_noises = [[] for _ in range(p.sampler.number_of_needed_noises(p))]
    else:
        sampler_noises = None

    for i, seed in enumerate(seeds):
        noise_shape = shape if seed_resize_from_h <= 0 or seed_resize_from_w <= 0 else (shape[0], seed_resize_from_h//8, seed_resize_from_w//8)

        subnoise = None
        if subseeds is not None and subseed_strength != 0:
            subseed = 0 if i >= len(subseeds) else subseeds[i]

            subnoise = devices.randn(subseed, noise_shape)

        # randn results depend on device; gpu and cpu get different results for same seed;
        # the way I see it, it's better to do this on CPU, so that everyone gets same result;
        # but the original script had it like this, so I do not dare change it for now because
        # it will break everyone's seeds.
        noise = devices.randn(seed, noise_shape)

        if subnoise is not None:
            noise = slerp(subseed_strength, noise, subnoise)

        if noise_shape != shape:
            x = devices.randn(seed, shape)
            dx = (shape[2] - noise_shape[2]) // 2
            dy = (shape[1] - noise_shape[1]) // 2
            w = noise_shape[2] if dx >= 0 else noise_shape[2] + 2 * dx
            h = noise_shape[1] if dy >= 0 else noise_shape[1] + 2 * dy
            tx = 0 if dx < 0 else dx
            ty = 0 if dy < 0 else dy
            dx = max(-dx, 0)
            dy = max(-dy, 0)

            x[:, ty:ty+h, tx:tx+w] = noise[:, dy:dy+h, dx:dx+w]
            noise = x

        if sampler_noises is not None:
            cnt = p.sampler.number_of_needed_noises(p)

            if eta_noise_seed_delta > 0:
                devices.manual_seed(seed + eta_noise_seed_delta)

            for j in range(cnt):
                sampler_noises[j].append(devices.randn_without_seed(tuple(noise_shape)))

        xs.append(noise)

    if sampler_noises is not None:
        p.sampler.sampler_noises = [torch.stack(n).to(shared.device) for n in sampler_noises]

    x = torch.stack(xs).to(shared.device)
    return x
    g = rng.ImageRNG(shape, seeds, subseeds=subseeds, subseed_strength=subseed_strength, seed_resize_from_h=seed_resize_from_h, seed_resize_from_w=seed_resize_from_w)
    return g.next()
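
The whole slerp/create_random_tensors machinery above now lives in modules/rng.py behind the ImageRNG class: callers hand over the latent shape and the seed lists and get the stacked noise back. A hedged usage sketch (keyword names as they appear in this diff; (4, 64, 64) corresponds to a 512x512 latent):

    from modules import rng  # modules/rng.py is introduced by this commit

    g = rng.ImageRNG((4, 64, 64), seeds=[100, 101], subseeds=[200, 201], subseed_strength=0.5)
    first = g.next()  # initial latent noise for the batch
    extra = g.next()  # later calls are meant to yield the additional noises the sampler consumes
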


class DecodedSamples(list):

@ -571,7 +505,7 @@ def decode_latent_batch(model, batch, target_device=None, check_for_nans=False):
            errors.print_error_explanation(
                "A tensor with all NaNs was produced in VAE.\n"
                "Web UI will now convert VAE into 32-bit float and retry.\n"
                "To disable this behavior, disable the 'Automaticlly revert VAE to 32-bit floats' setting.\n"
                "To disable this behavior, disable the 'Automatically revert VAE to 32-bit floats' setting.\n"
                "To always start with 32-bit VAE, use --no-half-vae commandline flag."
            )

@ -590,7 +524,15 @@ def decode_latent_batch(model, batch, target_device=None, check_for_nans=False):


def get_fixed_seed(seed):
    if seed is None or seed == '' or seed == -1:
    if seed == '' or seed is None:
        seed = -1
    elif isinstance(seed, str):
        try:
            seed = int(seed)
        except Exception:
            seed = -1

    if seed == -1:
        return int(random.randrange(4294967294))

    return seed
@ -633,10 +575,12 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter
        "CFG scale": p.cfg_scale,
        "Image CFG scale": getattr(p, 'image_cfg_scale', None),
        "Seed": p.all_seeds[0] if use_main_prompt else all_seeds[index],
        "Face restoration": (opts.face_restoration_model if p.restore_faces else None),
        "Face restoration": opts.face_restoration_model if p.restore_faces else None,
        "Size": f"{p.width}x{p.height}",
        "Model hash": getattr(p, 'sd_model_hash', None if not opts.add_model_hash_to_info or not shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash),
        "Model": (None if not opts.add_model_name_to_info else shared.sd_model.sd_checkpoint_info.name_for_extra),
        "VAE hash": sd_vae.get_loaded_vae_hash() if opts.add_model_hash_to_info else None,
        "VAE": sd_vae.get_loaded_vae_name() if opts.add_model_name_to_info else None,
        "Variation seed": (None if p.subseed_strength == 0 else (p.all_subseeds[0] if use_main_prompt else all_subseeds[index])),
        "Variation seed strength": (None if p.subseed_strength == 0 else p.subseed_strength),
        "Seed resize from": (None if p.seed_resize_from_w <= 0 or p.seed_resize_from_h <= 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}"),

@ -649,6 +593,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter
        "Init image hash": getattr(p, 'init_img_hash', None),
        "RNG": opts.randn_source if opts.randn_source != "GPU" and opts.randn_source != "NV" else None,
        "NGMS": None if p.s_min_uncond == 0 else p.s_min_uncond,
        "Tiling": "True" if p.tiling else None,
        **p.extra_generation_params,
        "Version": program_version() if opts.add_version_to_infotext else None,
        "User": p.user if opts.add_user_name_to_info else None,

@ -656,8 +601,8 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter
|
|||
|
||||
generation_params_text = ", ".join([k if k == v else f'{k}: {generation_parameters_copypaste.quote(v)}' for k, v in generation_params.items() if v is not None])
|
||||
|
||||
prompt_text = p.prompt if use_main_prompt else all_prompts[index]
|
||||
negative_prompt_text = f"\nNegative prompt: {all_negative_prompts[index]}" if all_negative_prompts[index] else ""
|
||||
prompt_text = p.main_prompt if use_main_prompt else all_prompts[index]
|
||||
negative_prompt_text = f"\nNegative prompt: {p.main_negative_prompt if use_main_prompt else all_negative_prompts[index]}" if all_negative_prompts[index] else ""
|
||||
|
||||
return f"{prompt_text}{negative_prompt_text}\n{generation_params_text}".strip()
|
||||
|
||||
|
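A toy illustration of how the dictionary above collapses into one infotext line: None entries are dropped and the rest render as "key: value" (the real code additionally quotes values through generation_parameters_copypaste.quote, omitted here):

    params = {"Steps": 20, "Sampler": "Euler a", "CFG scale": 7.0, "Seed": 42, "Face restoration": None}
    line = ", ".join(f"{k}: {v}" for k, v in params.items() if v is not None)
    # 'Steps: 20, Sampler: Euler a, CFG scale: 7.0, Seed: 42'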
@ -718,6 +663,12 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
    seed = get_fixed_seed(p.seed)
    subseed = get_fixed_seed(p.subseed)

    if p.restore_faces is None:
        p.restore_faces = opts.face_restoration

    if p.tiling is None:
        p.tiling = opts.tiling

    modules.sd_hijack.model_hijack.apply_circular(p.tiling)
    modules.sd_hijack.model_hijack.clear_comments()

@ -773,6 +724,8 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
            p.seeds = p.all_seeds[n * p.batch_size:(n + 1) * p.batch_size]
            p.subseeds = p.all_subseeds[n * p.batch_size:(n + 1) * p.batch_size]

            p.rng = rng.ImageRNG((opt_C, p.height // opt_f, p.width // opt_f), p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, seed_resize_from_h=p.seed_resize_from_h, seed_resize_from_w=p.seed_resize_from_w)

            if p.scripts is not None:
                p.scripts.before_process_batch(p, batch_number=n, prompts=p.prompts, seeds=p.seeds, subseeds=p.subseeds)

@ -794,7 +747,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
            # strength, which is saved as "Model Strength: 1.0" in the infotext
            if n == 0:
                with open(os.path.join(paths.data_path, "params.txt"), "w", encoding="utf8") as file:
                    processed = Processed(p, [], p.seed, "")
                    processed = Processed(p, [])
                    file.write(processed.infotext(p, 0))

            p.setup_conds()
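The slicing above gives every batch its own window into the seed lists; sketched:

    all_seeds = [100, 101, 102, 103, 104, 105]
    batch_size = 2

    for n in range(3):
        seeds = all_seeds[n * batch_size:(n + 1) * batch_size]
        # n=0 -> [100, 101], n=1 -> [102, 103], n=2 -> [104, 105]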
@ -997,6 +950,45 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
        self.hr_c = None
        self.hr_uc = None

    def calculate_target_resolution(self):
        if opts.use_old_hires_fix_width_height and self.applied_old_hires_behavior_to != (self.width, self.height):
            self.hr_resize_x = self.width
            self.hr_resize_y = self.height
            self.hr_upscale_to_x = self.width
            self.hr_upscale_to_y = self.height

            self.width, self.height = old_hires_fix_first_pass_dimensions(self.width, self.height)
            self.applied_old_hires_behavior_to = (self.width, self.height)

        if self.hr_resize_x == 0 and self.hr_resize_y == 0:
            self.extra_generation_params["Hires upscale"] = self.hr_scale
            self.hr_upscale_to_x = int(self.width * self.hr_scale)
            self.hr_upscale_to_y = int(self.height * self.hr_scale)
        else:
            self.extra_generation_params["Hires resize"] = f"{self.hr_resize_x}x{self.hr_resize_y}"

            if self.hr_resize_y == 0:
                self.hr_upscale_to_x = self.hr_resize_x
                self.hr_upscale_to_y = self.hr_resize_x * self.height // self.width
            elif self.hr_resize_x == 0:
                self.hr_upscale_to_x = self.hr_resize_y * self.width // self.height
                self.hr_upscale_to_y = self.hr_resize_y
            else:
                target_w = self.hr_resize_x
                target_h = self.hr_resize_y
                src_ratio = self.width / self.height
                dst_ratio = self.hr_resize_x / self.hr_resize_y

                if src_ratio < dst_ratio:
                    self.hr_upscale_to_x = self.hr_resize_x
                    self.hr_upscale_to_y = self.hr_resize_x * self.height // self.width
                else:
                    self.hr_upscale_to_x = self.hr_resize_y * self.width // self.height
                    self.hr_upscale_to_y = self.hr_resize_y

                self.truncate_x = (self.hr_upscale_to_x - target_w) // opt_f
                self.truncate_y = (self.hr_upscale_to_y - target_h) // opt_f

    def init(self, all_prompts, all_seeds, all_subseeds):
        if self.enable_hr:
            if self.hr_checkpoint_name:
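A worked example of the arithmetic in calculate_target_resolution: with a 512x768 first pass and Hires resize set to 1024x1024, src_ratio (512/768, about 0.667) is below dst_ratio (1.0), so the width is matched and the excess height is later trimmed via truncate_y:

    width, height = 512, 768
    hr_resize_x, hr_resize_y = 1024, 1024
    opt_f = 8  # latent-to-pixel scale factor

    hr_upscale_to_x = hr_resize_x                          # 1024
    hr_upscale_to_y = hr_resize_x * height // width        # 1536
    truncate_y = (hr_upscale_to_y - hr_resize_y) // opt_f  # 64 latent rows (512 px) to crop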
@ -1021,43 +1013,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
            if not any(x.name == self.hr_upscaler for x in shared.sd_upscalers):
                raise Exception(f"could not find upscaler named {self.hr_upscaler}")

            if opts.use_old_hires_fix_width_height and self.applied_old_hires_behavior_to != (self.width, self.height):
                self.hr_resize_x = self.width
                self.hr_resize_y = self.height
                self.hr_upscale_to_x = self.width
                self.hr_upscale_to_y = self.height

                self.width, self.height = old_hires_fix_first_pass_dimensions(self.width, self.height)
                self.applied_old_hires_behavior_to = (self.width, self.height)

            if self.hr_resize_x == 0 and self.hr_resize_y == 0:
                self.extra_generation_params["Hires upscale"] = self.hr_scale
                self.hr_upscale_to_x = int(self.width * self.hr_scale)
                self.hr_upscale_to_y = int(self.height * self.hr_scale)
            else:
                self.extra_generation_params["Hires resize"] = f"{self.hr_resize_x}x{self.hr_resize_y}"

                if self.hr_resize_y == 0:
                    self.hr_upscale_to_x = self.hr_resize_x
                    self.hr_upscale_to_y = self.hr_resize_x * self.height // self.width
                elif self.hr_resize_x == 0:
                    self.hr_upscale_to_x = self.hr_resize_y * self.width // self.height
                    self.hr_upscale_to_y = self.hr_resize_y
                else:
                    target_w = self.hr_resize_x
                    target_h = self.hr_resize_y
                    src_ratio = self.width / self.height
                    dst_ratio = self.hr_resize_x / self.hr_resize_y

                    if src_ratio < dst_ratio:
                        self.hr_upscale_to_x = self.hr_resize_x
                        self.hr_upscale_to_y = self.hr_resize_x * self.height // self.width
                    else:
                        self.hr_upscale_to_x = self.hr_resize_y * self.width // self.height
                        self.hr_upscale_to_y = self.hr_resize_y

                    self.truncate_x = (self.hr_upscale_to_x - target_w) // opt_f
                    self.truncate_y = (self.hr_upscale_to_y - target_h) // opt_f
            self.calculate_target_resolution()

            if not state.processing_has_refined_job_count:
                if state.job_count == -1:
@ -1076,7 +1032,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
    def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts):
        self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model)

        x = create_random_tensors([opt_C, self.height // opt_f, self.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)
        x = self.rng.next()
        samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x))
        del x

@ -1164,7 +1120,8 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):

        samples = samples[:, :, self.truncate_y//2:samples.shape[2]-(self.truncate_y+1)//2, self.truncate_x//2:samples.shape[3]-(self.truncate_x+1)//2]

        noise = create_random_tensors(samples.shape[1:], seeds=seeds, subseeds=subseeds, subseed_strength=subseed_strength, p=self)
        self.rng = rng.ImageRNG(samples.shape[1:], self.seeds, subseeds=self.subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w)
        noise = self.rng.next()

        # GC now before running the next img2img to prevent running out of memory
        devices.torch_gc()

@ -1429,7 +1386,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
        self.image_conditioning = self.img2img_image_conditioning(image, self.init_latent, image_mask)

    def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts):
        x = create_random_tensors([opt_C, self.height // opt_f, self.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)
        x = self.rng.next()

        if self.initial_noise_multiplier != 1.0:
            self.extra_generation_params["Noise multiplier"] = self.initial_noise_multiplier
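Both sample() methods now share one contract with the new modules/rng.py below: an ImageRNG is seeded per batch, its first next() call returns the seeded initial latents, and later calls return continuation noise from the same generators. A minimal sketch of that contract (shapes illustrative; runs inside the webui process):

    from modules import rng

    r = rng.ImageRNG((4, 64, 64), seeds=[42, 43])
    x0 = r.next()  # first call: seeded initial latents, shape (2, 4, 64, 64)
    n1 = r.next()  # subsequent calls: fresh noise from the same per-seed generators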
@ -0,0 +1,170 @@
import torch

from modules import devices, rng_philox, shared


def randn(seed, shape, generator=None):
    """Generate a tensor with random numbers from a normal distribution using seed.

    Uses the seed parameter to set the global torch seed; to generate more with that seed, use randn_like/randn_without_seed."""

    manual_seed(seed)

    if shared.opts.randn_source == "NV":
        return torch.asarray((generator or nv_rng).randn(shape), device=devices.device)

    if shared.opts.randn_source == "CPU" or devices.device.type == 'mps':
        return torch.randn(shape, device=devices.cpu, generator=generator).to(devices.device)

    return torch.randn(shape, device=devices.device, generator=generator)


def randn_local(seed, shape):
    """Generate a tensor with random numbers from a normal distribution using seed.

    Does not change the global random number generator. You can only generate the seed's first tensor using this function."""

    if shared.opts.randn_source == "NV":
        rng = rng_philox.Generator(seed)
        return torch.asarray(rng.randn(shape), device=devices.device)

    local_device = devices.cpu if shared.opts.randn_source == "CPU" or devices.device.type == 'mps' else devices.device
    local_generator = torch.Generator(local_device).manual_seed(int(seed))
    return torch.randn(shape, device=local_device, generator=local_generator).to(devices.device)


def randn_like(x):
    """Generate a tensor with random numbers from a normal distribution using the previously initialized generator.

    Use either randn() or manual_seed() to initialize the generator."""

    if shared.opts.randn_source == "NV":
        return torch.asarray(nv_rng.randn(x.shape), device=x.device, dtype=x.dtype)

    if shared.opts.randn_source == "CPU" or x.device.type == 'mps':
        return torch.randn_like(x, device=devices.cpu).to(x.device)

    return torch.randn_like(x)


def randn_without_seed(shape, generator=None):
    """Generate a tensor with random numbers from a normal distribution using the previously initialized generator.

    Use either randn() or manual_seed() to initialize the generator."""

    if shared.opts.randn_source == "NV":
        return torch.asarray((generator or nv_rng).randn(shape), device=devices.device)

    if shared.opts.randn_source == "CPU" or devices.device.type == 'mps':
        return torch.randn(shape, device=devices.cpu, generator=generator).to(devices.device)

    return torch.randn(shape, device=devices.device, generator=generator)


def manual_seed(seed):
    """Set up a global random number generator using the specified seed."""

    if shared.opts.randn_source == "NV":
        global nv_rng
        nv_rng = rng_philox.Generator(seed)
        return

    torch.manual_seed(seed)


def create_generator(seed):
    if shared.opts.randn_source == "NV":
        return rng_philox.Generator(seed)

    device = devices.cpu if shared.opts.randn_source == "CPU" or devices.device.type == 'mps' else devices.device
    generator = torch.Generator(device).manual_seed(int(seed))
    return generator


# from https://discuss.pytorch.org/t/help-regarding-slerp-function-for-generative-model-sampling/32475/3
def slerp(val, low, high):
    low_norm = low/torch.norm(low, dim=1, keepdim=True)
    high_norm = high/torch.norm(high, dim=1, keepdim=True)
    dot = (low_norm*high_norm).sum(1)

    if dot.mean() > 0.9995:
        return low * val + high * (1 - val)

    omega = torch.acos(dot)
    so = torch.sin(omega)
    res = (torch.sin((1.0-val)*omega)/so).unsqueeze(1)*low + (torch.sin(val*omega)/so).unsqueeze(1) * high
    return res


class ImageRNG:
    def __init__(self, shape, seeds, subseeds=None, subseed_strength=0.0, seed_resize_from_h=0, seed_resize_from_w=0):
        self.shape = shape
        self.seeds = seeds
        self.subseeds = subseeds
        self.subseed_strength = subseed_strength
        self.seed_resize_from_h = seed_resize_from_h
        self.seed_resize_from_w = seed_resize_from_w

        self.generators = [create_generator(seed) for seed in seeds]

        self.is_first = True

    def first(self):
        noise_shape = self.shape if self.seed_resize_from_h <= 0 or self.seed_resize_from_w <= 0 else (self.shape[0], self.seed_resize_from_h // 8, self.seed_resize_from_w // 8)

        xs = []

        for i, (seed, generator) in enumerate(zip(self.seeds, self.generators)):
            subnoise = None
            if self.subseeds is not None and self.subseed_strength != 0:
                subseed = 0 if i >= len(self.subseeds) else self.subseeds[i]
                subnoise = randn(subseed, noise_shape)

            if noise_shape != self.shape:
                noise = randn(seed, noise_shape)
            else:
                noise = randn(seed, self.shape, generator=generator)

            if subnoise is not None:
                noise = slerp(self.subseed_strength, noise, subnoise)

            if noise_shape != self.shape:
                x = randn(seed, self.shape, generator=generator)
                dx = (self.shape[2] - noise_shape[2]) // 2
                dy = (self.shape[1] - noise_shape[1]) // 2
                w = noise_shape[2] if dx >= 0 else noise_shape[2] + 2 * dx
                h = noise_shape[1] if dy >= 0 else noise_shape[1] + 2 * dy
                tx = 0 if dx < 0 else dx
                ty = 0 if dy < 0 else dy
                dx = max(-dx, 0)
                dy = max(-dy, 0)

                x[:, ty:ty + h, tx:tx + w] = noise[:, dy:dy + h, dx:dx + w]
                noise = x

            xs.append(noise)

        eta_noise_seed_delta = shared.opts.eta_noise_seed_delta or 0
        if eta_noise_seed_delta:
            self.generators = [create_generator(seed + eta_noise_seed_delta) for seed in self.seeds]

        return torch.stack(xs).to(shared.device)

    def next(self):
        if self.is_first:
            self.is_first = False
            return self.first()

        xs = []
        for generator in self.generators:
            x = randn_without_seed(self.shape, generator=generator)
            xs.append(x)

        return torch.stack(xs).to(shared.device)


devices.randn = randn
devices.randn_local = randn_local
devices.randn_like = randn_like
devices.randn_without_seed = randn_without_seed
devices.manual_seed = manual_seed
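A quick behavior check for the slerp above in the typical case where noise and subnoise are nearly orthogonal (independent gaussian draws), so the spherical branch is taken: subseed strength 0 reproduces the seed's noise and strength 1 the subseed's. The linear fallback only triggers for nearly parallel inputs.

    import torch

    from modules.rng import slerp

    torch.manual_seed(0)
    a = torch.randn(1, 4, 8, 8)
    b = torch.randn(1, 4, 8, 8)

    assert torch.allclose(slerp(0.0, a, b), a, atol=1e-5)  # strength 0 -> seed noise
    assert torch.allclose(slerp(1.0, a, b), b, atol=1e-5)  # strength 1 -> subseed noise
    mid = slerp(0.5, a, b)  # anything in between interpolates along the great circle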
@ -14,7 +14,7 @@ import ldm.modules.midas as midas

from ldm.util import instantiate_from_config

from modules import paths, shared, modelloader, devices, script_callbacks, sd_vae, sd_disable_initialization, errors, hashes, sd_models_config, sd_unet, sd_models_xl, cache
from modules import paths, shared, modelloader, devices, script_callbacks, sd_vae, sd_disable_initialization, errors, hashes, sd_models_config, sd_unet, sd_models_xl, cache, extra_networks, processing, lowvram, sd_hijack
from modules.timer import Timer
import tomesd
@ -68,7 +68,9 @@ class CheckpointInfo:
        self.title = name if self.shorthash is None else f'{name} [{self.shorthash}]'
        self.short_title = self.name_for_extra if self.shorthash is None else f'{self.name_for_extra} [{self.shorthash}]'

        self.ids = [self.hash, self.model_name, self.title, name, self.name_for_extra, f'{name} [{self.hash}]'] + ([self.shorthash, self.sha256, f'{self.name} [{self.shorthash}]'] if self.shorthash else [])
        self.ids = [self.hash, self.model_name, self.title, name, self.name_for_extra, f'{name} [{self.hash}]']
        if self.shorthash:
            self.ids += [self.shorthash, self.sha256, f'{self.name} [{self.shorthash}]', f'{self.name_for_extra} [{self.shorthash}]']

    def register(self):
        checkpoints_list[self.title] = self
@ -80,10 +82,14 @@ class CheckpointInfo:
        if self.sha256 is None:
            return

        self.shorthash = self.sha256[0:10]
        shorthash = self.sha256[0:10]
        if self.shorthash == self.sha256[0:10]:
            return self.shorthash

        self.shorthash = shorthash

        if self.shorthash not in self.ids:
            self.ids += [self.shorthash, self.sha256, f'{self.name} [{self.shorthash}]']
            self.ids += [self.shorthash, self.sha256, f'{self.name} [{self.shorthash}]', f'{self.name_for_extra} [{self.shorthash}]']

        checkpoints_list.pop(self.title, None)
        self.title = f'{self.name} [{self.shorthash}]'
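Illustration of what the expanded ids list buys: after this change a checkpoint can also be addressed by its extension-less name plus shorthash. All values below are made up for a hypothetical file model_a.safetensors:

    ids = [
        "1a2b3c4d",                          # legacy hash
        "model_a",                           # model_name
        "model_a.safetensors [0123456789]",  # title
        "model_a.safetensors",               # name
        "model_a",                           # name_for_extra
        "model_a.safetensors [1a2b3c4d]",
        "0123456789",                        # shorthash
        "0123456789abcdef...",               # full sha256
        "model_a.safetensors [0123456789]",
        "model_a [0123456789]",              # new: name_for_extra + shorthash
    ]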
@ -489,7 +495,6 @@ model_data = SdModelData()


def get_empty_cond(sd_model):
    from modules import extra_networks, processing

    p = processing.StableDiffusionProcessingTxt2Img()
    extra_networks.activate(p, {})

@ -502,8 +507,6 @@ def get_empty_cond(sd_model):


def send_model_to_cpu(m):
    from modules import lowvram

    if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
        lowvram.send_everything_to_cpu()
    else:

@ -513,8 +516,6 @@ def send_model_to_cpu(m):


def send_model_to_device(m):
    from modules import lowvram

    if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
        lowvram.setup_for_low_vram(m, shared.cmd_opts.medvram)
    else:

@ -639,6 +640,8 @@ def reuse_model_from_already_loaded(sd_model, checkpoint_info, timer):
        timer.record("send model to device")

        model_data.set_sd_model(already_loaded)
        shared.opts.data["sd_model_checkpoint"] = already_loaded.sd_checkpoint_info.title
        shared.opts.data["sd_checkpoint_hash"] = already_loaded.sd_checkpoint_info.sha256
        print(f"Using already loaded model {already_loaded.sd_checkpoint_info.title}: done in {timer.summary()}")
        return model_data.sd_model
    elif shared.opts.sd_checkpoints_limit > 1 and len(model_data.loaded_sd_models) < shared.opts.sd_checkpoints_limit:

@ -658,7 +661,6 @@ def reuse_model_from_already_loaded(sd_model, checkpoint_info, timer):


def reload_model_weights(sd_model=None, info=None):
    from modules import devices, sd_hijack
    checkpoint_info = info or select_checkpoint()

    timer = Timer()

@ -721,7 +723,6 @@ def reload_model_weights(sd_model=None, info=None):


def unload_model_weights(sd_model=None, info=None):
    from modules import devices, sd_hijack
    timer = Timer()

    if model_data.sd_model:
@ -2,7 +2,7 @@ import os

import torch

from modules import shared, paths, sd_disable_initialization
from modules import shared, paths, sd_disable_initialization, devices

sd_configs_path = shared.sd_configs_path
sd_repo_configs_path = os.path.join(paths.paths['Stable Diffusion'], "configs", "stable-diffusion")

@ -29,7 +29,6 @@ def is_using_v_parameterization_for_sd2(state_dict):
    """

    import ldm.modules.diffusionmodules.openaimodel
    from modules import devices

    device = devices.cpu
@ -1,5 +1,5 @@
import inspect
from collections import namedtuple, deque
from collections import namedtuple
import numpy as np
import torch
from PIL import Image

@ -161,10 +161,15 @@ def apply_refiner(sampler):


class TorchHijack:
    def __init__(self, sampler_noises):
        # Using a deque to efficiently receive the sampler_noises in the same order as the previous index-based
        # implementation.
        self.sampler_noises = deque(sampler_noises)
    """This is here to replace torch.randn_like of k-diffusion.

    k-diffusion has random_sampler argument for most samplers, but not for all, so
    this is needed to properly replace every use of torch.randn_like.

    We need to replace it so that images generated in batches are the same as images generated individually."""

    def __init__(self, p):
        self.rng = p.rng

    def __getattr__(self, item):
        if item == 'randn_like':

@ -176,12 +181,7 @@ class TorchHijack:
        raise AttributeError(f"'{type(self).__name__}' object has no attribute '{item}'")

    def randn_like(self, x):
        if self.sampler_noises:
            noise = self.sampler_noises.popleft()
            if noise.shape == x.shape:
                return noise

        return devices.randn_like(x)
        return self.rng.next()


class Sampler:

@ -248,7 +248,7 @@ class Sampler:
        self.eta = p.eta if p.eta is not None else getattr(opts, self.eta_option_field, 0.0)
        self.s_min_uncond = getattr(p, 's_min_uncond', 0.0)

        k_diffusion.sampling.torch = TorchHijack(self.sampler_noises if self.sampler_noises is not None else [])
        k_diffusion.sampling.torch = TorchHijack(p)

        extra_params_kwargs = {}
        for param_name in self.extra_params:
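The hijack in miniature: assigning an object to k_diffusion.sampling.torch makes the sampler's torch.randn_like calls land on the object, while __getattr__ forwards everything else to the real torch module. A stripped-down sketch of that pattern (not the commit's exact class):

    import torch

    class MiniTorchHijack:
        def __init__(self, image_rng):
            self.rng = image_rng

        def randn_like(self, x):
            # noise requests are routed through the shared per-batch ImageRNG
            return self.rng.next()

        def __getattr__(self, item):
            # any other attribute falls through to the real torch module
            return getattr(torch, item)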
@ -1,7 +1,8 @@
import torch
import inspect
import k_diffusion.sampling
from modules import sd_samplers_common, sd_samplers_extra, sd_samplers_cfg_denoiser
from modules import sd_samplers_common, sd_samplers_extra
from modules.sd_samplers_cfg_denoiser import CFGDenoiser

from modules.shared import opts
import modules.shared as shared
@ -1,5 +1,6 @@
import torch
import inspect
import sys
from modules import devices, sd_samplers_common, sd_samplers_timesteps_impl
from modules.sd_samplers_cfg_denoiser import CFGDenoiser

@ -152,3 +153,6 @@ class CompVisSampler(sd_samplers_common.Sampler):

        return samples


sys.modules['modules.sd_samplers_compvis'] = sys.modules[__name__]
VanillaStableDiffusionSampler = CompVisSampler  # temp. compatibility with older extensions
@ -2,7 +2,8 @@ import os
import collections
from dataclasses import dataclass

from modules import paths, shared, devices, script_callbacks, sd_models, extra_networks
from modules import paths, shared, devices, script_callbacks, sd_models, extra_networks, lowvram, sd_hijack, hashes

import glob
from copy import deepcopy

@ -19,6 +20,20 @@ checkpoint_info = None
checkpoints_loaded = collections.OrderedDict()


def get_loaded_vae_name():
    if loaded_vae_file is None:
        return None

    return os.path.basename(loaded_vae_file)


def get_loaded_vae_hash():
    if loaded_vae_file is None:
        return None

    return hashes.sha256(loaded_vae_file, 'vae')[0:10]


def get_base_vae(model):
    if base_vae is not None and checkpoint_info == model.sd_checkpoint_info and model:
        return base_vae

@ -231,8 +246,6 @@ unspecified = object()


def reload_vae_weights(sd_model=None, vae_file=unspecified):
    from modules import lowvram, devices, sd_hijack

    if not sd_model:
        sd_model = shared.sd_model
File diff suppressed because it is too large
@ -0,0 +1,18 @@
import os

import launch
from modules import cmd_args, script_loading
from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir  # noqa: F401

parser = cmd_args.parser

script_loading.preload_extensions(extensions_dir, parser, extension_list=launch.list_extensions(launch.args.ui_settings_file))
script_loading.preload_extensions(extensions_builtin_dir, parser)

if os.environ.get('IGNORE_CMD_ARGS_ERRORS', None) is None:
    cmd_opts = parser.parse_args()
else:
    cmd_opts, _ = parser.parse_known_args()


cmd_opts.disable_extension_access = (cmd_opts.share or cmd_opts.listen or cmd_opts.server_name) and not cmd_opts.enable_insecure_extension_access
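Usage note: the environment variable has to be set before this module is imported, e.g.:

    import os

    os.environ["IGNORE_CMD_ARGS_ERRORS"] = "1"  # unknown CLI flags are ignored instead of aborting

    from modules import shared_cmd_options  # takes the parse_known_args() path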
@ -0,0 +1,66 @@
import os

import gradio as gr

from modules import errors, shared
from modules.paths_internal import script_path


# https://huggingface.co/datasets/freddyaboulton/gradio-theme-subdomains/resolve/main/subdomains.json
gradio_hf_hub_themes = [
    "gradio/base",
    "gradio/glass",
    "gradio/monochrome",
    "gradio/seafoam",
    "gradio/soft",
    "gradio/dracula_test",
    "abidlabs/dracula_test",
    "abidlabs/Lime",
    "abidlabs/pakistan",
    "Ama434/neutral-barlow",
    "dawood/microsoft_windows",
    "finlaymacklon/smooth_slate",
    "Franklisi/darkmode",
    "freddyaboulton/dracula_revamped",
    "freddyaboulton/test-blue",
    "gstaff/xkcd",
    "Insuz/Mocha",
    "Insuz/SimpleIndigo",
    "JohnSmith9982/small_and_pretty",
    "nota-ai/theme",
    "nuttea/Softblue",
    "ParityError/Anime",
    "reilnuud/polite",
    "remilia/Ghostly",
    "rottenlittlecreature/Moon_Goblin",
    "step-3-profit/Midnight-Deep",
    "Taithrah/Minimal",
    "ysharma/huggingface",
    "ysharma/steampunk"
]


def reload_gradio_theme(theme_name=None):
    if not theme_name:
        theme_name = shared.opts.gradio_theme

    default_theme_args = dict(
        font=["Source Sans Pro", 'ui-sans-serif', 'system-ui', 'sans-serif'],
        font_mono=['IBM Plex Mono', 'ui-monospace', 'Consolas', 'monospace'],
    )

    if theme_name == "Default":
        shared.gradio_theme = gr.themes.Default(**default_theme_args)
    else:
        try:
            theme_cache_dir = os.path.join(script_path, 'tmp', 'gradio_themes')
            theme_cache_path = os.path.join(theme_cache_dir, f'{theme_name.replace("/", "_")}.json')
            if shared.opts.gradio_themes_cache and os.path.exists(theme_cache_path):
                shared.gradio_theme = gr.themes.ThemeClass.load(theme_cache_path)
            else:
                os.makedirs(theme_cache_dir, exist_ok=True)
                shared.gradio_theme = gr.themes.ThemeClass.from_hub(theme_name)
                shared.gradio_theme.dump(theme_cache_path)
        except Exception as e:
            errors.display(e, "changing gradio theme")
            shared.gradio_theme = gr.themes.Default(**default_theme_args)
@ -0,0 +1,49 @@
import os

import torch

from modules import shared
from modules.shared import cmd_opts


def initialize():
    """Initializes fields inside the shared module in a controlled manner.

    Should be called early because some other modules you can import might need these fields to be already set.
    """

    os.makedirs(cmd_opts.hypernetwork_dir, exist_ok=True)

    from modules import options, shared_options
    shared.options_templates = shared_options.options_templates
    shared.opts = options.Options(shared_options.options_templates, shared_options.restricted_opts)
    shared.restricted_opts = shared_options.restricted_opts
    if os.path.exists(shared.config_filename):
        shared.opts.load(shared.config_filename)

    from modules import devices
    devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_esrgan, devices.device_codeformer = \
        (devices.cpu if any(y in cmd_opts.use_cpu for y in [x, 'all']) else devices.get_optimal_device() for x in ['sd', 'interrogate', 'gfpgan', 'esrgan', 'codeformer'])

    devices.dtype = torch.float32 if cmd_opts.no_half else torch.float16
    devices.dtype_vae = torch.float32 if cmd_opts.no_half or cmd_opts.no_half_vae else torch.float16

    shared.device = devices.device
    shared.weight_load_location = None if cmd_opts.lowram else "cpu"

    from modules import shared_state
    shared.state = shared_state.State()

    from modules import styles
    shared.prompt_styles = styles.StyleDatabase(shared.styles_filename)

    from modules import interrogate
    shared.interrogator = interrogate.InterrogateModels("interrogate")

    from modules import shared_total_tqdm
    shared.total_tqdm = shared_total_tqdm.TotalTQDM()

    from modules import memmon, devices
    shared.mem_mon = memmon.MemUsageMonitor("MemMon", devices.device, shared.opts)
    shared.mem_mon.start()
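A sketch of the intended startup sequence implied by the docstring; nothing here is new API, just the ordering:

    from modules import shared_init

    shared_init.initialize()  # populates shared.opts, shared.device, shared.state, ...

    from modules import shared  # fields are now safe to read
    print(shared.device, shared.opts.samples_format)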
@ -1,3 +1,6 @@
import sys

from modules.shared_cmd_options import cmd_opts


def realesrgan_models_names():

@ -41,6 +44,28 @@ def refresh_unet_list():
    modules.sd_unet.list_unets()


def list_checkpoint_tiles():
    import modules.sd_models
    return modules.sd_models.checkpoint_tiles()


def refresh_checkpoints():
    import modules.sd_models
    return modules.sd_models.list_models()


def list_samplers():
    import modules.sd_samplers
    return modules.sd_samplers.all_samplers


def reload_hypernetworks():
    from modules.hypernetworks import hypernetwork
    from modules import shared

    shared.hypernetworks = hypernetwork.list_hypernetworks(cmd_opts.hypernetwork_dir)


ui_reorder_categories_builtin_items = [
    "inpaint",
    "sampler",

@ -67,3 +92,27 @@ def ui_reorder_categories():
        yield from sections

    yield "scripts"


class Shared(sys.modules[__name__].__class__):
    """
    this class is here to provide sd_model field as a property, so that it can be created and loaded on demand rather than
    at program startup.
    """

    sd_model_val = None

    @property
    def sd_model(self):
        import modules.sd_models

        return modules.sd_models.model_data.get_sd_model()

    @sd_model.setter
    def sd_model(self, value):
        import modules.sd_models

        modules.sd_models.model_data.set_sd_model(value)


sys.modules['modules.shared'].__class__ = Shared
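Effect of the class swap above: attribute access on the modules.shared module itself now goes through the property, so the checkpoint is loaded lazily on first use rather than at import time:

    import modules.shared as shared

    model = shared.sd_model  # first access triggers model_data.get_sd_model(), loading on demand
    shared.sd_model = model  # assignment is intercepted by the property setter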
@ -0,0 +1,318 @@
import gradio as gr

from modules import localization, ui_components, shared_items, shared, interrogate, shared_gradio_themes
from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir  # noqa: F401
from modules.shared_cmd_options import cmd_opts
from modules.options import options_section, OptionInfo, OptionHTML

options_templates = {}
hide_dirs = shared.hide_dirs

restricted_opts = {
    "samples_filename_pattern",
    "directories_filename_pattern",
    "outdir_samples",
    "outdir_txt2img_samples",
    "outdir_img2img_samples",
    "outdir_extras_samples",
    "outdir_grids",
    "outdir_txt2img_grids",
    "outdir_save",
    "outdir_init_images"
}

options_templates.update(options_section(('saving-images', "Saving images/grids"), {
    "samples_save": OptionInfo(True, "Always save all generated images"),
    "samples_format": OptionInfo('png', 'File format for images'),
    "samples_filename_pattern": OptionInfo("", "Images filename pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"),
    "save_images_add_number": OptionInfo(True, "Add number to filename when saving", component_args=hide_dirs),

    "grid_save": OptionInfo(True, "Always save all generated image grids"),
    "grid_format": OptionInfo('png', 'File format for grids'),
    "grid_extended_filename": OptionInfo(False, "Add extended info (seed, prompt) to filename when saving grid"),
    "grid_only_if_multiple": OptionInfo(True, "Do not save grids consisting of one picture"),
    "grid_prevent_empty_spots": OptionInfo(False, "Prevent empty spots in grid (when set to autodetect)"),
    "grid_zip_filename_pattern": OptionInfo("", "Archive filename pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"),
    "n_rows": OptionInfo(-1, "Grid row count; use -1 for autodetect and 0 for it to be same as batch size", gr.Slider, {"minimum": -1, "maximum": 16, "step": 1}),
    "font": OptionInfo("", "Font for image grids that have text"),
    "grid_text_active_color": OptionInfo("#000000", "Text color for image grids", ui_components.FormColorPicker, {}),
    "grid_text_inactive_color": OptionInfo("#999999", "Inactive text color for image grids", ui_components.FormColorPicker, {}),
    "grid_background_color": OptionInfo("#ffffff", "Background color for image grids", ui_components.FormColorPicker, {}),

    "enable_pnginfo": OptionInfo(True, "Save text information about generation parameters as chunks to png files"),
    "save_txt": OptionInfo(False, "Create a text file next to every image with generation parameters."),
    "save_images_before_face_restoration": OptionInfo(False, "Save a copy of image before doing face restoration."),
    "save_images_before_highres_fix": OptionInfo(False, "Save a copy of image before applying highres fix."),
    "save_images_before_color_correction": OptionInfo(False, "Save a copy of image before applying color correction to img2img results"),
    "save_mask": OptionInfo(False, "For inpainting, save a copy of the greyscale mask"),
    "save_mask_composite": OptionInfo(False, "For inpainting, save a masked composite"),
    "jpeg_quality": OptionInfo(80, "Quality for saved jpeg images", gr.Slider, {"minimum": 1, "maximum": 100, "step": 1}),
    "webp_lossless": OptionInfo(False, "Use lossless compression for webp images"),
    "export_for_4chan": OptionInfo(True, "Save copy of large images as JPG").info("if the file size is above the limit, or either width or height are above the limit"),
    "img_downscale_threshold": OptionInfo(4.0, "File size limit for the above option, MB", gr.Number),
    "target_side_length": OptionInfo(4000, "Width/height limit for the above option, in pixels", gr.Number),
    "img_max_size_mp": OptionInfo(200, "Maximum image size", gr.Number).info("in megapixels"),

    "use_original_name_batch": OptionInfo(True, "Use original name for output filename during batch process in extras tab"),
    "use_upscaler_name_as_suffix": OptionInfo(False, "Use upscaler name as filename suffix in the extras tab"),
    "save_selected_only": OptionInfo(True, "When using 'Save' button, only save a single selected image"),
    "save_init_img": OptionInfo(False, "Save init images when using img2img"),

    "temp_dir": OptionInfo("", "Directory for temporary images; leave empty for default"),
    "clean_temp_dir_at_start": OptionInfo(False, "Cleanup non-default temporary directory when starting webui"),

    "save_incomplete_images": OptionInfo(False, "Save incomplete images").info("save images that have been interrupted in mid-generation; even if not saved, they will still show up in webui output."),
}))

options_templates.update(options_section(('saving-paths', "Paths for saving"), {
    "outdir_samples": OptionInfo("", "Output directory for images; if empty, defaults to three directories below", component_args=hide_dirs),
    "outdir_txt2img_samples": OptionInfo("outputs/txt2img-images", 'Output directory for txt2img images', component_args=hide_dirs),
    "outdir_img2img_samples": OptionInfo("outputs/img2img-images", 'Output directory for img2img images', component_args=hide_dirs),
    "outdir_extras_samples": OptionInfo("outputs/extras-images", 'Output directory for images from extras tab', component_args=hide_dirs),
    "outdir_grids": OptionInfo("", "Output directory for grids; if empty, defaults to two directories below", component_args=hide_dirs),
    "outdir_txt2img_grids": OptionInfo("outputs/txt2img-grids", 'Output directory for txt2img grids', component_args=hide_dirs),
    "outdir_img2img_grids": OptionInfo("outputs/img2img-grids", 'Output directory for img2img grids', component_args=hide_dirs),
    "outdir_save": OptionInfo("log/images", "Directory for saving images using the Save button", component_args=hide_dirs),
    "outdir_init_images": OptionInfo("outputs/init-images", "Directory for saving init images when using img2img", component_args=hide_dirs),
}))

options_templates.update(options_section(('saving-to-dirs', "Saving to a directory"), {
    "save_to_dirs": OptionInfo(True, "Save images to a subdirectory"),
    "grid_save_to_dirs": OptionInfo(True, "Save grids to a subdirectory"),
    "use_save_to_dirs_for_ui": OptionInfo(False, "When using \"Save\" button, save images to a subdirectory"),
    "directories_filename_pattern": OptionInfo("[date]", "Directory name pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"),
    "directories_max_prompt_words": OptionInfo(8, "Max prompt words for [prompt_words] pattern", gr.Slider, {"minimum": 1, "maximum": 20, "step": 1, **hide_dirs}),
}))

options_templates.update(options_section(('upscaling', "Upscaling"), {
    "ESRGAN_tile": OptionInfo(192, "Tile size for ESRGAN upscalers.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}).info("0 = no tiling"),
    "ESRGAN_tile_overlap": OptionInfo(8, "Tile overlap for ESRGAN upscalers.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}).info("Low values = visible seam"),
    "realesrgan_enabled_models": OptionInfo(["R-ESRGAN 4x+", "R-ESRGAN 4x+ Anime6B"], "Select which Real-ESRGAN models to show in the web UI.", gr.CheckboxGroup, lambda: {"choices": shared_items.realesrgan_models_names()}),
    "upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Dropdown, lambda: {"choices": [x.name for x in shared.sd_upscalers]}),
}))

options_templates.update(options_section(('face-restoration', "Face restoration"), {
    "face_restoration": OptionInfo(False, "Restore faces", infotext='Face restoration').info("will use a third-party model on generation result to reconstruct faces"),
    "face_restoration_model": OptionInfo("CodeFormer", "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in shared.face_restorers]}),
    "code_former_weight": OptionInfo(0.5, "CodeFormer weight", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}).info("0 = maximum effect; 1 = minimum effect"),
    "face_restoration_unload": OptionInfo(False, "Move face restoration model from VRAM into RAM after processing"),
}))

options_templates.update(options_section(('system', "System"), {
    "auto_launch_browser": OptionInfo("Local", "Automatically open webui in browser on startup", gr.Radio, lambda: {"choices": ["Disable", "Local", "Remote"]}),
    "show_warnings": OptionInfo(False, "Show warnings in console.").needs_reload_ui(),
    "show_gradio_deprecation_warnings": OptionInfo(True, "Show gradio deprecation warnings in console.").needs_reload_ui(),
    "memmon_poll_rate": OptionInfo(8, "VRAM usage polls per second during generation.", gr.Slider, {"minimum": 0, "maximum": 40, "step": 1}).info("0 = disable"),
    "samples_log_stdout": OptionInfo(False, "Always print all generation info to standard output"),
    "multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job."),
    "print_hypernet_extra": OptionInfo(False, "Print extra hypernetwork information to console."),
    "list_hidden_files": OptionInfo(True, "Load models/files in hidden directories").info("directory is hidden if its name starts with \".\""),
    "disable_mmap_load_safetensors": OptionInfo(False, "Disable memmapping for loading .safetensors files.").info("fixes very slow loading speed in some cases"),
    "hide_ldm_prints": OptionInfo(True, "Prevent Stability-AI's ldm/sgm modules from printing noise to console."),
}))

options_templates.update(options_section(('training', "Training"), {
    "unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training if possible. Saves VRAM."),
    "pin_memory": OptionInfo(False, "Turn on pin_memory for DataLoader. Makes training slightly faster but can increase memory usage."),
    "save_optimizer_state": OptionInfo(False, "Saves Optimizer state as separate *.optim file. Training of embedding or HN can be resumed with the matching optim file."),
    "save_training_settings_to_txt": OptionInfo(True, "Save textual inversion and hypernet settings to a text file whenever training starts."),
    "dataset_filename_word_regex": OptionInfo("", "Filename word regex"),
    "dataset_filename_join_string": OptionInfo(" ", "Filename join string"),
    "training_image_repeats_per_epoch": OptionInfo(1, "Number of repeats for a single input image per epoch; used only for displaying epoch number", gr.Number, {"precision": 0}),
    "training_write_csv_every": OptionInfo(500, "Save a csv containing the loss to log directory every N steps, 0 to disable"),
    "training_xattention_optimizations": OptionInfo(False, "Use cross attention optimizations while training"),
    "training_enable_tensorboard": OptionInfo(False, "Enable tensorboard logging."),
    "training_tensorboard_save_images": OptionInfo(False, "Save generated images within tensorboard."),
    "training_tensorboard_flush_every": OptionInfo(120, "How often, in seconds, to flush the pending tensorboard events and summaries to disk."),
}))

options_templates.update(options_section(('sd', "Stable Diffusion"), {
    "sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": shared_items.list_checkpoint_tiles()}, refresh=shared_items.refresh_checkpoints, infotext='Model hash'),
    "sd_checkpoints_limit": OptionInfo(1, "Maximum number of checkpoints loaded at the same time", gr.Slider, {"minimum": 1, "maximum": 10, "step": 1}),
    "sd_checkpoints_keep_in_cpu": OptionInfo(True, "Only keep one model on device").info("will keep models other than the currently used one in RAM rather than VRAM"),
    "sd_checkpoint_cache": OptionInfo(0, "Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}).info("obsolete; set to 0 and use the two settings above instead"),
    "sd_unet": OptionInfo("Automatic", "SD Unet", gr.Dropdown, lambda: {"choices": shared_items.sd_unet_items()}, refresh=shared_items.refresh_unet_list).info("choose Unet model: Automatic = use one with same filename as checkpoint; None = use Unet from checkpoint"),
    "enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds").needs_reload_ui(),
    "enable_emphasis": OptionInfo(True, "Enable emphasis").info("use (text) to make model pay more attention to text and [text] to make it pay less attention"),
    "enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"),
    "comma_padding_backtrack": OptionInfo(20, "Prompt word wrap length limit", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1}).info("in tokens - for texts shorter than specified, if they don't fit into 75 token limit, move them to the next 75 token chunk"),
    "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}, infotext="Clip skip").link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#clip-skip").info("ignore last layers of CLIP network; 1 ignores none, 2 ignores one layer"),
    "upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"),
    "randn_source": OptionInfo("GPU", "Random number generator source.", gr.Radio, {"choices": ["GPU", "CPU", "NV"]}).info("changes seeds drastically; use CPU to produce the same picture across different videocard vendors; use NV to produce same picture as on NVidia videocards"),
    "tiling": OptionInfo(False, "Tiling", infotext='Tiling').info("produce a tileable picture"),
}))

options_templates.update(options_section(('sdxl', "Stable Diffusion XL"), {
    "sdxl_crop_top": OptionInfo(0, "crop top coordinate"),
    "sdxl_crop_left": OptionInfo(0, "crop left coordinate"),
    "sdxl_refiner_low_aesthetic_score": OptionInfo(2.5, "SDXL low aesthetic score", gr.Number).info("used for refiner model negative prompt"),
    "sdxl_refiner_high_aesthetic_score": OptionInfo(6.0, "SDXL high aesthetic score", gr.Number).info("used for refiner model prompt"),
}))

options_templates.update(options_section(('vae', "VAE"), {
    "sd_vae_explanation": OptionHTML("""
<abbr title='Variational autoencoder'>VAE</abbr> is a neural network that transforms a standard <abbr title='red/green/blue'>RGB</abbr>
image into latent space representation and back. Latent space representation is what stable diffusion is working on during sampling
(i.e. when the progress bar is between empty and full). For txt2img, VAE is used to create a resulting image after the sampling is finished.
For img2img, VAE is used to process user's input image before the sampling, and to create an image after sampling.
"""),
    "sd_vae_checkpoint_cache": OptionInfo(0, "VAE Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
    "sd_vae": OptionInfo("Automatic", "SD VAE", gr.Dropdown, lambda: {"choices": shared_items.sd_vae_items()}, refresh=shared_items.refresh_vae_list, infotext='VAE').info("choose VAE model: Automatic = use one with same filename as checkpoint; None = use VAE from checkpoint"),
    "sd_vae_overrides_per_model_preferences": OptionInfo(True, "Selected VAE overrides per-model preferences").info("you can set per-model VAE either by editing user metadata for checkpoints, or by making the VAE have same name as checkpoint"),
    "auto_vae_precision": OptionInfo(True, "Automatically revert VAE to 32-bit floats").info("triggers when a tensor with NaNs is produced in VAE; disabling the option in this case will result in a black square image"),
    "sd_vae_encode_method": OptionInfo("Full", "VAE type for encode", gr.Radio, {"choices": ["Full", "TAESD"]}, infotext='VAE Encoder').info("method to encode image to latent (use in img2img, hires-fix or inpaint mask)"),
    "sd_vae_decode_method": OptionInfo("Full", "VAE type for decode", gr.Radio, {"choices": ["Full", "TAESD"]}, infotext='VAE Decoder').info("method to decode latent to image"),
}))

options_templates.update(options_section(('img2img', "img2img"), {
    "inpainting_mask_weight": OptionInfo(1.0, "Inpainting conditioning mask strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}, infotext='Conditional mask weight'),
    "initial_noise_multiplier": OptionInfo(1.0, "Noise multiplier for img2img", gr.Slider, {"minimum": 0.5, "maximum": 1.5, "step": 0.01}, infotext='Noise multiplier'),
    "img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."),
    "img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies.").info("normally you'd do less with less denoising"),
    "img2img_background_color": OptionInfo("#ffffff", "With img2img, fill transparent parts of the input image with this color.", ui_components.FormColorPicker, {}),
    "img2img_editor_height": OptionInfo(720, "Height of the image editor", gr.Slider, {"minimum": 80, "maximum": 1600, "step": 1}).info("in pixels").needs_reload_ui(),
    "img2img_sketch_default_brush_color": OptionInfo("#ffffff", "Sketch initial brush color", ui_components.FormColorPicker, {}).info("default brush color of img2img sketch").needs_reload_ui(),
    "img2img_inpaint_mask_brush_color": OptionInfo("#ffffff", "Inpaint mask brush color", ui_components.FormColorPicker, {}).info("brush color of inpaint mask").needs_reload_ui(),
    "img2img_inpaint_sketch_default_brush_color": OptionInfo("#ffffff", "Inpaint sketch initial brush color", ui_components.FormColorPicker, {}).info("default brush color of img2img inpaint sketch").needs_reload_ui(),
    "return_mask": OptionInfo(False, "For inpainting, include the greyscale mask in results for web"),
    "return_mask_composite": OptionInfo(False, "For inpainting, include masked composite in results for web"),
}))

options_templates.update(options_section(('optimizations', "Optimizations"), {
    "cross_attention_optimization": OptionInfo("Automatic", "Cross attention optimization", gr.Dropdown, lambda: {"choices": shared_items.cross_attention_optimizations()}),
    "s_min_uncond": OptionInfo(0.0, "Negative Guidance minimum sigma", gr.Slider, {"minimum": 0.0, "maximum": 15.0, "step": 0.01}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9177").info("skip negative prompt for some steps when the image is almost ready; 0=disable, higher=faster"),
    "token_merging_ratio": OptionInfo(0.0, "Token merging ratio", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}, infotext='Token merging ratio').link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9256").info("0=disable, higher=faster"),
    "token_merging_ratio_img2img": OptionInfo(0.0, "Token merging ratio for img2img", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"),
    "token_merging_ratio_hr": OptionInfo(0.0, "Token merging ratio for high-res pass", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}, infotext='Token merging ratio hr').info("only applies if non-zero and overrides above"),
    "pad_cond_uncond": OptionInfo(False, "Pad prompt/negative prompt to be same length", infotext='Pad conds').info("improves performance when prompt and negative prompt have different lengths; changes seeds"),
    "persistent_cond_cache": OptionInfo(True, "Persistent cond cache").info("Do not recalculate conds from prompts if prompts have not changed since previous calculation"),
}))

options_templates.update(options_section(('compatibility', "Compatibility"), {
    "use_old_emphasis_implementation": OptionInfo(False, "Use old emphasis implementation. Can be useful to reproduce old seeds."),
    "use_old_karras_scheduler_sigmas": OptionInfo(False, "Use old karras scheduler sigmas (0.1 to 10)."),
    "no_dpmpp_sde_batch_determinism": OptionInfo(False, "Do not make DPM++ SDE deterministic across different batch sizes."),
    "use_old_hires_fix_width_height": OptionInfo(False, "For hires fix, use width/height sliders to set final resolution rather than first pass (disables Upscale by, Resize width/height to)."),
    "dont_fix_second_order_samplers_schedule": OptionInfo(False, "Do not fix prompt schedule for second order samplers."),
    "hires_fix_use_firstpass_conds": OptionInfo(False, "For hires fix, calculate conds of second pass using extra networks of first pass."),
}))

options_templates.update(options_section(('interrogate', "Interrogate"), {
    "interrogate_keep_models_in_memory": OptionInfo(False, "Keep models in VRAM"),
    "interrogate_return_ranks": OptionInfo(False, "Include ranks of model tags matches in results.").info("booru only"),
    "interrogate_clip_num_beams": OptionInfo(1, "BLIP: num_beams", gr.Slider, {"minimum": 1, "maximum": 16, "step": 1}),
    "interrogate_clip_min_length": OptionInfo(24, "BLIP: minimum description length", gr.Slider, {"minimum": 1, "maximum": 128, "step": 1}),
    "interrogate_clip_max_length": OptionInfo(48, "BLIP: maximum description length", gr.Slider, {"minimum": 1, "maximum": 256, "step": 1}),
    "interrogate_clip_dict_limit": OptionInfo(1500, "CLIP: maximum number of lines in text file").info("0 = No limit"),
    "interrogate_clip_skip_categories": OptionInfo([], "CLIP: skip inquire categories", gr.CheckboxGroup, lambda: {"choices": interrogate.category_types()}, refresh=interrogate.category_types),
    "interrogate_deepbooru_score_threshold": OptionInfo(0.5, "deepbooru: score threshold", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}),
    "deepbooru_sort_alpha": OptionInfo(True, "deepbooru: sort tags alphabetically").info("if not: sort by score"),
    "deepbooru_use_spaces": OptionInfo(True, "deepbooru: use spaces in tags").info("if not: use underscores"),
    "deepbooru_escape": OptionInfo(True, "deepbooru: escape (\\) brackets").info("so they are used as literal brackets and not for emphasis"),
    "deepbooru_filter_tags": OptionInfo("", "deepbooru: filter out those tags").info("separate by comma"),
}))

options_templates.update(options_section(('extra_networks', "Extra Networks"), {
    "extra_networks_show_hidden_directories": OptionInfo(True, "Show hidden directories").info("directory is hidden if its name starts with \".\"."),
    "extra_networks_hidden_models": OptionInfo("When searched", "Show cards for models in hidden directories", gr.Radio, {"choices": ["Always", "When searched", "Never"]}).info('"When searched" option will only show the item when the search string has 4 characters or more'),
    "extra_networks_default_multiplier": OptionInfo(1.0, "Default multiplier for extra networks", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}),
    "extra_networks_card_width": OptionInfo(0, "Card width for Extra Networks").info("in pixels"),
    "extra_networks_card_height": OptionInfo(0, "Card height for Extra Networks").info("in pixels"),
    "extra_networks_card_text_scale": OptionInfo(1.0, "Card text scale", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}).info("1 = original size"),
    "extra_networks_card_show_desc": OptionInfo(True, "Show description on card"),
    "extra_networks_add_text_separator": OptionInfo(" ", "Extra networks separator").info("extra text to add before <...> when adding extra network to prompt"),
    "ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order").needs_reload_ui(),
    "textual_inversion_print_at_load": OptionInfo(False, "Print a list of Textual Inversion embeddings when loading model"),
    "textual_inversion_add_hashes_to_infotext": OptionInfo(True, "Add Textual Inversion hashes to infotext"),
    "sd_hypernetwork": OptionInfo("None", "Add hypernetwork to prompt", gr.Dropdown, lambda: {"choices": ["None", *shared.hypernetworks]}, refresh=shared_items.reload_hypernetworks),
}))

options_templates.update(options_section(('ui', "User interface"), {
    "localization": OptionInfo("None", "Localization", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)).needs_reload_ui(),
    "gradio_theme": OptionInfo("Default", "Gradio theme", ui_components.DropdownEditable, lambda: {"choices": ["Default"] + shared_gradio_themes.gradio_hf_hub_themes}).info("you can also manually enter any of themes from the <a href='https://huggingface.co/spaces/gradio/theme-gallery'>gallery</a>.").needs_reload_ui(),
    "gradio_themes_cache": OptionInfo(True, "Cache gradio themes locally").info("disable to update the selected Gradio theme"),
    "return_grid": OptionInfo(True, "Show grid in results for web"),
    "do_not_show_images": OptionInfo(False, "Do not show any images in results for web"),
    "send_seed": OptionInfo(True, "Send seed when sending prompt or image to other interface"),
    "send_size": OptionInfo(True, "Send size when sending prompt or image to another interface"),
    "js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"),
    "js_modal_lightbox_initially_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"),
    "js_modal_lightbox_gamepad": OptionInfo(False, "Navigate image viewer with gamepad"),
    "js_modal_lightbox_gamepad_repeat": OptionInfo(250, "Gamepad repeat period, in milliseconds"),
    "show_progress_in_title": OptionInfo(True, "Show generation progress in window title."),
    "samplers_in_dropdown": OptionInfo(True, "Use dropdown for sampler selection instead of radio group").needs_reload_ui(),
    "dimensions_and_batch_together": OptionInfo(True, "Show Width/Height and Batch sliders in same row").needs_reload_ui(),
    "keyedit_precision_attention": OptionInfo(0.1, "Ctrl+up/down precision when editing (attention:1.1)", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}),
    "keyedit_precision_extra": OptionInfo(0.05, "Ctrl+up/down precision when editing <extra networks:0.9>", gr.Slider, {"minimum": 0.01, "maximum": 0.2, "step": 0.001}),
    "keyedit_delimiters": OptionInfo(".,\\/!?%^*;:{}=`~()", "Ctrl+up/down word delimiters"),
    "keyedit_move": OptionInfo(True, "Alt+left/right moves prompt elements"),
    "quicksettings_list": OptionInfo(["sd_model_checkpoint"], "Quicksettings list", ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that appear at the top of page rather than in settings tab").needs_reload_ui(),
    "ui_tab_order": OptionInfo([], "UI tab order", ui_components.DropdownMulti, lambda: {"choices": list(shared.tab_names)}).needs_reload_ui(),
    "hidden_tabs": OptionInfo([], "Hidden UI tabs", ui_components.DropdownMulti, lambda: {"choices": list(shared.tab_names)}).needs_reload_ui(),
    "ui_reorder_list": OptionInfo([], "txt2img/img2img UI item order", ui_components.DropdownMulti, lambda: {"choices": list(shared_items.ui_reorder_categories())}).info("selected items appear first").needs_reload_ui(),
    "hires_fix_show_sampler": OptionInfo(False, "Hires fix: show hires checkpoint and sampler selection").needs_reload_ui(),
    "hires_fix_show_prompts": OptionInfo(False, "Hires fix: show hires prompt and negative prompt").needs_reload_ui(),
    "disable_token_counters": OptionInfo(False, "Disable prompt token counters").needs_reload_ui(),
}))


options_templates.update(options_section(('infotext', "Infotext"), {
    "add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"),
    "add_model_name_to_info": OptionInfo(True, "Add model name to generation information"),
|
||||
"add_user_name_to_info": OptionInfo(False, "Add user name to generation information when authenticated"),
|
||||
"add_version_to_infotext": OptionInfo(True, "Add program version to generation information"),
|
||||
"disable_weights_auto_swap": OptionInfo(True, "Disregard checkpoint information from pasted infotext").info("when reading generation parameters from text into UI"),
|
||||
"infotext_styles": OptionInfo("Apply if any", "Infer styles from prompts of pasted infotext", gr.Radio, {"choices": ["Ignore", "Apply", "Discard", "Apply if any"]}).info("when reading generation parameters from text into UI)").html("""<ul style='margin-left: 1.5em'>
|
||||
<li>Ignore: keep prompt and styles dropdown as it is.</li>
|
||||
<li>Apply: remove style text from prompt, always replace styles dropdown value with found styles (even if none are found).</li>
|
||||
<li>Discard: remove style text from prompt, keep styles dropdown as it is.</li>
|
||||
<li>Apply if any: remove style text from prompt; if any styles are found in prompt, put them into styles dropdown, otherwise keep it as it is.</li>
|
||||
</ul>"""),
|
||||
|
||||
}))
|
||||
|
||||
options_templates.update(options_section(('ui', "Live previews"), {
|
||||
"show_progressbar": OptionInfo(True, "Show progressbar"),
|
||||
"live_previews_enable": OptionInfo(True, "Show live previews of the created image"),
|
||||
"live_previews_image_format": OptionInfo("png", "Live preview file format", gr.Radio, {"choices": ["jpeg", "png", "webp"]}),
|
||||
"show_progress_grid": OptionInfo(True, "Show previews of all images generated in a batch as a grid"),
|
||||
"show_progress_every_n_steps": OptionInfo(10, "Live preview display period", gr.Slider, {"minimum": -1, "maximum": 32, "step": 1}).info("in sampling steps - show new live preview image every N sampling steps; -1 = only show after completion of batch"),
|
||||
"show_progress_type": OptionInfo("Approx NN", "Live preview method", gr.Radio, {"choices": ["Full", "Approx NN", "Approx cheap", "TAESD"]}).info("Full = slow but pretty; Approx NN and TAESD = fast but low quality; Approx cheap = super fast but terrible otherwise"),
|
||||
"live_preview_content": OptionInfo("Prompt", "Live preview subject", gr.Radio, {"choices": ["Combined", "Prompt", "Negative prompt"]}),
|
||||
"live_preview_refresh_period": OptionInfo(1000, "Progressbar and preview update period").info("in milliseconds"),
|
||||
}))
|
||||
|
||||
options_templates.update(options_section(('sampler-params', "Sampler parameters"), {
|
||||
"hide_samplers": OptionInfo([], "Hide samplers in user interface", gr.CheckboxGroup, lambda: {"choices": [x.name for x in shared_items.list_samplers()]}).needs_reload_ui(),
|
||||
"eta_ddim": OptionInfo(0.0, "Eta for DDIM", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}, infotext='Eta DDIM').info("noise multiplier; higher = more unperdictable results"),
|
||||
"eta_ancestral": OptionInfo(1.0, "Eta for ancestral samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}, infotext='Eta').info("noise multiplier; applies to Euler a and other samplers that have a in them"),
|
||||
"ddim_discretize": OptionInfo('uniform', "img2img DDIM discretize", gr.Radio, {"choices": ['uniform', 'quad']}),
|
||||
's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 100.0, "step": 0.01}, infotext='Sigma churn').info('amount of stochasticity; only applies to Euler, Heun, and DPM2'),
|
||||
's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 10.0, "step": 0.01}, infotext='Sigma tmin').info('enable stochasticity; start value of the sigma range; only applies to Euler, Heun, and DPM2'),
|
||||
's_tmax': OptionInfo(0.0, "sigma tmax", gr.Slider, {"minimum": 0.0, "maximum": 999.0, "step": 0.01}, infotext='Sigma tmax').info("0 = inf; end value of the sigma range; only applies to Euler, Heun, and DPM2"),
|
||||
's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.1, "step": 0.001}, infotext='Sigma noise').info('amount of additional noise to counteract loss of detail during sampling; only applies to Euler, Heun, and DPM2'),
|
||||
'k_sched_type': OptionInfo("Automatic", "Scheduler type", gr.Dropdown, {"choices": ["Automatic", "karras", "exponential", "polyexponential"]}, infotext='Schedule type').info("lets you override the noise schedule for k-diffusion samplers; choosing Automatic disables the three parameters below"),
|
||||
'sigma_min': OptionInfo(0.0, "sigma min", gr.Number, infotext='Schedule max sigma').info("0 = default (~0.03); minimum noise strength for k-diffusion noise scheduler"),
|
||||
'sigma_max': OptionInfo(0.0, "sigma max", gr.Number, infotext='Schedule min sigma').info("0 = default (~14.6); maximum noise strength for k-diffusion noise scheduler"),
|
||||
'rho': OptionInfo(0.0, "rho", gr.Number, infotext='Schedule rho').info("0 = default (7 for karras, 1 for polyexponential); higher values result in a steeper noise schedule (decreases faster)"),
|
||||
'eta_noise_seed_delta': OptionInfo(0, "Eta noise seed delta", gr.Number, {"precision": 0}, infotext='ENSD').info("ENSD; does not improve anything, just produces different results for ancestral samplers - only useful for reproducing images"),
|
||||
'always_discard_next_to_last_sigma': OptionInfo(False, "Always discard next-to-last sigma", infotext='Discard penultimate sigma').link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/6044"),
|
||||
'uni_pc_variant': OptionInfo("bh1", "UniPC variant", gr.Radio, {"choices": ["bh1", "bh2", "vary_coeff"]}, infotext='UniPC variant'),
|
||||
'uni_pc_skip_type': OptionInfo("time_uniform", "UniPC skip type", gr.Radio, {"choices": ["time_uniform", "time_quadratic", "logSNR"]}, infotext='UniPC skip type'),
|
||||
'uni_pc_order': OptionInfo(3, "UniPC order", gr.Slider, {"minimum": 1, "maximum": 50, "step": 1}, infotext='UniPC order').info("must be < sampling steps"),
|
||||
'uni_pc_lower_order_final': OptionInfo(True, "UniPC lower order final", infotext='UniPC lower order final'),
|
||||
}))
|
||||
|
||||
options_templates.update(options_section(('postprocessing', "Postprocessing"), {
|
||||
'postprocessing_enable_in_main_ui': OptionInfo([], "Enable postprocessing operations in txt2img and img2img tabs", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}),
|
||||
'postprocessing_operation_order': OptionInfo([], "Postprocessing operation order", ui_components.DropdownMulti, lambda: {"choices": [x.name for x in shared_items.postprocessing_scripts()]}),
|
||||
'upscaling_max_images_in_cache': OptionInfo(5, "Maximum number of images in upscaling cache", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
|
||||
}))
|
||||
|
||||
options_templates.update(options_section((None, "Hidden options"), {
|
||||
"disabled_extensions": OptionInfo([], "Disable these extensions"),
|
||||
"disable_all_extensions": OptionInfo("none", "Disable all extensions (preserves the list of disabled extensions)", gr.Radio, {"choices": ["none", "extra", "all"]}),
|
||||
"restore_config_state_file": OptionInfo("", "Config state file to restore from, under 'config-states/' folder"),
|
||||
"sd_checkpoint_hash": OptionInfo("", "SHA256 hash of the current checkpoint"),
|
||||
}))
|
||||
|
|
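Editor's note: every key registered through options_templates later surfaces as an attribute on shared.opts. A minimal sketch of reading these values back, using option names from the section above (the attribute-access behavior is how the webui's Options class works):

```python
from modules import shared

# Each OptionInfo key becomes an attribute holding the stored or default value.
beams = shared.opts.interrogate_clip_num_beams  # 1 unless the user changed it
if shared.opts.live_previews_enable:
    print(f"previews every {shared.opts.show_progress_every_n_steps} steps")
```
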
@ -0,0 +1,159 @@
import datetime
import logging
import threading
import time

from modules import errors, shared, devices
from typing import Optional

log = logging.getLogger(__name__)


class State:
    skipped = False
    interrupted = False
    job = ""
    job_no = 0
    job_count = 0
    processing_has_refined_job_count = False
    job_timestamp = '0'
    sampling_step = 0
    sampling_steps = 0
    current_latent = None
    current_image = None
    current_image_sampling_step = 0
    id_live_preview = 0
    textinfo = None
    time_start = None
    server_start = None
    _server_command_signal = threading.Event()
    _server_command: Optional[str] = None

    def __init__(self):
        self.server_start = time.time()

    @property
    def need_restart(self) -> bool:
        # Compatibility getter for need_restart.
        return self.server_command == "restart"

    @need_restart.setter
    def need_restart(self, value: bool) -> None:
        # Compatibility setter for need_restart.
        if value:
            self.server_command = "restart"

    @property
    def server_command(self):
        return self._server_command

    @server_command.setter
    def server_command(self, value: Optional[str]) -> None:
        """
        Set the server command to `value` and signal that it's been set.
        """
        self._server_command = value
        self._server_command_signal.set()

    def wait_for_server_command(self, timeout: Optional[float] = None) -> Optional[str]:
        """
        Wait for server command to get set; return and clear the value and signal.
        """
        if self._server_command_signal.wait(timeout):
            self._server_command_signal.clear()
            req = self._server_command
            self._server_command = None
            return req
        return None

    def request_restart(self) -> None:
        self.interrupt()
        self.server_command = "restart"
        log.info("Received restart request")

    def skip(self):
        self.skipped = True
        log.info("Received skip request")

    def interrupt(self):
        self.interrupted = True
        log.info("Received interrupt request")

    def nextjob(self):
        if shared.opts.live_previews_enable and shared.opts.show_progress_every_n_steps == -1:
            self.do_set_current_image()

        self.job_no += 1
        self.sampling_step = 0
        self.current_image_sampling_step = 0

    def dict(self):
        obj = {
            "skipped": self.skipped,
            "interrupted": self.interrupted,
            "job": self.job,
            "job_count": self.job_count,
            "job_timestamp": self.job_timestamp,
            "job_no": self.job_no,
            "sampling_step": self.sampling_step,
            "sampling_steps": self.sampling_steps,
        }

        return obj

    def begin(self, job: str = "(unknown)"):
        self.sampling_step = 0
        self.job_count = -1
        self.processing_has_refined_job_count = False
        self.job_no = 0
        self.job_timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
        self.current_latent = None
        self.current_image = None
        self.current_image_sampling_step = 0
        self.id_live_preview = 0
        self.skipped = False
        self.interrupted = False
        self.textinfo = None
        self.time_start = time.time()
        self.job = job
        devices.torch_gc()
        log.info("Starting job %s", job)

    def end(self):
        duration = time.time() - self.time_start
        log.info("Ending job %s (%.2f seconds)", self.job, duration)
        self.job = ""
        self.job_count = 0

        devices.torch_gc()

    def set_current_image(self):
        """sets self.current_image from self.current_latent if enough sampling steps have been made after the last call to this"""
        if not shared.parallel_processing_allowed:
            return

        if self.sampling_step - self.current_image_sampling_step >= shared.opts.show_progress_every_n_steps and shared.opts.live_previews_enable and shared.opts.show_progress_every_n_steps != -1:
            self.do_set_current_image()

    def do_set_current_image(self):
        if self.current_latent is None:
            return

        import modules.sd_samplers

        try:
            if shared.opts.show_progress_grid:
                self.assign_current_image(modules.sd_samplers.samples_to_image_grid(self.current_latent))
            else:
                self.assign_current_image(modules.sd_samplers.sample_to_image(self.current_latent))

            self.current_image_sampling_step = self.sampling_step

        except Exception:
            # when switching models during generation, VAE would be on CPU, so creating an image will fail.
            # we silently ignore this error
            errors.record_exception()

    def assign_current_image(self, image):
        self.current_image = image
        self.id_live_preview += 1

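Editor's note: the command signal above is meant to be consumed by a server loop. A minimal sketch of that pattern, assuming shared.state is the module-level State instance; the 5-second timeout is an illustrative assumption, and the command names match what request_restart() sets:

```python
from modules import shared  # shared.state is the module-level State instance

def wait_for_restart_or_stop() -> str:
    # Block in 5-second slices until a command arrives; None means the
    # wait timed out and we simply keep waiting.
    while True:
        command = shared.state.wait_for_server_command(timeout=5)
        if command in ("restart", "stop"):
            return command
```
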
@ -0,0 +1,37 @@
import tqdm

from modules import shared


class TotalTQDM:
    def __init__(self):
        self._tqdm = None

    def reset(self):
        self._tqdm = tqdm.tqdm(
            desc="Total progress",
            total=shared.state.job_count * shared.state.sampling_steps,
            position=1,
            file=shared.progress_print_out
        )

    def update(self):
        if not shared.opts.multiple_tqdm or shared.cmd_opts.disable_console_progressbars:
            return
        if self._tqdm is None:
            self.reset()
        self._tqdm.update()

    def updateTotal(self, new_total):
        if not shared.opts.multiple_tqdm or shared.cmd_opts.disable_console_progressbars:
            return
        if self._tqdm is None:
            self.reset()
        self._tqdm.total = new_total

    def clear(self):
        if self._tqdm is not None:
            self._tqdm.refresh()
            self._tqdm.close()
            self._tqdm = None

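Editor's note: a short sketch of the intended call pattern, assuming shared.total_tqdm is the module-level TotalTQDM instance (as elsewhere in the codebase); the loop body is illustrative:

```python
from modules import shared

# Size the bar for the whole batch, tick it once per sampling step,
# and close it when everything is done.
shared.total_tqdm.updateTotal(shared.state.job_count * shared.state.sampling_steps)
for _ in range(shared.state.sampling_steps):
    ...  # one denoising step
    shared.total_tqdm.update()  # advances the "Total progress" bar by one
shared.total_tqdm.clear()
```
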
@ -10,7 +10,7 @@ import psutil
import re

import launch
from modules import paths_internal, timer
from modules import paths_internal, timer, shared, extensions, errors

checksum_token = "DontStealMyGamePlz__WINNERS_DONT_USE_DRUGS__DONT_COPY_THAT_FLOPPY"
environment_whitelist = {

@ -115,8 +115,6 @@ def format_exception(e, tb):

def get_exceptions():
    try:
        from modules import errors

        return list(reversed(errors.exception_records))
    except Exception as e:
        return str(e)

@ -142,8 +140,6 @@ def get_torch_sysinfo():
def get_extensions(*, enabled):

    try:
        from modules import extensions

        def to_json(x: extensions.Extension):
            return {
                "name": x.name,

@ -160,7 +156,6 @@ def get_extensions(*, enabled):

def get_config():
    try:
        from modules import shared
        return shared.opts.data
    except Exception as e:
        return str(e)

@ -9,7 +9,7 @@ from modules.ui import plaintext_to_html
import gradio as gr


def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_name: str, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, hr_checkpoint_name: str, hr_sampler_name: str, hr_prompt: str, hr_negative_prompt, override_settings_texts, request: gr.Request, *args):
def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_name: str, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, hr_checkpoint_name: str, hr_sampler_name: str, hr_prompt: str, hr_negative_prompt, override_settings_texts, request: gr.Request, *args):
    override_settings = create_override_settings_dict(override_settings_texts)

    p = processing.StableDiffusionProcessingTxt2Img(

@ -32,8 +32,6 @@ def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, step
        cfg_scale=cfg_scale,
        width=width,
        height=height,
        restore_faces=restore_faces,
        tiling=tiling,
        enable_hr=enable_hr,
        denoising_strength=denoising_strength if enable_hr else None,
        hr_scale=hr_scale,

@ -42,7 +40,7 @@ def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, step
        hr_resize_x=hr_resize_x,
        hr_resize_y=hr_resize_y,
        hr_checkpoint_name=None if hr_checkpoint_name == 'Use same checkpoint' else hr_checkpoint_name,
        hr_sampler_name=hr_sampler_name,
        hr_sampler_name=None if hr_sampler_name == 'Use same sampler' else hr_sampler_name,
        hr_prompt=hr_prompt,
        hr_negative_prompt=hr_negative_prompt,
        override_settings=override_settings,

@ -13,8 +13,8 @@ from PIL import Image, PngImagePlugin  # noqa: F401
from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call

from modules import gradio_extensons  # noqa: F401
from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors, shared_items, ui_settings, timer, sysinfo, ui_checkpoint_merger, ui_prompt_styles, scripts, sd_samplers
from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML
from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors, shared_items, ui_settings, timer, sysinfo, ui_checkpoint_merger, ui_prompt_styles, scripts, sd_samplers, processing, ui_extra_networks
from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML, InputAccordion
from modules.paths import script_path
from modules.ui_common import create_refresh_button
from modules.ui_gradio_extensions import reload_javascript

@ -78,7 +78,6 @@ extra_networks_symbol = '\U0001F3B4'  # 🎴
switch_values_symbol = '\U000021C5'  # ⇅
restore_progress_symbol = '\U0001F300'  # 🌀
detect_image_size_symbol = '\U0001F4D0'  # 📐
up_down_symbol = '\u2195\ufe0f'  # ↕️


plaintext_to_html = ui_common.plaintext_to_html

@ -91,17 +90,13 @@ def send_gradio_gallery_to_image(x):


def calc_resolution_hires(enable, width, height, hr_scale, hr_resize_x, hr_resize_y):
    from modules import processing, devices

    if not enable:
        return ""

    p = processing.StableDiffusionProcessingTxt2Img(width=width, height=height, enable_hr=True, hr_scale=hr_scale, hr_resize_x=hr_resize_x, hr_resize_y=hr_resize_y)
    p.calculate_target_resolution()

    with devices.autocast():
        p.init([""], [0], [0])

    return f"resize: from <span class='resolution'>{p.width}x{p.height}</span> to <span class='resolution'>{p.hr_resize_x or p.hr_upscale_to_x}x{p.hr_resize_y or p.hr_upscale_to_y}</span>"
    return f"from <span class='resolution'>{p.width}x{p.height}</span> to <span class='resolution'>{p.hr_resize_x or p.hr_upscale_to_x}x{p.hr_resize_y or p.hr_upscale_to_y}</span>"


def resize_from_to_html(width, height, scale_by):

@ -149,7 +144,11 @@ def interrogate_deepbooru(image):

def create_seed_inputs(target_interface):
    with FormRow(elem_id=f"{target_interface}_seed_row", variant="compact"):
        seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1, elem_id=f"{target_interface}_seed")
        if cmd_opts.use_textbox_seed:
            seed = gr.Textbox(label='Seed', value="", elem_id=f"{target_interface}_seed")
        else:
            seed = gr.Number(label='Seed', value=-1, elem_id=f"{target_interface}_seed", precision=0)

        random_seed = ToolButton(random_symbol, elem_id=f"{target_interface}_random_seed", label='Random seed')
        reuse_seed = ToolButton(reuse_symbol, elem_id=f"{target_interface}_reuse_seed", label='Reuse seed')

@ -160,7 +159,7 @@ def create_seed_inputs(target_interface):

    with FormRow(visible=False, elem_id=f"{target_interface}_subseed_row") as seed_extra_row_1:
        seed_extras.append(seed_extra_row_1)
        subseed = gr.Number(label='Variation seed', value=-1, elem_id=f"{target_interface}_subseed")
        subseed = gr.Number(label='Variation seed', value=-1, elem_id=f"{target_interface}_subseed", precision=0)
        random_subseed = ToolButton(random_symbol, elem_id=f"{target_interface}_random_subseed")
        reuse_subseed = ToolButton(reuse_symbol, elem_id=f"{target_interface}_reuse_subseed")
        subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01, elem_id=f"{target_interface}_subseed_strength")

@ -437,13 +436,13 @@ def create_ui():

                elif category == "checkboxes":
                    with FormRow(elem_classes="checkboxes-row", variant="compact"):
                        restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="txt2img_restore_faces")
                        tiling = gr.Checkbox(label='Tiling', value=False, elem_id="txt2img_tiling")
                        enable_hr = gr.Checkbox(label='Hires. fix', value=False, elem_id="txt2img_enable_hr")
                        hr_final_resolution = FormHTML(value="", elem_id="txtimg_hr_finalres", label="Upscaled resolution", interactive=False)
                        pass

                elif category == "hires_fix":
                    with FormGroup(visible=False, elem_id="txt2img_hires_fix") as hr_options:
                    with InputAccordion(False, label="Hires. fix") as enable_hr:
                        with enable_hr.extra():
                            hr_final_resolution = FormHTML(value="", elem_id="txtimg_hr_finalres", label="Upscaled resolution", interactive=False, min_width=0)

                        with FormRow(elem_id="txt2img_hires_fix_row1", variant="compact"):
                            hr_upscaler = gr.Dropdown(label="Upscaler", elem_id="txt2img_hr_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode)
                            hr_second_pass_steps = gr.Slider(minimum=0, maximum=150, step=1, label='Hires steps', value=0, elem_id="txt2img_hires_steps")

@ -520,8 +519,6 @@ def create_ui():
                toprow.ui_styles.dropdown,
                steps,
                sampler_name,
                restore_faces,
                tiling,
                batch_count,
                batch_size,
                cfg_scale,

@ -571,19 +568,11 @@ def create_ui():
                show_progress=False,
            )

            enable_hr.change(
                fn=lambda x: gr_show(x),
                inputs=[enable_hr],
                outputs=[hr_options],
                show_progress=False,
            )

            txt2img_paste_fields = [
                (toprow.prompt, "Prompt"),
                (toprow.negative_prompt, "Negative prompt"),
                (steps, "Steps"),
                (sampler_name, "Sampler"),
                (restore_faces, "Face restoration"),
                (cfg_scale, "CFG scale"),
                (seed, "Seed"),
                (width, "Size-1"),

@ -597,7 +586,6 @@ def create_ui():
                (toprow.ui_styles.dropdown, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()),
                (denoising_strength, "Denoising strength"),
                (enable_hr, lambda d: "Denoising strength" in d and ("Hires upscale" in d or "Hires upscaler" in d or "Hires resize-1" in d)),
                (hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d and ("Hires upscale" in d or "Hires upscaler" in d or "Hires resize-1" in d))),
                (hr_scale, "Hires upscale"),
                (hr_upscaler, "Hires upscaler"),
                (hr_second_pass_steps, "Hires steps"),

@ -630,7 +618,6 @@ def create_ui():
            toprow.token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[toprow.prompt, steps], outputs=[toprow.token_counter])
            toprow.negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[toprow.negative_prompt, steps], outputs=[toprow.negative_token_counter])

            from modules import ui_extra_networks
            extra_networks_ui = ui_extra_networks.create_ui(txt2img_interface, [txt2img_generation_tab], 'txt2img')
            ui_extra_networks.setup_ui(extra_networks_ui, txt2img_gallery)

@ -805,8 +792,7 @@ def create_ui():

                elif category == "checkboxes":
                    with FormRow(elem_classes="checkboxes-row", variant="compact"):
                        restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="img2img_restore_faces")
                        tiling = gr.Checkbox(label='Tiling', value=False, elem_id="img2img_tiling")
                        pass

                elif category == "batch":
                    if not opts.dimensions_and_batch_together:

@ -879,8 +865,6 @@ def create_ui():
                mask_blur,
                mask_alpha,
                inpainting_fill,
                restore_faces,
                tiling,
                batch_count,
                batch_size,
                cfg_scale,

@ -972,7 +956,6 @@ def create_ui():
                (toprow.negative_prompt, "Negative prompt"),
                (steps, "Steps"),
                (sampler_name, "Sampler"),
                (restore_faces, "Face restoration"),
                (cfg_scale, "CFG scale"),
                (image_cfg_scale, "Image CFG scale"),
                (seed, "Seed"),

@ -995,7 +978,6 @@ def create_ui():
                paste_button=toprow.paste, tabname="img2img", source_text_component=toprow.prompt, source_image_component=None,
            ))

            from modules import ui_extra_networks
            extra_networks_ui_img2img = ui_extra_networks.create_ui(img2img_interface, [img2img_generation_tab], 'img2img')
            ui_extra_networks.setup_ui(extra_networks_ui_img2img, img2img_gallery)

@ -11,7 +11,7 @@ from modules import call_queue, shared
from modules.generation_parameters_copypaste import image_from_url_text
import modules.images
from modules.ui_components import ToolButton

import modules.generation_parameters_copypaste as parameters_copypaste

folder_symbol = '\U0001f4c2'  # 📂
refresh_symbol = '\U0001f504'  # 🔄

@ -105,8 +105,6 @@ def save_files(js_data, images, do_make_zip, index):


def create_output_panel(tabname, outdir):
    from modules import shared
    import modules.generation_parameters_copypaste as parameters_copypaste

    def open_folder(f):
        if not os.path.exists(f):

@ -72,3 +72,52 @@ class DropdownEditable(FormComponent, gr.Dropdown):
    def get_block_name(self):
        return "dropdown"


class InputAccordion(gr.Checkbox):
    """A gr.Accordion that can be used as an input - returns True if open, False if closed.

    Actually just a hidden checkbox, but creates an accordion that follows and is followed by the state of the checkbox.
    """

    global_index = 0

    def __init__(self, value, **kwargs):
        self.accordion_id = kwargs.get('elem_id')
        if self.accordion_id is None:
            self.accordion_id = f"input-accordion-{InputAccordion.global_index}"
            InputAccordion.global_index += 1

        kwargs['elem_id'] = self.accordion_id + "-checkbox"
        kwargs['visible'] = False
        super().__init__(value, **kwargs)

        self.change(fn=None, _js='function(checked){ inputAccordionChecked("' + self.accordion_id + '", checked); }', inputs=[self])

        self.accordion = gr.Accordion(kwargs.get('label', 'Accordion'), open=value, elem_id=self.accordion_id, elem_classes=['input-accordion'])

    def extra(self):
        """Allows you to put something into the label of the accordion.

        Use it like this:

        ```
        with InputAccordion(False, label="Accordion") as acc:
            with acc.extra():
                FormHTML(value="hello", min_width=0)

            ...
        ```
        """

        return gr.Column(elem_id=self.accordion_id + '-extra', elem_classes='input-accordion-extra', min_width=0)

    def __enter__(self):
        self.accordion.__enter__()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.accordion.__exit__(exc_type, exc_val, exc_tb)

    def get_block_name(self):
        return "checkbox"

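Editor's note: because InputAccordion subclasses gr.Checkbox, it can go straight into an event's inputs list and arrives in the callback as a plain bool. A minimal sketch (component names are illustrative; the open/closed sync relies on the inputAccordionChecked JS helper shipped with the webui's scripts):

```python
import gradio as gr
from modules.ui_components import InputAccordion

with gr.Blocks() as demo:
    with InputAccordion(False, label="Hires. fix") as enable_hr:
        hr_scale = gr.Slider(1.0, 4.0, value=2.0, label="Upscale by")

    run = gr.Button("Generate")
    out = gr.Textbox()

    # The accordion itself is the checkbox: open -> True, closed -> False.
    run.click(fn=lambda on, s: f"hires={on}, scale={s}", inputs=[enable_hr, hr_scale], outputs=[out])
```
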
@ -4,7 +4,6 @@ from pathlib import Path

from modules import shared, ui_extra_networks_user_metadata, errors, extra_networks
from modules.images import read_info_from_image, save_image_with_geninfo
from modules.ui import up_down_symbol
import gradio as gr
import json
import html

@ -348,6 +347,8 @@ def pages_in_preferred_order(pages):


def create_ui(interface: gr.Blocks, unrelated_tabs, tabname):
    from modules.ui import switch_values_symbol

    ui = ExtraNetworksUi()
    ui.pages = []
    ui.pages_contents = []

@ -373,7 +374,7 @@ def create_ui(interface: gr.Blocks, unrelated_tabs, tabname):

    edit_search = gr.Textbox('', show_label=False, elem_id=tabname+"_extra_search", elem_classes="search", placeholder="Search...", visible=False, interactive=True)
    dropdown_sort = gr.Dropdown(choices=['Default Sort', 'Date Created', 'Date Modified', 'Name'], value='Default Sort', elem_id=tabname+"_extra_sort", elem_classes="sort", multiselect=False, visible=False, show_label=False, interactive=True, label=tabname+"_extra_sort_order")
    button_sortorder = ToolButton(up_down_symbol, elem_id=tabname+"_extra_sortorder", elem_classes="sortorder", visible=False)
    button_sortorder = ToolButton(switch_values_symbol, elem_id=tabname+"_extra_sortorder", elem_classes="sortorder", visible=False)
    button_refresh = gr.Button('Refresh', elem_id=tabname+"_extra_refresh", visible=False)
    checkbox_show_dirs = gr.Checkbox(True, label='Show dirs', elem_id=tabname+"_extra_show_dirs", elem_classes="show-dirs", visible=False)

@ -36,8 +36,8 @@ class UserMetadataEditor:
        item = self.page.items.get(name, {})

        user_metadata = item.get('user_metadata', None)
        if user_metadata is None:
            user_metadata = {}
        if not user_metadata:
            user_metadata = {'description': item.get('description', '')}
            item['user_metadata'] = user_metadata

        return user_metadata

@ -8,7 +8,7 @@ from modules.ui_components import ToolButton


class UiLoadsave:
    """allows saving and restorig default values for gradio components"""
    """allows saving and restoring default values for gradio components"""

    def __init__(self, filename):
        self.filename = filename

@ -48,6 +48,11 @@ class UiLoadsave:
            elif condition and not condition(saved_value):
                pass
            else:
                if isinstance(x, gr.Textbox) and field == 'value':  # due to an undesirable behavior of gr.Textbox, if you give it an int value instead of str, everything dies
                    saved_value = str(saved_value)
                elif isinstance(x, gr.Number) and field == 'value':
                    saved_value = float(saved_value)

                setattr(obj, field, saved_value)
                if init_field is not None:
                    init_field(saved_value)

@ -57,8 +57,9 @@ def save_pil_to_file(self, pil_image, dir=None, format="png"):
    return file_obj.name


# override save to file function so that it also writes PNG info
gradio.components.IOComponent.pil_to_temp_file = save_pil_to_file
def install_ui_tempdir_override():
    """override save to file function so that it also writes PNG info"""
    gradio.components.IOComponent.pil_to_temp_file = save_pil_to_file


def on_tmpdir_changed():

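Editor's note: with this change the monkeypatch is no longer applied as a side effect of importing the module, so it must be invoked explicitly during startup. A sketch of the expected call, assuming it runs once early in initialization (the exact call site is an assumption):

```python
from modules import ui_tempdir

# Must run before any gradio component saves a PIL image to a temp file,
# otherwise images are written without their PNG info.
ui_tempdir.install_ui_tempdir_override()
```
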
@ -0,0 +1,58 @@
import os
import re

from modules import shared
from modules.paths_internal import script_path


def natural_sort_key(s, regex=re.compile('([0-9]+)')):
    return [int(text) if text.isdigit() else text.lower() for text in regex.split(s)]


def listfiles(dirname):
    filenames = [os.path.join(dirname, x) for x in sorted(os.listdir(dirname), key=natural_sort_key) if not x.startswith(".")]
    return [file for file in filenames if os.path.isfile(file)]


def html_path(filename):
    return os.path.join(script_path, "html", filename)


def html(filename):
    path = html_path(filename)

    if os.path.exists(path):
        with open(path, encoding="utf8") as file:
            return file.read()

    return ""


def walk_files(path, allowed_extensions=None):
    if not os.path.exists(path):
        return

    if allowed_extensions is not None:
        allowed_extensions = set(allowed_extensions)

    items = list(os.walk(path, followlinks=True))
    items = sorted(items, key=lambda x: natural_sort_key(x[0]))

    for root, _, files in items:
        for filename in sorted(files, key=natural_sort_key):
            if allowed_extensions is not None:
                _, ext = os.path.splitext(filename)
                if ext not in allowed_extensions:
                    continue

            if not shared.opts.list_hidden_files and ("/." in root or "\\." in root):
                continue

            yield os.path.join(root, filename)


def ldm_print(*args, **kwargs):
    if shared.opts.hide_ldm_prints:
        return

    print(*args, **kwargs)

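Editor's note: natural_sort_key splits digit runs out of the string and compares them as integers while the rest compares case-insensitively, so numbered files order the way humans expect. A quick illustration:

```python
from modules.util import natural_sort_key

names = ["img10.png", "img2.png", "IMG1.png"]
print(sorted(names, key=natural_sort_key))
# ['IMG1.png', 'img2.png', 'img10.png'] - plain string sorting would put img10 before img2
```
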
@ -6,6 +6,7 @@ basicsr
blendmodes
clean-fid
einops
fastapi>=0.90.1
gfpgan
gradio==3.39.0
inflection

style.css
@ -43,13 +43,15 @@ div.form{
.block.gradio-radio,
.block.gradio-checkboxgroup,
.block.gradio-number,
.block.gradio-colorpicker,
div.gradio-group
{
.block.gradio-colorpicker {
    border-width: 0 !important;
    box-shadow: none !important;
}

div.gradio-group, div.styler{
    border-width: 0 !important;
    background: none;
}
.gap.compact{
    padding: 0;
    gap: 0.2em 0;

@ -135,12 +137,8 @@ a{
    cursor: pointer;
}

div.styler{
    border: none;
    background: var(--background-fill-primary);
}

.block.gradio-textbox{
/* gradio 3.39 puts a lot of overflow: hidden all over the place for an unknown reason. */
.block.gradio-textbox, div.gradio-group, div.gradio-group div, div.gradio-dropdown{
    overflow: visible !important;
}

@ -194,6 +192,13 @@ button.custom-button{
    text-align: center;
}

div.gradio-accordion {
    border: 1px solid var(--block-border-color) !important;
    border-radius: 8px !important;
    margin: 2px 0;
    padding: 8px 8px;
}

/* txt2img/img2img specific */

@ -324,12 +329,6 @@ button.custom-button{
    border-radius: 0 0.5rem 0.5rem 0;
}

#txtimg_hr_finalres{
    min-height: 0 !important;
    padding: .625rem .75rem;
    margin-left: -0.75em
}

#img2img_scale_resolution_preview.block{
    display: flex;
    align-items: end;

@ -1011,3 +1010,12 @@ div.block.gradio-box.popup-dialog, .popup-dialog {
div.block.gradio-box.popup-dialog > div:last-child, .popup-dialog > div:last-child{
    margin-top: 1em;
}

div.block.input-accordion{
    margin-bottom: 0.4em;
}

.input-accordion-extra{
    flex: 0 0 auto !important;
    margin: 0 0.5em 0 auto;
}

@ -1,17 +1,25 @@
import os

import pytest
from PIL import Image
from gradio.processing_utils import encode_pil_to_base64
import base64


test_files_path = os.path.dirname(__file__) + "/test_files"


def file_to_base64(filename):
    with open(filename, "rb") as file:
        data = file.read()

    base64_str = str(base64.b64encode(data), "utf-8")
    return "data:image/png;base64," + base64_str


@pytest.fixture(scope="session")  # session so we don't read this over and over
def img2img_basic_image_base64() -> str:
    return encode_pil_to_base64(Image.open(os.path.join(test_files_path, "img2img_basic.png")))
    return file_to_base64(os.path.join(test_files_path, "img2img_basic.png"))


@pytest.fixture(scope="session")  # session so we don't read this over and over
def mask_basic_image_base64() -> str:
    return encode_pil_to_base64(Image.open(os.path.join(test_files_path, "mask_basic.png")))
    return file_to_base64(os.path.join(test_files_path, "mask_basic.png"))

webui.py
@ -1,348 +1,41 @@
from __future__ import annotations

import os
import sys
import time
import importlib
import signal
import re
import warnings
import json
from threading import Thread
from typing import Iterable

from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.middleware.gzip import GZipMiddleware

import logging

# We can't use cmd_opts for this because it will not have been initialized at this point.
log_level = os.environ.get("SD_WEBUI_LOG_LEVEL")
if log_level:
    log_level = getattr(logging, log_level.upper(), None) or logging.INFO
    logging.basicConfig(
        level=log_level,
        format='%(asctime)s %(levelname)s [%(name)s] %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S',
    )

logging.getLogger("torch.distributed.nn").setLevel(logging.ERROR)  # sshh...
logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage())

from modules import timer
from modules import initialize_util
from modules import initialize

startup_timer = timer.startup_timer
startup_timer.record("launcher")

import torch
import pytorch_lightning  # noqa: F401 # pytorch_lightning should be imported after torch, but it re-enables warnings on import so import once to disable them
warnings.filterwarnings(action="ignore", category=DeprecationWarning, module="pytorch_lightning")
warnings.filterwarnings(action="ignore", category=UserWarning, module="torchvision")
startup_timer.record("import torch")
initialize.imports()

import gradio  # noqa: F401
startup_timer.record("import gradio")

from modules import paths, timer, import_hook, errors, devices  # noqa: F401
startup_timer.record("setup paths")

import ldm.modules.encoders.modules  # noqa: F401
startup_timer.record("import ldm")


from modules import extra_networks
from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, queue_lock  # noqa: F401

# Truncate version number of nightly/local build of PyTorch to not cause exceptions with CodeFormer or Safetensors
if ".dev" in torch.__version__ or "+git" in torch.__version__:
    torch.__long_version__ = torch.__version__
    torch.__version__ = re.search(r'[\d.]+[\d]', torch.__version__).group(0)

from modules import shared

if not shared.cmd_opts.skip_version_check:
    errors.check_versions()

import modules.codeformer_model as codeformer
import modules.gfpgan_model as gfpgan
from modules import sd_samplers, upscaler, extensions, localization, ui_tempdir, ui_extra_networks, config_states
import modules.face_restoration
import modules.img2img

import modules.lowvram
import modules.scripts
import modules.sd_hijack
import modules.sd_hijack_optimizations
import modules.sd_models
import modules.sd_vae
import modules.sd_unet
import modules.txt2img
import modules.script_callbacks
import modules.textual_inversion.textual_inversion
import modules.progress

import modules.ui
from modules import modelloader
from modules.shared import cmd_opts
import modules.hypernetworks.hypernetwork

startup_timer.record("other imports")


if cmd_opts.server_name:
    server_name = cmd_opts.server_name
else:
    server_name = "0.0.0.0" if cmd_opts.listen else None


def fix_asyncio_event_loop_policy():
    """
    The default `asyncio` event loop policy only automatically creates
    event loops in the main threads. Other threads must create event
    loops explicitly or `asyncio.get_event_loop` (and therefore
    `.IOLoop.current`) will fail. Installing this policy allows event
    loops to be created automatically on any thread, matching the
    behavior of Tornado versions prior to 5.0 (or 5.0 on Python 2).
    """

    import asyncio

    if sys.platform == "win32" and hasattr(asyncio, "WindowsSelectorEventLoopPolicy"):
        # "Any thread" and "selector" should be orthogonal, but there's not a clean
        # interface for composing policies so pick the right base.
        _BasePolicy = asyncio.WindowsSelectorEventLoopPolicy  # type: ignore
    else:
        _BasePolicy = asyncio.DefaultEventLoopPolicy

    class AnyThreadEventLoopPolicy(_BasePolicy):  # type: ignore
        """Event loop policy that allows loop creation on any thread.
        Usage::

            asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
        """

        def get_event_loop(self) -> asyncio.AbstractEventLoop:
            try:
                return super().get_event_loop()
            except (RuntimeError, AssertionError):
                # This was an AssertionError in python 3.4.2 (which ships with debian jessie)
                # and changed to a RuntimeError in 3.4.3.
                # "There is no current event loop in thread %r"
                loop = self.new_event_loop()
                self.set_event_loop(loop)
                return loop

    asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())


def restore_config_state_file():
    config_state_file = shared.opts.restore_config_state_file
    if config_state_file == "":
        return

    shared.opts.restore_config_state_file = ""
    shared.opts.save(shared.config_filename)

    if os.path.isfile(config_state_file):
        print(f"*** About to restore extension state from file: {config_state_file}")
        with open(config_state_file, "r", encoding="utf-8") as f:
            config_state = json.load(f)
            config_states.restore_extension_config(config_state)
        startup_timer.record("restore extension config")
    elif config_state_file:
        print(f"!!! Config state backup not found: {config_state_file}")


def validate_tls_options():
    if not (cmd_opts.tls_keyfile and cmd_opts.tls_certfile):
        return

    try:
        if not os.path.exists(cmd_opts.tls_keyfile):
            print("Invalid path to TLS keyfile given")
        if not os.path.exists(cmd_opts.tls_certfile):
            print(f"Invalid path to TLS certfile: '{cmd_opts.tls_certfile}'")
    except TypeError:
        cmd_opts.tls_keyfile = cmd_opts.tls_certfile = None
        print("TLS setup invalid, running webui without TLS")
    else:
        print("Running with TLS")
    startup_timer.record("TLS")


def get_gradio_auth_creds() -> Iterable[tuple[str, ...]]:
    """
    Convert the gradio_auth and gradio_auth_path commandline arguments into
    an iterable of (username, password) tuples.
    """
    def process_credential_line(s) -> tuple[str, ...] | None:
        s = s.strip()
        if not s:
            return None
        return tuple(s.split(':', 1))

    if cmd_opts.gradio_auth:
        for cred in cmd_opts.gradio_auth.split(','):
            cred = process_credential_line(cred)
            if cred:
                yield cred

    if cmd_opts.gradio_auth_path:
        with open(cmd_opts.gradio_auth_path, 'r', encoding="utf8") as file:
            for line in file.readlines():
                for cred in line.strip().split(','):
                    cred = process_credential_line(cred)
                    if cred:
                        yield cred


def configure_sigint_handler():
    # make the program just exit at ctrl+c without waiting for anything
    def sigint_handler(sig, frame):
        print(f'Interrupted with signal {sig} in {frame}')
        os._exit(0)

    if not os.environ.get("COVERAGE_RUN"):
        # Don't install the immediate-quit handler when running under coverage,
        # as then the coverage report won't be generated.
        signal.signal(signal.SIGINT, sigint_handler)


def configure_opts_onchange():
    shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: modules.sd_models.reload_model_weights()), call=False)
    shared.opts.onchange("sd_vae", wrap_queued_call(lambda: modules.sd_vae.reload_vae_weights()), call=False)
    shared.opts.onchange("sd_vae_overrides_per_model_preferences", wrap_queued_call(lambda: modules.sd_vae.reload_vae_weights()), call=False)
    shared.opts.onchange("temp_dir", ui_tempdir.on_tmpdir_changed)
    shared.opts.onchange("gradio_theme", shared.reload_gradio_theme)
    shared.opts.onchange("cross_attention_optimization", wrap_queued_call(lambda: modules.sd_hijack.model_hijack.redo_hijack(shared.sd_model)), call=False)
    startup_timer.record("opts onchange")


def initialize():
    fix_asyncio_event_loop_policy()
    validate_tls_options()
    configure_sigint_handler()
    modelloader.cleanup_models()
    configure_opts_onchange()

    modules.sd_models.setup_model()
    startup_timer.record("setup SD model")

    codeformer.setup_model(cmd_opts.codeformer_models_path)
    startup_timer.record("setup codeformer")

    gfpgan.setup_model(cmd_opts.gfpgan_models_path)
    startup_timer.record("setup gfpgan")

    initialize_rest(reload_script_modules=False)


def initialize_rest(*, reload_script_modules=False):
    """
    Called both from initialize() and when reloading the webui.
    """
    sd_samplers.set_samplers()
    extensions.list_extensions()
    startup_timer.record("list extensions")

    restore_config_state_file()

    if cmd_opts.ui_debug_mode:
        shared.sd_upscalers = upscaler.UpscalerLanczos().scalers
        modules.scripts.load_scripts()
        return

    modules.sd_models.list_models()
    startup_timer.record("list SD models")

    localization.list_localizations(cmd_opts.localizations_dir)

    with startup_timer.subcategory("load scripts"):
        modules.scripts.load_scripts()

    if reload_script_modules:
        for module in [module for name, module in sys.modules.items() if name.startswith("modules.ui")]:
            importlib.reload(module)
        startup_timer.record("reload script modules")

    modelloader.load_upscalers()
    startup_timer.record("load upscalers")

    modules.sd_vae.refresh_vae_list()
    startup_timer.record("refresh VAE")
    modules.textual_inversion.textual_inversion.list_textual_inversion_templates()
    startup_timer.record("refresh textual inversion templates")

    modules.script_callbacks.on_list_optimizers(modules.sd_hijack_optimizations.list_optimizers)
    modules.sd_hijack.list_optimizers()
    startup_timer.record("scripts list_optimizers")

    modules.sd_unet.list_unets()
    startup_timer.record("scripts list_unets")

    def load_model():
        """
        Accesses shared.sd_model property to load model.
        After it's available, if it has been loaded before this access by some extension,
        its optimization may be None because the list of optimizers has not been filled
        by that time, so we apply optimization again.
        """

        shared.sd_model  # noqa: B018

        if modules.sd_hijack.current_optimizer is None:
            modules.sd_hijack.apply_optimizations()

        devices.first_time_calculation()

    Thread(target=load_model).start()

    shared.reload_hypernetworks()
    startup_timer.record("reload hypernetworks")

    ui_extra_networks.initialize()
    ui_extra_networks.register_default_pages()

    extra_networks.initialize()
    extra_networks.register_default_extra_networks()
    startup_timer.record("initialize extra networks")


def setup_middleware(app):
    app.middleware_stack = None  # reset current middleware to allow modifying user provided list
    app.add_middleware(GZipMiddleware, minimum_size=1000)
    configure_cors_middleware(app)
    app.build_middleware_stack()  # rebuild middleware stack on-the-fly


def configure_cors_middleware(app):
    cors_options = {
        "allow_methods": ["*"],
        "allow_headers": ["*"],
        "allow_credentials": True,
    }
    if cmd_opts.cors_allow_origins:
        cors_options["allow_origins"] = cmd_opts.cors_allow_origins.split(',')
    if cmd_opts.cors_allow_origins_regex:
        cors_options["allow_origin_regex"] = cmd_opts.cors_allow_origins_regex
    app.add_middleware(CORSMiddleware, **cors_options)
initialize.check_versions()


def create_api(app):
    from modules.api.api import Api
    from modules.call_queue import queue_lock

    api = Api(app, queue_lock)
    return api


def api_only():
    initialize()
    from fastapi import FastAPI
    from modules.shared_cmd_options import cmd_opts

    initialize.initialize()

    app = FastAPI()
    setup_middleware(app)
    initialize_util.setup_middleware(app)
    api = create_api(app)

    modules.script_callbacks.before_ui_callback()
    modules.script_callbacks.app_started_callback(None, app)
    from modules import script_callbacks
    script_callbacks.before_ui_callback()
    script_callbacks.app_started_callback(None, app)

    print(f"Startup time: {startup_timer.summary()}.")
    api.launch(

@ -353,24 +46,28 @@ def api_only():


def webui():
    from modules.shared_cmd_options import cmd_opts

    launch_api = cmd_opts.api
    initialize()
    initialize.initialize()

    from modules import shared, ui_tempdir, script_callbacks, ui, progress, ui_extra_networks

    while 1:
        if shared.opts.clean_temp_dir_at_start:
            ui_tempdir.cleanup_tmpdr()
            startup_timer.record("cleanup temp dir")

        modules.script_callbacks.before_ui_callback()
        script_callbacks.before_ui_callback()
        startup_timer.record("scripts before_ui_callback")

        shared.demo = modules.ui.create_ui()
        shared.demo = ui.create_ui()
        startup_timer.record("create ui")

        if not cmd_opts.no_gradio_queue:
            shared.demo.queue(64)

        gradio_auth_creds = list(get_gradio_auth_creds()) or None
        gradio_auth_creds = list(initialize_util.get_gradio_auth_creds()) or None

        auto_launch_browser = False
        if os.getenv('SD_WEBUI_RESTARTING') != '1':

@ -381,7 +78,7 @@ def webui():

        app, local_url, share_url = shared.demo.launch(
            share=cmd_opts.share,
            server_name=server_name,
            server_name=initialize_util.gradio_server_name(),
            server_port=cmd_opts.port,
            ssl_keyfile=cmd_opts.tls_keyfile,
            ssl_certfile=cmd_opts.tls_certfile,

@ -406,10 +103,10 @@ def webui():
        # running its code. We disable this here. Suggested by RyotaK.
        app.user_middleware = [x for x in app.user_middleware if x.cls.__name__ != 'CORSMiddleware']

        setup_middleware(app)
        initialize_util.setup_middleware(app)

        modules.progress.setup_progress_api(app)
        modules.ui.setup_ui_api(app)
        progress.setup_progress_api(app)
        ui.setup_ui_api(app)

        if launch_api:
            create_api(app)

@ -419,7 +116,7 @@ def webui():
        startup_timer.record("add APIs")

        with startup_timer.subcategory("app_started_callback"):
            modules.script_callbacks.app_started_callback(shared.demo, app)
            script_callbacks.app_started_callback(shared.demo, app)

        timer.startup_record = startup_timer.dump()
        print(f"Startup time: {startup_timer.summary()}.")

@ -449,14 +146,16 @@ def webui():
        shared.demo.close()
        time.sleep(0.5)
        startup_timer.reset()
        modules.script_callbacks.app_reload_callback()
        script_callbacks.app_reload_callback()
        startup_timer.record("app reload callback")
        modules.script_callbacks.script_unloaded_callback()
        script_callbacks.script_unloaded_callback()
        startup_timer.record("scripts unloaded callback")
        initialize_rest(reload_script_modules=True)
        initialize.initialize_rest(reload_script_modules=True)


if __name__ == "__main__":
    from modules.shared_cmd_options import cmd_opts

    if cmd_opts.nowebui:
        api_only()
    else: