From c9647c8d23efa8c939c6af39878784e246082122 Mon Sep 17 00:00:00 2001 From: space-nuko <24979496+space-nuko@users.noreply.github.com> Date: Sat, 25 Mar 2023 16:11:41 -0400 Subject: [PATCH 01/76] Support Gradio's theme API --- modules/shared.py | 35 +++++++++++++++++++++++++++++++++++ modules/ui.py | 2 +- webui.py | 1 + 3 files changed, 37 insertions(+), 1 deletion(-) diff --git a/modules/shared.py b/modules/shared.py index 11be3985d..2f7892cd6 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -4,6 +4,7 @@ import json import os import sys import time +import requests from PIL import Image import gradio as gr @@ -54,6 +55,21 @@ ui_reorder_categories = [ "scripts", ] +# https://huggingface.co/datasets/freddyaboulton/gradio-theme-subdomains/resolve/main/subdomains.json +gradio_hf_hub_themes = [ + "gradio/glass", + "gradio/monochrome", + "gradio/seafoam", + "gradio/soft", + "freddyaboulton/dracula_revamped", + "gradio/dracula_test", + "abidlabs/dracula_test", + "abidlabs/pakistan", + "dawood/microsoft_windows", + "ysharma/steampunk" +] + + cmd_opts.disable_extension_access = (cmd_opts.share or cmd_opts.listen or cmd_opts.server_name) and not cmd_opts.enable_insecure_extension_access devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_esrgan, devices.device_codeformer = \ @@ -387,6 +403,7 @@ options_templates.update(options_section(('ui', "User interface"), { "ui_reorder": OptionInfo(", ".join(ui_reorder_categories), "txt2img/img2img UI item order"), "ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order"), "localization": OptionInfo("None", "Localization (requires restart)", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)), + "gradio_theme": OptionInfo("Default", "Gradio theme (requires restart)", gr.Dropdown, lambda: {"choices": ["Default"] + gradio_hf_hub_themes}) })) options_templates.update(options_section(('ui', "Live previews"), { @@ -599,6 +616,24 @@ clip_model = None progress_print_out = sys.stdout +gradio_theme = gr.themes.Base() + + +def reload_gradio_theme(theme_name=None): + global gradio_theme + if not theme_name: + theme_name = opts.gradio_theme + + if theme_name == "Default": + gradio_theme = gr.themes.Default() + else: + try: + gradio_theme = gr.themes.ThemeClass.from_hub(theme_name) + except requests.exceptions.ConnectionError: + print("Can't access HuggingFace Hub, falling back to default Gradio theme") + gradio_theme = gr.themes.Default() + + class TotalTQDM: def __init__(self): diff --git a/modules/ui.py b/modules/ui.py index af8546c29..6e0498811 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1592,7 +1592,7 @@ def create_ui(): for _interface, label, _ifid in interfaces: shared.tab_names.append(label) - with gr.Blocks(css=css, analytics_enabled=False, title="Stable Diffusion") as demo: + with gr.Blocks(css=css, theme=shared.gradio_theme, analytics_enabled=False, title="Stable Diffusion") as demo: with gr.Row(elem_id="quicksettings", variant="compact"): for i, k, item in sorted(quicksettings_list, key=lambda x: quicksettings_names.get(x[1], x[0])): component = create_setting_component(k, is_quicksettings=True) diff --git a/webui.py b/webui.py index 30f3e4a1f..6986e576a 100644 --- a/webui.py +++ b/webui.py @@ -150,6 +150,7 @@ def initialize(): shared.opts.onchange("sd_vae", wrap_queued_call(lambda: modules.sd_vae.reload_vae_weights()), call=False) shared.opts.onchange("sd_vae_as_default", 
wrap_queued_call(lambda: modules.sd_vae.reload_vae_weights()), call=False) shared.opts.onchange("temp_dir", ui_tempdir.on_tmpdir_changed) + shared.opts.onchange("gradio_theme", shared.reload_gradio_theme) startup_timer.record("opts onchange") shared.reload_hypernetworks() From 9ecf3471339d011983f2e3c878e920e49718ff90 Mon Sep 17 00:00:00 2001 From: AlUlkesh <99896447+AlUlkesh@users.noreply.github.com> Date: Mon, 27 Mar 2023 20:01:19 +0200 Subject: [PATCH 02/76] fix: lightboxModal, selectedTab --- javascript/generationParams.js | 2 +- javascript/imageviewer.js | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/javascript/generationParams.js b/javascript/generationParams.js index 95f050939..06a771bc7 100644 --- a/javascript/generationParams.js +++ b/javascript/generationParams.js @@ -16,7 +16,7 @@ onUiUpdate(function(){ let modalObserver = new MutationObserver(function(mutations) { mutations.forEach(function(mutationRecord) { - let selectedTab = gradioApp().querySelector('#tabs div button.bg-white')?.innerText + let selectedTab = gradioApp().querySelector('#tabs div button')?.innerText if (mutationRecord.target.style.display === 'none' && selectedTab === 'txt2img' || selectedTab === 'img2img') gradioApp().getElementById(selectedTab+"_generation_info_button").click() }); diff --git a/javascript/imageviewer.js b/javascript/imageviewer.js index d64835622..bb61ee244 100644 --- a/javascript/imageviewer.js +++ b/javascript/imageviewer.js @@ -251,7 +251,7 @@ document.addEventListener("DOMContentLoaded", function() { modal.appendChild(modalNext) - gradioApp().appendChild(modal) + gradioApp().body.appendChild(modal) document.body.appendChild(modal); From 5a25826d841a13574ab6afbeb9c50c81a491fa21 Mon Sep 17 00:00:00 2001 From: AlUlkesh <99896447+AlUlkesh@users.noreply.github.com> Date: Tue, 28 Mar 2023 23:28:46 +0200 Subject: [PATCH 03/76] try both versions of appendChild --- javascript/imageviewer.js | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/javascript/imageviewer.js b/javascript/imageviewer.js index bb61ee244..3deffa9be 100644 --- a/javascript/imageviewer.js +++ b/javascript/imageviewer.js @@ -251,8 +251,11 @@ document.addEventListener("DOMContentLoaded", function() { modal.appendChild(modalNext) - gradioApp().body.appendChild(modal) - + try { + gradioApp().appendChild(modal); + } catch (e) { + gradioApp().body.appendChild(modal); + } document.body.appendChild(modal); From 42082e8a3239c1c32cd9e2a03a20b610af857b51 Mon Sep 17 00:00:00 2001 From: devdn Date: Tue, 28 Mar 2023 18:18:28 -0400 Subject: [PATCH 04/76] performance increase --- modules/processing.py | 4 +++- modules/sd_samplers_kdiffusion.py | 22 +++++++++++++++++----- modules/shared.py | 1 + scripts/xyz_grid.py | 1 + 4 files changed, 22 insertions(+), 6 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index 6d9c6a8de..9f00ce3cc 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -105,7 +105,7 @@ class StableDiffusionProcessing: """ The first set of paramaters: sd_models -> do_not_reload_embeddings represent the minimum required to create a StableDiffusionProcessing """ - def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 
512, height: int = 512, restore_faces: bool = False, tiling: bool = False, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = 1.0, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, script_args: list = None): + def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = False, tiling: bool = False, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_min_uncond: float = 0.0, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = 1.0, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, script_args: list = None): if sampler_index is not None: print("sampler_index argument for StableDiffusionProcessing does not do anything; use sampler_name", file=sys.stderr) @@ -140,6 +140,7 @@ class StableDiffusionProcessing: self.denoising_strength: float = denoising_strength self.sampler_noise_scheduler_override = None self.ddim_discretize = ddim_discretize or opts.ddim_discretize + self.s_min_uncond = s_min_uncond or opts.s_min_uncond self.s_churn = s_churn or opts.s_churn self.s_tmin = s_tmin or opts.s_tmin self.s_tmax = s_tmax or float('inf') # not representable as a standard ui option @@ -162,6 +163,7 @@ class StableDiffusionProcessing: self.all_seeds = None self.all_subseeds = None self.iteration = 0 + @property def sd_model(self): diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index e9f08518f..6a54ce32b 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -76,7 +76,7 @@ class CFGDenoiser(torch.nn.Module): return denoised - def forward(self, x, sigma, uncond, cond, cond_scale, image_cond): + def forward(self, x, sigma, uncond, cond, cond_scale, s_min_uncond, image_cond): if state.interrupted or state.skipped: raise sd_samplers_common.InterruptedException @@ -116,6 +116,12 @@ class CFGDenoiser(torch.nn.Module): tensor = denoiser_params.text_cond uncond = denoiser_params.text_uncond + sigma_thresh = s_min_uncond + if(torch.dot(sigma,sigma) < sigma.shape[0] * (sigma_thresh*sigma_thresh) and not is_edit_model): + uncond = torch.zeros([0,0,uncond.shape[2]]) + x_in=x_in[:x_in.shape[0]//2] + sigma_in=sigma_in[:sigma_in.shape[0]//2] + if tensor.shape[1] == uncond.shape[1]: if not is_edit_model: cond_in = torch.cat([tensor, uncond]) @@ -144,7 +150,8 @@ class CFGDenoiser(torch.nn.Module): x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict(c_crossattn, image_cond_in[a:b])) - x_out[-uncond.shape[0]:] = 
self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond=make_condition_dict([uncond], image_cond_in[-uncond.shape[0]:])) + if uncond.shape[0]: + x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond=make_condition_dict([uncond], image_cond_in[-uncond.shape[0]:])) denoised_params = CFGDenoisedParams(x_out, state.sampling_step, state.sampling_steps) cfg_denoised_callback(denoised_params) @@ -157,7 +164,10 @@ class CFGDenoiser(torch.nn.Module): sd_samplers_common.store_latent(x_out[-uncond.shape[0]:]) if not is_edit_model: - denoised = self.combine_denoised(x_out, conds_list, uncond, cond_scale) + if uncond.shape[0]: + denoised = self.combine_denoised(x_out, conds_list, uncond, cond_scale) + else: + denoised = x_out else: denoised = self.combine_denoised_for_edit_model(x_out, cond_scale) @@ -165,7 +175,6 @@ class CFGDenoiser(torch.nn.Module): denoised = self.init_latent * self.mask + self.nmask * denoised self.step += 1 - return denoised @@ -244,6 +253,7 @@ class KDiffusionSampler: self.model_wrap_cfg.step = 0 self.model_wrap_cfg.image_cfg_scale = getattr(p, 'image_cfg_scale', None) self.eta = p.eta if p.eta is not None else opts.eta_ancestral + self.s_min_uncond = getattr(p, 's_min_uncond', 0.0) k_diffusion.sampling.torch = TorchHijack(self.sampler_noises if self.sampler_noises is not None else []) @@ -326,6 +336,7 @@ class KDiffusionSampler: 'image_cond': image_conditioning, 'uncond': unconditional_conditioning, 'cond_scale': p.cfg_scale, + 's_min_uncond': self.s_min_uncond } samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args=extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs)) @@ -359,7 +370,8 @@ class KDiffusionSampler: 'cond': conditioning, 'image_cond': image_conditioning, 'uncond': unconditional_conditioning, - 'cond_scale': p.cfg_scale + 'cond_scale': p.cfg_scale, + 's_min_uncond': self.s_min_uncond }, disable=False, callback=self.callback_state, **extra_params_kwargs)) return samples diff --git a/modules/shared.py b/modules/shared.py index 5fd0eecbd..0bdd30b8c 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -405,6 +405,7 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters" "eta_ancestral": OptionInfo(1.0, "eta (noise multiplier) for ancestral samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), "ddim_discretize": OptionInfo('uniform', "img2img DDIM discretize", gr.Radio, {"choices": ['uniform', 'quad']}), 's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), + 's_min_uncond': OptionInfo(0, "minimum sigma to use unconditioned guidance", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}), 's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), 's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), 'eta_noise_seed_delta': OptionInfo(0, "Eta noise seed delta", gr.Number, {"precision": 0}), diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index 3895a795c..d6a44b1c8 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -212,6 +212,7 @@ axis_options = [ AxisOptionTxt2Img("Sampler", str, apply_sampler, format_value=format_value, confirm=confirm_samplers, choices=lambda: [x.name for x in sd_samplers.samplers]), AxisOptionImg2Img("Sampler", str, apply_sampler, format_value=format_value, confirm=confirm_samplers, choices=lambda: [x.name 
for x in sd_samplers.samplers_for_img2img]),
     AxisOption("Checkpoint name", str, apply_checkpoint, format_value=format_value, confirm=confirm_checkpoints, cost=1.0, choices=lambda: list(sd_models.checkpoints_list)),
+    AxisOption("Negative Guidance minimum sigma", float, apply_field("s_min_uncond")),
     AxisOption("Sigma Churn", float, apply_field("s_churn")),
     AxisOption("Sigma min", float, apply_field("s_tmin")),
     AxisOption("Sigma max", float, apply_field("s_tmax")),

From bc90592031d26d3a6ed5c1b65ee9801452b5ece5 Mon Sep 17 00:00:00 2001
From: devdn
Date: Tue, 28 Mar 2023 20:59:31 -0400
Subject: [PATCH 05/76] increase range of negative guidance minimum sigma
 option

---
 modules/shared.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/shared.py b/modules/shared.py
index 0bdd30b8c..0e9f2d549 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -405,7 +405,7 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters"
     "eta_ancestral": OptionInfo(1.0, "eta (noise multiplier) for ancestral samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
     "ddim_discretize": OptionInfo('uniform', "img2img DDIM discretize", gr.Radio, {"choices": ['uniform', 'quad']}),
     's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
-    's_min_uncond': OptionInfo(0, "minimum sigma to use unconditioned guidance", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}),
+    's_min_uncond': OptionInfo(0, "Negative Guidance minimum sigma", gr.Slider, {"minimum": 0.0, "maximum": 4.0, "step": 0.01}),
     's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
     's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
     'eta_noise_seed_delta': OptionInfo(0, "Eta noise seed delta", gr.Number, {"precision": 0}),

From 70a0a11783747d2e77a08a63d9feeceb7d2d4f63 Mon Sep 17 00:00:00 2001
From: Vespinian
Date: Tue, 28 Mar 2023 23:52:51 -0400
Subject: [PATCH 06/76] Changed behavior that puts the args from the
 alwayson_script request into script_args, so we don't accidentally resize
 the arg list if we get fewer args than our default list has

---
 modules/api/api.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/modules/api/api.py b/modules/api/api.py
index 518b2a61f..8c5cd1853 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -272,7 +272,9 @@ class Api:
                     raise HTTPException(status_code=422, detail=f"Cannot have a selectable script in the always on scripts params")
                 # always on script with no arg should always run so you don't really need to add them to the requests
                 if "args" in request.alwayson_scripts[alwayson_script_name]:
-                    script_args[alwayson_script.args_from:alwayson_script.args_to] = request.alwayson_scripts[alwayson_script_name]["args"]
+                    # min between arg length in scriptrunner and arg length in the request
+                    for idx in range(0, min((alwayson_script.args_to - alwayson_script.args_from), len(request.alwayson_scripts[alwayson_script_name]["args"]))):
+                        script_args[alwayson_script.args_from + idx] = request.alwayson_scripts[alwayson_script_name]["args"][idx]
         return script_args

     def text2imgapi(self, txt2imgreq: StableDiffusionTxt2ImgProcessingAPI):

From 79d57d02f15aa406e38997d65ec8ed99374691b7 Mon Sep 17 00:00:00 2001
From: space-nuko <24979496+space-nuko@users.noreply.github.com>
Date: Wed, 29 Mar 2023 01:52:34 -0500
Subject: [PATCH 07/76] Improve custom code extension

- Uses `gr.Code` component
- Includes example
- Can
return out of body --- scripts/custom_code.py | 63 +++++++++++++++++++++++++++++++++++++----- 1 file changed, 56 insertions(+), 7 deletions(-) diff --git a/scripts/custom_code.py b/scripts/custom_code.py index d29113e67..4071d86d8 100644 --- a/scripts/custom_code.py +++ b/scripts/custom_code.py @@ -1,9 +1,40 @@ import modules.scripts as scripts import gradio as gr +import ast +import copy from modules.processing import Processed from modules.shared import opts, cmd_opts, state + +def convertExpr2Expression(expr): + expr.lineno = 0 + expr.col_offset = 0 + result = ast.Expression(expr.value, lineno=0, col_offset = 0) + + return result + + +def exec_with_return(code, module): + """ + like exec() but can return values + https://stackoverflow.com/a/52361938/5862977 + """ + code_ast = ast.parse(code) + + init_ast = copy.deepcopy(code_ast) + init_ast.body = code_ast.body[:-1] + + last_ast = copy.deepcopy(code_ast) + last_ast.body = code_ast.body[-1:] + + exec(compile(init_ast, "", "exec"), module.__dict__) + if type(last_ast.body[0]) == ast.Expr: + return eval(compile(convertExpr2Expression(last_ast.body[0]), "", "eval"), module.__dict__) + else: + exec(compile(last_ast, "", "exec"), module.__dict__) + + class Script(scripts.Script): def title(self): @@ -13,12 +44,23 @@ class Script(scripts.Script): return cmd_opts.allow_code def ui(self, is_img2img): - code = gr.Textbox(label="Python code", lines=1, elem_id=self.elem_id("code")) + example = """from modules.processing import process_images - return [code] +p.width = 768 +p.height = 768 +p.batch_size = 2 +p.steps = 10 + +return process_images(p) +""" - def run(self, p, code): + code = gr.Code(value=example, language="python", label="Python code", elem_id=self.elem_id("code")) + indent_level = gr.Number(label='Indent level', value=2, precision=0, elem_id=self.elem_id("indent_level")) + + return [code, indent_level] + + def run(self, p, code, indent_level): assert cmd_opts.allow_code, '--allow-code option must be enabled' display_result_data = [[], -1, ""] @@ -29,13 +71,20 @@ class Script(scripts.Script): display_result_data[2] = i from types import ModuleType - compiled = compile(code, '', 'exec') module = ModuleType("testmodule") module.__dict__.update(globals()) module.p = p module.display = display - exec(compiled, module.__dict__) + + indent = " " * indent_level + indented = code.replace('\n', '\n' + indent) + body = f"""def __webuitemp__(): +{indent}{indented} +__webuitemp__()""" + + result = exec_with_return(body, module) + + if isinstance(result, Processed): + return result return Processed(p, *display_result_data) - - \ No newline at end of file From 44e8e9c36807d4a71c2fc84129ebcf5ba4f77f21 Mon Sep 17 00:00:00 2001 From: devdn Date: Thu, 30 Mar 2023 00:54:28 -0400 Subject: [PATCH 08/76] fix live preview & alternate uncond guidance for better quality --- modules/sd_samplers_kdiffusion.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index 6a54ce32b..17d24df49 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -116,11 +116,13 @@ class CFGDenoiser(torch.nn.Module): tensor = denoiser_params.text_cond uncond = denoiser_params.text_uncond - sigma_thresh = s_min_uncond - if(torch.dot(sigma,sigma) < sigma.shape[0] * (sigma_thresh*sigma_thresh) and not is_edit_model): - uncond = torch.zeros([0,0,uncond.shape[2]]) - x_in=x_in[:x_in.shape[0]//2] - sigma_in=sigma_in[:sigma_in.shape[0]//2] + if self.step % 2 and 
s_min_uncond > 0 and not is_edit_model: + # alternating uncond allows for higher thresholds without the quality loss normally expected from raising it + sigma_threshold = s_min_uncond + if(torch.dot(sigma,sigma) < sigma.shape[0] * (sigma_threshold*sigma_threshold) ): + uncond = torch.zeros([0,0,uncond.shape[2]]) + x_in=x_in[:x_in.shape[0]//2] + sigma_in=sigma_in[:sigma_in.shape[0]//2] if tensor.shape[1] == uncond.shape[1]: if not is_edit_model: @@ -159,7 +161,7 @@ class CFGDenoiser(torch.nn.Module): devices.test_for_nans(x_out, "unet") if opts.live_preview_content == "Prompt": - sd_samplers_common.store_latent(x_out[0:uncond.shape[0]]) + sd_samplers_common.store_latent(x_out[0:x_out.shape[0]-uncond.shape[0]]) elif opts.live_preview_content == "Negative prompt": sd_samplers_common.store_latent(x_out[-uncond.shape[0]:]) From d5063e07e8b4737621978feffd37b18077b9ea64 Mon Sep 17 00:00:00 2001 From: Vladimir Mandic Date: Thu, 30 Mar 2023 10:57:54 -0400 Subject: [PATCH 09/76] update torch --- .gitignore | 2 +- environment-wsl2.yaml | 11 ++++++----- launch.py | 6 +++--- requirements.txt | 1 + requirements_versions.txt | 4 ++-- webui-macos-env.sh | 2 +- webui.py | 2 ++ 7 files changed, 16 insertions(+), 12 deletions(-) diff --git a/.gitignore b/.gitignore index 0b1d17ca3..3b48ba9a7 100644 --- a/.gitignore +++ b/.gitignore @@ -32,4 +32,4 @@ notification.mp3 /extensions /test/stdout.txt /test/stderr.txt -/cache.json +/cache.json* diff --git a/environment-wsl2.yaml b/environment-wsl2.yaml index f88727507..061345650 100644 --- a/environment-wsl2.yaml +++ b/environment-wsl2.yaml @@ -4,8 +4,9 @@ channels: - defaults dependencies: - python=3.10 - - pip=22.2.2 - - cudatoolkit=11.3 - - pytorch=1.12.1 - - torchvision=0.13.1 - - numpy=1.23.1 \ No newline at end of file + - pip=23.0 + - cudatoolkit=11.8 + - pytorch=2.0 + - torchvision=0.15 + - numpy=1.23 + \ No newline at end of file diff --git a/launch.py b/launch.py index 68e08114d..37c8b5164 100644 --- a/launch.py +++ b/launch.py @@ -225,10 +225,10 @@ def run_extensions_installers(settings_file): def prepare_environment(): global skip_install - torch_command = os.environ.get('TORCH_COMMAND', "pip install torch==1.13.1+cu117 torchvision==0.14.1+cu117 --extra-index-url https://download.pytorch.org/whl/cu117") + torch_command = os.environ.get('TORCH_COMMAND', "pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/cu118") requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt") - xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.16rc425') + xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.17') gfpgan_package = os.environ.get('GFPGAN_PACKAGE', "git+https://github.com/TencentARC/GFPGAN.git@8d2447a2d918f8eba5a4a01463fd48e45126a379") clip_package = os.environ.get('CLIP_PACKAGE', "git+https://github.com/openai/CLIP.git@d50d76daa670286dd6cacf3bcd80b5e4823fc8e1") openclip_package = os.environ.get('OPENCLIP_PACKAGE', "git+https://github.com/mlfoundations/open_clip.git@bb6e834e9c70d9c27d0dc3ecedeebeaeb1ffad6b") @@ -296,7 +296,7 @@ def prepare_environment(): if not os.path.isfile(requirements_file): requirements_file = os.path.join(script_path, requirements_file) - run_pip(f"install -r \"{requirements_file}\"", "requirements for Web UI") + run_pip(f"install -r \"{requirements_file}\"", "requirements") run_extensions_installers(settings_file=args.ui_settings_file) diff --git a/requirements.txt b/requirements.txt index c72b2927e..36cdae6c3 100644 --- a/requirements.txt +++ 
b/requirements.txt @@ -1,3 +1,4 @@ +astunparse blendmodes accelerate basicsr diff --git a/requirements_versions.txt b/requirements_versions.txt index df65431a3..6487f1c30 100644 --- a/requirements_versions.txt +++ b/requirements_versions.txt @@ -1,10 +1,10 @@ blendmodes==2022 transformers==4.25.1 -accelerate==0.12.0 +accelerate==0.18.0 basicsr==1.4.2 gfpgan==1.3.8 gradio==3.23 -numpy==1.23.3 +numpy==1.23.5 Pillow==9.4.0 realesrgan==0.3.0 torch diff --git a/webui-macos-env.sh b/webui-macos-env.sh index 37cac4fb0..65d804134 100644 --- a/webui-macos-env.sh +++ b/webui-macos-env.sh @@ -11,7 +11,7 @@ fi export install_dir="$HOME" export COMMANDLINE_ARGS="--skip-torch-cuda-test --upcast-sampling --no-half-vae --use-cpu interrogate" -export TORCH_COMMAND="pip install torch==1.12.1 torchvision==0.13.1" +export TORCH_COMMAND="pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/cu118" export K_DIFFUSION_REPO="https://github.com/brkirch/k-diffusion.git" export K_DIFFUSION_COMMIT_HASH="51c9778f269cedb55a4d88c79c0246d35bdadb71" export PYTORCH_ENABLE_MPS_FALLBACK=1 diff --git a/webui.py b/webui.py index b570895fb..54552cdd6 100644 --- a/webui.py +++ b/webui.py @@ -20,6 +20,8 @@ startup_timer = timer.Timer() import torch import pytorch_lightning # pytorch_lightning should be imported after torch, but it re-enables warnings on import so import once to disable them warnings.filterwarnings(action="ignore", category=DeprecationWarning, module="pytorch_lightning") +warnings.filterwarnings(action="ignore", category=UserWarning, module="torchvision") + startup_timer.record("import torch") import gradio From a73f3bf0cfc89cde294b42f5c566017daf4b2ccd Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Thu, 30 Mar 2023 23:19:40 -0600 Subject: [PATCH 10/76] Change extras "scale to" to sliders --- scripts/postprocessing_upscale.py | 14 ++++++++++---- style.css | 4 ++++ 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/scripts/postprocessing_upscale.py b/scripts/postprocessing_upscale.py index 11eab31a5..bc43719bd 100644 --- a/scripts/postprocessing_upscale.py +++ b/scripts/postprocessing_upscale.py @@ -4,8 +4,9 @@ import numpy as np from modules import scripts_postprocessing, shared import gradio as gr -from modules.ui_components import FormRow +from modules.ui_components import FormRow, ToolButton +switch_values_symbol = '\U000021C5' # ⇅ upscale_cache = {} @@ -25,9 +26,12 @@ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing): with gr.TabItem('Scale to', elem_id="extras_scale_to_tab") as tab_scale_to: with FormRow(): - upscaling_resize_w = gr.Number(label="Width", value=512, precision=0, elem_id="extras_upscaling_resize_w") - upscaling_resize_h = gr.Number(label="Height", value=512, precision=0, elem_id="extras_upscaling_resize_h") - upscaling_crop = gr.Checkbox(label='Crop to fit', value=True, elem_id="extras_upscaling_crop") + with gr.Column(elem_id="upscaling_column_size", scale=4): + upscaling_resize_w = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="extras_upscaling_resize_w") + upscaling_resize_h = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="extras_upscaling_resize_w") + with gr.Column(elem_id="upscaling_dimensions_row", scale=1, elem_classes="dimensions-tools"): + upscaling_res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="upscaling_res_switch_btn") + upscaling_crop = gr.Checkbox(label='Crop to fit', value=True, elem_id="extras_upscaling_crop") with FormRow(): 
extras_upscaler_1 = gr.Dropdown(label='Upscaler 1', elem_id="extras_upscaler_1", choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name) @@ -36,6 +40,7 @@ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing): extras_upscaler_2 = gr.Dropdown(label='Upscaler 2', elem_id="extras_upscaler_2", choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name) extras_upscaler_2_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Upscaler 2 visibility", value=0.0, elem_id="extras_upscaler_2_visibility") + upscaling_res_switch_btn.click(lambda w, h: (h, w), inputs=[upscaling_resize_w, upscaling_resize_h], outputs=[upscaling_resize_w, upscaling_resize_h], show_progress=False) tab_scale_by.select(fn=lambda: 0, inputs=[], outputs=[selected_tab]) tab_scale_to.select(fn=lambda: 1, inputs=[], outputs=[selected_tab]) @@ -45,6 +50,7 @@ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing): "upscale_to_width": upscaling_resize_w, "upscale_to_height": upscaling_resize_h, "upscale_crop": upscaling_crop, + "upscaling_res_switch_btn": upscaling_res_switch_btn, "upscaler_1_name": extras_upscaler_1, "upscaler_2_name": extras_upscaler_2, "upscaler_2_visibility": extras_upscaler_2_visibility, diff --git a/style.css b/style.css index de16a7f2f..aafc23627 100644 --- a/style.css +++ b/style.css @@ -312,6 +312,10 @@ div.dimensions-tools{ align-content: center; } +div#extras_scale_to_tab div.form{ + flex-direction: row; +} + #mode_img2img .gradio-image > div.fixed-height, #mode_img2img .gradio-image > div.fixed-height img{ height: 480px !important; max-height: 480px !important; From 69ad46b047678a7a97a152a20e702bac61e37b8b Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Thu, 30 Mar 2023 23:25:39 -0600 Subject: [PATCH 11/76] Import switch_values_symbol --- scripts/postprocessing_upscale.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/scripts/postprocessing_upscale.py b/scripts/postprocessing_upscale.py index bc43719bd..bf27b64d0 100644 --- a/scripts/postprocessing_upscale.py +++ b/scripts/postprocessing_upscale.py @@ -5,8 +5,7 @@ from modules import scripts_postprocessing, shared import gradio as gr from modules.ui_components import FormRow, ToolButton - -switch_values_symbol = '\U000021C5' # ⇅ +from modules.ui import switch_values_symbol upscale_cache = {} From 3ebdd2afd3769046289880d44bbe1322a832073f Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Fri, 31 Mar 2023 00:56:38 -0600 Subject: [PATCH 12/76] Don't return upscaling_res_switch_btn --- scripts/postprocessing_upscale.py | 1 - 1 file changed, 1 deletion(-) diff --git a/scripts/postprocessing_upscale.py b/scripts/postprocessing_upscale.py index bf27b64d0..e60208ac3 100644 --- a/scripts/postprocessing_upscale.py +++ b/scripts/postprocessing_upscale.py @@ -49,7 +49,6 @@ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing): "upscale_to_width": upscaling_resize_w, "upscale_to_height": upscaling_resize_h, "upscale_crop": upscaling_crop, - "upscaling_res_switch_btn": upscaling_res_switch_btn, "upscaler_1_name": extras_upscaler_1, "upscaler_2_name": extras_upscaler_2, "upscaler_2_visibility": extras_upscaler_2_visibility, From c938b172a49433291e246b04f9835f3383bad0c8 Mon Sep 17 00:00:00 2001 From: bbonvi <6573230@gmail.com> Date: Fri, 31 Mar 2023 19:29:34 +0600 Subject: [PATCH 13/76] fix missing live preview and progress during certain tasks Sometimes tasks take longer than 5 seconds to start, resulting in 
missing progress bar and livepreviews, so we have to keep pulling for progress a bit longer (5s -> 20s). --- javascript/progressbar.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/javascript/progressbar.js b/javascript/progressbar.js index 4ac9b8db1..eb44aab93 100644 --- a/javascript/progressbar.js +++ b/javascript/progressbar.js @@ -138,7 +138,7 @@ function requestProgress(id_task, progressbarContainer, gallery, atEnd, onProgre return } - if(elapsedFromStart > 5 && !res.queued && !res.active){ + if(elapsedFromStart > 20 && !res.queued && !res.active){ removeProgressBar() return } From aef42bfec09a9ca93d1222b7b47256f37e192a32 Mon Sep 17 00:00:00 2001 From: keith <1868690+wk5ovc@users.noreply.github.com> Date: Mon, 3 Apr 2023 17:05:49 +0800 Subject: [PATCH 14/76] Fix #9046 /sdapi/v1/txt2img endpoint not working **Describe what this pull request is trying to achieve.** Fix https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/9046 **Environment this was tested in** * OS: Linux * Browser: chrome * Graphics card: RTX 3090 --- webui.py | 41 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/webui.py b/webui.py index b570895fb..8927aa330 100644 --- a/webui.py +++ b/webui.py @@ -5,6 +5,7 @@ import importlib import signal import re import warnings +import asyncio from fastapi import FastAPI from fastapi.middleware.cors import CORSMiddleware from fastapi.middleware.gzip import GZipMiddleware @@ -66,6 +67,46 @@ if cmd_opts.server_name: else: server_name = "0.0.0.0" if cmd_opts.listen else None +if sys.platform == "win32" and hasattr(asyncio, "WindowsSelectorEventLoopPolicy"): + # "Any thread" and "selector" should be orthogonal, but there's not a clean + # interface for composing policies so pick the right base. + _BasePolicy = asyncio.WindowsSelectorEventLoopPolicy # type: ignore +else: + _BasePolicy = asyncio.DefaultEventLoopPolicy + + +class AnyThreadEventLoopPolicy(_BasePolicy): # type: ignore + """Event loop policy that allows loop creation on any thread. + + The default `asyncio` event loop policy only automatically creates + event loops in the main threads. Other threads must create event + loops explicitly or `asyncio.get_event_loop` (and therefore + `.IOLoop.current`) will fail. Installing this policy allows event + loops to be created automatically on any thread, matching the + behavior of Tornado versions prior to 5.0 (or 5.0 on Python 2). + + Usage:: + + asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy()) + + .. versionadded:: 5.0 + + """ + + def get_event_loop(self) -> asyncio.AbstractEventLoop: + try: + return super().get_event_loop() + except (RuntimeError, AssertionError): + # This was an AssertionError in python 3.4.2 (which ships with debian jessie) + # and changed to a RuntimeError in 3.4.3. 
+ # "There is no current event loop in thread %r" + loop = self.new_event_loop() + self.set_event_loop(loop) + return loop + + +asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy()) + def check_versions(): if shared.cmd_opts.skip_version_check: From f7215906af573f6e60b006cd430e82691ba4f8d6 Mon Sep 17 00:00:00 2001 From: gmasil <54176035+gmasil@users.noreply.github.com> Date: Mon, 3 Apr 2023 18:19:57 +0200 Subject: [PATCH 15/76] allow styles.csv to be symlinked or mounted in docker without moving the file around --- modules/styles.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/modules/styles.py b/modules/styles.py index 990d56236..9ed859911 100644 --- a/modules/styles.py +++ b/modules/styles.py @@ -72,16 +72,14 @@ class StyleDatabase: return apply_styles_to_prompt(prompt, [self.styles.get(x, self.no_style).negative_prompt for x in styles]) def save_styles(self, path: str) -> None: - # Write to temporary file first, so we don't nuke the file if something goes wrong - fd, temp_path = tempfile.mkstemp(".csv") + # Always keep a backup file around + if os.path.exists(path): + shutil.copy(path, path + ".bak") + + fd = os.open(path, os.O_RDWR|os.O_CREAT) with os.fdopen(fd, "w", encoding="utf-8-sig", newline='') as file: # _fields is actually part of the public API: typing.NamedTuple is a replacement for collections.NamedTuple, # and collections.NamedTuple has explicit documentation for accessing _fields. Same goes for _asdict() writer = csv.DictWriter(file, fieldnames=PromptStyle._fields) writer.writeheader() writer.writerows(style._asdict() for k, style in self.styles.items()) - - # Always keep a backup file around - if os.path.exists(path): - shutil.move(path, path + ".bak") - shutil.move(temp_path, path) From 54fd00ff8f6fc1396ce0396772962b45609e7a9c Mon Sep 17 00:00:00 2001 From: Liam Date: Mon, 3 Apr 2023 13:28:20 -0400 Subject: [PATCH 16/76] fixed logic for updating the displayed generation params when the image modal is closed --- javascript/generationParams.js | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/javascript/generationParams.js b/javascript/generationParams.js index 95f050939..1266a266c 100644 --- a/javascript/generationParams.js +++ b/javascript/generationParams.js @@ -16,9 +16,9 @@ onUiUpdate(function(){ let modalObserver = new MutationObserver(function(mutations) { mutations.forEach(function(mutationRecord) { - let selectedTab = gradioApp().querySelector('#tabs div button.bg-white')?.innerText - if (mutationRecord.target.style.display === 'none' && selectedTab === 'txt2img' || selectedTab === 'img2img') - gradioApp().getElementById(selectedTab+"_generation_info_button").click() + let selectedTab = gradioApp().querySelector('#tabs div button.selected')?.innerText + if (mutationRecord.target.style.display === 'none' && (selectedTab === 'txt2img' || selectedTab === 'img2img')) + gradioApp().getElementById(selectedTab+"_generation_info_button")?.click() }); }); From 4fa59b045add1d23350e884e201dc77bc34864e6 Mon Sep 17 00:00:00 2001 From: Vladimir Mandic Date: Mon, 3 Apr 2023 15:23:35 -0400 Subject: [PATCH 17/76] update xformers --- environment-wsl2.yaml | 1 - launch.py | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/environment-wsl2.yaml b/environment-wsl2.yaml index 061345650..0c4ae6809 100644 --- a/environment-wsl2.yaml +++ b/environment-wsl2.yaml @@ -9,4 +9,3 @@ dependencies: - pytorch=2.0 - torchvision=0.15 - numpy=1.23 - \ No newline at end of file diff --git a/launch.py b/launch.py index 
37c8b5164..c5ae90926 100644 --- a/launch.py +++ b/launch.py @@ -228,7 +228,7 @@ def prepare_environment(): torch_command = os.environ.get('TORCH_COMMAND', "pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/cu118") requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt") - xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.17') + xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.18') gfpgan_package = os.environ.get('GFPGAN_PACKAGE', "git+https://github.com/TencentARC/GFPGAN.git@8d2447a2d918f8eba5a4a01463fd48e45126a379") clip_package = os.environ.get('CLIP_PACKAGE', "git+https://github.com/openai/CLIP.git@d50d76daa670286dd6cacf3bcd80b5e4823fc8e1") openclip_package = os.environ.get('OPENCLIP_PACKAGE', "git+https://github.com/mlfoundations/open_clip.git@bb6e834e9c70d9c27d0dc3ecedeebeaeb1ffad6b") From 80752f43b22acd85bf6ab54b2e4788f144a0c813 Mon Sep 17 00:00:00 2001 From: Vladimir Mandic Date: Tue, 4 Apr 2023 17:27:27 -0400 Subject: [PATCH 18/76] revert xformers --- launch.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/launch.py b/launch.py index c5ae90926..37c8b5164 100644 --- a/launch.py +++ b/launch.py @@ -228,7 +228,7 @@ def prepare_environment(): torch_command = os.environ.get('TORCH_COMMAND', "pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/cu118") requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt") - xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.18') + xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.17') gfpgan_package = os.environ.get('GFPGAN_PACKAGE', "git+https://github.com/TencentARC/GFPGAN.git@8d2447a2d918f8eba5a4a01463fd48e45126a379") clip_package = os.environ.get('CLIP_PACKAGE', "git+https://github.com/openai/CLIP.git@d50d76daa670286dd6cacf3bcd80b5e4823fc8e1") openclip_package = os.environ.get('OPENCLIP_PACKAGE', "git+https://github.com/mlfoundations/open_clip.git@bb6e834e9c70d9c27d0dc3ecedeebeaeb1ffad6b") From c01dc1cb30f7cd87e1df6458580da99c702ee513 Mon Sep 17 00:00:00 2001 From: pangbo13 <373108669@qq.com> Date: Wed, 5 Apr 2023 19:22:51 +0800 Subject: [PATCH 19/76] add dropdown for X/Y/Z plot --- scripts/xyz_grid.py | 38 +++++++++++++++++++++++--------------- 1 file changed, 23 insertions(+), 15 deletions(-) diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index 3895a795c..774fa2c76 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -374,16 +374,19 @@ class Script(scripts.Script): with gr.Row(): x_type = gr.Dropdown(label="X type", choices=[x.label for x in self.current_axis_options], value=self.current_axis_options[1].label, type="index", elem_id=self.elem_id("x_type")) x_values = gr.Textbox(label="X values", lines=1, elem_id=self.elem_id("x_values")) + x_values_dropdown = gr.Dropdown(label="X values",visible=False,multiselect=True,interactive=True) fill_x_button = ToolButton(value=fill_values_symbol, elem_id="xyz_grid_fill_x_tool_button", visible=False) with gr.Row(): y_type = gr.Dropdown(label="Y type", choices=[x.label for x in self.current_axis_options], value=self.current_axis_options[0].label, type="index", elem_id=self.elem_id("y_type")) y_values = gr.Textbox(label="Y values", lines=1, elem_id=self.elem_id("y_values")) + y_values_dropdown = gr.Dropdown(label="Y values",visible=False,multiselect=True,interactive=True) fill_y_button = ToolButton(value=fill_values_symbol, elem_id="xyz_grid_fill_y_tool_button", visible=False) with gr.Row(): z_type = 
gr.Dropdown(label="Z type", choices=[x.label for x in self.current_axis_options], value=self.current_axis_options[0].label, type="index", elem_id=self.elem_id("z_type")) z_values = gr.Textbox(label="Z values", lines=1, elem_id=self.elem_id("z_values")) + z_values_dropdown = gr.Dropdown(label="Z values",visible=False,multiselect=True,interactive=True) fill_z_button = ToolButton(value=fill_values_symbol, elem_id="xyz_grid_fill_z_tool_button", visible=False) with gr.Row(variant="compact", elem_id="axis_options"): @@ -413,18 +416,20 @@ class Script(scripts.Script): def fill(x_type): axis = self.current_axis_options[x_type] - return ", ".join(axis.choices()) if axis.choices else gr.update() + return axis.choices() if axis.choices else gr.update() - fill_x_button.click(fn=fill, inputs=[x_type], outputs=[x_values]) - fill_y_button.click(fn=fill, inputs=[y_type], outputs=[y_values]) - fill_z_button.click(fn=fill, inputs=[z_type], outputs=[z_values]) + fill_x_button.click(fn=fill, inputs=[x_type], outputs=[x_values_dropdown]) + fill_y_button.click(fn=fill, inputs=[y_type], outputs=[y_values_dropdown]) + fill_z_button.click(fn=fill, inputs=[z_type], outputs=[z_values_dropdown]) def select_axis(x_type): - return gr.Button.update(visible=self.current_axis_options[x_type].choices is not None) + choices = self.current_axis_options[x_type].choices + has_choices = choices is not None + return gr.Button.update(visible=has_choices),gr.Textbox.update(visible=not has_choices),gr.update(choices=choices() if has_choices else None,visible=has_choices,value=[]) - x_type.change(fn=select_axis, inputs=[x_type], outputs=[fill_x_button]) - y_type.change(fn=select_axis, inputs=[y_type], outputs=[fill_y_button]) - z_type.change(fn=select_axis, inputs=[z_type], outputs=[fill_z_button]) + x_type.change(fn=select_axis, inputs=[x_type], outputs=[fill_x_button,x_values,x_values_dropdown]) + y_type.change(fn=select_axis, inputs=[y_type], outputs=[fill_y_button,y_values,y_values_dropdown]) + z_type.change(fn=select_axis, inputs=[z_type], outputs=[fill_z_button,z_values,z_values_dropdown]) self.infotext_fields = ( (x_type, "X Type"), @@ -435,20 +440,23 @@ class Script(scripts.Script): (z_values, "Z Values"), ) - return [x_type, x_values, y_type, y_values, z_type, z_values, draw_legend, include_lone_images, include_sub_grids, no_fixed_seeds, margin_size] + return [x_type, x_values, x_values_dropdown, y_type, y_values, y_values_dropdown, z_type, z_values, z_values_dropdown, draw_legend, include_lone_images, include_sub_grids, no_fixed_seeds, margin_size] - def run(self, p, x_type, x_values, y_type, y_values, z_type, z_values, draw_legend, include_lone_images, include_sub_grids, no_fixed_seeds, margin_size): + def run(self, p, x_type, x_values, x_values_dropdown, y_type, y_values, y_values_dropdown, z_type, z_values, z_values_dropdown, draw_legend, include_lone_images, include_sub_grids, no_fixed_seeds, margin_size): if not no_fixed_seeds: modules.processing.fix_seed(p) if not opts.return_grid: p.batch_size = 1 - def process_axis(opt, vals): + def process_axis(opt, vals, vals_dropdown): if opt.label == 'Nothing': return [0] - valslist = [x.strip() for x in chain.from_iterable(csv.reader(StringIO(vals))) if x] + if opt.choices is not None: + valslist = vals_dropdown + else: + valslist = [x.strip() for x in chain.from_iterable(csv.reader(StringIO(vals))) if x] if opt.type == int: valslist_ext = [] @@ -506,13 +514,13 @@ class Script(scripts.Script): return valslist x_opt = self.current_axis_options[x_type] - xs = process_axis(x_opt, 
x_values) + xs = process_axis(x_opt, x_values, x_values_dropdown) y_opt = self.current_axis_options[y_type] - ys = process_axis(y_opt, y_values) + ys = process_axis(y_opt, y_values, y_values_dropdown) z_opt = self.current_axis_options[z_type] - zs = process_axis(z_opt, z_values) + zs = process_axis(z_opt, z_values, z_values_dropdown) # this could be moved to common code, but unlikely to be ever triggered anywhere else Image.MAX_IMAGE_PIXELS = None # disable check in Pillow and rely on check below to allow large custom image sizes From 3ac5f9c471e4cfb5b664f9f0a7f7e7b171b1cee1 Mon Sep 17 00:00:00 2001 From: pangbo13 <373108669@qq.com> Date: Wed, 5 Apr 2023 21:43:27 +0800 Subject: [PATCH 20/76] fix axis swap and infotxt --- scripts/xyz_grid.py | 43 ++++++++++++++++++++++++++++++++----------- 1 file changed, 32 insertions(+), 11 deletions(-) diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index 774fa2c76..52ae1c6e1 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -404,14 +404,14 @@ class Script(scripts.Script): swap_yz_axes_button = gr.Button(value="Swap Y/Z axes", elem_id="yz_grid_swap_axes_button") swap_xz_axes_button = gr.Button(value="Swap X/Z axes", elem_id="xz_grid_swap_axes_button") - def swap_axes(axis1_type, axis1_values, axis2_type, axis2_values): - return self.current_axis_options[axis2_type].label, axis2_values, self.current_axis_options[axis1_type].label, axis1_values + def swap_axes(axis1_type, axis1_values, axis1_values_dropdown, axis2_type, axis2_values, axis2_values_dropdown): + return self.current_axis_options[axis2_type].label, axis2_values, axis2_values_dropdown, self.current_axis_options[axis1_type].label, axis1_values, axis1_values_dropdown - xy_swap_args = [x_type, x_values, y_type, y_values] + xy_swap_args = [x_type, x_values, x_values_dropdown, y_type, y_values, y_values_dropdown] swap_xy_axes_button.click(swap_axes, inputs=xy_swap_args, outputs=xy_swap_args) - yz_swap_args = [y_type, y_values, z_type, z_values] + yz_swap_args = [y_type, y_values, y_values_dropdown, z_type, z_values, z_values_dropdown] swap_yz_axes_button.click(swap_axes, inputs=yz_swap_args, outputs=yz_swap_args) - xz_swap_args = [x_type, x_values, z_type, z_values] + xz_swap_args = [x_type, x_values, x_values_dropdown, z_type, z_values, z_values_dropdown] swap_xz_axes_button.click(swap_axes, inputs=xz_swap_args, outputs=xz_swap_args) def fill(x_type): @@ -422,22 +422,37 @@ class Script(scripts.Script): fill_y_button.click(fn=fill, inputs=[y_type], outputs=[y_values_dropdown]) fill_z_button.click(fn=fill, inputs=[z_type], outputs=[z_values_dropdown]) - def select_axis(x_type): - choices = self.current_axis_options[x_type].choices + def select_axis(axis_type,axis_values_dropdown): + choices = self.current_axis_options[axis_type].choices has_choices = choices is not None - return gr.Button.update(visible=has_choices),gr.Textbox.update(visible=not has_choices),gr.update(choices=choices() if has_choices else None,visible=has_choices,value=[]) + current_values = axis_values_dropdown + if has_choices: + choices = choices() + if isinstance(current_values,str): + current_values = current_values.split(",") + current_values = list(filter(lambda x: x in choices, current_values)) + return gr.Button.update(visible=has_choices),gr.Textbox.update(visible=not has_choices),gr.update(choices=choices if has_choices else None,visible=has_choices,value=current_values) - x_type.change(fn=select_axis, inputs=[x_type], outputs=[fill_x_button,x_values,x_values_dropdown]) - y_type.change(fn=select_axis, 
inputs=[y_type], outputs=[fill_y_button,y_values,y_values_dropdown]) - z_type.change(fn=select_axis, inputs=[z_type], outputs=[fill_z_button,z_values,z_values_dropdown]) + x_type.change(fn=select_axis, inputs=[x_type,x_values_dropdown], outputs=[fill_x_button,x_values,x_values_dropdown]) + y_type.change(fn=select_axis, inputs=[y_type,y_values_dropdown], outputs=[fill_y_button,y_values,y_values_dropdown]) + z_type.change(fn=select_axis, inputs=[z_type,z_values_dropdown], outputs=[fill_z_button,z_values,z_values_dropdown]) + + def get_dropdown_update_from_params(axis,params): + val_key = axis + " Values" + vals = params.get(val_key,"") + valslist = [x.strip() for x in chain.from_iterable(csv.reader(StringIO(vals))) if x] + return gr.update(value = valslist) self.infotext_fields = ( (x_type, "X Type"), (x_values, "X Values"), + (x_values_dropdown, lambda params:get_dropdown_update_from_params("X",params)), (y_type, "Y Type"), (y_values, "Y Values"), + (y_values_dropdown, lambda params:get_dropdown_update_from_params("Y",params)), (z_type, "Z Type"), (z_values, "Z Values"), + (z_values_dropdown, lambda params:get_dropdown_update_from_params("Z",params)), ) return [x_type, x_values, x_values_dropdown, y_type, y_values, y_values_dropdown, z_type, z_values, z_values_dropdown, draw_legend, include_lone_images, include_sub_grids, no_fixed_seeds, margin_size] @@ -514,12 +529,18 @@ class Script(scripts.Script): return valslist x_opt = self.current_axis_options[x_type] + if x_opt.choices is not None: + x_values = ",".join(x_values_dropdown) xs = process_axis(x_opt, x_values, x_values_dropdown) y_opt = self.current_axis_options[y_type] + if y_opt.choices is not None: + y_values = ",".join(y_values_dropdown) ys = process_axis(y_opt, y_values, y_values_dropdown) z_opt = self.current_axis_options[z_type] + if z_opt.choices is not None: + z_values = ",".join(z_values_dropdown) zs = process_axis(z_opt, z_values, z_values_dropdown) # this could be moved to common code, but unlikely to be ever triggered anywhere else From 3a5b47e26e8cb1cd640fedfe7cb5263a588d7088 Mon Sep 17 00:00:00 2001 From: DGdev91 Date: Thu, 6 Apr 2023 01:36:27 +0200 Subject: [PATCH 21/76] Forcing PyTorch version for AMD GPUs automatic install The old code tries to install the newest versions of pytorch, wich is currently 2.0. 
Forcing it to 1.13.1 --- webui.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/webui.sh b/webui.sh index 8cdad22d3..7d9a8538a 100755 --- a/webui.sh +++ b/webui.sh @@ -118,7 +118,7 @@ case "$gpu_info" in esac if echo "$gpu_info" | grep -q "AMD" && [[ -z "${TORCH_COMMAND}" ]] then - export TORCH_COMMAND="pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/rocm5.2" + export TORCH_COMMAND="pip install torch==1.13.1+rocm5.2 torchvision==0.14.1+rocm5.2 --extra-index-url https://download.pytorch.org/whl/rocm5.2" fi for preq in "${GIT}" "${python_cmd}" From b3593d0997bfdcca7f8aa01663e81720db50e494 Mon Sep 17 00:00:00 2001 From: For Sure Date: Thu, 6 Apr 2023 19:42:26 +0300 Subject: [PATCH 22/76] Add support for saving init images in img2img --- modules/processing.py | 8 ++++++++ modules/shared.py | 3 +++ 2 files changed, 11 insertions(+) diff --git a/modules/processing.py b/modules/processing.py index 6d9c6a8de..5556afc5e 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -3,6 +3,7 @@ import math import os import sys import warnings +import hashlib import torch import numpy as np @@ -476,6 +477,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "Conditional mask weight": getattr(p, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) if p.is_using_inpainting_conditioning else None, "Clip skip": None if clip_skip <= 1 else clip_skip, "ENSD": None if opts.eta_noise_seed_delta == 0 else opts.eta_noise_seed_delta, + "Init image hash": getattr(p, 'init_img_hash', None) } generation_params.update(p.extra_generation_params) @@ -1007,6 +1009,12 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): self.color_corrections = [] imgs = [] for img in self.init_images: + + # Save init image + if opts.save_init_img: + self.init_img_hash = hashlib.md5(img.tobytes()).hexdigest() + images.save_image(img, path=opts.outdir_init_images, basename=None, forced_filename=self.init_img_hash, save_to_dirs=False) + image = images.flatten(img, opts.img2img_background_color) if crop_region is None and self.resize_mode != 3: diff --git a/modules/shared.py b/modules/shared.py index 5fd0eecbd..69c2b21e6 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -39,6 +39,7 @@ restricted_opts = { "outdir_grids", "outdir_txt2img_grids", "outdir_save", + "outdir_init_images" } ui_reorder_categories = [ @@ -253,6 +254,7 @@ options_templates.update(options_section(('saving-images', "Saving images/grids" "use_upscaler_name_as_suffix": OptionInfo(False, "Use upscaler name as filename suffix in the extras tab"), "save_selected_only": OptionInfo(True, "When using 'Save' button, only save a single selected image"), "do_not_add_watermark": OptionInfo(False, "Do not add watermark to images"), + "save_init_img": OptionInfo(True, "Save init images when using img2img"), "temp_dir": OptionInfo("", "Directory for temporary images; leave empty for default"), "clean_temp_dir_at_start": OptionInfo(False, "Cleanup non-default temporary directory when starting webui"), @@ -268,6 +270,7 @@ options_templates.update(options_section(('saving-paths', "Paths for saving"), { "outdir_txt2img_grids": OptionInfo("outputs/txt2img-grids", 'Output directory for txt2img grids', component_args=hide_dirs), "outdir_img2img_grids": OptionInfo("outputs/img2img-grids", 'Output directory for img2img grids', component_args=hide_dirs), "outdir_save": OptionInfo("log/images", "Directory for saving images using the Save button", 
component_args=hide_dirs), + "outdir_init_images": OptionInfo("outputs/init-images", "Directory for saving init images when using img2img", component_args=hide_dirs), })) options_templates.update(options_section(('saving-to-dirs', "Saving to a directory"), { From 63a6f9b4d98a192bb359910cb284cf00582baabf Mon Sep 17 00:00:00 2001 From: forsurefr <67145502+forsurefr@users.noreply.github.com> Date: Fri, 7 Apr 2023 12:13:51 +0300 Subject: [PATCH 23/76] Do not save init image by default --- modules/shared.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/shared.py b/modules/shared.py index 69c2b21e6..c5a1b5ad2 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -254,7 +254,7 @@ options_templates.update(options_section(('saving-images', "Saving images/grids" "use_upscaler_name_as_suffix": OptionInfo(False, "Use upscaler name as filename suffix in the extras tab"), "save_selected_only": OptionInfo(True, "When using 'Save' button, only save a single selected image"), "do_not_add_watermark": OptionInfo(False, "Do not add watermark to images"), - "save_init_img": OptionInfo(True, "Save init images when using img2img"), + "save_init_img": OptionInfo(False, "Save init images when using img2img"), "temp_dir": OptionInfo("", "Directory for temporary images; leave empty for default"), "clean_temp_dir_at_start": OptionInfo(False, "Cleanup non-default temporary directory when starting webui"), From 27b9ec60e4ede748ec23615fecddb70e48daa623 Mon Sep 17 00:00:00 2001 From: Brad Smith Date: Sat, 8 Apr 2023 15:58:00 -0400 Subject: [PATCH 24/76] sort embeddings by name (case insensitive) --- modules/textual_inversion/textual_inversion.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index d2e62e589..7c50839f2 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -2,7 +2,7 @@ import os import sys import traceback import inspect -from collections import namedtuple +from collections import namedtuple, OrderedDict import torch import tqdm @@ -108,7 +108,7 @@ class DirWithTextualInversionEmbeddings: class EmbeddingDatabase: def __init__(self): self.ids_lookup = {} - self.word_embeddings = {} + self.word_embeddings = OrderedDict() self.skipped_embeddings = {} self.expected_shape = -1 self.embedding_dirs = {} @@ -233,6 +233,9 @@ class EmbeddingDatabase: self.load_from_dir(embdir) embdir.update() + # re-sort word_embeddings because load_from_dir may not load in alphabetic order. 
+ self.word_embeddings = {e.name: e for e in sorted(self.word_embeddings.values(), key=lambda e: e.name.lower())} + displayed_embeddings = (tuple(self.word_embeddings.keys()), tuple(self.skipped_embeddings.keys())) if self.previously_displayed_embeddings != displayed_embeddings: self.previously_displayed_embeddings = displayed_embeddings From 1aba8d82cbb816a755d012c5c729d8bafeb1b8ed Mon Sep 17 00:00:00 2001 From: yike5460 Date: Sun, 9 Apr 2023 22:22:43 +0800 Subject: [PATCH 25/76] feat: add branch support for extension installation --- modules/ui_extensions.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index efd6cda28..d9487f834 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -129,7 +129,7 @@ def normalize_git_url(url): return url -def install_extension_from_url(dirname, url): +def install_extension_from_url(dirname, branch_name, url): check_access() assert url, 'No URL specified' @@ -150,7 +150,7 @@ def install_extension_from_url(dirname, url): try: shutil.rmtree(tmpdir, True) - with git.Repo.clone_from(url, tmpdir) as repo: + with git.Repo.clone_from(url, tmpdir, branch=branch_name if branch_name else '') as repo: repo.remote().fetch() for submodule in repo.submodules: submodule.update() @@ -376,13 +376,14 @@ def create_ui(): with gr.TabItem("Install from URL"): install_url = gr.Text(label="URL for extension's git repository") + install_branch = gr.Text(label="Branch name for extension's git repository", placeholder="Leave empty for default branch") install_dirname = gr.Text(label="Local directory name", placeholder="Leave empty for auto") install_button = gr.Button(value="Install", variant="primary") install_result = gr.HTML(elem_id="extension_install_result") install_button.click( fn=modules.ui.wrap_gradio_call(install_extension_from_url, extra_outputs=[gr.update()]), - inputs=[install_dirname, install_url], + inputs=[install_dirname, install_branch, install_url], outputs=[extensions_table, install_result], ) From c19618f37059b425b1e53429ad8def2caa78cdec Mon Sep 17 00:00:00 2001 From: Ilya Khadykin Date: Sun, 9 Apr 2023 21:33:09 +0200 Subject: [PATCH 26/76] fix(extras): fix batch image processing on 'Extras\Batch Process' tab This change fixes an issue where an incorrect type was passed to the PIL.Image.open() function that caused the whole process to fail. Scope of this change is limited to only batch image processing, and it shouldn't affect other functionality. 
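For reference, the batch branch is expected to consume the uploads roughly as in the sketch below; the helper name load_batch is illustrative only, while the .name and .orig_name attributes are the ones used in the hunk that follows (gr.Files hands the handler NamedTemporaryFile-like wrappers, so images are opened from the on-disk path rather than from the wrapper object):

    import os
    from PIL import Image

    def load_batch(uploads):
        # Each upload is a NamedTemporaryFile-like wrapper from gr.Files;
        # open the image from its path (.name), keep the original filename stem.
        images, names = [], []
        for upload in uploads:
            images.append(Image.open(os.path.abspath(upload.name)))
            names.append(os.path.splitext(upload.orig_name)[0])
        return images, names
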
--- modules/postprocessing.py | 6 ++++-- modules/ui_postprocessing.py | 2 +- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/modules/postprocessing.py b/modules/postprocessing.py index 09d8e6056..c27ad8db3 100644 --- a/modules/postprocessing.py +++ b/modules/postprocessing.py @@ -1,4 +1,6 @@ import os +import tempfile +from typing import List from PIL import Image @@ -6,7 +8,7 @@ from modules import shared, images, devices, scripts, scripts_postprocessing, ui from modules.shared import opts -def run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir, show_extras_results, *args, save_output: bool = True): +def run_postprocessing(extras_mode, image, image_folder: List[tempfile.NamedTemporaryFile], input_dir, output_dir, show_extras_results, *args, save_output: bool = True): devices.torch_gc() shared.state.begin() @@ -18,7 +20,7 @@ def run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir, if extras_mode == 1: for img in image_folder: - image = Image.open(img) + image = Image.open(os.path.abspath(img.name)) image_data.append(image) image_names.append(os.path.splitext(img.orig_name)[0]) elif extras_mode == 2: diff --git a/modules/ui_postprocessing.py b/modules/ui_postprocessing.py index b418d9553..d278e1b60 100644 --- a/modules/ui_postprocessing.py +++ b/modules/ui_postprocessing.py @@ -13,7 +13,7 @@ def create_ui(): extras_image = gr.Image(label="Source", source="upload", interactive=True, type="pil", elem_id="extras_image") with gr.TabItem('Batch Process', elem_id="extras_batch_process_tab") as tab_batch: - image_batch = gr.File(label="Batch Process", file_count="multiple", interactive=True, type="file", elem_id="extras_image_batch") + image_batch = gr.Files(label="Batch Process", interactive=True, elem_id="extras_image_batch") with gr.TabItem('Batch from Directory', elem_id="extras_batch_directory_tab") as tab_batch_dir: extras_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, placeholder="A directory on the same machine where the server is running.", elem_id="extras_batch_input_dir") From c84118d70d7c3dd2f741f9e89b9a8a4643f27f98 Mon Sep 17 00:00:00 2001 From: bluelovers Date: Tue, 4 Apr 2023 23:42:39 +0800 Subject: [PATCH 27/76] feat(xyz): try sort Checkpoint name values --- scripts/xyz_grid.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index 3895a795c..8aaee2441 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -211,7 +211,7 @@ axis_options = [ AxisOption("Prompt order", str_permutations, apply_order, format_value=format_value_join_list), AxisOptionTxt2Img("Sampler", str, apply_sampler, format_value=format_value, confirm=confirm_samplers, choices=lambda: [x.name for x in sd_samplers.samplers]), AxisOptionImg2Img("Sampler", str, apply_sampler, format_value=format_value, confirm=confirm_samplers, choices=lambda: [x.name for x in sd_samplers.samplers_for_img2img]), - AxisOption("Checkpoint name", str, apply_checkpoint, format_value=format_value, confirm=confirm_checkpoints, cost=1.0, choices=lambda: list(sd_models.checkpoints_list)), + AxisOption("Checkpoint name", str, apply_checkpoint, format_value=format_value, confirm=confirm_checkpoints, cost=1.0, choices=lambda: sorted(sd_models.checkpoints_list, key=str.casefold)), AxisOption("Sigma Churn", float, apply_field("s_churn")), AxisOption("Sigma min", float, apply_field("s_tmin")), AxisOption("Sigma max", float, apply_field("s_tmax")), From 7c62bb2788d9cec10bab9d0154bd24f3658f7a83 Mon 
Sep 17 00:00:00 2001 From: yike5460 Date: Mon, 10 Apr 2023 09:38:26 +0800 Subject: [PATCH 28/76] fix: support for default branch --- modules/ui_extensions.py | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index d9487f834..b402bc8bf 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -150,10 +150,17 @@ def install_extension_from_url(dirname, branch_name, url): try: shutil.rmtree(tmpdir, True) - with git.Repo.clone_from(url, tmpdir, branch=branch_name if branch_name else '') as repo: - repo.remote().fetch() - for submodule in repo.submodules: - submodule.update() + if branch_name == '': + # if no branch is specified, use the default branch + with git.Repo.clone_from(url, tmpdir) as repo: + repo.remote().fetch() + for submodule in repo.submodules: + submodule.update() + else: + with git.Repo.clone_from(url, tmpdir, branch=branch_name) as repo: + repo.remote().fetch() + for submodule in repo.submodules: + submodule.update() try: os.rename(tmpdir, target_dir) except OSError as err: @@ -376,7 +383,7 @@ def create_ui(): with gr.TabItem("Install from URL"): install_url = gr.Text(label="URL for extension's git repository") - install_branch = gr.Text(label="Branch name for extension's git repository", placeholder="Leave empty for default branch") + install_branch = gr.Text(label="Specific branch name", placeholder="Leave empty for default main branch") install_dirname = gr.Text(label="Local directory name", placeholder="Leave empty for auto") install_button = gr.Button(value="Install", variant="primary") install_result = gr.HTML(elem_id="extension_install_result") From 9edd4b6e516ec327e15cc00a3933c681fc4b2f75 Mon Sep 17 00:00:00 2001 From: DGdev91 Date: Tue, 11 Apr 2023 11:22:28 +0200 Subject: [PATCH 29/76] Using --index-url instead of --extra-index-url following new PyTorch install command --- webui.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/webui.sh b/webui.sh index 7d9a8538a..3b92e184f 100755 --- a/webui.sh +++ b/webui.sh @@ -118,7 +118,7 @@ case "$gpu_info" in esac if echo "$gpu_info" | grep -q "AMD" && [[ -z "${TORCH_COMMAND}" ]] then - export TORCH_COMMAND="pip install torch==1.13.1+rocm5.2 torchvision==0.14.1+rocm5.2 --extra-index-url https://download.pytorch.org/whl/rocm5.2" + export TORCH_COMMAND="pip install torch==1.13.1+rocm5.2 torchvision==0.14.1+rocm5.2 --index-url https://download.pytorch.org/whl/rocm5.2" fi for preq in "${GIT}" "${python_cmd}" From 8af4b3bbe46dcb7f701d3650b6e4d31d6dd268a7 Mon Sep 17 00:00:00 2001 From: gk Date: Thu, 13 Apr 2023 08:46:59 +0900 Subject: [PATCH 30/76] Try using TCMalloc on Linux by default --- README.md | 1 + webui-user.sh | 3 +++ webui.sh | 21 ++++++++++++++++++--- 3 files changed, 22 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index b67e2296a..20f74531c 100644 --- a/README.md +++ b/README.md @@ -120,6 +120,7 @@ sudo pacman -S wget git python3 bash <(wget -qO- https://raw.githubusercontent.com/AUTOMATIC1111/stable-diffusion-webui/master/webui.sh) ``` 3. Run `webui.sh`. +4. Check `webui-user.sh` for options. ### Installation on Apple Silicon Find the instructions [here](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Installation-on-Apple-Silicon). 
diff --git a/webui-user.sh b/webui-user.sh index bfa53cb7c..49a426ff9 100644 --- a/webui-user.sh +++ b/webui-user.sh @@ -43,4 +43,7 @@ # Uncomment to enable accelerated launch #export ACCELERATE="True" +# Uncomment to disable TCMalloc +#export NO_TCMALLOC="True" + ########################################### diff --git a/webui.sh b/webui.sh index 8cdad22d3..1122b9650 100755 --- a/webui.sh +++ b/webui.sh @@ -113,13 +113,13 @@ case "$gpu_info" in printf "Experimental support for Renoir: make sure to have at least 4GB of VRAM and 10GB of RAM or enable cpu mode: --use-cpu all --no-half" printf "\n%s\n" "${delimiter}" ;; - *) + *) ;; esac if echo "$gpu_info" | grep -q "AMD" && [[ -z "${TORCH_COMMAND}" ]] then export TORCH_COMMAND="pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/rocm5.2" -fi +fi for preq in "${GIT}" "${python_cmd}" do @@ -172,15 +172,30 @@ else exit 1 fi +# Try using TCMalloc on Linux +prepare_tcmalloc() { + if [[ "${OSTYPE}" == "linux"* ]] && [[ -z "${NO_TCMALLOC}" ]] && [[ -z "${LD_PRELOAD}" ]]; then + TCMALLOC="$(ldconfig -p | grep -Po "libtcmalloc.so.\d" | head -n 1)" + if [[ ! -z "${TCMALLOC}" ]]; then + echo "Using TCMalloc: ${TCMALLOC}" + export LD_PRELOAD="${TCMALLOC}" + else + printf "\e[1m\e[31mCannot locate TCMalloc (improves CPU memory usage)\e[0m\n" + fi + fi +} + if [[ ! -z "${ACCELERATE}" ]] && [ ${ACCELERATE}="True" ] && [ -x "$(command -v accelerate)" ] then printf "\n%s\n" "${delimiter}" printf "Accelerating launch.py..." printf "\n%s\n" "${delimiter}" + prepare_tcmalloc exec accelerate launch --num_cpu_threads_per_process=6 "${LAUNCH_SCRIPT}" "$@" else printf "\n%s\n" "${delimiter}" printf "Launching launch.py..." - printf "\n%s\n" "${delimiter}" + printf "\n%s\n" "${delimiter}" + prepare_tcmalloc exec "${python_cmd}" "${LAUNCH_SCRIPT}" "$@" fi From 7fb72edaffd3d4f2336d2478a424fc455f2376a6 Mon Sep 17 00:00:00 2001 From: Vladimir Mandic Date: Thu, 13 Apr 2023 06:47:48 -0400 Subject: [PATCH 31/76] change index url --- launch.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/launch.py b/launch.py index 37c8b5164..4256ef256 100644 --- a/launch.py +++ b/launch.py @@ -225,7 +225,7 @@ def run_extensions_installers(settings_file): def prepare_environment(): global skip_install - torch_command = os.environ.get('TORCH_COMMAND', "pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/cu118") + torch_command = os.environ.get('TORCH_COMMAND', "pip install torch torchvision --index-url https://download.pytorch.org/whl/cu118") requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt") xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.17') From fcc194afad8acd68c3fe3fd43e0bd3bac0371199 Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Thu, 13 Apr 2023 22:42:20 +0300 Subject: [PATCH 32/76] prompt-bracket-checker: Simplify + improve error reporting --- .../javascript/prompt-bracket-checker.js | 119 +++++------------- 1 file changed, 29 insertions(+), 90 deletions(-) diff --git a/extensions-builtin/prompt-bracket-checker/javascript/prompt-bracket-checker.js b/extensions-builtin/prompt-bracket-checker/javascript/prompt-bracket-checker.js index f0918e260..5c7a836a2 100644 --- a/extensions-builtin/prompt-bracket-checker/javascript/prompt-bracket-checker.js +++ b/extensions-builtin/prompt-bracket-checker/javascript/prompt-bracket-checker.js @@ -1,103 +1,42 @@ // Stable Diffusion WebUI - Bracket checker -// Version 1.0 -// By Hingashi no Florin/Bwin4L +// By Hingashi no 
Florin/Bwin4L & @akx // Counts open and closed brackets (round, square, curly) in the prompt and negative prompt text boxes in the txt2img and img2img tabs. // If there's a mismatch, the keyword counter turns red and if you hover on it, a tooltip tells you what's wrong. -function checkBrackets(evt, textArea, counterElt) { - errorStringParen = '(...) - Different number of opening and closing parentheses detected.\n'; - errorStringSquare = '[...] - Different number of opening and closing square brackets detected.\n'; - errorStringCurly = '{...} - Different number of opening and closing curly brackets detected.\n'; +function checkBrackets(textArea, counterElt) { + var counts = {}; + (textArea.value.match(/[(){}\[\]]/g) || []).forEach(bracket => { + counts[bracket] = (counts[bracket] || 0) + 1; + }); + var errors = []; - openBracketRegExp = /\(/g; - closeBracketRegExp = /\)/g; - - openSquareBracketRegExp = /\[/g; - closeSquareBracketRegExp = /\]/g; - - openCurlyBracketRegExp = /\{/g; - closeCurlyBracketRegExp = /\}/g; - - totalOpenBracketMatches = 0; - totalCloseBracketMatches = 0; - totalOpenSquareBracketMatches = 0; - totalCloseSquareBracketMatches = 0; - totalOpenCurlyBracketMatches = 0; - totalCloseCurlyBracketMatches = 0; - - openBracketMatches = textArea.value.match(openBracketRegExp); - if(openBracketMatches) { - totalOpenBracketMatches = openBracketMatches.length; - } - - closeBracketMatches = textArea.value.match(closeBracketRegExp); - if(closeBracketMatches) { - totalCloseBracketMatches = closeBracketMatches.length; - } - - openSquareBracketMatches = textArea.value.match(openSquareBracketRegExp); - if(openSquareBracketMatches) { - totalOpenSquareBracketMatches = openSquareBracketMatches.length; - } - - closeSquareBracketMatches = textArea.value.match(closeSquareBracketRegExp); - if(closeSquareBracketMatches) { - totalCloseSquareBracketMatches = closeSquareBracketMatches.length; - } - - openCurlyBracketMatches = textArea.value.match(openCurlyBracketRegExp); - if(openCurlyBracketMatches) { - totalOpenCurlyBracketMatches = openCurlyBracketMatches.length; - } - - closeCurlyBracketMatches = textArea.value.match(closeCurlyBracketRegExp); - if(closeCurlyBracketMatches) { - totalCloseCurlyBracketMatches = closeCurlyBracketMatches.length; - } - - if(totalOpenBracketMatches != totalCloseBracketMatches) { - if(!counterElt.title.includes(errorStringParen)) { - counterElt.title += errorStringParen; + function checkPair(open, close, kind) { + if (counts[open] !== counts[close]) { + errors.push( + `${open}...${close} - Detected ${counts[open] || 0} opening and ${counts[close] || 0} closing ${kind}.` + ); } - } else { - counterElt.title = counterElt.title.replace(errorStringParen, ''); } - if(totalOpenSquareBracketMatches != totalCloseSquareBracketMatches) { - if(!counterElt.title.includes(errorStringSquare)) { - counterElt.title += errorStringSquare; - } - } else { - counterElt.title = counterElt.title.replace(errorStringSquare, ''); - } + checkPair('(', ')', 'round brackets'); + checkPair('[', ']', 'square brackets'); + checkPair('{', '}', 'curly brackets'); + counterElt.title = errors.join('\n'); + counterElt.classList.toggle('error', errors.length !== 0); +} - if(totalOpenCurlyBracketMatches != totalCloseCurlyBracketMatches) { - if(!counterElt.title.includes(errorStringCurly)) { - counterElt.title += errorStringCurly; - } - } else { - counterElt.title = counterElt.title.replace(errorStringCurly, ''); - } +function setupBracketChecking(id_prompt, id_counter) { + var textarea = 
gradioApp().querySelector("#" + id_prompt + " > label > textarea"); + var counter = gradioApp().getElementById(id_counter) - if(counterElt.title != '') { - counterElt.classList.add('error'); - } else { - counterElt.classList.remove('error'); + if (textarea && counter) { + textarea.addEventListener("input", () => checkBrackets(textarea, counter)); } } -function setupBracketChecking(id_prompt, id_counter){ - var textarea = gradioApp().querySelector("#" + id_prompt + " > label > textarea"); - var counter = gradioApp().getElementById(id_counter) - - textarea.addEventListener("input", function(evt){ - checkBrackets(evt, textarea, counter) - }); -} - -onUiLoaded(function(){ - setupBracketChecking('txt2img_prompt', 'txt2img_token_counter') - setupBracketChecking('txt2img_neg_prompt', 'txt2img_negative_token_counter') - setupBracketChecking('img2img_prompt', 'img2img_token_counter') - setupBracketChecking('img2img_neg_prompt', 'img2img_negative_token_counter') -}) \ No newline at end of file +onUiLoaded(function () { + setupBracketChecking('txt2img_prompt', 'txt2img_token_counter'); + setupBracketChecking('txt2img_neg_prompt', 'txt2img_negative_token_counter'); + setupBracketChecking('img2img_prompt', 'img2img_token_counter'); + setupBracketChecking('img2img_neg_prompt', 'img2img_negative_token_counter'); +}); From dab5002c59ce1f68deae5e6e0c03e5e2c27155db Mon Sep 17 00:00:00 2001 From: Brad Smith Date: Thu, 13 Apr 2023 23:12:33 -0400 Subject: [PATCH 33/76] sort self.word_embeddings without instantiating it a new dict --- modules/textual_inversion/textual_inversion.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 7c50839f2..379df2430 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -2,7 +2,7 @@ import os import sys import traceback import inspect -from collections import namedtuple, OrderedDict +from collections import namedtuple import torch import tqdm @@ -108,7 +108,7 @@ class DirWithTextualInversionEmbeddings: class EmbeddingDatabase: def __init__(self): self.ids_lookup = {} - self.word_embeddings = OrderedDict() + self.word_embeddings = {} self.skipped_embeddings = {} self.expected_shape = -1 self.embedding_dirs = {} @@ -234,7 +234,10 @@ class EmbeddingDatabase: embdir.update() # re-sort word_embeddings because load_from_dir may not load in alphabetic order. - self.word_embeddings = {e.name: e for e in sorted(self.word_embeddings.values(), key=lambda e: e.name.lower())} + # using a temporary copy so we don't reinitialize self.word_embeddings in case other objects have a reference to it. 
+ sorted_word_embeddings = {e.name: e for e in sorted(self.word_embeddings.values(), key=lambda e: e.name.lower())} + self.word_embeddings.clear() + self.word_embeddings.update(sorted_word_embeddings) displayed_embeddings = (tuple(self.word_embeddings.keys()), tuple(self.skipped_embeddings.keys())) if self.previously_displayed_embeddings != displayed_embeddings: From 3af152d488db0c521f6058676e1a65c7157ccc14 Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Fri, 14 Apr 2023 17:17:14 -0400 Subject: [PATCH 34/76] Fix image mask composite for weird resolutions --- modules/processing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/processing.py b/modules/processing.py index 6d9c6a8de..f49992d90 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -708,7 +708,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if hasattr(p, 'mask_for_overlay') and p.mask_for_overlay: image_mask = p.mask_for_overlay.convert('RGB') - image_mask_composite = Image.composite(image.convert('RGBA').convert('RGBa'), Image.new('RGBa', image.size), p.mask_for_overlay.convert('L')).convert('RGBA') + image_mask_composite = Image.composite(image.convert('RGBA').convert('RGBa'), Image.new('RGBa', image.size), images.resize_image(2, p.mask_for_overlay, image.width, image.height).convert('L')).convert('RGBA') if opts.save_mask: images.save_image(image_mask, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-mask") From fbab3fc6d122fb4e6648dd82291a70fc348c0b4a Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Fri, 14 Apr 2023 17:24:55 -0400 Subject: [PATCH 35/76] Only handle image mask if any option enabled --- modules/processing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/processing.py b/modules/processing.py index f49992d90..5c6edc60b 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -706,7 +706,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: image.info["parameters"] = text output_images.append(image) - if hasattr(p, 'mask_for_overlay') and p.mask_for_overlay: + if hasattr(p, 'mask_for_overlay') and p.mask_for_overlay and any([opts.save_mask, opts.save_mask_composite, opts.return_mask, opts.return_mask_composite]): image_mask = p.mask_for_overlay.convert('RGB') image_mask_composite = Image.composite(image.convert('RGBA').convert('RGBa'), Image.new('RGBa', image.size), images.resize_image(2, p.mask_for_overlay, image.width, image.height).convert('L')).convert('RGBA') From 02e351880796422eac3bbaf7aa86332b588651ce Mon Sep 17 00:00:00 2001 From: tqwuliao Date: Sat, 15 Apr 2023 23:20:08 +0800 Subject: [PATCH 36/76] Add new FilenameGenerator [hasprompt..] --- javascript/hints.js | 4 ++-- modules/images.py | 17 +++++++++++++++++ 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/javascript/hints.js b/javascript/hints.js index 7f4101b23..730ce7bd4 100644 --- a/javascript/hints.js +++ b/javascript/hints.js @@ -67,8 +67,8 @@ titles = { "Interrogate": "Reconstruct prompt from existing image and put it into the prompt field.", - "Images filename pattern": "Use following tags to define how filenames for images are chosen: [steps], [cfg], [prompt_hash], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [model_name], [prompt_words], [date], [datetime], [datetime], [datetime