From 2b717bb195a3034853ed45a52c5752f010e1302b Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Tue, 23 Apr 2024 02:35:25 +0900 Subject: [PATCH 01/89] fix initial corrupt model loop if, for some reason, the model loaded during webui's initial loading phase is corrupted, then after entering this state the user will not be able to load a model even when a good one is selected, because the unload_model_weights > send_model_to_cpu > m.lowvram attribute check will fail since m is None; webui will be stuck in the loop, unable to recover without manual intervention --- modules/sd_models.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/modules/sd_models.py b/modules/sd_models.py index ff245b7a6..1747ca621 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -659,10 +659,11 @@ def get_empty_cond(sd_model): def send_model_to_cpu(m): - if m.lowvram: - lowvram.send_everything_to_cpu() - else: - m.to(devices.cpu) + if m is not None: + if m.lowvram: + lowvram.send_everything_to_cpu() + else: + m.to(devices.cpu) devices.torch_gc() From 4bc39d234d6535e3d8f8531d0c0f4e049261c922 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Tue, 23 Apr 2024 02:39:45 +0900 Subject: [PATCH 02/89] Show LoRA if model is None --- .../Lora/ui_extra_networks_lora.py | 23 ++++++++++--------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/extensions-builtin/Lora/ui_extra_networks_lora.py b/extensions-builtin/Lora/ui_extra_networks_lora.py index b627f7dc2..e35d90c6e 100644 --- a/extensions-builtin/Lora/ui_extra_networks_lora.py +++ b/extensions-builtin/Lora/ui_extra_networks_lora.py @@ -60,18 +60,19 @@ class ExtraNetworksPageLora(ui_extra_networks.ExtraNetworksPage): else: sd_version = lora_on_disk.sd_version - if shared.opts.lora_show_all or not enable_filter: - pass - elif sd_version == network.SdVersion.Unknown: - model_version = network.SdVersion.SDXL if shared.sd_model.is_sdxl else network.SdVersion.SD2 if shared.sd_model.is_sd2 else network.SdVersion.SD1 - if model_version.name in shared.opts.lora_hide_unknown_for_versions: + if shared.sd_model is not None: # still show LoRA in case an error occurs during initial model loading + if shared.opts.lora_show_all or not enable_filter: + pass + elif sd_version == network.SdVersion.Unknown: + model_version = network.SdVersion.SDXL if shared.sd_model.is_sdxl else network.SdVersion.SD2 if shared.sd_model.is_sd2 else network.SdVersion.SD1 + if model_version.name in shared.opts.lora_hide_unknown_for_versions: + return None + elif shared.sd_model.is_sdxl and sd_version != network.SdVersion.SDXL: + return None + elif shared.sd_model.is_sd2 and sd_version != network.SdVersion.SD2: + return None + elif shared.sd_model.is_sd1 and sd_version != network.SdVersion.SD1: return None - elif shared.sd_model.is_sdxl and sd_version != network.SdVersion.SDXL: - return None - elif shared.sd_model.is_sd2 and sd_version != network.SdVersion.SD2: - return None - elif shared.sd_model.is_sd1 and sd_version != network.SdVersion.SD1: - return None return item From 246c269af87757998f57bb27ddda59fdc7cff976 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Tue, 23 Apr 2024 03:08:09 +0900 Subject: [PATCH 03/89] add option to check file hash after download; if the sha256 hash does not match, the file will be automatically deleted --- modules/modelloader.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/modules/modelloader.py b/modules/modelloader.py index 
115415c8e..5421e59b0 100644 --- a/modules/modelloader.py +++ b/modules/modelloader.py @@ -23,6 +23,7 @@ def load_file_from_url( model_dir: str, progress: bool = True, file_name: str | None = None, + hash_prefix: str | None = None, ) -> str: """Download a file from `url` into `model_dir`, using the file present if possible. @@ -36,11 +37,11 @@ def load_file_from_url( if not os.path.exists(cached_file): print(f'Downloading: "{url}" to {cached_file}\n') from torch.hub import download_url_to_file - download_url_to_file(url, cached_file, progress=progress) + download_url_to_file(url, cached_file, progress=progress, hash_prefix=hash_prefix) return cached_file -def load_models(model_path: str, model_url: str = None, command_path: str = None, ext_filter=None, download_name=None, ext_blacklist=None) -> list: +def load_models(model_path: str, model_url: str = None, command_path: str = None, ext_filter=None, download_name=None, ext_blacklist=None, hash_prefix=None) -> list: """ A one-and done loader to try finding the desired models in specified directories. @@ -49,6 +50,7 @@ def load_models(model_path: str, model_url: str = None, command_path: str = None @param model_path: The location to store/find models in. @param command_path: A command-line argument to search for models in first. @param ext_filter: An optional list of filename extensions to filter by + @param hash_prefix: the expected sha256 of the model_url @return: A list of paths containing the desired model(s) """ output = [] @@ -78,7 +80,7 @@ def load_models(model_path: str, model_url: str = None, command_path: str = None if model_url is not None and len(output) == 0: if download_name is not None: - output.append(load_file_from_url(model_url, model_dir=places[0], file_name=download_name)) + output.append(load_file_from_url(model_url, model_dir=places[0], file_name=download_name, hash_prefix=hash_prefix)) else: output.append(model_url) From c69773d7e8f23f8b6c46a8e177b50386e1f1b8e8 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Tue, 23 Apr 2024 03:08:57 +0900 Subject: [PATCH 04/89] ensure integrity for initial sd model download --- modules/sd_models.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/modules/sd_models.py b/modules/sd_models.py index ff245b7a6..35d5952af 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -149,10 +149,12 @@ def list_models(): cmd_ckpt = shared.cmd_opts.ckpt if shared.cmd_opts.no_download_sd_model or cmd_ckpt != shared.sd_model_file or os.path.exists(cmd_ckpt): model_url = None + expected_sha256 = None else: model_url = f"{shared.hf_endpoint}/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.safetensors" + expected_sha256 = '6ce0161689b3853acaa03779ec93eafe75a02f4ced659bee03f50797806fa2fa' - model_list = modelloader.load_models(model_path=model_path, model_url=model_url, command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt", ".safetensors"], download_name="v1-5-pruned-emaonly.safetensors", ext_blacklist=[".vae.ckpt", ".vae.safetensors"]) + model_list = modelloader.load_models(model_path=model_path, model_url=model_url, command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt", ".safetensors"], download_name="v1-5-pruned-emaonly.safetensors", ext_blacklist=[".vae.ckpt", ".vae.safetensors"], hash_prefix=expected_sha256) if os.path.exists(cmd_ckpt): checkpoint_info = CheckpointInfo(cmd_ckpt) From a1aa0af8a45f4c30f1d3fce5635c090d64d4e55b Mon Sep 17 00:00:00 2001 From: drhead <1313496+drhead@users.noreply.github.com> Date: 
Mon, 22 Apr 2024 23:38:44 -0400 Subject: [PATCH 05/89] add code for skipping CFG on early steps --- modules/sd_samplers_cfg_denoiser.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/modules/sd_samplers_cfg_denoiser.py b/modules/sd_samplers_cfg_denoiser.py index 93581c9ac..8ccc837aa 100644 --- a/modules/sd_samplers_cfg_denoiser.py +++ b/modules/sd_samplers_cfg_denoiser.py @@ -212,6 +212,11 @@ class CFGDenoiser(torch.nn.Module): uncond = denoiser_params.text_uncond skip_uncond = False + if self.step < shared.opts.skip_cond_steps: + skip_uncond = True + x_in = x_in[:-batch_size] + sigma_in = sigma_in[:-batch_size] + # alternating uncond allows for higher thresholds without the quality loss normally expected from raising it if self.step % 2 and s_min_uncond > 0 and sigma[0] < s_min_uncond and not is_edit_model: skip_uncond = True From 8016d78a4b9c8bdd02b0031694ad56553f89161e Mon Sep 17 00:00:00 2001 From: drhead <1313496+drhead@users.noreply.github.com> Date: Mon, 22 Apr 2024 23:42:24 -0400 Subject: [PATCH 06/89] add option for early cfg skip --- modules/shared_options.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/modules/shared_options.py b/modules/shared_options.py index 326a317e0..2f70ef65a 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -380,7 +380,8 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters" 'uni_pc_skip_type': OptionInfo("time_uniform", "UniPC skip type", gr.Radio, {"choices": ["time_uniform", "time_quadratic", "logSNR"]}, infotext='UniPC skip type'), 'uni_pc_order': OptionInfo(3, "UniPC order", gr.Slider, {"minimum": 1, "maximum": 50, "step": 1}, infotext='UniPC order').info("must be < sampling steps"), 'uni_pc_lower_order_final': OptionInfo(True, "UniPC lower order final", infotext='UniPC lower order final'), - 'sd_noise_schedule': OptionInfo("Default", "Noise schedule for sampling", gr.Radio, {"choices": ["Default", "Zero Terminal SNR"]}, infotext="Noise Schedule").info("for use with zero terminal SNR trained models") + 'sd_noise_schedule': OptionInfo("Default", "Noise schedule for sampling", gr.Radio, {"choices": ["Default", "Zero Terminal SNR"]}, infotext="Noise Schedule").info("for use with zero terminal SNR trained models"), + 'skip_cond_steps': OptionInfo(0, "Skip CFG on first N steps of sampling", gr.Slider, {"minimum": 0, "maximum": 50, "step": 1}, infotext="Skip CFG first steps"), })) options_templates.update(options_section(('postprocessing', "Postprocessing", "postprocessing"), { From 83266205d0b55ddbff34ea36b47f69c5ea11cc28 Mon Sep 17 00:00:00 2001 From: drhead <1313496+drhead@users.noreply.github.com> Date: Tue, 23 Apr 2024 00:09:43 -0400 Subject: [PATCH 07/89] Add KL Optimal scheduler --- modules/sd_schedulers.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/modules/sd_schedulers.py b/modules/sd_schedulers.py index 75eb3ac03..10ae4e081 100644 --- a/modules/sd_schedulers.py +++ b/modules/sd_schedulers.py @@ -31,6 +31,15 @@ def sgm_uniform(n, sigma_min, sigma_max, inner_model, device): return torch.FloatTensor(sigs).to(device) +def kl_optimal(n, sigma_min, sigma_max, device): + alpha_min = torch.arctan(torch.tensor(sigma_min, device=device)) + alpha_max = torch.arctan(torch.tensor(sigma_max, device=device)) + sigmas = torch.empty((n+1,), device=device) + for i in range(n+1): + sigmas[i] = torch.tan((i/n) * alpha_min + (1.0-i/n) * alpha_max) + return sigmas + + schedulers = [ Scheduler('automatic', 'Automatic', None), Scheduler('uniform', 'Uniform', uniform, 
need_inner_model=True), @@ -38,6 +47,7 @@ schedulers = [ Scheduler('exponential', 'Exponential', k_diffusion.sampling.get_sigmas_exponential), Scheduler('polyexponential', 'Polyexponential', k_diffusion.sampling.get_sigmas_polyexponential, default_rho=1.0), Scheduler('sgm_uniform', 'SGM Uniform', sgm_uniform, need_inner_model=True, aliases=["SGMUniform"]), + Scheduler('kl_optimal', 'KL Optimal', kl_optimal), ] schedulers_map = {**{x.name: x for x in schedulers}, **{x.label: x for x in schedulers}} From 83182d2799f12ee2b5e5425d750db062ad67eb90 Mon Sep 17 00:00:00 2001 From: drhead <1313496+drhead@users.noreply.github.com> Date: Tue, 23 Apr 2024 03:07:25 -0400 Subject: [PATCH 08/89] change skip early cond option name and type to float --- modules/shared_options.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/shared_options.py b/modules/shared_options.py index 2f70ef65a..91ba72b5e 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -381,7 +381,7 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters" 'uni_pc_order': OptionInfo(3, "UniPC order", gr.Slider, {"minimum": 1, "maximum": 50, "step": 1}, infotext='UniPC order').info("must be < sampling steps"), 'uni_pc_lower_order_final': OptionInfo(True, "UniPC lower order final", infotext='UniPC lower order final'), 'sd_noise_schedule': OptionInfo("Default", "Noise schedule for sampling", gr.Radio, {"choices": ["Default", "Zero Terminal SNR"]}, infotext="Noise Schedule").info("for use with zero terminal SNR trained models"), - 'skip_cond_steps': OptionInfo(0, "Skip CFG on first N steps of sampling", gr.Slider, {"minimum": 0, "maximum": 50, "step": 1}, infotext="Skip CFG first steps"), + 'skip_early_cond': OptionInfo(0, "Skip CFG during early sampling", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}, infotext="Skip Early CFG").info("CFG will be disabled (set to 1) on early steps, can both improve sample diversity/quality and speed up sampling"), })) options_templates.update(options_section(('postprocessing', "Postprocessing", "postprocessing"), { From 6e9b69a33853e1bcee81cea6f01cf13de612fef7 Mon Sep 17 00:00:00 2001 From: drhead <1313496+drhead@users.noreply.github.com> Date: Tue, 23 Apr 2024 03:08:28 -0400 Subject: [PATCH 09/89] change skip_early_cond code to use float --- modules/sd_samplers_cfg_denoiser.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/sd_samplers_cfg_denoiser.py b/modules/sd_samplers_cfg_denoiser.py index 8ccc837aa..fba5c48c0 100644 --- a/modules/sd_samplers_cfg_denoiser.py +++ b/modules/sd_samplers_cfg_denoiser.py @@ -212,7 +212,7 @@ class CFGDenoiser(torch.nn.Module): uncond = denoiser_params.text_uncond skip_uncond = False - if self.step < shared.opts.skip_cond_steps: + if self.step / self.total_steps <= shared.opts.skip_early_cond: skip_uncond = True x_in = x_in[:-batch_size] sigma_in = sigma_in[:-batch_size] From 33cbbf9f8b46666a2325c98b723b6cb2ec192ef7 Mon Sep 17 00:00:00 2001 From: drhead <1313496+drhead@users.noreply.github.com> Date: Tue, 23 Apr 2024 03:15:00 -0400 Subject: [PATCH 10/89] add s_min_uncond_all option --- modules/shared_options.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/shared_options.py b/modules/shared_options.py index 91ba72b5e..c711fa5f6 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -210,6 +210,7 @@ options_templates.update(options_section(('img2img', "img2img", "sd"), { options_templates.update(options_section(('optimizations', "Optimizations", 
"sd"), { "cross_attention_optimization": OptionInfo("Automatic", "Cross attention optimization", gr.Dropdown, lambda: {"choices": shared_items.cross_attention_optimizations()}), "s_min_uncond": OptionInfo(0.0, "Negative Guidance minimum sigma", gr.Slider, {"minimum": 0.0, "maximum": 15.0, "step": 0.01}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9177").info("skip negative prompt for some steps when the image is almost ready; 0=disable, higher=faster"), + "s_min_uncond_all": OptionInfo(False, "NGMS: Skip every step").info("makes Negative Guidance minimum sigma skip negative guidance on every step instead of only half"), "token_merging_ratio": OptionInfo(0.0, "Token merging ratio", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}, infotext='Token merging ratio').link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9256").info("0=disable, higher=faster"), "token_merging_ratio_img2img": OptionInfo(0.0, "Token merging ratio for img2img", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"), "token_merging_ratio_hr": OptionInfo(0.0, "Token merging ratio for high-res pass", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}, infotext='Token merging ratio hr').info("only applies if non-zero and overrides above"), From 029adbe5318b57c04dbc0d92273cce38e1ecf457 Mon Sep 17 00:00:00 2001 From: drhead <1313496+drhead@users.noreply.github.com> Date: Tue, 23 Apr 2024 03:15:56 -0400 Subject: [PATCH 11/89] implement option to skip uncond on all steps below ngms --- modules/sd_samplers_cfg_denoiser.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/sd_samplers_cfg_denoiser.py b/modules/sd_samplers_cfg_denoiser.py index fba5c48c0..082a4f63c 100644 --- a/modules/sd_samplers_cfg_denoiser.py +++ b/modules/sd_samplers_cfg_denoiser.py @@ -218,7 +218,7 @@ class CFGDenoiser(torch.nn.Module): sigma_in = sigma_in[:-batch_size] # alternating uncond allows for higher thresholds without the quality loss normally expected from raising it - if self.step % 2 and s_min_uncond > 0 and sigma[0] < s_min_uncond and not is_edit_model: + if (self.step % 2 or shared.opts.s_min_uncond_all) and s_min_uncond > 0 and sigma[0] < s_min_uncond and not is_edit_model: skip_uncond = True x_in = x_in[:-batch_size] sigma_in = sigma_in[:-batch_size] From 50bb6e1179745799038b26a228b8acd8cacfffc5 Mon Sep 17 00:00:00 2001 From: pinanew <851673+pinanew@users.noreply.github.com> Date: Tue, 23 Apr 2024 18:45:42 +0300 Subject: [PATCH 12/89] AVIF has quality setting too --- modules/images.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/images.py b/modules/images.py index c0ff8a630..f4eb6f71a 100644 --- a/modules/images.py +++ b/modules/images.py @@ -608,7 +608,7 @@ def save_image_with_geninfo(image, geninfo, filename, extension=None, existing_p }) - image.save(filename,format=image_format, exif=exif_bytes) + image.save(filename,format=image_format, quality=opts.jpeg_quality, exif=exif_bytes) elif extension.lower() == ".gif": image.save(filename, format=image_format, comment=geninfo) else: From 8fa3fa76c39200e2af63ab86926c0c20cf02eb25 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Wed, 24 Apr 2024 02:41:31 +0900 Subject: [PATCH 13/89] fix exif_bytes referenced before assignment --- modules/images.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/modules/images.py b/modules/images.py index f4eb6f71a..36b610322 100644 --- 
a/modules/images.py +++ b/modules/images.py @@ -606,7 +606,8 @@ def save_image_with_geninfo(image, geninfo, filename, extension=None, existing_p piexif.ExifIFD.UserComment: piexif.helper.UserComment.dump(geninfo or "", encoding="unicode") }, }) - + else: + exif_bytes = None image.save(filename,format=image_format, quality=opts.jpeg_quality, exif=exif_bytes) elif extension.lower() == ".gif": From 1091e3a37eb363d6ac5f4d3eb596526a85dea551 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Wed, 24 Apr 2024 02:54:26 +0900 Subject: [PATCH 14/89] update jpeg_quality description --- modules/shared_options.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/shared_options.py b/modules/shared_options.py index 326a317e0..98d477f52 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -54,7 +54,7 @@ options_templates.update(options_section(('saving-images', "Saving images/grids" "save_images_before_color_correction": OptionInfo(False, "Save a copy of image before applying color correction to img2img results"), "save_mask": OptionInfo(False, "For inpainting, save a copy of the greyscale mask"), "save_mask_composite": OptionInfo(False, "For inpainting, save a masked composite"), - "jpeg_quality": OptionInfo(80, "Quality for saved jpeg images", gr.Slider, {"minimum": 1, "maximum": 100, "step": 1}), + "jpeg_quality": OptionInfo(80, "Quality for saved jpeg and avif images", gr.Slider, {"minimum": 1, "maximum": 100, "step": 1}), "webp_lossless": OptionInfo(False, "Use lossless compression for webp images"), "export_for_4chan": OptionInfo(True, "Save copy of large images as JPG").info("if the file size is above the limit, or either width or height are above the limit"), "img_downscale_threshold": OptionInfo(4.0, "File size limit for the above option, MB", gr.Number), From e85e327ae0409a6c7e6f98011465f07290b78567 Mon Sep 17 00:00:00 2001 From: Andray Date: Thu, 25 Apr 2024 13:26:26 +0400 Subject: [PATCH 15/89] more extension tag filtering options --- modules/ui_extensions.py | 68 +++++++++++++++++++++++++++------------- 1 file changed, 47 insertions(+), 21 deletions(-) diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index d822c0b89..9bfd5f3b3 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -396,15 +396,15 @@ def install_extension_from_url(dirname, url, branch_name=None): shutil.rmtree(tmpdir, True) -def install_extension_from_index(url, hide_tags, sort_column, filter_text): +def install_extension_from_index(url, selected_tags, showing_type, filtering_type, sort_column, filter_text): ext_table, message = install_extension_from_url(None, url) - code, _ = refresh_available_extensions_from_data(hide_tags, sort_column, filter_text) + code, _ = refresh_available_extensions_from_data(selected_tags, showing_type, filtering_type, sort_column, filter_text) return code, ext_table, message, '' -def refresh_available_extensions(url, hide_tags, sort_column): +def refresh_available_extensions(url, selected_tags, showing_type, filtering_type, sort_column): global available_extensions import urllib.request @@ -413,19 +413,19 @@ def refresh_available_extensions(url, hide_tags, sort_column): available_extensions = json.loads(text) - code, tags = refresh_available_extensions_from_data(hide_tags, sort_column) + code, tags = refresh_available_extensions_from_data(selected_tags, showing_type, filtering_type, sort_column) return url, code, gr.CheckboxGroup.update(choices=tags), '', '' -def 
refresh_available_extensions_for_tags(hide_tags, sort_column, filter_text): - code, _ = refresh_available_extensions_from_data(hide_tags, sort_column, filter_text) +def refresh_available_extensions_for_tags(selected_tags, showing_type, filtering_type, sort_column, filter_text): + code, _ = refresh_available_extensions_from_data(selected_tags, showing_type, filtering_type, sort_column, filter_text) return code, '' -def search_extensions(filter_text, hide_tags, sort_column): - code, _ = refresh_available_extensions_from_data(hide_tags, sort_column, filter_text) +def search_extensions(filter_text, selected_tags, showing_type, filtering_type, sort_column): + code, _ = refresh_available_extensions_from_data(selected_tags, showing_type, filtering_type, sort_column, filter_text) return code, '' @@ -450,13 +450,13 @@ def get_date(info: dict, key): return '' -def refresh_available_extensions_from_data(hide_tags, sort_column, filter_text=""): +def refresh_available_extensions_from_data(selected_tags, showing_type, filtering_type, sort_column, filter_text=""): extlist = available_extensions["extensions"] installed_extensions = {extension.name for extension in extensions.extensions} installed_extension_urls = {normalize_git_url(extension.remote) for extension in extensions.extensions if extension.remote is not None} tags = available_extensions.get("tags", {}) - tags_to_hide = set(hide_tags) + selected_tags = set(selected_tags) hidden = 0 code = f""" @@ -489,9 +489,19 @@ def refresh_available_extensions_from_data(hide_tags, sort_column, filter_text=" existing = get_extension_dirname_from_url(url) in installed_extensions or normalize_git_url(url) in installed_extension_urls extension_tags = extension_tags + ["installed"] if existing else extension_tags - if any(x for x in extension_tags if x in tags_to_hide): - hidden += 1 - continue + if len(selected_tags) > 0: + matched_tags = [x for x in extension_tags if x in selected_tags] + if filtering_type == 'or': + need_hide = len(matched_tags) > 0 + else: + need_hide = len(matched_tags) == len(selected_tags) + + if showing_type == 'show': + need_hide = not need_hide + + if need_hide: + hidden += 1 + continue if filter_text and filter_text.strip(): if filter_text.lower() not in html.escape(name).lower() and filter_text.lower() not in html.escape(description).lower(): @@ -594,9 +604,13 @@ def create_ui(): install_extension_button = gr.Button(elem_id="install_extension_button", visible=False) with gr.Row(): - hide_tags = gr.CheckboxGroup(value=["ads", "localization", "installed"], label="Hide extensions with tags", choices=["script", "ads", "localization", "installed"]) + selected_tags = gr.CheckboxGroup(value=["ads", "localization", "installed"], label="Extension tags", choices=["script", "ads", "localization", "installed"]) sort_column = gr.Radio(value="newest first", label="Order", choices=["newest first", "oldest first", "a-z", "z-a", "internal order",'update time', 'create time', "stars"], type="index") + with gr.Row(): + showing_type = gr.Radio(value="hide", label="Showing type", choices=["hide", "show"]) + filtering_type = gr.Radio(value="or", label="Filtering type", choices=["or", "and"]) + with gr.Row(): search_extensions_text = gr.Text(label="Search", container=False) @@ -605,31 +619,43 @@ def create_ui(): refresh_available_extensions_button.click( fn=modules.ui.wrap_gradio_call(refresh_available_extensions, extra_outputs=[gr.update(), gr.update(), gr.update(), gr.update()]), - inputs=[available_extensions_index, hide_tags, sort_column], - 
outputs=[available_extensions_index, available_extensions_table, hide_tags, search_extensions_text, install_result], + inputs=[available_extensions_index, selected_tags, showing_type, filtering_type, sort_column], + outputs=[available_extensions_index, available_extensions_table, selected_tags, search_extensions_text, install_result], ) install_extension_button.click( fn=modules.ui.wrap_gradio_call(install_extension_from_index, extra_outputs=[gr.update(), gr.update()]), - inputs=[extension_to_install, hide_tags, sort_column, search_extensions_text], + inputs=[extension_to_install, selected_tags, showing_type, filtering_type, sort_column, search_extensions_text], outputs=[available_extensions_table, extensions_table, install_result], ) search_extensions_text.change( fn=modules.ui.wrap_gradio_call(search_extensions, extra_outputs=[gr.update()]), - inputs=[search_extensions_text, hide_tags, sort_column], + inputs=[search_extensions_text, selected_tags, showing_type, filtering_type, sort_column], outputs=[available_extensions_table, install_result], ) - hide_tags.change( + selected_tags.change( fn=modules.ui.wrap_gradio_call(refresh_available_extensions_for_tags, extra_outputs=[gr.update()]), - inputs=[hide_tags, sort_column, search_extensions_text], + inputs=[selected_tags, showing_type, filtering_type, sort_column, search_extensions_text], + outputs=[available_extensions_table, install_result] + ) + + showing_type.change( + fn=modules.ui.wrap_gradio_call(refresh_available_extensions_for_tags, extra_outputs=[gr.update()]), + inputs=[selected_tags, showing_type, filtering_type, sort_column, search_extensions_text], + outputs=[available_extensions_table, install_result] + ) + + filtering_type.change( + fn=modules.ui.wrap_gradio_call(refresh_available_extensions_for_tags, extra_outputs=[gr.update()]), + inputs=[selected_tags, showing_type, filtering_type, sort_column, search_extensions_text], outputs=[available_extensions_table, install_result] ) sort_column.change( fn=modules.ui.wrap_gradio_call(refresh_available_extensions_for_tags, extra_outputs=[gr.update()]), - inputs=[hide_tags, sort_column, search_extensions_text], + inputs=[selected_tags, showing_type, filtering_type, sort_column, search_extensions_text], outputs=[available_extensions_table, install_result] ) From d5f6fdb3c44204495067d4166a6a980a9f1165ed Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Fri, 26 Apr 2024 18:47:04 +0900 Subject: [PATCH 16/89] compact-checkbox-group --- modules/ui_extensions.py | 8 ++++---- style.css | 4 ++++ 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index 9bfd5f3b3..6b6403f23 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -604,12 +604,12 @@ def create_ui(): install_extension_button = gr.Button(elem_id="install_extension_button", visible=False) with gr.Row(): - selected_tags = gr.CheckboxGroup(value=["ads", "localization", "installed"], label="Extension tags", choices=["script", "ads", "localization", "installed"]) - sort_column = gr.Radio(value="newest first", label="Order", choices=["newest first", "oldest first", "a-z", "z-a", "internal order",'update time', 'create time', "stars"], type="index") + selected_tags = gr.CheckboxGroup(value=["ads", "localization", "installed"], label="Extension tags", choices=["script", "ads", "localization", "installed"], elem_classes=['compact-checkbox-group']) + sort_column = gr.Radio(value="newest first", label="Order", choices=["newest 
first", "oldest first", "a-z", "z-a", "internal order",'update time', 'create time', "stars"], type="index", elem_classes=['compact-checkbox-group']) with gr.Row(): - showing_type = gr.Radio(value="hide", label="Showing type", choices=["hide", "show"]) - filtering_type = gr.Radio(value="or", label="Filtering type", choices=["or", "and"]) + showing_type = gr.Radio(value="hide", label="Showing type", choices=["hide", "show"], elem_classes=['compact-checkbox-group']) + filtering_type = gr.Radio(value="or", label="Filtering type", choices=["or", "and"], elem_classes=['compact-checkbox-group']) with gr.Row(): search_extensions_text = gr.Text(label="Search", container=False) diff --git a/style.css b/style.css index f6a89b8f9..cca5456cc 100644 --- a/style.css +++ b/style.css @@ -854,6 +854,10 @@ table.popup-table .link{ display: inline-block; } +.compact-checkbox-group div label { + padding: 0.1em 0.3em !important; +} + /* extensions tab table row hover highlight */ #extensions tr:hover td, From 3902aa222b00a24f2d7b7158b79efaac9f318923 Mon Sep 17 00:00:00 2001 From: Brendan Hoar Date: Fri, 26 Apr 2024 06:44:41 -0400 Subject: [PATCH 17/89] Better error handling to skip non-standard ss_tag_frequency content --- extensions-builtin/Lora/ui_edit_user_metadata.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/extensions-builtin/Lora/ui_edit_user_metadata.py b/extensions-builtin/Lora/ui_edit_user_metadata.py index 7a07a544e..b6c4d1c6a 100644 --- a/extensions-builtin/Lora/ui_edit_user_metadata.py +++ b/extensions-builtin/Lora/ui_edit_user_metadata.py @@ -21,10 +21,12 @@ re_comma = re.compile(r" *, *") def build_tags(metadata): tags = {} - for _, tags_dict in metadata.get("ss_tag_frequency", {}).items(): - for tag, tag_count in tags_dict.items(): - tag = tag.strip() - tags[tag] = tags.get(tag, 0) + int(tag_count) + ss_tag_frequency = metadata.get("ss_tag_frequency", {}) + if ss_tag_frequency is not None and hasattr(ss_tag_frequency, 'items'): + for _, tags_dict in ss_tag_frequency.items(): + for tag, tag_count in tags_dict.items(): + tag = tag.strip() + tags[tag] = tags.get(tag, 0) + int(tag_count) if tags and is_non_comma_tagset(tags): new_tags = {} From 8dc920228e7c5181cc990845f0febd2ac4b42d87 Mon Sep 17 00:00:00 2001 From: Brendan Hoar Date: Fri, 26 Apr 2024 06:52:21 -0400 Subject: [PATCH 18/89] Better error handling when unable to read metadata from safetensors file --- modules/sd_models.py | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/modules/sd_models.py b/modules/sd_models.py index ff245b7a6..59742d311 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -280,18 +280,22 @@ def read_metadata_from_safetensors(filename): json_start = file.read(2) assert metadata_len > 2 and json_start in (b'{"', b"{'"), f"{filename} is not a safetensors file" - json_data = json_start + file.read(metadata_len-2) - json_obj = json.loads(json_data) res = {} - for k, v in json_obj.get("__metadata__", {}).items(): - res[k] = v - if isinstance(v, str) and v[0:1] == '{': - try: - res[k] = json.loads(v) - except Exception: - pass + try: + json_data = json_start + file.read(metadata_len-2) + json_obj = json.loads(json_data) + for k, v in json_obj.get("__metadata__", {}).items(): + res[k] = v + if isinstance(v, str) and v[0:1] == '{': + try: + res[k] = json.loads(v) + except Exception: + pass + except: + errors.report(f"Error reading metadata from file: {filename}", exc_info=True) + return res From c5b7559856c5f64792c2425d11890a121497e6bc Mon 
Sep 17 00:00:00 2001 From: Brendan Hoar Date: Fri, 26 Apr 2024 06:57:32 -0400 Subject: [PATCH 19/89] Better error handling when unable to extract contents of embedding/TI file --- modules/textual_inversion/textual_inversion.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 253f219c4..dc7833e93 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -181,12 +181,16 @@ class EmbeddingDatabase: else: return - embedding = create_embedding_from_data(data, name, filename=filename, filepath=path) + if data is not None: + embedding = create_embedding_from_data(data, name, filename=filename, filepath=path) - if self.expected_shape == -1 or self.expected_shape == embedding.shape: - self.register_embedding(embedding, shared.sd_model) + if self.expected_shape == -1 or self.expected_shape == embedding.shape: + self.register_embedding(embedding, shared.sd_model) + else: + self.skipped_embeddings[name] = embedding else: - self.skipped_embeddings[name] = embedding + print(f"Unable to load Textual inversion embedding due to data issue: '{name}'.") + def load_from_dir(self, embdir): if not os.path.isdir(embdir.path): return From c5ae2254182b803618a4b01c12fa88c42642e806 Mon Sep 17 00:00:00 2001 From: Brendan Hoar Date: Fri, 26 Apr 2024 07:55:39 -0400 Subject: [PATCH 20/89] Better handling of embeddings with two rare, but not unheard-of, files in them I have encountered pickled embeddings with a short byteorder file at the top-level, as well as a .data/serialization_id file. Both load fine after allowing these files in the archive. I do not think adding them to the safe unpickle regular expression is likely to be a security risk, but that's for the maintainers to decide. --- modules/safe.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/safe.py b/modules/safe.py index b1d08a792..ee8789268 100644 --- a/modules/safe.py +++ b/modules/safe.py @@ -65,7 +65,7 @@ class RestrictedUnpickler(pickle.Unpickler): # Regular expression that accepts 'dirname/version', 'dirname/data.pkl', and 'dirname/data/' -allowed_zip_names_re = re.compile(r"^([^/]+)/((data/\d+)|version|(data\.pkl))$") +allowed_zip_names_re = re.compile(r"^([^/]+)/((data/\d+)|byteorder|(\.data\/serialization_id)|version|(data\.pkl))$") data_pkl_re = re.compile(r"^([^/]+)/data\.pkl$") def check_zip_filenames(filename, names): From 44afb48447c2ef40f8546fe704bd817881da5a14 Mon Sep 17 00:00:00 2001 From: Brendan Hoar Date: Fri, 26 Apr 2024 08:17:37 -0400 Subject: [PATCH 21/89] Linter fix - extraneous whitespace --- modules/sd_models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/sd_models.py b/modules/sd_models.py index 59742d311..06e881207 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -295,7 +295,7 @@ def read_metadata_from_safetensors(filename): pass except: errors.report(f"Error reading metadata from file: {filename}", exc_info=True) - + return res From 60c079995824ebe861029839ee12ca0df6a26e8d Mon Sep 17 00:00:00 2001 From: Brendan Hoar Date: Fri, 26 Apr 2024 08:21:12 -0400 Subject: [PATCH 22/89] Linter - except must not be bare. 
--- modules/sd_models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/sd_models.py b/modules/sd_models.py index 06e881207..06a7cf3f0 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -293,7 +293,7 @@ def read_metadata_from_safetensors(filename): res[k] = json.loads(v) except Exception: pass - except: + except Exception: errors.report(f"Error reading metadata from file: {filename}", exc_info=True) return res From 9d964d3fc3285b3df877479081968ebf6dbccce4 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sat, 27 Apr 2024 19:21:34 +0900 Subject: [PATCH 23/89] no-referrer --- modules/ui_gradio_extensions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ui_gradio_extensions.py b/modules/ui_gradio_extensions.py index f5278d22f..18fbd6777 100644 --- a/modules/ui_gradio_extensions.py +++ b/modules/ui_gradio_extensions.py @@ -50,7 +50,7 @@ def reload_javascript(): def template_response(*args, **kwargs): res = shared.GradioTemplateResponseOriginal(*args, **kwargs) - res.body = res.body.replace(b'</head>', f'{js}</head>'.encode("utf8")) + res.body = res.body.replace(b'</head>', f'<meta name="referrer" content="no-referrer"/>{js}</head>'.encode("utf8")) res.body = res.body.replace(b'</body>', f'{css}</body>'.encode("utf8")) res.init_headers() return res From 3a215deff23d28c06c8de98423c12628b8ce6326 Mon Sep 17 00:00:00 2001 From: drhead <1313496+drhead@users.noreply.github.com> Date: Sun, 28 Apr 2024 00:15:58 -0400 Subject: [PATCH 24/89] vectorize kl-optimal sigma calculation Co-authored-by: mamei16 --- modules/sd_schedulers.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/modules/sd_schedulers.py b/modules/sd_schedulers.py index 10ae4e081..99a6f7be2 100644 --- a/modules/sd_schedulers.py +++ b/modules/sd_schedulers.py @@ -34,9 +34,8 @@ def sgm_uniform(n, sigma_min, sigma_max, inner_model, device): def kl_optimal(n, sigma_min, sigma_max, device): alpha_min = torch.arctan(torch.tensor(sigma_min, device=device)) alpha_max = torch.arctan(torch.tensor(sigma_max, device=device)) - sigmas = torch.empty((n+1,), device=device) - for i in range(n+1): - sigmas[i] = torch.tan((i/n) * alpha_min + (1.0-i/n) * alpha_max) + step_indices = torch.arange(n + 1, device=device) + sigmas = torch.tan(step_indices / n * alpha_min + (1.0 - step_indices / n) * alpha_max) return sigmas From 3d3fc81f4858cae75fa33e55e7b88ede853d28ae Mon Sep 17 00:00:00 2001 From: huchenlei Date: Sun, 28 Apr 2024 16:14:12 -0400 Subject: [PATCH 25/89] Add correct mimetype for .mjs files --- modules/ui.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/ui.py b/modules/ui.py index 403425f29..c6c058fea 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -38,6 +38,7 @@ warnings.filterwarnings("default" if opts.show_gradio_deprecation_warnings else # this is a fix for Windows users. 
Without it, javascript files will be served with text/html content-type and the browser will not show any UI mimetypes.init() mimetypes.add_type('application/javascript', '.js') +mimetypes.add_type('application/javascript', '.mjs') # Likewise, add explicit content-type header for certain missing image types mimetypes.add_type('image/webp', '.webp') From 579f1ef278080ff7545be3a42c5fe36fc2890887 Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Sun, 28 Apr 2024 22:36:43 -0600 Subject: [PATCH 26/89] Allow old sampler names in API --- modules/api/api.py | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/modules/api/api.py b/modules/api/api.py index f468c3852..b1201fe77 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -48,6 +48,15 @@ def validate_sampler_name(name): return name +def parse_old_sampler_name(name): + for scheduler in sd_schedulers.schedulers: + for scheduler_name in [scheduler.label, scheduler.name, *(scheduler.aliases or [])]: + if name.endswith(" " + scheduler_name): + return name[0:-(len(scheduler_name) + 1)], scheduler_name + + return name, "Automatic" + + def setUpscalers(req: dict): reqDict = vars(req) reqDict['extras_upscaler_1'] = reqDict.pop('upscaler_1', None) @@ -438,15 +447,19 @@ class Api: self.apply_infotext(txt2imgreq, "txt2img", script_runner=script_runner, mentioned_script_args=infotext_script_args) selectable_scripts, selectable_script_idx = self.get_selectable_script(txt2imgreq.script_name, script_runner) + sampler, scheduler = parse_old_sampler_name(txt2imgreq.sampler_name or txt2imgreq.sampler_index) populate = txt2imgreq.copy(update={ # Override __init__ params - "sampler_name": validate_sampler_name(txt2imgreq.sampler_name or txt2imgreq.sampler_index), + "sampler_name": validate_sampler_name(sampler), "do_not_save_samples": not txt2imgreq.save_images, "do_not_save_grid": not txt2imgreq.save_images, }) if populate.sampler_name: populate.sampler_index = None # prevent a warning later on + if not populate.scheduler: + populate.scheduler = scheduler + args = vars(populate) args.pop('script_name', None) args.pop('script_args', None) # will refeed them to the pipeline directly after initializing them @@ -502,9 +515,10 @@ class Api: self.apply_infotext(img2imgreq, "img2img", script_runner=script_runner, mentioned_script_args=infotext_script_args) selectable_scripts, selectable_script_idx = self.get_selectable_script(img2imgreq.script_name, script_runner) + sampler, scheduler = parse_old_sampler_name(img2imgreq.sampler_name or img2imgreq.sampler_index) populate = img2imgreq.copy(update={ # Override __init__ params - "sampler_name": validate_sampler_name(img2imgreq.sampler_name or img2imgreq.sampler_index), + "sampler_name": validate_sampler_name(sampler), "do_not_save_samples": not img2imgreq.save_images, "do_not_save_grid": not img2imgreq.save_images, "mask": mask, @@ -512,6 +526,9 @@ class Api: if populate.sampler_name: populate.sampler_index = None # prevent a warning later on + if not populate.scheduler: + populate.scheduler = scheduler + args = vars(populate) args.pop('include_init_images', None) # this is meant to be done by "exclude": True in model, but it's for a reason that I cannot determine. 
args.pop('script_name', None) From 4c7b22d37d14c8469b4510a11710f162940cdaa6 Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Sun, 28 Apr 2024 22:46:11 -0600 Subject: [PATCH 27/89] Fix dragging text within prompt input --- javascript/dragdrop.js | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/javascript/dragdrop.js b/javascript/dragdrop.js index 0c0183564..882562d73 100644 --- a/javascript/dragdrop.js +++ b/javascript/dragdrop.js @@ -56,6 +56,15 @@ function eventHasFiles(e) { return false; } +function isURL(url) { + try { + const _ = new URL(url); + return true; + } catch { + return false; + } +} + function dragDropTargetIsPrompt(target) { if (target?.placeholder && target?.placeholder.indexOf("Prompt") >= 0) return true; if (target?.parentNode?.parentNode?.className?.indexOf("prompt") > 0) return true; @@ -77,7 +86,7 @@ window.document.addEventListener('dragover', e => { window.document.addEventListener('drop', async e => { const target = e.composedPath()[0]; const url = e.dataTransfer.getData('text/uri-list') || e.dataTransfer.getData('text/plain'); - if (!eventHasFiles(e) && !url) return; + if (!eventHasFiles(e) && !isURL(url)) return; if (dragDropTargetIsPrompt(target)) { e.stopPropagation(); From c8336c45b98c2226923503e17b1d7f9170af0f8a Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Tue, 30 Apr 2024 01:53:41 -0600 Subject: [PATCH 28/89] Use existing function for old sampler names --- modules/api/api.py | 17 ++++------------- 1 file changed, 4 insertions(+), 13 deletions(-) diff --git a/modules/api/api.py b/modules/api/api.py index b1201fe77..d8e54529b 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -48,15 +48,6 @@ def validate_sampler_name(name): return name -def parse_old_sampler_name(name): - for scheduler in sd_schedulers.schedulers: - for scheduler_name in [scheduler.label, scheduler.name, *(scheduler.aliases or [])]: - if name.endswith(" " + scheduler_name): - return name[0:-(len(scheduler_name) + 1)], scheduler_name - - return name, "Automatic" - - def setUpscalers(req: dict): reqDict = vars(req) reqDict['extras_upscaler_1'] = reqDict.pop('upscaler_1', None) @@ -447,7 +438,7 @@ class Api: self.apply_infotext(txt2imgreq, "txt2img", script_runner=script_runner, mentioned_script_args=infotext_script_args) selectable_scripts, selectable_script_idx = self.get_selectable_script(txt2imgreq.script_name, script_runner) - sampler, scheduler = parse_old_sampler_name(txt2imgreq.sampler_name or txt2imgreq.sampler_index) + sampler, scheduler = sd_samplers.get_sampler_and_scheduler(txt2imgreq.sampler_name or txt2imgreq.sampler_index, txt2imgreq.scheduler) populate = txt2imgreq.copy(update={ # Override __init__ params "sampler_name": validate_sampler_name(sampler), @@ -457,7 +448,7 @@ class Api: if populate.sampler_name: populate.sampler_index = None # prevent a warning later on - if not populate.scheduler: + if not populate.scheduler and scheduler != "Automatic": populate.scheduler = scheduler args = vars(populate) @@ -515,7 +506,7 @@ class Api: self.apply_infotext(img2imgreq, "img2img", script_runner=script_runner, mentioned_script_args=infotext_script_args) selectable_scripts, selectable_script_idx = self.get_selectable_script(img2imgreq.script_name, script_runner) - sampler, scheduler = parse_old_sampler_name(img2imgreq.sampler_name or img2imgreq.sampler_index) + sampler, scheduler = sd_samplers.get_sampler_and_scheduler(img2imgreq.sampler_name or img2imgreq.sampler_index, img2imgreq.scheduler) populate = img2imgreq.copy(update={ # Override 
__init__ params "sampler_name": validate_sampler_name(sampler), @@ -526,7 +517,7 @@ class Api: if populate.sampler_name: populate.sampler_index = None # prevent a warning later on - if not populate.scheduler: + if not populate.scheduler and scheduler != "Automatic": populate.scheduler = scheduler args = vars(populate) From 9d393807056199deade14154d885fcd07dee24b7 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Tue, 30 Apr 2024 19:17:53 +0900 Subject: [PATCH 29/89] fix extras batch mode "P" transparency error: "red, green, blue = transparency" raised "TypeError: cannot unpack non-iterable int object" --- modules/postprocessing.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/modules/postprocessing.py b/modules/postprocessing.py index 812cbccae..8ec122b7c 100644 --- a/modules/postprocessing.py +++ b/modules/postprocessing.py @@ -62,11 +62,13 @@ def run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir, else: image_data = image_placeholder + image_data = image_data if image_data.mode in ("RGBA", "RGB") else image_data.convert("RGB") + parameters, existing_pnginfo = images.read_info_from_image(image_data) if parameters: existing_pnginfo["parameters"] = parameters - initial_pp = scripts_postprocessing.PostprocessedImage(image_data if image_data.mode in ("RGBA", "RGB") else image_data.convert("RGB")) + initial_pp = scripts_postprocessing.PostprocessedImage(image_data) scripts.scripts_postproc.run(initial_pp, args) From 89103b47475ba7bb8b9c4b36f8078c6416132ab0 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Wed, 1 May 2024 19:41:02 +0900 Subject: [PATCH 30/89] lora bundled TI infotext Co-Authored-By: Morgon Kanter <9632805+mx@users.noreply.github.com> --- extensions-builtin/Lora/networks.py | 9 +++++++++ extensions-builtin/Lora/scripts/lora_script.py | 1 + 2 files changed, 10 insertions(+) diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py index 42b14dc23..aa55fe242 100644 --- a/extensions-builtin/Lora/networks.py +++ b/extensions-builtin/Lora/networks.py @@ -143,6 +143,14 @@ def assign_network_names_to_compvis_modules(sd_model): sd_model.network_layer_mapping = network_layer_mapping +class BundledTIHash(str): + def __init__(self, hash_str): + self.hash = hash_str + + def __str__(self): + return self.hash if shared.opts.lora_bundled_ti_to_infotext else '' + + def load_network(name, network_on_disk): net = network.Network(name, network_on_disk) net.mtime = os.path.getmtime(network_on_disk.filename) @@ -229,6 +237,7 @@ def load_network(name, network_on_disk): for emb_name, data in bundle_embeddings.items(): embedding = textual_inversion.create_embedding_from_data(data, emb_name, filename=network_on_disk.filename + "/" + emb_name) embedding.loaded = None + embedding.shorthash = BundledTIHash(name) embeddings[emb_name] = embedding net.bundle_embeddings = embeddings diff --git a/extensions-builtin/Lora/scripts/lora_script.py b/extensions-builtin/Lora/scripts/lora_script.py index 1518f7e5c..d3ea369ae 100644 --- a/extensions-builtin/Lora/scripts/lora_script.py +++ b/extensions-builtin/Lora/scripts/lora_script.py @@ -36,6 +36,7 @@ shared.options_templates.update(shared.options_section(('extra_networks', "Extra "sd_lora": shared.OptionInfo("None", "Add network to prompt", gr.Dropdown, lambda: {"choices": ["None", *networks.available_networks]}, refresh=networks.list_available_networks), "lora_preferred_name": shared.OptionInfo("Alias from file", "When adding to prompt, refer 
to Lora by", gr.Radio, {"choices": ["Alias from file", "Filename"]}), "lora_add_hashes_to_infotext": shared.OptionInfo(True, "Add Lora hashes to infotext"), + "lora_bundled_ti_to_infotext": shared.OptionInfo(True, "Add Lora name as TI hashes for bundled Textual Inversion").info('"Add Textual Inversion hashes to infotext" needs to be enabled'), "lora_show_all": shared.OptionInfo(False, "Always show all networks on the Lora page").info("otherwise, those detected as for incompatible version of Stable Diffusion will be hidden"), "lora_hide_unknown_for_versions": shared.OptionInfo([], "Hide networks of unknown versions for model versions", gr.CheckboxGroup, {"choices": ["SD1", "SD2", "SDXL"]}), "lora_in_memory_limit": shared.OptionInfo(0, "Number of Lora networks to keep cached in memory", gr.Number, {"precision": 0}), From 0e0e41eabc5753034091e7c673100df66b3640ab Mon Sep 17 00:00:00 2001 From: Andray Date: Wed, 1 May 2024 16:54:47 +0400 Subject: [PATCH 31/89] use gradio theme colors in css --- style.css | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/style.css b/style.css index f6a89b8f9..df4aca020 100644 --- a/style.css +++ b/style.css @@ -780,9 +780,9 @@ table.popup-table .link{ position:absolute; display:block; padding:0px 0; - border:2px solid #a55000; + border:2px solid var(--primary-800); border-radius:8px; - box-shadow:1px 1px 2px #CE6400; + box-shadow:1px 1px 2px var(--primary-500); width: 200px; } @@ -799,7 +799,7 @@ table.popup-table .link{ } .context-menu-items a:hover{ - background: #a55000; + background: var(--primary-700); } From 5d5224b322e8dbd817469a32d6c5578faff2df2f Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Thu, 2 May 2024 02:25:16 +0900 Subject: [PATCH 32/89] fix_p_invalid_sampler_and_scheduler --- modules/processing.py | 3 +++ modules/sd_samplers.py | 9 ++++++++- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/modules/processing.py b/modules/processing.py index 76557dd7f..cb646e2bf 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -842,6 +842,9 @@ def process_images(p: StableDiffusionProcessing) -> Processed: sd_models.apply_token_merging(p.sd_model, p.get_token_merging_ratio()) + # backwards compatibility, fix sampler and scheduler if invalid + sd_samplers.fix_p_invalid_sampler_and_scheduler(p) + res = process_images_inner(p) finally: diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py index 6b7b84b6d..b8abac4a9 100644 --- a/modules/sd_samplers.py +++ b/modules/sd_samplers.py @@ -1,7 +1,7 @@ from __future__ import annotations import functools - +import logging from modules import sd_samplers_kdiffusion, sd_samplers_timesteps, sd_samplers_lcm, shared, sd_samplers_common, sd_schedulers # imports for functions that previously were here and are used by other modules @@ -122,4 +122,11 @@ def get_sampler_and_scheduler(sampler_name, scheduler_name): return sampler.name, found_scheduler.label +def fix_p_invalid_sampler_and_scheduler(p): + i_sampler_name, i_scheduler = p.sampler_name, p.scheduler + p.sampler_name, p.scheduler = get_sampler_and_scheduler(p.sampler_name, p.scheduler) + if p.sampler_name != i_sampler_name or i_scheduler != p.scheduler: + logging.warning(f'Sampler Scheduler autocorrection: "{i_sampler_name}" -> "{p.sampler_name}", "{i_scheduler}" -> "{p.scheduler}"') + + set_samplers() From 7195c4d42cf410c53d4d2f7a74d7059715d357a7 Mon Sep 17 00:00:00 2001 From: Andray Date: Wed, 1 May 2024 22:50:46 +0400 Subject: [PATCH 33/89] two fingers press to open context menu 
--- javascript/contextMenus.js | 25 ++++++++++++++++--------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/javascript/contextMenus.js b/javascript/contextMenus.js index ccae242f2..a00c3de9f 100644 --- a/javascript/contextMenus.js +++ b/javascript/contextMenus.js @@ -107,16 +107,23 @@ var contextMenuInit = function() { oldMenu.remove(); } }); - gradioApp().addEventListener("contextmenu", function(e) { - let oldMenu = gradioApp().querySelector('#context-menu'); - if (oldMenu) { - oldMenu.remove(); - } - menuSpecs.forEach(function(v, k) { - if (e.composedPath()[0].matches(k)) { - showContextMenu(e, e.composedPath()[0], v); - e.preventDefault(); + ['contextmenu', 'touchstart'].forEach((eventType) => { + gradioApp().addEventListener(eventType, function(e) { + let ev = e; + if (eventType.startsWith('touch')) { + if (e.touches.length !== 2) return; + ev = e.touches[0]; } + let oldMenu = gradioApp().querySelector('#context-menu'); + if (oldMenu) { + oldMenu.remove(); + } + menuSpecs.forEach(function(v, k) { + if (e.composedPath()[0].matches(k)) { + showContextMenu(ev, e.composedPath()[0], v); + e.preventDefault(); + } + }); }); }); eventListenerApplied = true; From f12886aefa4f2ac5d8e64a206a6b4d6df9d85b6b Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sat, 4 May 2024 23:42:37 +0900 Subject: [PATCH 34/89] use script_path for webui root in launch_utils --- modules/launch_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/launch_utils.py b/modules/launch_utils.py index 5812b0e58..e22da4ec6 100644 --- a/modules/launch_utils.py +++ b/modules/launch_utils.py @@ -76,7 +76,7 @@ def git_tag(): except Exception: try: - changelog_md = os.path.join(os.path.dirname(os.path.dirname(__file__)), "CHANGELOG.md") + changelog_md = os.path.join(script_path, "CHANGELOG.md") with open(changelog_md, "r", encoding="utf-8") as file: line = next((line.strip() for line in file if line.strip()), "") line = line.replace("## ", "") @@ -231,7 +231,7 @@ def run_extension_installer(extension_dir): try: env = os.environ.copy() - env['PYTHONPATH'] = f"{os.path.abspath('.')}{os.pathsep}{env.get('PYTHONPATH', '')}" + env['PYTHONPATH'] = f"{script_path}{os.pathsep}{env.get('PYTHONPATH', '')}" stdout = run(f'"{python}" "{path_installer}"', errdesc=f"Error running install.py for extension {extension_dir}", custom_env=env).strip() if stdout: From dd93c47abfd9ed357f5d5827311d836ea399a236 Mon Sep 17 00:00:00 2001 From: bluelovers Date: Tue, 7 May 2024 19:53:18 +0800 Subject: [PATCH 35/89] Update imageviewer.js --- javascript/imageviewer.js | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/javascript/imageviewer.js b/javascript/imageviewer.js index d4d4f016d..a3f08ad16 100644 --- a/javascript/imageviewer.js +++ b/javascript/imageviewer.js @@ -51,14 +51,7 @@ function modalImageSwitch(offset) { var galleryButtons = all_gallery_buttons(); if (galleryButtons.length > 1) { - var currentButton = selected_gallery_button(); - - var result = -1; - galleryButtons.forEach(function(v, i) { - if (v == currentButton) { - result = i; - } - }); + var result = selected_gallery_index(); if (result != -1) { var nextButton = galleryButtons[negmod((result + offset), galleryButtons.length)]; From dbda59e58a7c90752ab9911a779dd1381ae530e1 Mon Sep 17 00:00:00 2001 From: Andray Date: Tue, 7 May 2024 19:26:16 +0400 Subject: [PATCH 36/89] fix context menu position --- javascript/contextMenus.js | 24 ++---------------------- 1 file changed, 2 
insertions(+), 22 deletions(-) diff --git a/javascript/contextMenus.js b/javascript/contextMenus.js index a00c3de9f..e01fd67e8 100644 --- a/javascript/contextMenus.js +++ b/javascript/contextMenus.js @@ -8,9 +8,6 @@ var contextMenuInit = function() { }; function showContextMenu(event, element, menuEntries) { - let posx = event.clientX + document.body.scrollLeft + document.documentElement.scrollLeft; - let posy = event.clientY + document.body.scrollTop + document.documentElement.scrollTop; - let oldMenu = gradioApp().querySelector('#context-menu'); if (oldMenu) { oldMenu.remove(); @@ -23,10 +20,8 @@ var contextMenuInit = function() { contextMenu.style.background = baseStyle.background; contextMenu.style.color = baseStyle.color; contextMenu.style.fontFamily = baseStyle.fontFamily; - contextMenu.style.top = posy + 'px'; - contextMenu.style.left = posx + 'px'; - - + contextMenu.style.top = event.pageY + 'px'; + contextMenu.style.left = event.pageX + 'px'; const contextMenuList = document.createElement('ul'); contextMenuList.className = 'context-menu-items'; @@ -43,21 +38,6 @@ var contextMenuInit = function() { }); gradioApp().appendChild(contextMenu); - - let menuWidth = contextMenu.offsetWidth + 4; - let menuHeight = contextMenu.offsetHeight + 4; - - let windowWidth = window.innerWidth; - let windowHeight = window.innerHeight; - - if ((windowWidth - posx) < menuWidth) { - contextMenu.style.left = windowWidth - menuWidth + "px"; - } - - if ((windowHeight - posy) < menuHeight) { - contextMenu.style.top = windowHeight - menuHeight + "px"; - } - } function appendContextMenuOption(targetElementSelector, entryName, entryFunction) { From e736c3b36b5e450c3883719d1b73acf84bdf29f7 Mon Sep 17 00:00:00 2001 From: JLipnerPitt <122459494+JLipnerPitt@users.noreply.github.com> Date: Wed, 8 May 2024 05:22:12 -0400 Subject: [PATCH 37/89] Add files via upload Fixed an error (AttributeError: 'str' object has no attribute 'decode') coming from line 792 in images.py when trying to upscale certain images. --- modules/images.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/modules/images.py b/modules/images.py index c0ff8a630..0f49caf79 100644 --- a/modules/images.py +++ b/modules/images.py @@ -789,7 +789,10 @@ def read_info_from_image(image: Image.Image) -> tuple[str | None, dict]: if exif_comment: geninfo = exif_comment elif "comment" in items: # for gif - geninfo = items["comment"].decode('utf8', errors="ignore") + if isinstance(items["comment"], bytes): + geninfo = items["comment"].decode('utf8', errors="ignore") + else: + geninfo = items["comment"] for field in IGNORED_INFO_KEYS: items.pop(field, None) From f7e349cea49731b0e57cc2a2c1eb4904f1aea9b9 Mon Sep 17 00:00:00 2001 From: LoganBooker Date: Wed, 8 May 2024 21:23:18 +1000 Subject: [PATCH 38/89] Add AVIF MIME type support to mimetype definitions AVIF images will open, rather than download, as the default behaviour. 
--- modules/ui.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/ui.py b/modules/ui.py index 403425f29..cface5002 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -41,6 +41,7 @@ mimetypes.add_type('application/javascript', '.js') # Likewise, add explicit content-type header for certain missing image types mimetypes.add_type('image/webp', '.webp') +mimetypes.add_type('image/avif', '.avif') if not cmd_opts.share and not cmd_opts.listen: # fix gradio phoning home From 5fbac49791d9a4a6af85c8236ba9179d7415e0f9 Mon Sep 17 00:00:00 2001 From: MarcusNyne <69087098+MarcusNyne@users.noreply.github.com> Date: Wed, 8 May 2024 16:48:10 -0400 Subject: [PATCH 39/89] Added --models-dir option The --models-dir option overrides the location of the models directory for stable diffusion, so that models can be shared across multiple installations. When --data-dir is specified alone, both the extensions and models folders are present in this folder. --models-dir can be used independently, but when used with --data-dir, then the models folder is specified by --models-dir, and extensions are found in the --data-dir. --- modules/cmd_args.py | 1 + modules/paths_internal.py | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/modules/cmd_args.py b/modules/cmd_args.py index 016a33d10..a683c99e8 100644 --- a/modules/cmd_args.py +++ b/modules/cmd_args.py @@ -20,6 +20,7 @@ parser.add_argument("--dump-sysinfo", action='store_true', help="launch.py argum parser.add_argument("--loglevel", type=str, help="log level; one of: CRITICAL, ERROR, WARNING, INFO, DEBUG", default=None) parser.add_argument("--do-not-download-clip", action='store_true', help="do not download CLIP model even if it's not included in the checkpoint") parser.add_argument("--data-dir", type=normalized_filepath, default=os.path.dirname(os.path.dirname(os.path.realpath(__file__))), help="base path where all user data is stored") +parser.add_argument("--models-dir", type=normalized_filepath, default=None, help="base path where models are stored; overrides --data-dir") parser.add_argument("--config", type=normalized_filepath, default=sd_default_config, help="path to config which constructs model",) parser.add_argument("--ckpt", type=normalized_filepath, default=sd_model_file, help="path to checkpoint of stable diffusion model; if specified, this checkpoint will be added to the list of checkpoints and loaded",) parser.add_argument("--ckpt-dir", type=normalized_filepath, default=None, help="Path to directory with stable diffusion checkpoints") diff --git a/modules/paths_internal.py b/modules/paths_internal.py index cf9da45ab..884984c9c 100644 --- a/modules/paths_internal.py +++ b/modules/paths_internal.py @@ -24,11 +24,13 @@ default_sd_model_file = sd_model_file # Parse the --data-dir flag first so we can use it as a base for our other argument default values parser_pre = argparse.ArgumentParser(add_help=False) parser_pre.add_argument("--data-dir", type=str, default=os.path.dirname(modules_path), help="base path where all user data is stored", ) +parser_pre.add_argument("--models-dir", type=str, default=None, help="base path where models are stored; overrides --data-dir", ) cmd_opts_pre = parser_pre.parse_known_args()[0] data_path = cmd_opts_pre.data_dir +models_override = cmd_opts_pre.models_dir -models_path = os.path.join(data_path, "models") +models_path = models_override if models_override else os.path.join(data_path, "models") extensions_dir = os.path.join(data_path, "extensions") extensions_builtin_dir = 
os.path.join(script_path, "extensions-builtin") config_states_dir = os.path.join(script_path, "config_states") From d2cc8ccb11558f1dbdb27a2351e34155c3a24ccf Mon Sep 17 00:00:00 2001 From: MarcusNyne <69087098+MarcusNyne@users.noreply.github.com> Date: Thu, 9 May 2024 17:16:53 -0400 Subject: [PATCH 40/89] When creating a virtual environment, upgrade pip Pip will be upgraded immediately upon creating the virtual environment. If the pip upgrade fails, this should not cause the script to fail (treat as a warning). After the environment is created, it will not attempt further updates to pip. --- webui.bat | 7 ++++++- webui.sh | 1 + 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/webui.bat b/webui.bat index e2c9079d2..a8d479b05 100644 --- a/webui.bat +++ b/webui.bat @@ -37,10 +37,15 @@ if %ERRORLEVEL% == 0 goto :activate_venv for /f "delims=" %%i in ('CALL %PYTHON% -c "import sys; print(sys.executable)"') do set PYTHON_FULLNAME="%%i" echo Creating venv in directory %VENV_DIR% using python %PYTHON_FULLNAME% %PYTHON_FULLNAME% -m venv "%VENV_DIR%" >tmp/stdout.txt 2>tmp/stderr.txt -if %ERRORLEVEL% == 0 goto :activate_venv +if %ERRORLEVEL% == 0 goto :upgrade_pip echo Unable to create venv in directory "%VENV_DIR%" goto :show_stdout_stderr +:upgrade_pip +"%VENV_DIR%\Scripts\Python.exe" -m pip install --upgrade pip +if %ERRORLEVEL% == 0 goto :activate_venv +echo Warning: Failed to upgrade PIP version + :activate_venv set PYTHON="%VENV_DIR%\Scripts\Python.exe" echo venv %PYTHON% diff --git a/webui.sh b/webui.sh index c7c4bee98..7acea902c 100755 --- a/webui.sh +++ b/webui.sh @@ -210,6 +210,7 @@ then if [[ ! -d "${venv_dir}" ]] then "${python_cmd}" -m venv "${venv_dir}" + "${venv_dir}"/bin/python -m pip install --upgrade pip first_launch=1 fi # shellcheck source=/dev/null From 73d1caf8f28a387f2db5a77a8892edad8ed505a0 Mon Sep 17 00:00:00 2001 From: Logan Date: Fri, 10 May 2024 12:38:10 +1000 Subject: [PATCH 41/89] Add Align Your Steps to available schedulers * Include both SDXL and SD 1.5 variants (https://research.nvidia.com/labs/toronto-ai/AlignYourSteps/howto.html) --- modules/sd_schedulers.py | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/modules/sd_schedulers.py b/modules/sd_schedulers.py index 75eb3ac03..2131eae46 100644 --- a/modules/sd_schedulers.py +++ b/modules/sd_schedulers.py @@ -4,6 +4,7 @@ import torch import k_diffusion +import numpy as np @dataclasses.dataclass class Scheduler: @@ -30,6 +31,35 @@ def sgm_uniform(n, sigma_min, sigma_max, inner_model, device): sigs += [0.0] return torch.FloatTensor(sigs).to(device) +def get_align_your_steps_sigmas(n, device, sigma_id): + # https://research.nvidia.com/labs/toronto-ai/AlignYourSteps/howto.html + def loglinear_interp(t_steps, num_steps): + """ + Performs log-linear interpolation of a given array of decreasing numbers. 
+ """ + xs = np.linspace(0, 1, len(t_steps)) + ys = np.log(t_steps[::-1]) + + new_xs = np.linspace(0, 1, num_steps) + new_ys = np.interp(new_xs, xs, ys) + + interped_ys = np.exp(new_ys)[::-1].copy() + return interped_ys + + if sigma_id == "sdxl": + sigmas = [14.615, 6.315, 3.771, 2.181, 1.342, 0.862, 0.555, 0.380, 0.234, 0.113, 0.029] + elif sigma_id == "sd15": + sigmas = [14.615, 6.475, 3.861, 2.697, 1.886, 1.396, 0.963, 0.652, 0.399, 0.152, 0.029] + else: + print(f'Align Your Steps sigma identifier "{sigma_id}" not recognized, defaulting to SD 1.5.') + sigmas = [14.615, 6.475, 3.861, 2.697, 1.886, 1.396, 0.963, 0.652, 0.399, 0.152, 0.029] + + if n != len(sigmas): + sigmas = np.append(loglinear_interp(sigmas, n), [0.0]) + else: + sigmas.append(0.0) + + return torch.FloatTensor(sigmas).to(device) schedulers = [ Scheduler('automatic', 'Automatic', None), @@ -38,6 +68,8 @@ schedulers = [ Scheduler('exponential', 'Exponential', k_diffusion.sampling.get_sigmas_exponential), Scheduler('polyexponential', 'Polyexponential', k_diffusion.sampling.get_sigmas_polyexponential, default_rho=1.0), Scheduler('sgm_uniform', 'SGM Uniform', sgm_uniform, need_inner_model=True, aliases=["SGMUniform"]), + Scheduler('align_your_steps_sdxl', 'Align Your Steps (SDXL)', lambda n, sigma_min, sigma_max, device: get_align_your_steps_sigmas(n, device, "sdxl")), + Scheduler('align_your_steps_sd15', 'Align Your Steps (SD 1.5)', lambda n, sigma_min, sigma_max, device: get_align_your_steps_sigmas(n, device, "sd15")), ] schedulers_map = {**{x.name: x for x in schedulers}, **{x.label: x for x in schedulers}} From d6b4444069d36cf7554eb9932061ecf43e9b1335 Mon Sep 17 00:00:00 2001 From: Logan Date: Fri, 10 May 2024 18:05:45 +1000 Subject: [PATCH 42/89] Use shared.sd_model.is_sdxl to determine base AYS sigmas --- modules/sd_schedulers.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/modules/sd_schedulers.py b/modules/sd_schedulers.py index 2131eae46..0ac1f7a21 100644 --- a/modules/sd_schedulers.py +++ b/modules/sd_schedulers.py @@ -6,6 +6,8 @@ import k_diffusion import numpy as np +from modules import shared + @dataclasses.dataclass class Scheduler: name: str @@ -31,7 +33,7 @@ def sgm_uniform(n, sigma_min, sigma_max, inner_model, device): sigs += [0.0] return torch.FloatTensor(sigs).to(device) -def get_align_your_steps_sigmas(n, device, sigma_id): +def get_align_your_steps_sigmas(n, sigma_min, sigma_max, device): # https://research.nvidia.com/labs/toronto-ai/AlignYourSteps/howto.html def loglinear_interp(t_steps, num_steps): """ @@ -46,12 +48,10 @@ def get_align_your_steps_sigmas(n, device, sigma_id): interped_ys = np.exp(new_ys)[::-1].copy() return interped_ys - if sigma_id == "sdxl": + if shared.sd_model.is_sdxl: sigmas = [14.615, 6.315, 3.771, 2.181, 1.342, 0.862, 0.555, 0.380, 0.234, 0.113, 0.029] - elif sigma_id == "sd15": - sigmas = [14.615, 6.475, 3.861, 2.697, 1.886, 1.396, 0.963, 0.652, 0.399, 0.152, 0.029] else: - print(f'Align Your Steps sigma identifier "{sigma_id}" not recognized, defaulting to SD 1.5.') + # Default to SD 1.5 sigmas. 
sigmas = [14.615, 6.475, 3.861, 2.697, 1.886, 1.396, 0.963, 0.652, 0.399, 0.152, 0.029] if n != len(sigmas): @@ -68,8 +68,7 @@ schedulers = [ Scheduler('exponential', 'Exponential', k_diffusion.sampling.get_sigmas_exponential), Scheduler('polyexponential', 'Polyexponential', k_diffusion.sampling.get_sigmas_polyexponential, default_rho=1.0), Scheduler('sgm_uniform', 'SGM Uniform', sgm_uniform, need_inner_model=True, aliases=["SGMUniform"]), - Scheduler('align_your_steps_sdxl', 'Align Your Steps (SDXL)', lambda n, sigma_min, sigma_max, device: get_align_your_steps_sigmas(n, device, "sdxl")), - Scheduler('align_your_steps_sd15', 'Align Your Steps (SD 1.5)', lambda n, sigma_min, sigma_max, device: get_align_your_steps_sigmas(n, device, "sd15")), + Scheduler('align_your_steps', 'Align Your Steps', get_align_your_steps_sigmas), ] schedulers_map = {**{x.name: x for x in schedulers}, **{x.label: x for x in schedulers}} From d44f241317d63095176543839bc111b731069629 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sat, 11 May 2024 13:13:39 +0900 Subject: [PATCH 43/89] use relative path for webui-assets css --- style.css | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/style.css b/style.css index f6a89b8f9..8eefda56d 100644 --- a/style.css +++ b/style.css @@ -1,6 +1,6 @@ /* temporary fix to load default gradio font in frontend instead of backend */ -@import url('/webui-assets/css/sourcesanspro.css'); +@import url('webui-assets/css/sourcesanspro.css'); /* temporary fix to hide gradio crop tool until it's fixed https://github.com/gradio-app/gradio/issues/3810 */ From ef7713fbb29fed183d669a5a081cda9ac1a8b629 Mon Sep 17 00:00:00 2001 From: elf-mouse Date: Tue, 14 May 2024 15:39:05 +0800 Subject: [PATCH 44/89] chore: sync v1.8.0 packages according to changelog, fix warning --- webui-macos-env.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/webui-macos-env.sh b/webui-macos-env.sh index db7e8b1a0..4126005ae 100644 --- a/webui-macos-env.sh +++ b/webui-macos-env.sh @@ -11,7 +11,7 @@ fi export install_dir="$HOME" export COMMANDLINE_ARGS="--skip-torch-cuda-test --upcast-sampling --no-half-vae --use-cpu interrogate" -export TORCH_COMMAND="pip install torch==2.1.0 torchvision==0.16.0" +export TORCH_COMMAND="pip install torch==2.1.2 torchvision==0.16.2" export PYTORCH_ENABLE_MPS_FALLBACK=1 #################################################################### From 5ab7d08a0a99c88a60a13885e564fd7d2d05cfc1 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Wed, 15 May 2024 17:27:05 +0900 Subject: [PATCH 45/89] fix extension update when not on main branch --- modules/extensions.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/modules/extensions.py b/modules/extensions.py index 5ad934b4d..24de766eb 100644 --- a/modules/extensions.py +++ b/modules/extensions.py @@ -191,8 +191,9 @@ class Extension: def check_updates(self): repo = Repo(self.path) + branch_name = f'{repo.remote().name}/{self.branch}' for fetch in repo.remote().fetch(dry_run=True): - if self.branch and fetch.name != f'{repo.remote().name}/{self.branch}': + if self.branch and fetch.name != branch_name: continue if fetch.flags != fetch.HEAD_UPTODATE: self.can_update = True @@ -200,7 +201,7 @@ return try: - origin = repo.rev_parse('origin') + origin = repo.rev_parse(branch_name) if repo.head.commit != origin: self.can_update = True self.status = "behind HEAD" @@ -213,8 +214,10 @@ 
self.can_update = False self.status = "latest" - def fetch_and_reset_hard(self, commit='origin'): + def fetch_and_reset_hard(self, commit=None): repo = Repo(self.path) + if commit is None: + commit = f'{repo.remote().name}/{self.branch}' # Fix: `error: Your local changes to the following files would be overwritten by merge`, # because WSL2 Docker sets 755 file permissions instead of 644, which results in the error. repo.git.fetch(all=True) From 022d835565f253841f7f9272ba320bb0cec4770d Mon Sep 17 00:00:00 2001 From: huchenlei Date: Wed, 15 May 2024 15:20:40 -0400 Subject: [PATCH 46/89] use_checkpoint = False --- configs/alt-diffusion-inference.yaml | 2 +- configs/alt-diffusion-m18-inference.yaml | 2 +- configs/instruct-pix2pix.yaml | 2 +- configs/sd_xl_inpaint.yaml | 2 +- configs/v1-inference.yaml | 2 +- configs/v1-inpainting-inference.yaml | 2 +- modules/sd_hijack_checkpoint.py | 9 ++++++--- modules/sd_models_config.py | 2 +- 8 files changed, 13 insertions(+), 10 deletions(-) diff --git a/configs/alt-diffusion-inference.yaml b/configs/alt-diffusion-inference.yaml index cfbee72d7..4944ab5c8 100644 --- a/configs/alt-diffusion-inference.yaml +++ b/configs/alt-diffusion-inference.yaml @@ -40,7 +40,7 @@ model: use_spatial_transformer: True transformer_depth: 1 context_dim: 768 - use_checkpoint: True + use_checkpoint: False legacy: False first_stage_config: diff --git a/configs/alt-diffusion-m18-inference.yaml b/configs/alt-diffusion-m18-inference.yaml index 41a031d55..c60dca8c7 100644 --- a/configs/alt-diffusion-m18-inference.yaml +++ b/configs/alt-diffusion-m18-inference.yaml @@ -41,7 +41,7 @@ model: use_linear_in_transformer: True transformer_depth: 1 context_dim: 1024 - use_checkpoint: True + use_checkpoint: False legacy: False first_stage_config: diff --git a/configs/instruct-pix2pix.yaml b/configs/instruct-pix2pix.yaml index 4e896879d..564e50ae2 100644 --- a/configs/instruct-pix2pix.yaml +++ b/configs/instruct-pix2pix.yaml @@ -45,7 +45,7 @@ model: use_spatial_transformer: True transformer_depth: 1 context_dim: 768 - use_checkpoint: True + use_checkpoint: False legacy: False first_stage_config: diff --git a/configs/sd_xl_inpaint.yaml b/configs/sd_xl_inpaint.yaml index 3bad37218..f40f45e33 100644 --- a/configs/sd_xl_inpaint.yaml +++ b/configs/sd_xl_inpaint.yaml @@ -21,7 +21,7 @@ model: params: adm_in_channels: 2816 num_classes: sequential - use_checkpoint: True + use_checkpoint: False in_channels: 9 out_channels: 4 model_channels: 320 diff --git a/configs/v1-inference.yaml b/configs/v1-inference.yaml index d4effe569..25c4d9ed0 100644 --- a/configs/v1-inference.yaml +++ b/configs/v1-inference.yaml @@ -40,7 +40,7 @@ model: use_spatial_transformer: True transformer_depth: 1 context_dim: 768 - use_checkpoint: True + use_checkpoint: False legacy: False first_stage_config: diff --git a/configs/v1-inpainting-inference.yaml b/configs/v1-inpainting-inference.yaml index f9eec37d2..68c199f99 100644 --- a/configs/v1-inpainting-inference.yaml +++ b/configs/v1-inpainting-inference.yaml @@ -40,7 +40,7 @@ model: use_spatial_transformer: True transformer_depth: 1 context_dim: 768 - use_checkpoint: True + use_checkpoint: False legacy: False first_stage_config: diff --git a/modules/sd_hijack_checkpoint.py b/modules/sd_hijack_checkpoint.py index 2604d969f..b2f05bbdc 100644 --- a/modules/sd_hijack_checkpoint.py +++ b/modules/sd_hijack_checkpoint.py @@ -4,16 +4,19 @@ import ldm.modules.attention import ldm.modules.diffusionmodules.openaimodel +# Setting flag=False so that torch skips checking parameters. 
+# Parameter checking is expensive in frequent operations. + def BasicTransformerBlock_forward(self, x, context=None): - return checkpoint(self._forward, x, context) + return checkpoint(self._forward, x, context, flag=False) def AttentionBlock_forward(self, x): - return checkpoint(self._forward, x) + return checkpoint(self._forward, x, flag=False) def ResBlock_forward(self, x, emb): - return checkpoint(self._forward, x, emb) + return checkpoint(self._forward, x, emb, flag=False) stored = [] diff --git a/modules/sd_models_config.py b/modules/sd_models_config.py index b38137eb5..9cec4f13d 100644 --- a/modules/sd_models_config.py +++ b/modules/sd_models_config.py @@ -35,7 +35,7 @@ def is_using_v_parameterization_for_sd2(state_dict): with sd_disable_initialization.DisableInitialization(): unet = ldm.modules.diffusionmodules.openaimodel.UNetModel( - use_checkpoint=True, + use_checkpoint=False, use_fp16=False, image_size=32, in_channels=4, From 0e98529365477a4f240b2ac67d94ff59235144c5 Mon Sep 17 00:00:00 2001 From: huchenlei Date: Wed, 15 May 2024 15:46:53 -0400 Subject: [PATCH 47/89] Replace einops.rearrange with torch native --- modules/sd_hijack_optimizations.py | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py index 7f9e328d0..4c2dc56d4 100644 --- a/modules/sd_hijack_optimizations.py +++ b/modules/sd_hijack_optimizations.py @@ -486,7 +486,19 @@ def xformers_attention_forward(self, x, context=None, mask=None, **kwargs): k_in = self.to_k(context_k) v_in = self.to_v(context_v) - q, k, v = (rearrange(t, 'b n (h d) -> b n h d', h=h) for t in (q_in, k_in, v_in)) + def _reshape(t): + """rearrange(t, 'b n (h d) -> b n h d', h=h). + Using torch native operations to avoid overhead as this function is + called frequently. (70 times/it for SDXL) + """ + b, n, _ = t.shape # Get the batch size (b) and sequence length (n) + d = t.shape[2] // h # Determine the depth per head + return t.reshape(b, n, h, d) + + q = _reshape(q_in) + k = _reshape(k_in) + v = _reshape(v_in) + del q_in, k_in, v_in dtype = q.dtype @@ -497,7 +509,9 @@ def xformers_attention_forward(self, x, context=None, mask=None, **kwargs): out = out.to(dtype) - out = rearrange(out, 'b n h d -> b n (h d)', h=h) + # out = rearrange(out, 'b n h d -> b n (h d)', h=h) + b, n, h, d = out.shape + out = out.reshape(b, n, h * d) return self.to_out(out) From 9eb2f786316c0f7e94c3df5f5e8bda203e6b875d Mon Sep 17 00:00:00 2001 From: huchenlei Date: Wed, 15 May 2024 16:32:29 -0400 Subject: [PATCH 48/89] Precompute is_sdxl_inpaint flag --- modules/processing.py | 28 +++++++++++----------------- modules/sd_models.py | 7 +++++++ modules/sd_models_xl.py | 9 ++++----- 3 files changed, 22 insertions(+), 22 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index 76557dd7f..d82cb24fb 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -115,20 +115,17 @@ def txt2img_image_conditioning(sd_model, x, width, height): return x.new_zeros(x.shape[0], 2*sd_model.noise_augmentor.time_embed.dim, dtype=x.dtype, device=x.device) else: - sd = sd_model.model.state_dict() - diffusion_model_input = sd.get('diffusion_model.input_blocks.0.0.weight', None) - if diffusion_model_input is not None: - if diffusion_model_input.shape[1] == 9: - # The "masked-image" in this case will just be all 0.5 since the entire image is masked. 
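Note: the _reshape helper introduced in PATCH 47 above is a drop-in equivalent of the einops call it replaces; a quick standalone check (illustrative shapes, requires einops):

import torch
from einops import rearrange

h = 8                          # illustrative head count
t = torch.randn(2, 77, 640)    # (batch, tokens, h * d)
b, n, _ = t.shape
native = t.reshape(b, n, h, t.shape[2] // h)
# same values, no einops parsing overhead on the hot path
assert torch.equal(rearrange(t, 'b n (h d) -> b n h d', h=h), native)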
- image_conditioning = torch.ones(x.shape[0], 3, height, width, device=x.device) * 0.5 - image_conditioning = images_tensor_to_samples(image_conditioning, - approximation_indexes.get(opts.sd_vae_encode_method)) + if sd_model.model.is_sdxl_inpaint: + # The "masked-image" in this case will just be all 0.5 since the entire image is masked. + image_conditioning = torch.ones(x.shape[0], 3, height, width, device=x.device) * 0.5 + image_conditioning = images_tensor_to_samples(image_conditioning, + approximation_indexes.get(opts.sd_vae_encode_method)) - # Add the fake full 1s mask to the first dimension. - image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0) - image_conditioning = image_conditioning.to(x.dtype) + # Add the fake full 1s mask to the first dimension. + image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0) + image_conditioning = image_conditioning.to(x.dtype) - return image_conditioning + return image_conditioning # Dummy zero conditioning if we're not using inpainting or unclip models. # Still takes up a bit of memory, but no encoder call. @@ -390,11 +387,8 @@ class StableDiffusionProcessing: if self.sampler.conditioning_key == "crossattn-adm": return self.unclip_image_conditioning(source_image) - sd = self.sampler.model_wrap.inner_model.model.state_dict() - diffusion_model_input = sd.get('diffusion_model.input_blocks.0.0.weight', None) - if diffusion_model_input is not None: - if diffusion_model_input.shape[1] == 9: - return self.inpainting_image_conditioning(source_image, latent_image, image_mask=image_mask) + if self.sampler.model_wrap.inner_model.model.is_sdxl_inpaint: + return self.inpainting_image_conditioning(source_image, latent_image, image_mask=image_mask) # Dummy zero conditioning if we're not using inpainting or depth model. return latent_image.new_zeros(latent_image.shape[0], 5, 1, 1) diff --git a/modules/sd_models.py b/modules/sd_models.py index ff245b7a6..62e74d27a 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -380,6 +380,13 @@ def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer model.is_sd2 = not model.is_sdxl and hasattr(model.cond_stage_model, 'model') model.is_sd1 = not model.is_sdxl and not model.is_sd2 model.is_ssd = model.is_sdxl and 'model.diffusion_model.middle_block.1.transformer_blocks.0.attn1.to_q.weight' not in state_dict.keys() + # Set is_sdxl_inpaint flag. + diffusion_model_input = state_dict.get('diffusion_model.input_blocks.0.0.weight', None) + model.is_sdxl_inpaint = ( + model.is_sdxl and + diffusion_model_input is not None and + diffusion_model_input.shape[1] == 9 + ) if model.is_sdxl: sd_models_xl.extend_sdxl(model) diff --git a/modules/sd_models_xl.py b/modules/sd_models_xl.py index 94ff973fb..35e21f6e4 100644 --- a/modules/sd_models_xl.py +++ b/modules/sd_models_xl.py @@ -35,11 +35,10 @@ def get_learned_conditioning(self: sgm.models.diffusion.DiffusionEngine, batch: def apply_model(self: sgm.models.diffusion.DiffusionEngine, x, t, cond): - sd = self.model.state_dict() - diffusion_model_input = sd.get('diffusion_model.input_blocks.0.0.weight', None) - if diffusion_model_input is not None: - if diffusion_model_input.shape[1] == 9: - x = torch.cat([x] + cond['c_concat'], dim=1) + """WARNING: This function is called once per denoising iteration. DO NOT add + expensive function calls such as `model.state_dict`. 
""" + if self.model.is_sdxl_inpaint: + x = torch.cat([x] + cond['c_concat'], dim=1) return self.model(x, t, cond) From 6a48476502d6cdd19cb3d0c7f2a0b92aacd7c01f Mon Sep 17 00:00:00 2001 From: huchenlei Date: Wed, 15 May 2024 16:54:26 -0400 Subject: [PATCH 49/89] Fix flag check for SD15 --- modules/processing.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index d82cb24fb..fff2595e7 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -115,7 +115,7 @@ def txt2img_image_conditioning(sd_model, x, width, height): return x.new_zeros(x.shape[0], 2*sd_model.noise_augmentor.time_embed.dim, dtype=x.dtype, device=x.device) else: - if sd_model.model.is_sdxl_inpaint: + if getattr(sd_model.model, "is_sdxl_inpaint", False): # The "masked-image" in this case will just be all 0.5 since the entire image is masked. image_conditioning = torch.ones(x.shape[0], 3, height, width, device=x.device) * 0.5 image_conditioning = images_tensor_to_samples(image_conditioning, @@ -387,7 +387,7 @@ class StableDiffusionProcessing: if self.sampler.conditioning_key == "crossattn-adm": return self.unclip_image_conditioning(source_image) - if self.sampler.model_wrap.inner_model.model.is_sdxl_inpaint: + if getattr(self.sampler.model_wrap.inner_model.model, "is_sdxl_inpaint", False): return self.inpainting_image_conditioning(source_image, latent_image, image_mask=image_mask) # Dummy zero conditioning if we're not using inpainting or depth model. From 3e20b36e8f1b26f24db0c149732fb5479bff68bc Mon Sep 17 00:00:00 2001 From: huchenlei Date: Wed, 15 May 2024 17:27:01 -0400 Subject: [PATCH 50/89] Fix attr access --- modules/sd_models_xl.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/sd_models_xl.py b/modules/sd_models_xl.py index 35e21f6e4..1242a5936 100644 --- a/modules/sd_models_xl.py +++ b/modules/sd_models_xl.py @@ -37,7 +37,7 @@ def get_learned_conditioning(self: sgm.models.diffusion.DiffusionEngine, batch: def apply_model(self: sgm.models.diffusion.DiffusionEngine, x, t, cond): """WARNING: This function is called once per denoising iteration. DO NOT add expensive functionc calls such as `model.state_dict`. 
""" - if self.model.is_sdxl_inpaint: + if self.is_sdxl_inpaint: x = torch.cat([x] + cond['c_concat'], dim=1) return self.model(x, t, cond) From 9c8075ba8e538f695ef25f85e6513227b58b71ce Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Thu, 16 May 2024 23:16:50 +0900 Subject: [PATCH 51/89] torch_utils.float64 return torch.float64 if device is not mps or xpu, else return torch.float32 --- modules/torch_utils.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/modules/torch_utils.py b/modules/torch_utils.py index e5b52393e..a07e02853 100644 --- a/modules/torch_utils.py +++ b/modules/torch_utils.py @@ -1,6 +1,7 @@ from __future__ import annotations import torch.nn +import torch def get_param(model) -> torch.nn.Parameter: @@ -15,3 +16,11 @@ def get_param(model) -> torch.nn.Parameter: return param raise ValueError(f"No parameters found in model {model!r}") + + +def float64(t: torch.Tensor): + """return torch.float64 if device is not mps or xpu, else return torch.float32""" + match t.device.type: + case 'mps', 'xpu': + return torch.float32 + return torch.float64 From 41f66849c7feac1efd0b9eb6884209be382e9e74 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Thu, 16 May 2024 23:18:20 +0900 Subject: [PATCH 52/89] mps, xpu compatibility --- .../soft-inpainting/scripts/soft_inpainting.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/extensions-builtin/soft-inpainting/scripts/soft_inpainting.py b/extensions-builtin/soft-inpainting/scripts/soft_inpainting.py index f56e1e226..0e629963a 100644 --- a/extensions-builtin/soft-inpainting/scripts/soft_inpainting.py +++ b/extensions-builtin/soft-inpainting/scripts/soft_inpainting.py @@ -3,6 +3,7 @@ import gradio as gr import math from modules.ui_components import InputAccordion import modules.scripts as scripts +from modules.torch_utils import float64 class SoftInpaintingSettings: @@ -79,13 +80,11 @@ def latent_blend(settings, a, b, t): # Calculate the magnitude of the interpolated vectors. (We will remove this magnitude.) # 64-bit operations are used here to allow large exponents. - current_magnitude = torch.norm(image_interp, p=2, dim=1, keepdim=True).to(torch.float64).add_(0.00001) + current_magnitude = torch.norm(image_interp, p=2, dim=1, keepdim=True).to(float64(image_interp)).add_(0.00001) # Interpolate the powered magnitudes, then un-power them (bring them back to a power of 1). 
- a_magnitude = torch.norm(a, p=2, dim=1, keepdim=True).to(torch.float64).pow_( - settings.inpaint_detail_preservation) * one_minus_t3 - b_magnitude = torch.norm(b, p=2, dim=1, keepdim=True).to(torch.float64).pow_( - settings.inpaint_detail_preservation) * t3 + a_magnitude = torch.norm(a, p=2, dim=1, keepdim=True).to(float64(a)).pow_(settings.inpaint_detail_preservation) * one_minus_t3 + b_magnitude = torch.norm(b, p=2, dim=1, keepdim=True).to(float64(b)).pow_(settings.inpaint_detail_preservation) * t3 desired_magnitude = a_magnitude desired_magnitude.add_(b_magnitude).pow_(1 / settings.inpaint_detail_preservation) del a_magnitude, b_magnitude, t3, one_minus_t3 From f015b94176d6df372ce153eddc018cb3b08c03ba Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Thu, 16 May 2024 23:19:06 +0900 Subject: [PATCH 53/89] use torch_utils.float64 --- modules/sd_samplers_timesteps_impl.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/modules/sd_samplers_timesteps_impl.py b/modules/sd_samplers_timesteps_impl.py index 930a64af5..84867d6ee 100644 --- a/modules/sd_samplers_timesteps_impl.py +++ b/modules/sd_samplers_timesteps_impl.py @@ -5,13 +5,14 @@ import numpy as np from modules import shared from modules.models.diffusion.uni_pc import uni_pc +from modules.torch_utils import float64 @torch.no_grad() def ddim(model, x, timesteps, extra_args=None, callback=None, disable=None, eta=0.0): alphas_cumprod = model.inner_model.inner_model.alphas_cumprod alphas = alphas_cumprod[timesteps] - alphas_prev = alphas_cumprod[torch.nn.functional.pad(timesteps[:-1], pad=(1, 0))].to(torch.float64 if x.device.type != 'mps' and x.device.type != 'xpu' else torch.float32) + alphas_prev = alphas_cumprod[torch.nn.functional.pad(timesteps[:-1], pad=(1, 0))].to(float64(x)) sqrt_one_minus_alphas = torch.sqrt(1 - alphas) sigmas = eta * np.sqrt((1 - alphas_prev.cpu().numpy()) / (1 - alphas.cpu()) * (1 - alphas.cpu() / alphas_prev.cpu().numpy())) @@ -43,7 +44,7 @@ def ddim(model, x, timesteps, extra_args=None, callback=None, disable=None, eta= def plms(model, x, timesteps, extra_args=None, callback=None, disable=None): alphas_cumprod = model.inner_model.inner_model.alphas_cumprod alphas = alphas_cumprod[timesteps] - alphas_prev = alphas_cumprod[torch.nn.functional.pad(timesteps[:-1], pad=(1, 0))].to(torch.float64 if x.device.type != 'mps' and x.device.type != 'xpu' else torch.float32) + alphas_prev = alphas_cumprod[torch.nn.functional.pad(timesteps[:-1], pad=(1, 0))].to(float64(x)) sqrt_one_minus_alphas = torch.sqrt(1 - alphas) extra_args = {} if extra_args is None else extra_args From 51b13a8c54854104f1510956b920399226a932f1 Mon Sep 17 00:00:00 2001 From: huchenlei Date: Thu, 16 May 2024 11:39:01 -0400 Subject: [PATCH 54/89] Prevent unnecessary bias backup --- extensions-builtin/Lora/networks.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py index 42b14dc23..360455f87 100644 --- a/extensions-builtin/Lora/networks.py +++ b/extensions-builtin/Lora/networks.py @@ -378,7 +378,10 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn self.network_weights_backup = weights_backup bias_backup = getattr(self, "network_bias_backup", None) - if bias_backup is None: + if bias_backup is None and wanted_names != (): + if current_names != (): + raise RuntimeError("no backup bias found and current bias are not unchanged") + if isinstance(self, 
torch.nn.MultiheadAttention) and self.out_proj.bias is not None: bias_backup = self.out_proj.bias.to(devices.cpu, copy=True) elif getattr(self, 'bias', None) is not None: From b2ae4490b9c225ff020941bcbf36c8975760deba Mon Sep 17 00:00:00 2001 From: huchenlei Date: Thu, 16 May 2024 14:45:00 -0400 Subject: [PATCH 55/89] Fix LoRA bias error --- extensions-builtin/Lora/networks.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py index 360455f87..aee4e9d9c 100644 --- a/extensions-builtin/Lora/networks.py +++ b/extensions-builtin/Lora/networks.py @@ -379,15 +379,17 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn bias_backup = getattr(self, "network_bias_backup", None) if bias_backup is None and wanted_names != (): - if current_names != (): - raise RuntimeError("no backup bias found and current bias are not unchanged") - if isinstance(self, torch.nn.MultiheadAttention) and self.out_proj.bias is not None: bias_backup = self.out_proj.bias.to(devices.cpu, copy=True) elif getattr(self, 'bias', None) is not None: bias_backup = self.bias.to(devices.cpu, copy=True) else: bias_backup = None + + # Unlike weight which always has value, some modules don't have bias. + # Only report if bias is not None and current bias are not unchanged. + if bias_backup is not None and current_names != (): + raise RuntimeError("no backup bias found and current bias are not unchanged") self.network_bias_backup = bias_backup if current_names != wanted_names: From 221ac0b9abd2e39ccc6f1969a434f05dcd72b29a Mon Sep 17 00:00:00 2001 From: Andray Date: Thu, 16 May 2024 23:08:24 +0400 Subject: [PATCH 56/89] img2img batch upload method --- modules/img2img.py | 20 +++++++++++++++----- modules/ui.py | 31 ++++++++++++++++++++----------- 2 files changed, 35 insertions(+), 16 deletions(-) diff --git a/modules/img2img.py b/modules/img2img.py index a1d042c21..24f869f5c 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -17,11 +17,14 @@ from modules.ui import plaintext_to_html import modules.scripts -def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=False, scale_by=1.0, use_png_info=False, png_info_props=None, png_info_dir=None): +def process_batch(p, input, output_dir, inpaint_mask_dir, args, to_scale=False, scale_by=1.0, use_png_info=False, png_info_props=None, png_info_dir=None): output_dir = output_dir.strip() processing.fix_seed(p) - batch_images = list(shared.walk_files(input_dir, allowed_extensions=(".png", ".jpg", ".jpeg", ".webp", ".tif", ".tiff"))) + if isinstance(input, str): + batch_images = list(shared.walk_files(input, allowed_extensions=(".png", ".jpg", ".jpeg", ".webp", ".tif", ".tiff"))) + else: + batch_images = [os.path.abspath(x.name) for x in input] is_inpaint_batch = False if inpaint_mask_dir: @@ -146,7 +149,7 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=Fal return batch_results -def img2img(id_task: str, request: gr.Request, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, mask_blur: int, mask_alpha: float, inpainting_fill: int, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, 
inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, img2img_batch_use_png_info: bool, img2img_batch_png_info_props: list, img2img_batch_png_info_dir: str, *args): +def img2img(id_task: str, request: gr.Request, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, mask_blur: int, mask_alpha: float, inpainting_fill: int, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, img2img_batch_use_png_info: bool, img2img_batch_png_info_props: list, img2img_batch_png_info_dir: str, img2img_batch_source_type: str, img2img_batch_upload: list, *args): override_settings = create_override_settings_dict(override_settings_texts) is_batch = mode == 5 @@ -221,8 +224,15 @@ def img2img(id_task: str, request: gr.Request, mode: int, prompt: str, negative_ with closing(p): if is_batch: - assert not shared.cmd_opts.hide_ui_dir_config, "Launched with --hide-ui-dir-config, batch img2img disabled" - processed = process_batch(p, img2img_batch_input_dir, img2img_batch_output_dir, img2img_batch_inpaint_mask_dir, args, to_scale=selected_scale_tab == 1, scale_by=scale_by, use_png_info=img2img_batch_use_png_info, png_info_props=img2img_batch_png_info_props, png_info_dir=img2img_batch_png_info_dir) + if img2img_batch_source_type == "upload": + assert isinstance(img2img_batch_upload, list) and img2img_batch_upload + output_dir = "" + inpaint_mask_dir = "" + png_info_dir = img2img_batch_png_info_dir if not shared.cmd_opts.hide_ui_dir_config else "" + processed = process_batch(p, img2img_batch_upload, output_dir, inpaint_mask_dir, args, to_scale=selected_scale_tab == 1, scale_by=scale_by, use_png_info=img2img_batch_use_png_info, png_info_props=img2img_batch_png_info_props, png_info_dir=png_info_dir) + else: # "from dir" + assert not shared.cmd_opts.hide_ui_dir_config, "Launched with --hide-ui-dir-config, batch img2img disabled" + processed = process_batch(p, img2img_batch_input_dir, img2img_batch_output_dir, img2img_batch_inpaint_mask_dir, args, to_scale=selected_scale_tab == 1, scale_by=scale_by, use_png_info=img2img_batch_use_png_info, png_info_props=img2img_batch_png_info_props, png_info_dir=img2img_batch_png_info_dir) if processed is None: processed = Processed(p, [], p.seed, "") diff --git a/modules/ui.py b/modules/ui.py index 403425f29..f3ac42367 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -566,18 +566,25 @@ def create_ui(): init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", image_mode="RGBA", elem_id="img_inpaint_mask") with gr.TabItem('Batch', id='batch', elem_id="img2img_batch_tab") as tab_batch: - hidden = '
<br>Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else '' - gr.HTML( - "<br><br>Process images in a directory on the same machine where the server is running." + - "<br>Use an empty output directory to save pictures normally instead of writing to the output directory." + - f"<br>Add inpaint batch mask directory to enable inpaint batch processing." f"{hidden}</p>" - ) - img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, elem_id="img2img_batch_input_dir") - img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, elem_id="img2img_batch_output_dir") - img2img_batch_inpaint_mask_dir = gr.Textbox(label="Inpaint batch mask directory (required for inpaint batch processing only)", **shared.hide_dirs, elem_id="img2img_batch_inpaint_mask_dir") + with gr.Tabs(elem_id="img2img_batch_source"): + img2img_batch_source_type = gr.Textbox(visible=False, value="upload") + with gr.TabItem('Upload', id='batch_upload', elem_id="img2img_batch_upload_tab") as tab_batch_upload: + img2img_batch_upload = gr.Files(label="Files", interactive=True, elem_id="img2img_batch_upload") + with gr.TabItem('From directory', id='batch_from_dir', elem_id="img2img_batch_from_dir_tab") as tab_batch_from_dir: + hidden = '<br>Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else '' + gr.HTML( + "<br><br>Process images in a directory on the same machine where the server is running." + + "<br>Use an empty output directory to save pictures normally instead of writing to the output directory." + + f"<br>Add inpaint batch mask directory to enable inpaint batch processing." f"{hidden}</p>" + ) + img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, elem_id="img2img_batch_input_dir") + img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, elem_id="img2img_batch_output_dir") + img2img_batch_inpaint_mask_dir = gr.Textbox(label="Inpaint batch mask directory (required for inpaint batch processing only)", **shared.hide_dirs, elem_id="img2img_batch_inpaint_mask_dir") + tab_batch_upload.select(fn=lambda: "upload", inputs=[], outputs=[img2img_batch_source_type]) + tab_batch_from_dir.select(fn=lambda: "from dir", inputs=[], outputs=[img2img_batch_source_type]) with gr.Accordion("PNG info", open=False): - img2img_batch_use_png_info = gr.Checkbox(label="Append png info to prompts", **shared.hide_dirs, elem_id="img2img_batch_use_png_info") + img2img_batch_use_png_info = gr.Checkbox(label="Append png info to prompts", elem_id="img2img_batch_use_png_info") img2img_batch_png_info_dir = gr.Textbox(label="PNG info directory", **shared.hide_dirs, placeholder="Leave empty to use input directory", elem_id="img2img_batch_png_info_dir") img2img_batch_png_info_props = gr.CheckboxGroup(["Prompt", "Negative prompt", "Seed", "CFG scale", "Sampler", "Steps", "Model hash"], label="Parameters to take from png info", info="Prompts from png info will be appended to prompts set in ui.") @@ -759,6 +766,8 @@ def create_ui(): img2img_batch_use_png_info, img2img_batch_png_info_props, img2img_batch_png_info_dir, + img2img_batch_source_type, + img2img_batch_upload, ] + custom_inputs, outputs=[ output_panel.gallery, From 58eec83a546b8d61500c7b801cb0bdbe7650f6a6 Mon Sep 17 00:00:00 2001 From: huchenlei Date: Thu, 16 May 2024 16:39:02 -0400 Subject: [PATCH 57/89] Fully prevent use_checkpoint --- modules/sd_models.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/modules/sd_models.py b/modules/sd_models.py index ff245b7a6..a33fa7c33 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -551,6 +551,11 @@ def repair_config(sd_config): karlo_path = os.path.join(paths.models_path, 'karlo') sd_config.model.params.noise_aug_config.params.clip_stats_path = sd_config.model.params.noise_aug_config.params.clip_stats_path.replace("checkpoints/karlo_models", karlo_path) + # Do not use checkpoint for inference. + # This helps prevent extra performance overhead on checking parameters. + # The perf overhead is about 100ms/it on 4090. 
+ sd_config.model.params.network_config.params.use_checkpoint = False + def rescale_zero_terminal_snr_abar(alphas_cumprod): alphas_bar_sqrt = alphas_cumprod.sqrt() From 2a8a60c2c50473f0ece5804d4a2cde0d1ff3d35e Mon Sep 17 00:00:00 2001 From: huchenlei Date: Thu, 16 May 2024 19:50:06 -0400 Subject: [PATCH 58/89] Add --precision half cmd option --- modules/cmd_args.py | 2 +- modules/devices.py | 24 ++++++++++++++++++++++++ modules/sd_hijack_unet.py | 29 ++++++++++++++++++++++------- modules/sd_hijack_utils.py | 26 +++++++++++++++----------- modules/sd_models.py | 1 + modules/shared_init.py | 8 ++++++++ 6 files changed, 71 insertions(+), 19 deletions(-) diff --git a/modules/cmd_args.py b/modules/cmd_args.py index 016a33d10..58c5e5d5b 100644 --- a/modules/cmd_args.py +++ b/modules/cmd_args.py @@ -41,7 +41,7 @@ parser.add_argument("--lowvram", action='store_true', help="enable stable diffus parser.add_argument("--lowram", action='store_true', help="load stable diffusion checkpoint weights to VRAM instead of RAM") parser.add_argument("--always-batch-cond-uncond", action='store_true', help="does not do anything") parser.add_argument("--unload-gfpgan", action='store_true', help="does not do anything.") -parser.add_argument("--precision", type=str, help="evaluate at this precision", choices=["full", "autocast"], default="autocast") +parser.add_argument("--precision", type=str, help="evaluate at this precision", choices=["full", "half", "autocast"], default="autocast") parser.add_argument("--upcast-sampling", action='store_true', help="upcast sampling. No effect with --no-half. Usually produces similar results to --no-half with better performance while using less memory.") parser.add_argument("--share", action='store_true', help="use share=True for gradio and make the UI accessible through their site") parser.add_argument("--ngrok", type=str, help="ngrok authtoken, alternative to gradio --share", default=None) diff --git a/modules/devices.py b/modules/devices.py index e4f671ac6..7de34ac51 100644 --- a/modules/devices.py +++ b/modules/devices.py @@ -114,6 +114,9 @@ errors.run(enable_tf32, "Enabling TF32") cpu: torch.device = torch.device("cpu") fp8: bool = False +# Force fp16 for all models in inference. No casting during inference. +# This flag is controlled by "--precision half" command line arg. +force_fp16: bool = False device: torch.device = None device_interrogate: torch.device = None device_gfpgan: torch.device = None @@ -127,6 +130,8 @@ unet_needs_upcast = False def cond_cast_unet(input): + if force_fp16: + return input.to(torch.float16) return input.to(dtype_unet) if unet_needs_upcast else input @@ -206,6 +211,11 @@ def autocast(disable=False): if disable: return contextlib.nullcontext() + if force_fp16: + # No casting during inference if force_fp16 is enabled. + # All tensor dtype conversion happens before inference. + return contextlib.nullcontext() + if fp8 and device==cpu: return torch.autocast("cpu", dtype=torch.bfloat16, enabled=True) @@ -269,3 +279,17 @@ def first_time_calculation(): x = torch.zeros((1, 1, 3, 3)).to(device, dtype) conv2d = torch.nn.Conv2d(1, 1, (3, 3)).to(device, dtype) conv2d(x) + + +def force_model_fp16(): + """ + ldm and sgm have modules.diffusionmodules.util.GroupNorm32.forward, which + forces conversion of input to float32. If force_fp16 is enabled, we need to + prevent this casting. 
+ """ + assert force_fp16 + import sgm.modules.diffusionmodules.util as sgm_util + import ldm.modules.diffusionmodules.util as ldm_util + sgm_util.GroupNorm32 = torch.nn.GroupNorm + ldm_util.GroupNorm32 = torch.nn.GroupNorm + print("ldm/sgm GroupNorm32 replaced with normal torch.nn.GroupNorm due to `--precision half`.") diff --git a/modules/sd_hijack_unet.py b/modules/sd_hijack_unet.py index 2101f1a04..41955313a 100644 --- a/modules/sd_hijack_unet.py +++ b/modules/sd_hijack_unet.py @@ -36,7 +36,7 @@ th = TorchHijackForUnet() # Below are monkey patches to enable upcasting a float16 UNet for float32 sampling def apply_model(orig_func, self, x_noisy, t, cond, **kwargs): - + """Always make sure inputs to unet are in correct dtype.""" if isinstance(cond, dict): for y in cond.keys(): if isinstance(cond[y], list): @@ -45,7 +45,11 @@ def apply_model(orig_func, self, x_noisy, t, cond, **kwargs): cond[y] = cond[y].to(devices.dtype_unet) if isinstance(cond[y], torch.Tensor) else cond[y] with devices.autocast(): - return orig_func(self, x_noisy.to(devices.dtype_unet), t.to(devices.dtype_unet), cond, **kwargs).float() + result = orig_func(self, x_noisy.to(devices.dtype_unet), t.to(devices.dtype_unet), cond, **kwargs) + if devices.unet_needs_upcast: + return result.float() + else: + return result class GELUHijack(torch.nn.GELU, torch.nn.Module): @@ -64,12 +68,11 @@ def hijack_ddpm_edit(): if not ddpm_edit_hijack: CondFunc('modules.models.diffusion.ddpm_edit.LatentDiffusion.decode_first_stage', first_stage_sub, first_stage_cond) CondFunc('modules.models.diffusion.ddpm_edit.LatentDiffusion.encode_first_stage', first_stage_sub, first_stage_cond) - ddpm_edit_hijack = CondFunc('modules.models.diffusion.ddpm_edit.LatentDiffusion.apply_model', apply_model, unet_needs_upcast) + ddpm_edit_hijack = CondFunc('modules.models.diffusion.ddpm_edit.LatentDiffusion.apply_model', apply_model) unet_needs_upcast = lambda *args, **kwargs: devices.unet_needs_upcast -CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.apply_model', apply_model, unet_needs_upcast) -CondFunc('ldm.modules.diffusionmodules.openaimodel.timestep_embedding', lambda orig_func, timesteps, *args, **kwargs: orig_func(timesteps, *args, **kwargs).to(torch.float32 if timesteps.dtype == torch.int64 else devices.dtype_unet), unet_needs_upcast) + if version.parse(torch.__version__) <= version.parse("1.13.2") or torch.cuda.is_available(): CondFunc('ldm.modules.diffusionmodules.util.GroupNorm32.forward', lambda orig_func, self, *args, **kwargs: orig_func(self.float(), *args, **kwargs), unet_needs_upcast) CondFunc('ldm.modules.attention.GEGLU.forward', lambda orig_func, self, x: orig_func(self.float(), x.float()).to(devices.dtype_unet), unet_needs_upcast) @@ -81,5 +84,17 @@ CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.decode_first_stage', first_s CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.encode_first_stage', first_stage_sub, first_stage_cond) CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.get_first_stage_encoding', lambda orig_func, *args, **kwargs: orig_func(*args, **kwargs).float(), first_stage_cond) -CondFunc('sgm.modules.diffusionmodules.wrappers.OpenAIWrapper.forward', apply_model, unet_needs_upcast) -CondFunc('sgm.modules.diffusionmodules.openaimodel.timestep_embedding', lambda orig_func, timesteps, *args, **kwargs: orig_func(timesteps, *args, **kwargs).to(torch.float32 if timesteps.dtype == torch.int64 else devices.dtype_unet), unet_needs_upcast) +CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.apply_model', apply_model) 
+CondFunc('sgm.modules.diffusionmodules.wrappers.OpenAIWrapper.forward', apply_model) + + +def timestep_embedding_cast_result(orig_func, timesteps, *args, **kwargs): + if devices.unet_needs_upcast and timesteps.dtype == torch.int64: + dtype = torch.float32 + else: + dtype = devices.dtype_unet + return orig_func(timesteps, *args, **kwargs).to(dtype=dtype) + + +CondFunc('ldm.modules.diffusionmodules.openaimodel.timestep_embedding', timestep_embedding_cast_result) +CondFunc('sgm.modules.diffusionmodules.openaimodel.timestep_embedding', timestep_embedding_cast_result) diff --git a/modules/sd_hijack_utils.py b/modules/sd_hijack_utils.py index 79bf6e468..546f2eda4 100644 --- a/modules/sd_hijack_utils.py +++ b/modules/sd_hijack_utils.py @@ -1,7 +1,11 @@ import importlib + +always_true_func = lambda *args, **kwargs: True + + class CondFunc: - def __new__(cls, orig_func, sub_func, cond_func): + def __new__(cls, orig_func, sub_func, cond_func=always_true_func): self = super(CondFunc, cls).__new__(cls) if isinstance(orig_func, str): func_path = orig_func.split('.') @@ -20,13 +24,13 @@ class CondFunc: print(f"Warning: Failed to resolve {orig_func} for CondFunc hijack") pass self.__init__(orig_func, sub_func, cond_func) - return lambda *args, **kwargs: self(*args, **kwargs) - def __init__(self, orig_func, sub_func, cond_func): - self.__orig_func = orig_func - self.__sub_func = sub_func - self.__cond_func = cond_func - def __call__(self, *args, **kwargs): - if not self.__cond_func or self.__cond_func(self.__orig_func, *args, **kwargs): - return self.__sub_func(self.__orig_func, *args, **kwargs) - else: - return self.__orig_func(*args, **kwargs) + return lambda *args, **kwargs: self(*args, **kwargs) + def __init__(self, orig_func, sub_func, cond_func): + self.__orig_func = orig_func + self.__sub_func = sub_func + self.__cond_func = cond_func + def __call__(self, *args, **kwargs): + if not self.__cond_func or self.__cond_func(self.__orig_func, *args, **kwargs): + return self.__sub_func(self.__orig_func, *args, **kwargs) + else: + return self.__orig_func(*args, **kwargs) diff --git a/modules/sd_models.py b/modules/sd_models.py index ff245b7a6..9c5909168 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -403,6 +403,7 @@ def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer model.float() model.alphas_cumprod_original = model.alphas_cumprod devices.dtype_unet = torch.float32 + assert shared.cmd_opts.precision != "half", "Cannot use --precision half with --no-half" timer.record("apply float()") else: vae = model.first_stage_model diff --git a/modules/shared_init.py b/modules/shared_init.py index 935e3a21c..a6ad0433d 100644 --- a/modules/shared_init.py +++ b/modules/shared_init.py @@ -31,6 +31,14 @@ def initialize(): devices.dtype_vae = torch.float32 if cmd_opts.no_half or cmd_opts.no_half_vae else torch.float16 devices.dtype_inference = torch.float32 if cmd_opts.precision == 'full' else devices.dtype + if cmd_opts.precision == "half": + msg = "--no-half and --no-half-vae conflict with --precision half" + assert devices.dtype == torch.float16, msg + assert devices.dtype_vae == torch.float16, msg + assert devices.dtype_inference == torch.float16, msg + devices.force_fp16 = True + devices.force_model_fp16() + shared.device = devices.device shared.weight_load_location = None if cmd_opts.lowram else "cpu" From 47f1d42a7e77259e2e7418ae8f941718c55cfd25 Mon Sep 17 00:00:00 2001 From: huchenlei Date: Thu, 16 May 2024 20:06:04 -0400 Subject: [PATCH 59/89] Fix for SD15 models --- 
modules/sd_models.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/modules/sd_models.py b/modules/sd_models.py index a33fa7c33..cda142bdd 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -553,8 +553,11 @@ def repair_config(sd_config): # Do not use checkpoint for inference. # This helps prevent extra performance overhead on checking parameters. - # The perf overhead is about 100ms/it on 4090. - sd_config.model.params.network_config.params.use_checkpoint = False + # The perf overhead is about 100ms/it on 4090 for SDXL. + if hasattr(sd_config.model.params, "network_config"): + sd_config.model.params.network_config.params.use_checkpoint = False + if hasattr(sd_config.model.params, "unet_config"): + sd_config.model.params.unet_config.params.use_checkpoint = False def rescale_zero_terminal_snr_abar(alphas_cumprod): From 10f2407f48fa3a8bbd299068e5f67108f272b87d Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sat, 18 May 2024 00:44:02 +0900 Subject: [PATCH 60/89] xyz csv skipinitialspace --- scripts/xyz_grid.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index b9fd66fe5..d416e4c07 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -212,7 +212,7 @@ def list_to_csv_string(data_list): def csv_string_to_list_strip(data_str): - return list(map(str.strip, chain.from_iterable(csv.reader(StringIO(data_str))))) + return list(map(str.strip, chain.from_iterable(csv.reader(StringIO(data_str), skipinitialspace=True)))) class AxisOption: From 53d67088ee0fb190c3ae1330c2b876dedb16dd8b Mon Sep 17 00:00:00 2001 From: drhead <1313496+drhead@users.noreply.github.com> Date: Fri, 17 May 2024 12:12:57 -0400 Subject: [PATCH 61/89] Patch timestep embedding to create tensor on-device --- modules/sd_hijack_unet.py | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/modules/sd_hijack_unet.py b/modules/sd_hijack_unet.py index 2101f1a04..0dabbe0e4 100644 --- a/modules/sd_hijack_unet.py +++ b/modules/sd_hijack_unet.py @@ -1,5 +1,7 @@ import torch from packaging import version +from einops import repeat +import math from modules import devices from modules.sd_hijack_utils import CondFunc @@ -48,6 +50,30 @@ def apply_model(orig_func, self, x_noisy, t, cond, **kwargs): return orig_func(self, x_noisy.to(devices.dtype_unet), t.to(devices.dtype_unet), cond, **kwargs).float() +# Monkey patch to create timestep embed tensor on device, avoiding a block. +def timestep_embedding(_, timesteps, dim, max_period=10000, repeat_only=False): + """ + Create sinusoidal timestep embeddings. + :param timesteps: a 1-D Tensor of N indices, one per batch element. + These may be fractional. + :param dim: the dimension of the output. + :param max_period: controls the minimum frequency of the embeddings. + :return: an [N x dim] Tensor of positional embeddings. 
+ """ + if not repeat_only: + half = dim // 2 + freqs = torch.exp( + -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32, device=timesteps.device) / half + ) + args = timesteps[:, None].float() * freqs[None] + embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1) + if dim % 2: + embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1) + else: + embedding = repeat(timesteps, 'b -> b d', d=dim) + return embedding + + class GELUHijack(torch.nn.GELU, torch.nn.Module): def __init__(self, *args, **kwargs): torch.nn.GELU.__init__(self, *args, **kwargs) @@ -69,6 +95,7 @@ def hijack_ddpm_edit(): unet_needs_upcast = lambda *args, **kwargs: devices.unet_needs_upcast CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.apply_model', apply_model, unet_needs_upcast) +CondFunc('ldm.modules.diffusionmodules.openaimodel.timestep_embedding', timestep_embedding) CondFunc('ldm.modules.diffusionmodules.openaimodel.timestep_embedding', lambda orig_func, timesteps, *args, **kwargs: orig_func(timesteps, *args, **kwargs).to(torch.float32 if timesteps.dtype == torch.int64 else devices.dtype_unet), unet_needs_upcast) if version.parse(torch.__version__) <= version.parse("1.13.2") or torch.cuda.is_available(): CondFunc('ldm.modules.diffusionmodules.util.GroupNorm32.forward', lambda orig_func, self, *args, **kwargs: orig_func(self.float(), *args, **kwargs), unet_needs_upcast) From cc9ca67664ef72931af9a4dced88a8434c5d4f16 Mon Sep 17 00:00:00 2001 From: drhead <1313496+drhead@users.noreply.github.com> Date: Fri, 17 May 2024 13:14:26 -0400 Subject: [PATCH 62/89] Add transformer forward patch --- modules/sd_hijack_unet.py | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/modules/sd_hijack_unet.py b/modules/sd_hijack_unet.py index 0dabbe0e4..c680367eb 100644 --- a/modules/sd_hijack_unet.py +++ b/modules/sd_hijack_unet.py @@ -74,6 +74,30 @@ def timestep_embedding(_, timesteps, dim, max_period=10000, repeat_only=False): return embedding +# Monkey patch to SpatialTransformer removing unnecessary contiguous calls. 
+# Prevents a lot of unnecessary aten::copy_ calls +def spatial_transformer_forward(_, self, x: torch.Tensor, context=None): + # note: if no context is given, cross-attention defaults to self-attention + if not isinstance(context, list): + context = [context] + b, c, h, w = x.shape + x_in = x + x = self.norm(x) + if not self.use_linear: + x = self.proj_in(x) + x = x.permute(0, 2, 3, 1).reshape(b, h * w, c) + if self.use_linear: + x = self.proj_in(x) + for i, block in enumerate(self.transformer_blocks): + x = block(x, context=context[i]) + if self.use_linear: + x = self.proj_out(x) + x = x.view(b, h, w, c).permute(0, 3, 1, 2) + if not self.use_linear: + x = self.proj_out(x) + return x + x_in + + class GELUHijack(torch.nn.GELU, torch.nn.Module): def __init__(self, *args, **kwargs): torch.nn.GELU.__init__(self, *args, **kwargs) @@ -95,7 +119,8 @@ def hijack_ddpm_edit(): unet_needs_upcast = lambda *args, **kwargs: devices.unet_needs_upcast CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.apply_model', apply_model, unet_needs_upcast) -CondFunc('ldm.modules.diffusionmodules.openaimodel.timestep_embedding', timestep_embedding) +CondFunc('ldm.modules.diffusionmodules.openaimodel.timestep_embedding', timestep_embedding, lambda *args, **kwargs: True) +CondFunc('ldm.modules.attention.SpatialTransformer.forward', spatial_transformer_forward, lambda *args, **kwargs: True) CondFunc('ldm.modules.diffusionmodules.openaimodel.timestep_embedding', lambda orig_func, timesteps, *args, **kwargs: orig_func(timesteps, *args, **kwargs).to(torch.float32 if timesteps.dtype == torch.int64 else devices.dtype_unet), unet_needs_upcast) if version.parse(torch.__version__) <= version.parse("1.13.2") or torch.cuda.is_available(): CondFunc('ldm.modules.diffusionmodules.util.GroupNorm32.forward', lambda orig_func, self, *args, **kwargs: orig_func(self.float(), *args, **kwargs), unet_needs_upcast) From dca9007ac7a9852752d91d34d2ed1feaef6a03f2 Mon Sep 17 00:00:00 2001 From: huchenlei Date: Fri, 17 May 2024 13:23:12 -0400 Subject: [PATCH 63/89] Fix SD15 dtype --- modules/sd_models.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/modules/sd_models.py b/modules/sd_models.py index 9c5909168..7d4ab0fd8 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -733,6 +733,10 @@ def load_model(checkpoint_info=None, already_loaded_state_dict=None): sd_model = instantiate_from_config(sd_config.model) sd_model.used_config = checkpoint_config + # ldm's Unet is using self.dtype to cast input tensor. If we do not overwrite + # UnetModel.dtype, it will be the default dtype from config. + # sgm's Unet is not using dtype for casting. The value will be ignored. 
+ sd_model.model.diffusion_model.dtype = devices.dtype_unet timer.record("create model") From b57a70f37322142939f7429f287599e027108bfc Mon Sep 17 00:00:00 2001 From: huchenlei Date: Fri, 17 May 2024 13:34:04 -0400 Subject: [PATCH 64/89] Proper fix of SD15 dtype --- modules/sd_models.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/modules/sd_models.py b/modules/sd_models.py index 7d4ab0fd8..26a5127cd 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -541,7 +541,7 @@ def repair_config(sd_config): if hasattr(sd_config.model.params, 'unet_config'): if shared.cmd_opts.no_half: sd_config.model.params.unet_config.params.use_fp16 = False - elif shared.cmd_opts.upcast_sampling: + elif shared.cmd_opts.upcast_sampling or shared.cmd_opts.precision == "half": sd_config.model.params.unet_config.params.use_fp16 = True if getattr(sd_config.model.params.first_stage_config.params.ddconfig, "attn_type", None) == "vanilla-xformers" and not shared.xformers_available: @@ -733,10 +733,6 @@ def load_model(checkpoint_info=None, already_loaded_state_dict=None): sd_model = instantiate_from_config(sd_config.model) sd_model.used_config = checkpoint_config - # ldm's Unet is using self.dtype to cast input tensor. If we do not overwrite - # UnetModel.dtype, it will be the default dtype from config. - # sgm's Unet is not using dtype for casting. The value will be ignored. - sd_model.model.diffusion_model.dtype = devices.dtype_unet timer.record("create model") From 1d7448281751ea3223c681a82de8219a6fbe1d22 Mon Sep 17 00:00:00 2001 From: Logan Date: Sat, 18 May 2024 09:09:57 +1000 Subject: [PATCH 65/89] Default device for sigma tensor to CPU * Consistent with implementations in k-diffusion. * Makes this compatible with https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15823 --- modules/sd_schedulers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/sd_schedulers.py b/modules/sd_schedulers.py index 0ac1f7a21..4ddb77850 100644 --- a/modules/sd_schedulers.py +++ b/modules/sd_schedulers.py @@ -33,7 +33,7 @@ def sgm_uniform(n, sigma_min, sigma_max, inner_model, device): sigs += [0.0] return torch.FloatTensor(sigs).to(device) -def get_align_your_steps_sigmas(n, sigma_min, sigma_max, device): +def get_align_your_steps_sigmas(n, sigma_min, sigma_max, device='cpu'): # https://research.nvidia.com/labs/toronto-ai/AlignYourSteps/howto.html def loglinear_interp(t_steps, num_steps): """ From 281e0a007b102c7fc9f6150fb88c95470dc25a17 Mon Sep 17 00:00:00 2001 From: Andray Date: Sat, 18 May 2024 09:13:16 +0400 Subject: [PATCH 66/89] scroll extensions table on overflow --- style.css | 2 ++ 1 file changed, 2 insertions(+) diff --git a/style.css b/style.css index f6a89b8f9..5ec803a04 100644 --- a/style.css +++ b/style.css @@ -807,6 +807,8 @@ table.popup-table .link{ #tab_extensions table{ border-collapse: collapse; + overflow-x: auto; + display: block; } #tab_extensions table td, #tab_extensions table th{ From feeb6802aa71fad190da2e051e50af84a94eda85 Mon Sep 17 00:00:00 2001 From: drhead <1313496+drhead@users.noreply.github.com> Date: Sat, 18 May 2024 01:22:31 -0400 Subject: [PATCH 67/89] fix case where first step is skipped if skip early cond is 0 --- modules/sd_samplers_cfg_denoiser.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/sd_samplers_cfg_denoiser.py b/modules/sd_samplers_cfg_denoiser.py index 082a4f63c..d89ea2c8b 100644 --- a/modules/sd_samplers_cfg_denoiser.py +++ b/modules/sd_samplers_cfg_denoiser.py @@ -212,7 +212,7 @@ class
CFGDenoiser(torch.nn.Module): uncond = denoiser_params.text_uncond skip_uncond = False - if self.step / self.total_steps <= shared.opts.skip_early_cond: + if shared.opts.skip_early_cond != 0. and self.step / self.total_steps <= shared.opts.skip_early_cond: skip_uncond = True x_in = x_in[:-batch_size] sigma_in = sigma_in[:-batch_size] From 501ac016da8c28ff4778219f142f0622083237ce Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sat, 18 May 2024 18:37:37 +0900 Subject: [PATCH 68/89] Reformat --- scripts/xyz_grid.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index b9fd66fe5..b23fd4770 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -162,12 +162,14 @@ def apply_override(field, boolean: bool = False): if boolean: x = True if x.lower() == "true" else False p.override_settings[field] = x + return fun def boolean_choice(reverse: bool = False): def choice(): return ["False", "True"] if reverse else ["True", "False"] + return choice @@ -572,7 +574,7 @@ class Script(scripts.Script): mc = re_range_count.fullmatch(val) if m is not None: start = int(m.group(1)) - end = int(m.group(2))+1 + end = int(m.group(2)) + 1 step = int(m.group(3)) if m.group(3) is not None else 1 valslist_ext += list(range(start, end, step)) @@ -725,11 +727,11 @@ class Script(scripts.Script): ydim = len(ys) if vary_seeds_y else 1 if vary_seeds_x: - pc.seed += ix + pc.seed += ix if vary_seeds_y: - pc.seed += iy * xdim + pc.seed += iy * xdim if vary_seeds_z: - pc.seed += iz * xdim * ydim + pc.seed += iz * xdim * ydim try: res = process_images(pc) @@ -797,18 +799,18 @@ class Script(scripts.Script): z_count = len(zs) # Set the grid infotexts to the real ones with extra_generation_params (1 main grid + z_count sub-grids) - processed.infotexts[:1+z_count] = grid_infotext[:1+z_count] + processed.infotexts[:1 + z_count] = grid_infotext[:1 + z_count] if not include_lone_images: # Don't need sub-images anymore, drop from list: - processed.images = processed.images[:z_count+1] + processed.images = processed.images[:z_count + 1] if opts.grid_save: # Auto-save main and sub-grids: grid_count = z_count + 1 if z_count > 1 else 1 for g in range(grid_count): # TODO: See previous comment about intentional data misalignment. 
- adj_g = g-1 if g > 0 else g + adj_g = g - 1 if g > 0 else g images.save_image(processed.images[g], p.outpath_grids, "xyz_grid", info=processed.infotexts[g], extension=opts.grid_format, prompt=processed.all_prompts[adj_g], seed=processed.all_seeds[adj_g], grid=True, p=processed) if not include_sub_grids: # if not include_sub_grids then skip saving after the first grid break From 969a462ac9ea52eb61b8de9fd685cc477c8b8dac Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sat, 18 May 2024 18:27:34 +0900 Subject: [PATCH 69/89] xyz util confirm_range --- scripts/xyz_grid.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index b23fd4770..81c7abe95 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -95,6 +95,17 @@ def confirm_checkpoints_or_none(p, xs): raise RuntimeError(f"Unknown checkpoint: {x}") +def confirm_range(min_val, max_val, axis_label): + """Generates a AxisOption.confirm() function that checks all values are within the specified range.""" + + def confirm_range_fun(p, xs): + for x in xs: + if not (max_val >= x >= min_val): + raise ValueError(f'{axis_label} value "{x}" out of range [{min_val}, {max_val}]') + + return confirm_range_fun + + def apply_clip_skip(p, x, xs): opts.data["CLIP_stop_at_last_layers"] = x From 24a59ad3d2f9f44130746fdfe54f9f51ba74e77f Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sat, 18 May 2024 15:36:49 +0900 Subject: [PATCH 70/89] fix Hypertile xyz grid --- .../hypertile/scripts/hypertile_script.py | 17 ++++++- .../hypertile/scripts/hypertile_xyz.py | 51 ------------------- 2 files changed, 15 insertions(+), 53 deletions(-) delete mode 100644 extensions-builtin/hypertile/scripts/hypertile_xyz.py diff --git a/extensions-builtin/hypertile/scripts/hypertile_script.py b/extensions-builtin/hypertile/scripts/hypertile_script.py index 395d584b6..59e7f9907 100644 --- a/extensions-builtin/hypertile/scripts/hypertile_script.py +++ b/extensions-builtin/hypertile/scripts/hypertile_script.py @@ -1,6 +1,5 @@ import hypertile from modules import scripts, script_callbacks, shared -from scripts.hypertile_xyz import add_axis_options class ScriptHypertile(scripts.Script): @@ -93,7 +92,6 @@ def on_ui_settings(): "hypertile_max_depth_unet": shared.OptionInfo(3, "Hypertile U-Net max depth", gr.Slider, {"minimum": 0, "maximum": 3, "step": 1}, infotext="Hypertile U-Net max depth").info("larger = more neural network layers affected; minor effect on performance"), "hypertile_max_tile_unet": shared.OptionInfo(256, "Hypertile U-Net max tile size", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}, infotext="Hypertile U-Net max tile size").info("larger = worse performance"), "hypertile_swap_size_unet": shared.OptionInfo(3, "Hypertile U-Net swap size", gr.Slider, {"minimum": 0, "maximum": 64, "step": 1}, infotext="Hypertile U-Net swap size"), - "hypertile_enable_vae": shared.OptionInfo(False, "Enable Hypertile VAE", infotext="Hypertile VAE").info("minimal change in the generated picture"), "hypertile_max_depth_vae": shared.OptionInfo(3, "Hypertile VAE max depth", gr.Slider, {"minimum": 0, "maximum": 3, "step": 1}, infotext="Hypertile VAE max depth"), "hypertile_max_tile_vae": shared.OptionInfo(128, "Hypertile VAE max tile size", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}, infotext="Hypertile VAE max tile size"), @@ -105,5 +103,20 @@ def on_ui_settings(): shared.opts.add_option(name, opt) +def add_axis_options(): + xyz_grid = [x for x in 
scripts.scripts_data if x.script_class.__module__ == "xyz_grid.py"][0].module + xyz_grid.axis_options.extend([ + xyz_grid.AxisOption("[Hypertile] Unet First pass Enabled", str, xyz_grid.apply_override('hypertile_enable_unet', boolean=True), choices=xyz_grid.boolean_choice(reverse=True)), + xyz_grid.AxisOption("[Hypertile] Unet Second pass Enabled", str, xyz_grid.apply_override('hypertile_enable_unet_secondpass', boolean=True), choices=xyz_grid.boolean_choice(reverse=True)), + xyz_grid.AxisOption("[Hypertile] Unet Max Depth", int, xyz_grid.apply_override("hypertile_max_depth_unet"), confirm=xyz_grid.confirm_range(0, 3, '[Hypertile] Unet Max Depth'), choices=lambda: [str(x) for x in range(4)]), + xyz_grid.AxisOption("[Hypertile] Unet Max Tile Size", int, xyz_grid.apply_override("hypertile_max_tile_unet"), confirm=xyz_grid.confirm_range(0, 512, '[Hypertile] Unet Max Tile Size')), + xyz_grid.AxisOption("[Hypertile] Unet Swap Size", int, xyz_grid.apply_override("hypertile_swap_size_unet"), confirm=xyz_grid.confirm_range(0, 64, '[Hypertile] Unet Swap Size')), + xyz_grid.AxisOption("[Hypertile] VAE Enabled", str, xyz_grid.apply_override('hypertile_enable_vae', boolean=True), choices=xyz_grid.boolean_choice(reverse=True)), + xyz_grid.AxisOption("[Hypertile] VAE Max Depth", int, xyz_grid.apply_override("hypertile_max_depth_vae"), confirm=xyz_grid.confirm_range(0, 3, '[Hypertile] VAE Max Depth'), choices=lambda: [str(x) for x in range(4)]), + xyz_grid.AxisOption("[Hypertile] VAE Max Tile Size", int, xyz_grid.apply_override("hypertile_max_tile_vae"), confirm=xyz_grid.confirm_range(0, 512, '[Hypertile] VAE Max Tile Size')), + xyz_grid.AxisOption("[Hypertile] VAE Swap Size", int, xyz_grid.apply_override("hypertile_swap_size_vae"), confirm=xyz_grid.confirm_range(0, 64, '[Hypertile] VAE Swap Size')), + ]) + + script_callbacks.on_ui_settings(on_ui_settings) script_callbacks.on_before_ui(add_axis_options) diff --git a/extensions-builtin/hypertile/scripts/hypertile_xyz.py b/extensions-builtin/hypertile/scripts/hypertile_xyz.py deleted file mode 100644 index 9e96ae3c5..000000000 --- a/extensions-builtin/hypertile/scripts/hypertile_xyz.py +++ /dev/null @@ -1,51 +0,0 @@ -from modules import scripts -from modules.shared import opts - -xyz_grid = [x for x in scripts.scripts_data if x.script_class.__module__ == "xyz_grid.py"][0].module - -def int_applier(value_name:str, min_range:int = -1, max_range:int = -1): - """ - Returns a function that applies the given value to the given value_name in opts.data. - """ - def validate(value_name:str, value:str): - value = int(value) - # validate value - if not min_range == -1: - assert value >= min_range, f"Value {value} for {value_name} must be greater than or equal to {min_range}" - if not max_range == -1: - assert value <= max_range, f"Value {value} for {value_name} must be less than or equal to {max_range}" - def apply_int(p, x, xs): - validate(value_name, x) - opts.data[value_name] = int(x) - return apply_int - -def bool_applier(value_name:str): - """ - Returns a function that applies the given value to the given value_name in opts.data. 
- """ - def validate(value_name:str, value:str): - assert value.lower() in ["true", "false"], f"Value {value} for {value_name} must be either true or false" - def apply_bool(p, x, xs): - validate(value_name, x) - value_boolean = x.lower() == "true" - opts.data[value_name] = value_boolean - return apply_bool - -def add_axis_options(): - extra_axis_options = [ - xyz_grid.AxisOption("[Hypertile] Unet First pass Enabled", str, bool_applier("hypertile_enable_unet"), choices=xyz_grid.boolean_choice(reverse=True)), - xyz_grid.AxisOption("[Hypertile] Unet Second pass Enabled", str, bool_applier("hypertile_enable_unet_secondpass"), choices=xyz_grid.boolean_choice(reverse=True)), - xyz_grid.AxisOption("[Hypertile] Unet Max Depth", int, int_applier("hypertile_max_depth_unet", 0, 3), choices=lambda: [str(x) for x in range(4)]), - xyz_grid.AxisOption("[Hypertile] Unet Max Tile Size", int, int_applier("hypertile_max_tile_unet", 0, 512)), - xyz_grid.AxisOption("[Hypertile] Unet Swap Size", int, int_applier("hypertile_swap_size_unet", 0, 64)), - xyz_grid.AxisOption("[Hypertile] VAE Enabled", str, bool_applier("hypertile_enable_vae"), choices=xyz_grid.boolean_choice(reverse=True)), - xyz_grid.AxisOption("[Hypertile] VAE Max Depth", int, int_applier("hypertile_max_depth_vae", 0, 3), choices=lambda: [str(x) for x in range(4)]), - xyz_grid.AxisOption("[Hypertile] VAE Max Tile Size", int, int_applier("hypertile_max_tile_vae", 0, 512)), - xyz_grid.AxisOption("[Hypertile] VAE Swap Size", int, int_applier("hypertile_swap_size_vae", 0, 64)), - ] - set_a = {opt.label for opt in xyz_grid.axis_options} - set_b = {opt.label for opt in extra_axis_options} - if set_a.intersection(set_b): - return - - xyz_grid.axis_options.extend(extra_axis_options) From 82884da18c8f183c4ce0e7237953303f26610370 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sun, 19 May 2024 04:55:45 +0900 Subject: [PATCH 71/89] use apply_override for Clip skip --- scripts/xyz_grid.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index b9fd66fe5..c7cb51333 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -264,7 +264,7 @@ axis_options = [ AxisOption("Schedule max sigma", float, apply_override("sigma_max")), AxisOption("Schedule rho", float, apply_override("rho")), AxisOption("Eta", float, apply_field("eta")), - AxisOption("Clip skip", int, apply_clip_skip), + AxisOption("Clip skip", int, apply_override('CLIP_stop_at_last_layers')), AxisOption("Denoising", float, apply_field("denoising_strength")), AxisOption("Initial noise multiplier", float, apply_field("initial_noise_multiplier")), AxisOption("Extra noise", float, apply_override("img2img_extra_noise")), @@ -399,7 +399,6 @@ def draw_xyz_grid(p, xs, ys, zs, x_labels, y_labels, z_labels, cell, draw_legend class SharedSettingsStackHelper(object): def __enter__(self): - self.CLIP_stop_at_last_layers = opts.CLIP_stop_at_last_layers self.vae = opts.sd_vae self.uni_pc_order = opts.uni_pc_order @@ -409,8 +408,6 @@ class SharedSettingsStackHelper(object): modules.sd_models.reload_model_weights() modules.sd_vae.reload_vae_weights() - opts.data["CLIP_stop_at_last_layers"] = self.CLIP_stop_at_last_layers - re_range = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\(([+-]\d+)\s*\))?\s*") re_range_float = re.compile(r"\s*([+-]?\s*\d+(?:.\d*)?)\s*-\s*([+-]?\s*\d+(?:.\d*)?)(?:\s*\(([+-]\d+(?:.\d*)?)\s*\))?\s*") From 1f392517f8938e0082e189fa0c28f4eb89fb0eb2 Mon Sep 17 00:00:00 2001 From: w-e-w 
<40751091+w-e-w@users.noreply.github.com> Date: Sun, 19 May 2024 04:59:05 +0900 Subject: [PATCH 72/89] use override for uni_pc_order --- scripts/xyz_grid.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index c7cb51333..622cc43c3 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -140,7 +140,7 @@ def apply_styles(p: StableDiffusionProcessingTxt2Img, x: str, _): def apply_uni_pc_order(p, x, xs): - opts.data["uni_pc_order"] = min(x, p.steps - 1) + p.override_settings['uni_pc_order'] = min(x, p.steps - 1) def apply_face_restore(p, opt, x): @@ -400,11 +400,9 @@ def draw_xyz_grid(p, xs, ys, zs, x_labels, y_labels, z_labels, cell, draw_legend class SharedSettingsStackHelper(object): def __enter__(self): self.vae = opts.sd_vae - self.uni_pc_order = opts.uni_pc_order def __exit__(self, exc_type, exc_value, tb): opts.data["sd_vae"] = self.vae - opts.data["uni_pc_order"] = self.uni_pc_order modules.sd_models.reload_model_weights() modules.sd_vae.reload_vae_weights() From 1e696b028adbd449df8c30ed760103b120ec5546 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sun, 19 May 2024 05:14:32 +0900 Subject: [PATCH 73/89] use override of sd_vae --- scripts/xyz_grid.py | 24 +++++++++--------------- 1 file changed, 9 insertions(+), 15 deletions(-) diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index 622cc43c3..4c83e92b2 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -118,21 +118,16 @@ def apply_size(p, x: str, xs) -> None: def find_vae(name: str): - if name.lower() in ['auto', 'automatic']: - return modules.sd_vae.unspecified - if name.lower() == 'none': - return None - else: - choices = [x for x in sorted(modules.sd_vae.vae_dict, key=lambda x: len(x)) if name.lower().strip() in x.lower()] - if len(choices) == 0: - print(f"No VAE found for {name}; using automatic") - return modules.sd_vae.unspecified - else: - return modules.sd_vae.vae_dict[choices[0]] + match name := name.lower().strip(): + case 'auto' | 'automatic': + return 'Automatic' + case 'none': + return 'None' + return next((k for k in modules.sd_vae.vae_dict if k.lower() == name), print(f'No VAE found for {name}; using Automatic') or 'Automatic') def apply_vae(p, x, xs): - modules.sd_vae.reload_vae_weights(shared.sd_model, vae_file=find_vae(x)) + p.override_settings['sd_vae'] = find_vae(x) def apply_styles(p: StableDiffusionProcessingTxt2Img, x: str, _): @@ -270,7 +265,7 @@ axis_options = [ AxisOption("Extra noise", float, apply_override("img2img_extra_noise")), AxisOptionTxt2Img("Hires upscaler", str, apply_field("hr_upscaler"), choices=lambda: [*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]]), AxisOptionImg2Img("Cond.
Image Mask Weight", float, apply_field("inpainting_mask_weight")), - AxisOption("VAE", str, apply_vae, cost=0.7, choices=lambda: ['None'] + list(sd_vae.vae_dict)), + AxisOption("VAE", str, apply_vae, cost=0.7, choices=lambda: ['Automatic', 'None'] + list(sd_vae.vae_dict)), AxisOption("Styles", str, apply_styles, choices=lambda: list(shared.prompt_styles.styles)), AxisOption("UniPC Order", int, apply_uni_pc_order, cost=0.5), AxisOption("Face restore", str, apply_face_restore, format_value=format_value), @@ -399,10 +394,9 @@ def draw_xyz_grid(p, xs, ys, zs, x_labels, y_labels, z_labels, cell, draw_legend class SharedSettingsStackHelper(object): def __enter__(self): - self.vae = opts.sd_vae + pass def __exit__(self, exc_type, exc_value, tb): - opts.data["sd_vae"] = self.vae modules.sd_models.reload_model_weights() modules.sd_vae.reload_vae_weights() From 51e7122f25c276b258a8f55a64e60e5b2265287f Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sun, 19 May 2024 05:17:44 +0900 Subject: [PATCH 74/89] remove unused code --- scripts/xyz_grid.py | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index 4c83e92b2..23dafd477 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -95,17 +95,6 @@ def confirm_checkpoints_or_none(p, xs): raise RuntimeError(f"Unknown checkpoint: {x}") -def apply_clip_skip(p, x, xs): - opts.data["CLIP_stop_at_last_layers"] = x - - -def apply_upscale_latent_space(p, x, xs): - if x.lower().strip() != '0': - opts.data["use_scale_latent_for_hires_fix"] = True - else: - opts.data["use_scale_latent_for_hires_fix"] = False - - def apply_size(p, x: str, xs) -> None: try: width, _, height = x.partition('x') From 5867be2914c303c2f8ba86ff23dba4b31aeafa79 Mon Sep 17 00:00:00 2001 From: viking1304 Date: Mon, 20 May 2024 23:44:17 +0200 Subject: [PATCH 75/89] Use different torch versions for Intel and ARM Macs --- webui-macos-env.sh | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/webui-macos-env.sh b/webui-macos-env.sh index db7e8b1a0..ad0736378 100644 --- a/webui-macos-env.sh +++ b/webui-macos-env.sh @@ -11,7 +11,12 @@ fi export install_dir="$HOME" export COMMANDLINE_ARGS="--skip-torch-cuda-test --upcast-sampling --no-half-vae --use-cpu interrogate" -export TORCH_COMMAND="pip install torch==2.1.0 torchvision==0.16.0" export PYTORCH_ENABLE_MPS_FALLBACK=1 +if [[ "$(sysctl -n machdep.cpu.brand_string)" =~ ^.*"Intel".*$ ]]; then + export TORCH_COMMAND="pip install torch==2.1.2 torchvision==0.16.2" +else + export TORCH_COMMAND="pip install torch==2.3.0 torchvision==0.18.0" +fi + #################################################################### From 344eda55d4550e91b1a3e95f8e669084a74c876f Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Wed, 22 May 2024 23:06:07 +0900 Subject: [PATCH 76/89] ReloadUI backgroundColor --background-fill-primary --- javascript/ui.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/javascript/ui.js b/javascript/ui.js index e0f5feebd..16faacebb 100644 --- a/javascript/ui.js +++ b/javascript/ui.js @@ -337,8 +337,8 @@ onOptionsChanged(function() { let txt2img_textarea, img2img_textarea = undefined; function restart_reload() { + document.body.style.backgroundColor = "var(--background-fill-primary)"; document.body.innerHTML = '

<h1 style="font-family:monospace; margin-top:20%; color:lightgray; text-align:center;">Reloading...</h1>

'; - var requestPing = function() { requestGet("./internal/ping", {}, function(data) { location.reload(); From 6dd53ce63dc70b3fcf7f25402d40b48f50abdf74 Mon Sep 17 00:00:00 2001 From: alcacode Date: Sun, 26 May 2024 15:36:55 +0200 Subject: [PATCH 77/89] Fix bug where file extension had an extra '.' under some circumstances Fix bug where under some circumstances an extra "." was inserted between the file base name and the file extension. The bug is triggered when the extension argument is one of "jpg", "jpeg", or "webp", and the image exceeds the format's dimension limit. Then the extension variable is set to ".png", resulting in the fullfn variable evaluating to a string ending with "..png". --- modules/images.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/images.py b/modules/images.py index c0ff8a630..1be176cdf 100644 --- a/modules/images.py +++ b/modules/images.py @@ -653,7 +653,7 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i # WebP and JPG formats have maximum dimension limits of 16383 and 65535 respectively. switch to PNG which has a much higher limit if (image.height > 65535 or image.width > 65535) and extension.lower() in ("jpg", "jpeg") or (image.height > 16383 or image.width > 16383) and extension.lower() == "webp": print('Image dimensions too large; saving as PNG') - extension = ".png" + extension = "png" if save_to_dirs is None: save_to_dirs = (grid and opts.grid_save_to_dirs) or (not grid and opts.save_to_dirs and not no_prompt) From 8d6f7417385d1cacfd827800bdf02a0e8dd8f092 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Wed, 29 May 2024 03:33:32 +0900 Subject: [PATCH 78/89] #15883 -> #15882 --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5c16b5611..596b1ec45 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,7 +1,7 @@ ## 1.9.4 ### Bug Fixes: -* pin setuptools version to fix the startup error ([#15883](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15883)) +* pin setuptools version to fix the startup error ([#15882](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15882)) ## 1.9.3 From 10f8d0f84216e3642e960ea7118a5acc8a79546f Mon Sep 17 00:00:00 2001 From: eatmoreapple Date: Tue, 4 Jun 2024 15:02:13 +0800 Subject: [PATCH 79/89] feat: lora partial update precedes full update.
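Before this commit, a requested LoRA name that was missing from the in-memory registry forced list_available_networks() to rescan and re-register every file under the LoRA directories; the change splits the scan so that only the missing names are processed first. A minimal, self-contained sketch of that lazy-refresh pattern; the dict and helper names here are simplified stand-ins, not the real networks.py structures:

```python
from pathlib import Path

# stand-in for the module-level registry in networks.py
available_networks: dict[str, Path] = {}

def refresh_by_names(lora_dir: Path, names: list[str]) -> None:
    # Partial update: only register files whose basename was requested,
    # instead of re-walking (and re-hashing) the whole directory.
    wanted = set(names)
    for path in lora_dir.glob("*.safetensors"):
        if path.stem in wanted:
            available_networks[path.stem] = path

def load_networks(lora_dir: Path, names: list[str]) -> None:
    missing = [n for n in names if n not in available_networks]
    if missing:
        refresh_by_names(lora_dir, missing)  # partial update precedes any full rescan
    not_found = [n for n in names if n not in available_networks]
    if not_found:
        raise FileNotFoundError(f"unknown networks: {not_found}")
```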
--- extensions-builtin/Lora/networks.py | 40 +++++++++++++++++++++-------- 1 file changed, 30 insertions(+), 10 deletions(-) diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py index 42b14dc23..18809364b 100644 --- a/extensions-builtin/Lora/networks.py +++ b/extensions-builtin/Lora/networks.py @@ -260,6 +260,16 @@ def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=No loaded_networks.clear() + unavailable_networks = [] + for name in names: + if name.lower() in forbidden_network_aliases and available_networks.get(name) is None: + unavailable_networks.append(name) + elif available_network_aliases.get(name) is None: + unavailable_networks.append(name) + + if unavailable_networks: + update_available_networks_by_names(unavailable_networks) + networks_on_disk = [available_networks.get(name, None) if name.lower() in forbidden_network_aliases else available_network_aliases.get(name, None) for name in names] if any(x is None for x in networks_on_disk): list_available_networks() @@ -566,22 +576,16 @@ def network_MultiheadAttention_load_state_dict(self, *args, **kwargs): return originals.MultiheadAttention_load_state_dict(self, *args, **kwargs) -def list_available_networks(): - available_networks.clear() - available_network_aliases.clear() - forbidden_network_aliases.clear() - available_network_hash_lookup.clear() - forbidden_network_aliases.update({"none": 1, "Addams": 1}) - - os.makedirs(shared.cmd_opts.lora_dir, exist_ok=True) - +def process_network_files(names: list[str] | None = None): candidates = list(shared.walk_files(shared.cmd_opts.lora_dir, allowed_extensions=[".pt", ".ckpt", ".safetensors"])) candidates += list(shared.walk_files(shared.cmd_opts.lyco_dir_backcompat, allowed_extensions=[".pt", ".ckpt", ".safetensors"])) for filename in candidates: if os.path.isdir(filename): continue - name = os.path.splitext(os.path.basename(filename))[0] + # if names is provided, only load networks with names in the list + if names and name not in names: + continue try: entry = network.NetworkOnDisk(name, filename) except OSError: # should catch FileNotFoundError and PermissionError etc. @@ -597,6 +601,22 @@ def list_available_networks(): available_network_aliases[entry.alias] = entry +def update_available_networks_by_names(names: list[str]): + process_network_files(names) + + +def list_available_networks(): + available_networks.clear() + available_network_aliases.clear() + forbidden_network_aliases.clear() + available_network_hash_lookup.clear() + forbidden_network_aliases.update({"none": 1, "Addams": 1}) + + os.makedirs(shared.cmd_opts.lora_dir, exist_ok=True) + + process_network_files() + + re_network_name = re.compile(r"(.*)\s*\([0-9a-fA-F]+\)") From 25bbf31f5701b85804908a54b2f6af38a1d50f1f Mon Sep 17 00:00:00 2001 From: NouberNou Date: Thu, 6 Jun 2024 16:22:49 -0700 Subject: [PATCH 80/89] Fix for grids without comprehensive infotexts When generating grids, some scripts such as img2img loopback and ultimate SD upscale do not pass infotexts for each image since they are the same prompt. If you attempt to save those images using the save button in the UI it will fail because it will look for the selected image info text. This fixes those errors by replicating the infotext for as many images as are passed into the image list if the infotext parameter is None.
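A toy reproduction of the failure described above; plain strings stand in for PIL images, and the real Processed class is not used:

```python
images_list = ["grid", "step1", "step2"]  # stand-ins for PIL images
info = "Prompt: ..., Steps: 20"           # one shared infotext for all of them

infotexts = None                          # the script passed no per-image infotexts

before = infotexts or [info]                     # old behavior: a 1-element list
after = infotexts or [info] * len(images_list)   # fixed: one entry per image

# before[2] would raise IndexError when saving the third image from the UI;
# after[2] is a safe lookup for any selected image.
assert len(after) == len(images_list)
print(after[2])
```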
--- modules/processing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/processing.py b/modules/processing.py index 76557dd7f..cb37a77df 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -569,7 +569,7 @@ class Processed: self.all_negative_prompts = all_negative_prompts or p.all_negative_prompts or [self.negative_prompt] self.all_seeds = all_seeds or p.all_seeds or [self.seed] self.all_subseeds = all_subseeds or p.all_subseeds or [self.subseed] - self.infotexts = infotexts or [info] + self.infotexts = infotexts or [info] * len(image_list) self.version = program_version() def js(self): From 53f62674ae55e84aff4d4c9ed104ba9dce8ae887 Mon Sep 17 00:00:00 2001 From: NouberNou Date: Thu, 6 Jun 2024 16:30:01 -0700 Subject: [PATCH 81/89] Typo on edit Edited in fix in Github editor and mistyped from local copy --- modules/processing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/processing.py b/modules/processing.py index cb37a77df..c22da4169 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -569,7 +569,7 @@ class Processed: self.all_negative_prompts = all_negative_prompts or p.all_negative_prompts or [self.negative_prompt] self.all_seeds = all_seeds or p.all_seeds or [self.seed] self.all_subseeds = all_subseeds or p.all_subseeds or [self.subseed] - self.infotexts = infotexts or [info] * len(image_list) + self.infotexts = infotexts or [info] * len(images_list) self.version = program_version() def js(self): From 0769aa318a1896ccf74f57e6e943eb6b5fab5051 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 8 Jun 2024 09:05:35 +0300 Subject: [PATCH 82/89] integrated edits as recommended in the PR #15804 --- modules/sd_hijack_optimizations.py | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py index 4c2dc56d4..0269f1f5b 100644 --- a/modules/sd_hijack_optimizations.py +++ b/modules/sd_hijack_optimizations.py @@ -486,18 +486,7 @@ def xformers_attention_forward(self, x, context=None, mask=None, **kwargs): k_in = self.to_k(context_k) v_in = self.to_v(context_v) - def _reshape(t): - """rearrange(t, 'b n (h d) -> b n h d', h=h). - Using torch native operations to avoid overhead as this function is - called frequently. (70 times/it for SDXL) - """ - b, n, _ = t.shape # Get the batch size (b) and sequence length (n) - d = t.shape[2] // h # Determine the depth per head - return t.reshape(b, n, h, d) - - q = _reshape(q_in) - k = _reshape(k_in) - v = _reshape(v_in) + q, k, v = (t.reshape(t.shape[0], t.shape[1], h, -1) for t in (q_in, k_in, v_in)) del q_in, k_in, v_in @@ -509,7 +498,6 @@ def xformers_attention_forward(self, x, context=None, mask=None, **kwargs): out = out.to(dtype) - # out = rearrange(out, 'b n h d -> b n (h d)', h=h) b, n, h, d = out.shape out = out.reshape(b, n, h * d) return self.to_out(out) From 5429e4cff514df2f4cab242212ba347741eadc08 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 8 Jun 2024 09:56:09 +0300 Subject: [PATCH 83/89] add proper infotext support for #15607 fix settings override not working for NGMS, s_churn, etc...
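The root cause of the override bug is ordering: StableDiffusionProcessing.__init__ snapshotted s_min_uncond, s_churn, s_tmin, s_tmax and s_noise from opts before process_images() had applied p.override_settings to opts, so overrides for those fields were silently ignored; the commit defers the reads into fill_fields_from_opts(), called during processing. A minimal sketch of that ordering problem, using simplified stand-ins rather than the actual webui classes:

```python
class Opts:  # stand-in for modules.shared.opts
    s_churn = 0.0

opts = Opts()

class ProcessingEager:
    def __init__(self):
        self.s_churn = opts.s_churn  # snapshot taken before overrides are applied

class ProcessingLazy:
    def __init__(self):
        self.s_churn = None  # an explicit caller-supplied value would still win

    def fill_fields_from_opts(self):
        self.s_churn = self.s_churn if self.s_churn is not None else opts.s_churn

p_eager, p_lazy = ProcessingEager(), ProcessingLazy()
opts.s_churn = 0.5               # what applying override_settings does to opts
p_lazy.fill_fields_from_opts()   # called at the top of processing, sees 0.5
print(p_eager.s_churn, p_lazy.s_churn)  # 0.0 0.5: only the lazy read honors the override
```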
--- modules/processing.py | 14 ++++++++------ modules/sd_samplers_cfg_denoiser.py | 12 +++++++----- modules/shared_options.py | 6 +++--- 3 files changed, 18 insertions(+), 14 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index c22da4169..97a7162aa 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -238,11 +238,6 @@ class StableDiffusionProcessing: self.styles = [] self.sampler_noise_scheduler_override = None - self.s_min_uncond = self.s_min_uncond if self.s_min_uncond is not None else opts.s_min_uncond - self.s_churn = self.s_churn if self.s_churn is not None else opts.s_churn - self.s_tmin = self.s_tmin if self.s_tmin is not None else opts.s_tmin - self.s_tmax = (self.s_tmax if self.s_tmax is not None else opts.s_tmax) or float('inf') - self.s_noise = self.s_noise if self.s_noise is not None else opts.s_noise self.extra_generation_params = self.extra_generation_params or {} self.override_settings = self.override_settings or {} @@ -259,6 +254,13 @@ class StableDiffusionProcessing: self.cached_uc = StableDiffusionProcessing.cached_uc self.cached_c = StableDiffusionProcessing.cached_c + def fill_fields_from_opts(self): + self.s_min_uncond = self.s_min_uncond if self.s_min_uncond is not None else opts.s_min_uncond + self.s_churn = self.s_churn if self.s_churn is not None else opts.s_churn + self.s_tmin = self.s_tmin if self.s_tmin is not None else opts.s_tmin + self.s_tmax = (self.s_tmax if self.s_tmax is not None else opts.s_tmax) or float('inf') + self.s_noise = self.s_noise if self.s_noise is not None else opts.s_noise + @property def sd_model(self): return shared.sd_model @@ -794,7 +796,6 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter "Token merging ratio hr": None if not enable_hr or token_merging_ratio_hr == 0 else token_merging_ratio_hr, "Init image hash": getattr(p, 'init_img_hash', None), "RNG": opts.randn_source if opts.randn_source != "GPU" else None, - "NGMS": None if p.s_min_uncond == 0 else p.s_min_uncond, "Tiling": "True" if p.tiling else None, **p.extra_generation_params, "Version": program_version() if opts.add_version_to_infotext else None, @@ -890,6 +891,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: modules.sd_hijack.model_hijack.apply_circular(p.tiling) modules.sd_hijack.model_hijack.clear_comments() + p.fill_fields_from_opts() p.setup_prompts() if isinstance(seed, list): diff --git a/modules/sd_samplers_cfg_denoiser.py b/modules/sd_samplers_cfg_denoiser.py index d89ea2c8b..f48f58a50 100644 --- a/modules/sd_samplers_cfg_denoiser.py +++ b/modules/sd_samplers_cfg_denoiser.py @@ -214,12 +214,14 @@ class CFGDenoiser(torch.nn.Module): if shared.opts.skip_early_cond != 0. 
and self.step / self.total_steps <= shared.opts.skip_early_cond: skip_uncond = True - x_in = x_in[:-batch_size] - sigma_in = sigma_in[:-batch_size] - - # alternating uncond allows for higher thresholds without the quality loss normally expected from raising it - if (self.step % 2 or shared.opts.s_min_uncond_all) and s_min_uncond > 0 and sigma[0] < s_min_uncond and not is_edit_model: + self.p.extra_generation_params["Skip Early CFG"] = shared.opts.skip_early_cond + elif (self.step % 2 or shared.opts.s_min_uncond_all) and s_min_uncond > 0 and sigma[0] < s_min_uncond and not is_edit_model: skip_uncond = True + self.p.extra_generation_params["NGMS"] = s_min_uncond + if shared.opts.s_min_uncond_all: + self.p.extra_generation_params["NGMS all steps"] = shared.opts.s_min_uncond_all + + if skip_uncond: x_in = x_in[:-batch_size] sigma_in = sigma_in[:-batch_size] diff --git a/modules/shared_options.py b/modules/shared_options.py index c711fa5f6..05c3d9391 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -209,8 +209,8 @@ options_templates.update(options_section(('img2img', "img2img", "sd"), { options_templates.update(options_section(('optimizations', "Optimizations", "sd"), { "cross_attention_optimization": OptionInfo("Automatic", "Cross attention optimization", gr.Dropdown, lambda: {"choices": shared_items.cross_attention_optimizations()}), - "s_min_uncond": OptionInfo(0.0, "Negative Guidance minimum sigma", gr.Slider, {"minimum": 0.0, "maximum": 15.0, "step": 0.01}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9177").info("skip negative prompt for some steps when the image is almost ready; 0=disable, higher=faster"), - "s_min_uncond_all": OptionInfo(False, "NGMS: Skip every step").info("makes Negative Guidance minimum sigma skip negative guidance on every step instead of only half"), + "s_min_uncond": OptionInfo(0.0, "Negative Guidance minimum sigma", gr.Slider, {"minimum": 0.0, "maximum": 15.0, "step": 0.01}, infotext='NGMS').link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9177").info("skip negative prompt for some steps when the image is almost ready; 0=disable, higher=faster"), + "s_min_uncond_all": OptionInfo(False, "Negative Guidance minimum sigma all steps", infotext='NGMS all steps').info("By default, NGMS above skips every other step; this makes it skip all steps"), "token_merging_ratio": OptionInfo(0.0, "Token merging ratio", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}, infotext='Token merging ratio').link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9256").info("0=disable, higher=faster"), "token_merging_ratio_img2img": OptionInfo(0.0, "Token merging ratio for img2img", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"), "token_merging_ratio_hr": OptionInfo(0.0, "Token merging ratio for high-res pass", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}, infotext='Token merging ratio hr').info("only applies if non-zero and overrides above"), @@ -382,7 +382,7 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters" 'uni_pc_order': OptionInfo(3, "UniPC order", gr.Slider, {"minimum": 1, "maximum": 50, "step": 1}, infotext='UniPC order').info("must be < sampling steps"), 'uni_pc_lower_order_final': OptionInfo(True, "UniPC lower order final", infotext='UniPC lower order final'), 'sd_noise_schedule': OptionInfo("Default", "Noise schedule for sampling", gr.Radio, {"choices": ["Default", "Zero
Terminal SNR"]}, infotext="Noise Schedule").info("for use with zero terminal SNR trained models"), - 'skip_early_cond': OptionInfo(0, "Skip CFG during early sampling", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}, infotext="Skip Early CFG").info("CFG will be disabled (set to 1) on early steps, can both improve sample diversity/quality and speed up sampling"), + 'skip_early_cond': OptionInfo(0.0, "Ignore negative prompt during early sampling", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}, infotext="Skip Early CFG").info("disables CFG on a proportion of steps at the beginning of generation; 0=skip none; 1=skip all; can both improve sample diversity/quality and speed up sampling"), })) options_templates.update(options_section(('postprocessing', "Postprocessing", "postprocessing"), { From cd9e9e404955df19a72c832d68888db44ab7b382 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 8 Jun 2024 10:13:38 +0300 Subject: [PATCH 84/89] remove unneeded tabulation --- .../Lora/ui_extra_networks_lora.py | 23 +++++++++---------- 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/extensions-builtin/Lora/ui_extra_networks_lora.py b/extensions-builtin/Lora/ui_extra_networks_lora.py index e35d90c6e..3e34d69dc 100644 --- a/extensions-builtin/Lora/ui_extra_networks_lora.py +++ b/extensions-builtin/Lora/ui_extra_networks_lora.py @@ -60,19 +60,18 @@ class ExtraNetworksPageLora(ui_extra_networks.ExtraNetworksPage): else: sd_version = lora_on_disk.sd_version - if shared.sd_model is not None: # still show LoRA in case an error occurs during initial model loading - if shared.opts.lora_show_all or not enable_filter: - pass - elif sd_version == network.SdVersion.Unknown: - model_version = network.SdVersion.SDXL if shared.sd_model.is_sdxl else network.SdVersion.SD2 if shared.sd_model.is_sd2 else network.SdVersion.SD1 - if model_version.name in shared.opts.lora_hide_unknown_for_versions: - return None - elif shared.sd_model.is_sdxl and sd_version != network.SdVersion.SDXL: - return None - elif shared.sd_model.is_sd2 and sd_version != network.SdVersion.SD2: - return None - elif shared.sd_model.is_sd1 and sd_version != network.SdVersion.SD1: + if shared.opts.lora_show_all or not enable_filter or not shared.sd_model: + pass + elif sd_version == network.SdVersion.Unknown: + model_version = network.SdVersion.SDXL if shared.sd_model.is_sdxl else network.SdVersion.SD2 if shared.sd_model.is_sd2 else network.SdVersion.SD1 + if model_version.name in shared.opts.lora_hide_unknown_for_versions: return None + elif shared.sd_model.is_sdxl and sd_version != network.SdVersion.SDXL: + return None + elif shared.sd_model.is_sd2 and sd_version != network.SdVersion.SD2: + return None + elif shared.sd_model.is_sd1 and sd_version != network.SdVersion.SD1: + return None return item From 510f025a01733f20ebe3997c1c3d159e6ac50148 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Tue, 4 Jun 2024 02:23:43 +0900 Subject: [PATCH 85/89] replace wsl-open with wslpath and explorer.exe --- modules/util.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/util.py b/modules/util.py index 0db13736c..768bf32d0 100644 --- a/modules/util.py +++ b/modules/util.py @@ -208,6 +208,6 @@ Requested path was: {path} elif platform.system() == "Darwin": subprocess.Popen(["open", path]) elif "microsoft-standard-WSL2" in platform.uname().release: - subprocess.Popen(["wsl-open", path]) + subprocess.Popen(["explorer.exe", 
subprocess.check_output(["wslpath", "-w", path])]) else: subprocess.Popen(["xdg-open", path]) From 603509ec905a9c9ac1011e9531a9da180828fcc0 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 8 Jun 2024 10:54:41 +0300 Subject: [PATCH 86/89] as per wfjsw's suggestion, revert changes for sd_hijack_checkpoint.py --- modules/sd_hijack_checkpoint.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/modules/sd_hijack_checkpoint.py b/modules/sd_hijack_checkpoint.py index b2f05bbdc..2604d969f 100644 --- a/modules/sd_hijack_checkpoint.py +++ b/modules/sd_hijack_checkpoint.py @@ -4,19 +4,16 @@ import ldm.modules.attention import ldm.modules.diffusionmodules.openaimodel -# Setting flag=False so that torch skips checking parameters. -# parameters checking is expensive in frequent operations. - def BasicTransformerBlock_forward(self, x, context=None): - return checkpoint(self._forward, x, context, flag=False) + return checkpoint(self._forward, x, context) def AttentionBlock_forward(self, x): - return checkpoint(self._forward, x, flag=False) + return checkpoint(self._forward, x) def ResBlock_forward(self, x, emb): - return checkpoint(self._forward, x, emb, flag=False) + return checkpoint(self._forward, x, emb) stored = [] From 07cf95c76ef052c120fbf1cfb69e3018e1cb06f8 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 8 Jun 2024 11:26:34 +0300 Subject: [PATCH 87/89] update pickle safe filenames --- modules/safe.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/safe.py b/modules/safe.py index b1d08a792..af019ffd9 100644 --- a/modules/safe.py +++ b/modules/safe.py @@ -64,8 +64,8 @@ class RestrictedUnpickler(pickle.Unpickler): raise Exception(f"global '{module}/{name}' is forbidden") -# Regular expression that accepts 'dirname/version', 'dirname/data.pkl', and 'dirname/data/' -allowed_zip_names_re = re.compile(r"^([^/]+)/((data/\d+)|version|(data\.pkl))$") +# Regular expression that accepts 'dirname/version', 'dirname/byteorder', 'dirname/data.pkl', '.data/serialization_id', and 'dirname/data/' +allowed_zip_names_re = re.compile(r"^([^/]+)/((data/\d+)|version|byteorder|.data/serialization_id|(data\.pkl))$") data_pkl_re = re.compile(r"^([^/]+)/data\.pkl$") def check_zip_filenames(filename, names): From 1a7ffa2c76b0e68cd647c1f7f07235bcf85c985d Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 8 Jun 2024 11:35:45 +0300 Subject: [PATCH 88/89] remove extra local variable --- modules/paths_internal.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/modules/paths_internal.py b/modules/paths_internal.py index 884984c9c..67521f5cd 100644 --- a/modules/paths_internal.py +++ b/modules/paths_internal.py @@ -28,9 +28,8 @@ parser_pre.add_argument("--models-dir", type=str, default=None, help="base path cmd_opts_pre = parser_pre.parse_known_args()[0] data_path = cmd_opts_pre.data_dir -models_override = cmd_opts_pre.models_dir -models_path = models_override if models_override else os.path.join(data_path, "models") +models_path = cmd_opts_pre.models_dir if cmd_opts_pre.models_dir else os.path.join(data_path, "models") extensions_dir = os.path.join(data_path, "extensions") extensions_builtin_dir = os.path.join(script_path, "extensions-builtin") config_states_dir = os.path.join(script_path, "config_states") From 547778b10f25def4e040b81942a2b23295567de3 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 8 Jun 2024 12:41:28 +0300 Subject: [PATCH 
89/89] possibly make NaN check cheaper --- modules/devices.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/modules/devices.py b/modules/devices.py index 7de34ac51..d574975e5 100644 --- a/modules/devices.py +++ b/modules/devices.py @@ -243,22 +243,22 @@ def test_for_nans(x, where): if shared.cmd_opts.disable_nan_check: return - if not torch.all(torch.isnan(x)).item(): + if not torch.isnan(x[(0, ) * len(x.shape)]): return if where == "unet": - message = "A tensor with all NaNs was produced in Unet." + message = "A tensor with NaNs was produced in Unet." if not shared.cmd_opts.no_half: message += " This could be either because there's not enough precision to represent the picture, or because your video card does not support half type. Try setting the \"Upcast cross attention layer to float32\" option in Settings > Stable Diffusion or using the --no-half commandline argument to fix this." elif where == "vae": - message = "A tensor with all NaNs was produced in VAE." + message = "A tensor with NaNs was produced in VAE." if not shared.cmd_opts.no_half and not shared.cmd_opts.no_half_vae: message += " This could be because there's not enough precision to represent the picture. Try adding --no-half-vae commandline argument to fix this." else: - message = "A tensor with all NaNs was produced." + message = "A tensor with NaNs was produced." message += " Use --disable-nan-check commandline argument to disable this check."
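For context on this final commit: torch.all(torch.isnan(x)) reduces over every element of the tensor, while x[(0,) * len(x.shape)] reads a single scalar, so the new check is effectively O(1) in the tensor size. The trade-off, reflected in the reworded messages, is that it only detects a NaN in the first element; in practice a numerically collapsed U-Net or VAE output is NaN everywhere, so that one element suffices. A small illustration:

```python
import torch

x = torch.full((2, 3, 4), float("nan"))

index = (0,) * len(x.shape)         # (0, 0, 0): the tensor's first element
print(torch.isnan(x[index]))        # tensor(True), O(1): reads one scalar
print(torch.all(torch.isnan(x)))    # tensor(True), O(n): reduces the whole tensor
```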