From 0cc05fc492a9360d3b2f1b3f64c7d74f9041f74e Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sun, 21 May 2023 00:41:41 +0300
Subject: [PATCH 001/168] work on startup profile display
---
html/footer.html | 2 +
javascript/profilerVisualization.js | 91 +++++++++++++++++++++++++++++
javascript/ui_settings_hints.js | 2 +-
modules/script_callbacks.py | 3 +
modules/scripts.py | 3 +-
modules/timer.py | 46 +++++++++++++--
modules/ui.py | 4 +-
style.css | 8 ++-
webui.py | 14 +++--
9 files changed, 159 insertions(+), 14 deletions(-)
create mode 100644 javascript/profilerVisualization.js
diff --git a/html/footer.html b/html/footer.html
index bad87ff61..1ce13295c 100644
--- a/html/footer.html
+++ b/html/footer.html
@@ -5,6 +5,8 @@
•
Gradio
•
+ Startup profile
+ •
Reload UI
diff --git a/javascript/profilerVisualization.js b/javascript/profilerVisualization.js
new file mode 100644
index 000000000..1bd75986a
--- /dev/null
+++ b/javascript/profilerVisualization.js
@@ -0,0 +1,91 @@
+
+function createRow(table, cellName, items) {
+ var tr = document.createElement('tr');
+ var res = [];
+
+ items.forEach(function(x) {
+ var td = document.createElement(cellName);
+ td.textContent = x;
+ tr.appendChild(td);
+ res.push(td);
+ });
+
+ table.appendChild(tr);
+
+ return res;
+}
+
+function showProfile(path, cutoff = 0.0005) {
+ requestGet(path, {}, function(data) {
+ var table = document.createElement('table');
+ table.className = 'popup-table';
+
+ data.records['total'] = data.total;
+ var keys = Object.keys(data.records).sort(function(a, b) {
+ return data.records[b] - data.records[a];
+ });
+ var items = keys.map(function(x) {
+ return {key: x, parts: x.split('/'), time: data.records[x]};
+ });
+ var maxLength = items.reduce(function(a, b) {
+ return Math.max(a, b.parts.length);
+ }, 0);
+
+ var cols = createRow(table, 'th', ['record', 'seconds']);
+ cols[0].colSpan = maxLength;
+
+ function arraysEqual(a, b) {
+ return !(a < b || b < a);
+ }
+
+ var addLevel = function(level, parent) {
+ var matching = items.filter(function(x) {
+ return x.parts[level] && !x.parts[level + 1] && arraysEqual(x.parts.slice(0, level), parent);
+ });
+ var sorted = matching.sort(function(a, b) {
+ return b.time - a.time;
+ });
+ var othersTime = 0;
+ var othersList = [];
+ sorted.forEach(function(x) {
+ if (x.time < cutoff) {
+ othersTime += x.time;
+ othersList.push(x.parts[level]);
+ return;
+ }
+
+ var cells = [];
+ for (var i = 0; i < maxLength; i++) {
+ cells.push(x.parts[i]);
+ }
+ cells.push(x.time.toFixed(3));
+ var cols = createRow(table, 'td', cells);
+ for (i = 0; i < level; i++) {
+ cols[i].className = 'muted';
+ }
+
+ addLevel(level + 1, parent.concat([x.parts[level]]));
+ });
+
+ if (othersTime > 0) {
+ var cells = [];
+ for (var i = 0; i < maxLength; i++) {
+ cells.push(parent[i]);
+ }
+ cells.push(othersTime.toFixed(3));
+ var cols = createRow(table, 'td', cells);
+ for (i = 0; i < level; i++) {
+ cols[i].className = 'muted';
+ }
+
+ cols[level].textContent = 'others';
+ cols[level].title = othersList.join(", ");
+ }
+ };
+
+ addLevel(0, []);
+
+ popup(table);
+ });
+}
+
diff --git a/javascript/ui_settings_hints.js b/javascript/ui_settings_hints.js
index e216852b5..d088f9494 100644
--- a/javascript/ui_settings_hints.js
+++ b/javascript/ui_settings_hints.js
@@ -42,7 +42,7 @@ onOptionsChanged(function() {
function settingsHintsShowQuicksettings() {
requestGet("./internal/quicksettings-hint", {}, function(data) {
var table = document.createElement('table');
- table.className = 'settings-value-table';
+ table.className = 'popup-table';
data.forEach(function(obj) {
var tr = document.createElement('tr');
diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py
index 40f388a59..ecffc206f 100644
--- a/modules/script_callbacks.py
+++ b/modules/script_callbacks.py
@@ -7,6 +7,8 @@ from typing import Optional, Dict, Any
from fastapi import FastAPI
from gradio import Blocks
+from modules import timer
+
def report_exception(c, job):
print(f"Error executing callback {job} for {c.script}", file=sys.stderr)
@@ -123,6 +125,7 @@ def app_started_callback(demo: Optional[Blocks], app: FastAPI):
for c in callback_map['callbacks_app_started']:
try:
c.callback(demo, app)
+ timer.startup_timer.record(c.script)
except Exception:
report_exception(c, 'app_started_callback')
diff --git a/modules/scripts.py b/modules/scripts.py
index c902804b6..7ef1a8f8e 100644
--- a/modules/scripts.py
+++ b/modules/scripts.py
@@ -6,7 +6,7 @@ from collections import namedtuple
import gradio as gr
-from modules import shared, paths, script_callbacks, extensions, script_loading, scripts_postprocessing
+from modules import shared, paths, script_callbacks, extensions, script_loading, scripts_postprocessing, timer
AlwaysVisible = object()
@@ -270,6 +270,7 @@ def load_scripts():
finally:
sys.path = syspath
current_basedir = paths.script_path
+ timer.startup_timer.record(scriptfile.filename)
global scripts_txt2img, scripts_img2img, scripts_postproc
diff --git a/modules/timer.py b/modules/timer.py
index ba92be336..da99e49f8 100644
--- a/modules/timer.py
+++ b/modules/timer.py
@@ -1,11 +1,30 @@
import time
+class TimerSubcategory:
+ def __init__(self, timer, category):
+ self.timer = timer
+ self.category = category
+ self.start = None
+ self.original_base_category = timer.base_category
+
+ def __enter__(self):
+ self.start = time.time()
+ self.timer.base_category = self.original_base_category + self.category + "/"
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ elapsed_for_subcategory = time.time() - self.start
+ self.timer.base_category = self.original_base_category
+ self.timer.add_time_to_record(self.original_base_category + self.category, elapsed_for_subcategory)
+ self.timer.record(self.category)
+
+
class Timer:
def __init__(self):
self.start = time.time()
self.records = {}
self.total = 0
+ self.base_category = ''
def elapsed(self):
end = time.time()
@@ -13,18 +32,29 @@ class Timer:
self.start = end
return res
- def record(self, category, extra_time=0):
- e = self.elapsed()
+ def add_time_to_record(self, category, amount):
if category not in self.records:
self.records[category] = 0
- self.records[category] += e + extra_time
+ self.records[category] += amount
+
+ def record(self, category, extra_time=0):
+ e = self.elapsed()
+
+ self.add_time_to_record(self.base_category + category, e + extra_time)
+
self.total += e + extra_time
+ def subcategory(self, name):
+ self.elapsed()
+
+ subcat = TimerSubcategory(self, name)
+ return subcat
+
def summary(self):
res = f"{self.total:.1f}s"
- additions = [x for x in self.records.items() if x[1] >= 0.1]
+ additions = [(category, time_taken) for category, time_taken in self.records.items() if time_taken >= 0.1 and '/' not in category]
if not additions:
return res
@@ -34,5 +64,13 @@ class Timer:
return res
+ def dump(self):
+ return {'total': self.total, 'records': self.records}
+
def reset(self):
self.__init__()
+
+
+startup_timer = Timer()
+
+startup_record = None
diff --git a/modules/ui.py b/modules/ui.py
index 82820ab52..5174da634 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -13,7 +13,7 @@ import numpy as np
from PIL import Image, PngImagePlugin # noqa: F401
from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call
-from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave
+from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, timer
from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML
from modules.paths import script_path, data_path
@@ -1901,3 +1901,5 @@ def setup_ui_api(app):
app.add_api_route("/internal/quicksettings-hint", quicksettings_hint, methods=["GET"], response_model=List[QuicksettingsHint])
app.add_api_route("/internal/ping", lambda: {}, methods=["GET"])
+
+ app.add_api_route("/internal/profile-startup", lambda: timer.startup_record, methods=["GET"])
diff --git a/style.css b/style.css
index ba12723a2..f2491726e 100644
--- a/style.css
+++ b/style.css
@@ -403,19 +403,23 @@ div#extras_scale_to_tab div.form{
margin: 0 1.2em;
}
-table.settings-value-table{
+table.popup-table{
background: white;
border-collapse: collapse;
margin: 1em;
border: 4px solid white;
}
-table.settings-value-table td{
+table.popup-table td{
padding: 0.4em;
border: 1px solid #ccc;
max-width: 36em;
}
+table.popup-table .muted{
+ color: #aaa;
+}
+
.ui-defaults-none{
color: #aaa !important;
}
diff --git a/webui.py b/webui.py
index a76e377c7..940966ebd 100644
--- a/webui.py
+++ b/webui.py
@@ -20,7 +20,7 @@ logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not
from modules import paths, timer, import_hook, errors # noqa: F401
-startup_timer = timer.Timer()
+startup_timer = timer.startup_timer
import torch
import pytorch_lightning # noqa: F401 # pytorch_lightning should be imported after torch, but it re-enables warnings on import so import once to disable them
@@ -269,8 +269,8 @@ def initialize_rest(*, reload_script_modules=False):
localization.list_localizations(cmd_opts.localizations_dir)
- modules.scripts.load_scripts()
- startup_timer.record("load scripts")
+ with startup_timer.subcategory("load scripts"):
+ modules.scripts.load_scripts()
if reload_script_modules:
for module in [module for name, module in sys.modules.items() if name.startswith("modules.ui")]:
@@ -416,9 +416,12 @@ def webui():
ui_extra_networks.add_pages_to_demo(app)
- modules.script_callbacks.app_started_callback(shared.demo, app)
- startup_timer.record("scripts app_started_callback")
+ startup_timer.record("add APIs")
+ with startup_timer.subcategory("app_started_callback"):
+ modules.script_callbacks.app_started_callback(shared.demo, app)
+
+ timer.startup_record = startup_timer.dump()
print(f"Startup time: {startup_timer.summary()}.")
if cmd_opts.subpath:
@@ -443,6 +446,7 @@ def webui():
# If we catch a keyboard interrupt, we want to stop the server and exit.
shared.demo.close()
break
+
print('Restarting UI...')
shared.demo.close()
time.sleep(0.5)
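The startup-profiling pieces above fit together through a small API: Timer.record() stores flat timing entries, Timer.subcategory() is a context manager that prefixes everything recorded inside it with "name/", summary() prints only the top-level entries (those without a '/'), and dump() is the JSON that /internal/profile-startup serves to showProfile() behind the new footer link. A minimal sketch of that flow, assuming it runs inside the webui codebase so the modules/timer.py shown above is importable (the category names and sleeps are made up for illustration):

    import time
    from modules import timer   # the module extended in the patch above

    t = timer.Timer()
    time.sleep(0.1)
    t.record("load config")                # stored as a flat, top-level record
    with t.subcategory("load scripts"):    # records inside get the "load scripts/" prefix
        time.sleep(0.1)
        t.record("a.py")                   # stored as "load scripts/a.py"
        time.sleep(0.1)
        t.record("b.py")                   # stored as "load scripts/b.py"
    print(t.summary())                     # total plus top-level categories only
    print(t.dump())                        # {'total': ..., 'records': {...}}, the payload behind /internal/profile-startup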
From 8faac8b96313c6c4bf0a81bddecff4d6ba22ac25 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sun, 21 May 2023 21:55:14 +0300
Subject: [PATCH 002/168] run basic torch calculation at startup in parallel to
reduce the performance impact of first generation
---
modules/devices.py | 18 ++++++++++++++++++
webui.py | 4 +++-
2 files changed, 21 insertions(+), 1 deletion(-)
diff --git a/modules/devices.py b/modules/devices.py
index d8a34a0fd..1ed6ffdc1 100644
--- a/modules/devices.py
+++ b/modules/devices.py
@@ -1,5 +1,7 @@
import sys
import contextlib
+from functools import lru_cache
+
import torch
from modules import errors
@@ -154,3 +156,19 @@ def test_for_nans(x, where):
message += " Use --disable-nan-check commandline argument to disable this check."
raise NansException(message)
+
+
+@lru_cache
+def first_time_calculation():
+ """
+ just do any calculation with pytorch layers - the first time this is done it allocates about 700MB of memory and
+ spends about 2.7 seconds doing that, at least with NVidia.
+ """
+
+ x = torch.zeros((1, 1)).to(device, dtype)
+ linear = torch.nn.Linear(1, 1).to(device, dtype)
+ linear(x)
+
+ x = torch.zeros((1, 1, 3, 3)).to(device, dtype)
+ conv2d = torch.nn.Conv2d(1, 1, (3, 3)).to(device, dtype)
+ conv2d(x)
diff --git a/webui.py b/webui.py
index d4402f55e..07c70c462 100644
--- a/webui.py
+++ b/webui.py
@@ -20,7 +20,7 @@ import logging
logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage())
-from modules import paths, timer, import_hook, errors # noqa: F401
+from modules import paths, timer, import_hook, errors, devices # noqa: F401
startup_timer = timer.Timer()
@@ -295,6 +295,8 @@ def initialize_rest(*, reload_script_modules=False):
# (when reloading, this does nothing)
Thread(target=lambda: shared.sd_model).start()
+ Thread(target=devices.first_time_calculation).start()
+
shared.reload_hypernetworks()
startup_timer.record("reload hypernetworks")
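For reference, the warm-up above is self-contained enough to reproduce outside the webui; device and dtype below are stand-ins for modules.devices.device and modules.devices.dtype, and the bare @lru_cache makes repeated calls a no-op:

    from functools import lru_cache
    from threading import Thread

    import torch

    device = "cuda" if torch.cuda.is_available() else "cpu"        # stand-in for devices.device
    dtype = torch.float16 if device == "cuda" else torch.float32   # stand-in for devices.dtype

    @lru_cache
    def first_time_calculation():
        # one tiny Linear and one tiny Conv2d forward pass, so the one-time memory
        # allocation / kernel initialization cost is paid before the first real generation
        x = torch.zeros((1, 1)).to(device, dtype)
        torch.nn.Linear(1, 1).to(device, dtype)(x)

        x = torch.zeros((1, 1, 3, 3)).to(device, dtype)
        torch.nn.Conv2d(1, 1, (3, 3)).to(device, dtype)(x)

    # run in a background thread, as webui.py does, so startup itself is not delayed
    Thread(target=first_time_calculation).start()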
From 5ed970b94972004961ecada3bc1c936ef6017f3c Mon Sep 17 00:00:00 2001
From: Aarni Koskela
Date: Sun, 21 May 2023 23:16:14 +0300
Subject: [PATCH 003/168] Move token counters to separate JS file, fix names
---
.eslintrc.js | 2 +
javascript/token-counters.js | 75 ++++++++++++++++++++++++++++++++++++
javascript/ui.js | 71 +---------------------------------
3 files changed, 78 insertions(+), 70 deletions(-)
create mode 100644 javascript/token-counters.js
diff --git a/.eslintrc.js b/.eslintrc.js
index 944cc869e..218f56098 100644
--- a/.eslintrc.js
+++ b/.eslintrc.js
@@ -84,5 +84,7 @@ module.exports = {
// imageviewer.js
modalPrevImage: "readonly",
modalNextImage: "readonly",
+ // token-counters.js
+ setupTokenCounters: "readonly",
}
};
diff --git a/javascript/token-counters.js b/javascript/token-counters.js
new file mode 100644
index 000000000..0b74658c0
--- /dev/null
+++ b/javascript/token-counters.js
@@ -0,0 +1,75 @@
+let promptTokenCountDebounceTime = 800;
+let promptTokenCountTimeouts = {};
+var promptTokenCountUpdateFunctions = {};
+
+function update_txt2img_tokens(...args) {
+ // Called from Gradio
+ update_token_counter("txt2img_token_button");
+ if (args.length == 2) {
+ return args[0];
+ }
+ return args;
+}
+
+function update_img2img_tokens(...args) {
+ // Called from Gradio
+ update_token_counter("img2img_token_button");
+ if (args.length == 2) {
+ return args[0];
+ }
+ return args;
+}
+
+function update_token_counter(button_id) {
+ if (promptTokenCountTimeouts[button_id]) {
+ clearTimeout(promptTokenCountTimeouts[button_id]);
+ }
+ promptTokenCountTimeouts[button_id] = setTimeout(
+ () => gradioApp().getElementById(button_id)?.click(),
+ promptTokenCountDebounceTime,
+ );
+}
+
+
+function recalculatePromptTokens(name) {
+ promptTokenCountUpdateFunctions[name]?.();
+}
+
+function recalculate_prompts_txt2img() {
+ // Called from Gradio
+ recalculatePromptTokens('txt2img_prompt');
+ recalculatePromptTokens('txt2img_neg_prompt');
+ return Array.from(arguments);
+}
+
+function recalculate_prompts_img2img() {
+ // Called from Gradio
+ recalculatePromptTokens('img2img_prompt');
+ recalculatePromptTokens('img2img_neg_prompt');
+ return Array.from(arguments);
+}
+
+function setupTokenCounting(id, id_counter, id_button) {
+ var prompt = gradioApp().getElementById(id);
+ var counter = gradioApp().getElementById(id_counter);
+ var textarea = gradioApp().querySelector(`#${id} > label > textarea`);
+
+ if (counter.parentElement == prompt.parentElement) {
+ return;
+ }
+
+ prompt.parentElement.insertBefore(counter, prompt);
+ prompt.parentElement.style.position = "relative";
+
+ promptTokenCountUpdateFunctions[id] = function() {
+ update_token_counter(id_button);
+ };
+ textarea.addEventListener("input", promptTokenCountUpdateFunctions[id]);
+}
+
+function setupTokenCounters() {
+ setupTokenCounting('txt2img_prompt', 'txt2img_token_counter', 'txt2img_token_button');
+ setupTokenCounting('txt2img_neg_prompt', 'txt2img_negative_token_counter', 'txt2img_negative_token_button');
+ setupTokenCounting('img2img_prompt', 'img2img_token_counter', 'img2img_token_button');
+ setupTokenCounting('img2img_neg_prompt', 'img2img_negative_token_counter', 'img2img_negative_token_button');
+}
diff --git a/javascript/ui.js b/javascript/ui.js
index 648a5290e..800a2ae67 100644
--- a/javascript/ui.js
+++ b/javascript/ui.js
@@ -248,27 +248,6 @@ function confirm_clear_prompt(prompt, negative_prompt) {
}
-var promptTokecountUpdateFuncs = {};
-
-function recalculatePromptTokens(name) {
- if (promptTokecountUpdateFuncs[name]) {
- promptTokecountUpdateFuncs[name]();
- }
-}
-
-function recalculate_prompts_txt2img() {
- recalculatePromptTokens('txt2img_prompt');
- recalculatePromptTokens('txt2img_neg_prompt');
- return Array.from(arguments);
-}
-
-function recalculate_prompts_img2img() {
- recalculatePromptTokens('img2img_prompt');
- recalculatePromptTokens('img2img_neg_prompt');
- return Array.from(arguments);
-}
-
-
var opts = {};
onUiUpdate(function() {
if (Object.keys(opts).length != 0) return;
@@ -302,28 +281,7 @@ onUiUpdate(function() {
json_elem.parentElement.style.display = "none";
- function registerTextarea(id, id_counter, id_button) {
- var prompt = gradioApp().getElementById(id);
- var counter = gradioApp().getElementById(id_counter);
- var textarea = gradioApp().querySelector("#" + id + " > label > textarea");
-
- if (counter.parentElement == prompt.parentElement) {
- return;
- }
-
- prompt.parentElement.insertBefore(counter, prompt);
- prompt.parentElement.style.position = "relative";
-
- promptTokecountUpdateFuncs[id] = function() {
- update_token_counter(id_button);
- };
- textarea.addEventListener("input", promptTokecountUpdateFuncs[id]);
- }
-
- registerTextarea('txt2img_prompt', 'txt2img_token_counter', 'txt2img_token_button');
- registerTextarea('txt2img_neg_prompt', 'txt2img_negative_token_counter', 'txt2img_negative_token_button');
- registerTextarea('img2img_prompt', 'img2img_token_counter', 'img2img_token_button');
- registerTextarea('img2img_neg_prompt', 'img2img_negative_token_counter', 'img2img_negative_token_button');
+ setupTokenCounters();
var show_all_pages = gradioApp().getElementById('settings_show_all_pages');
var settings_tabs = gradioApp().querySelector('#settings div');
@@ -354,33 +312,6 @@ onOptionsChanged(function() {
});
let txt2img_textarea, img2img_textarea = undefined;
-let wait_time = 800;
-let token_timeouts = {};
-
-function update_txt2img_tokens(...args) {
- update_token_counter("txt2img_token_button");
- if (args.length == 2) {
- return args[0];
- }
- return args;
-}
-
-function update_img2img_tokens(...args) {
- update_token_counter(
- "img2img_token_button"
- );
- if (args.length == 2) {
- return args[0];
- }
- return args;
-}
-
-function update_token_counter(button_id) {
- if (token_timeouts[button_id]) {
- clearTimeout(token_timeouts[button_id]);
- }
- token_timeouts[button_id] = setTimeout(() => gradioApp().getElementById(button_id)?.click(), wait_time);
-}
function restart_reload() {
document.body.innerHTML = 'Reloading...
';
From 618c59b01d8b90794df0aea625de1c1d5d94d407 Mon Sep 17 00:00:00 2001
From: Aarni Koskela
Date: Sun, 21 May 2023 23:20:50 +0300
Subject: [PATCH 004/168] Add option to disable prompt token counters
---
javascript/token-counters.js | 8 ++++++++
modules/shared.py | 1 +
2 files changed, 9 insertions(+)
diff --git a/javascript/token-counters.js b/javascript/token-counters.js
index 0b74658c0..9d81a723b 100644
--- a/javascript/token-counters.js
+++ b/javascript/token-counters.js
@@ -21,6 +21,9 @@ function update_img2img_tokens(...args) {
}
function update_token_counter(button_id) {
+ if (opts.disable_token_counters) {
+ return;
+ }
if (promptTokenCountTimeouts[button_id]) {
clearTimeout(promptTokenCountTimeouts[button_id]);
}
@@ -54,6 +57,11 @@ function setupTokenCounting(id, id_counter, id_button) {
var counter = gradioApp().getElementById(id_counter);
var textarea = gradioApp().querySelector(`#${id} > label > textarea`);
+ if (opts.disable_token_counters) {
+ counter.style.display = "none";
+ return;
+ }
+
if (counter.parentElement == prompt.parentElement) {
return;
}
diff --git a/modules/shared.py b/modules/shared.py
index 3099d1d2e..e8dbd8a4b 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -487,6 +487,7 @@ options_templates.update(options_section(('ui', "User interface"), {
"ui_reorder": OptionInfo(", ".join(ui_reorder_categories), "txt2img/img2img UI item order").needs_restart(),
"hires_fix_show_sampler": OptionInfo(False, "Hires fix: show hires sampler selection").needs_restart(),
"hires_fix_show_prompts": OptionInfo(False, "Hires fix: show hires prompt and negative prompt").needs_restart(),
+ "disable_token_counters": OptionInfo(False, "Disable prompt token counters").needs_restart(),
}))
options_templates.update(options_section(('infotext', "Infotext"), {
From 3366e494a1147e570d8527eea19da88edb3a1e0c Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Mon, 22 May 2023 00:13:53 +0300
Subject: [PATCH 005/168] option to pad prompt/neg prompt to be same length
---
modules/sd_models.py | 5 +++++
modules/sd_samplers_kdiffusion.py | 10 ++++++++++
modules/shared.py | 1 +
3 files changed, 16 insertions(+)
diff --git a/modules/sd_models.py b/modules/sd_models.py
index b1afbaa7f..91b3eb115 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -508,6 +508,11 @@ def load_model(checkpoint_info=None, already_loaded_state_dict=None):
timer.record("scripts callbacks")
+ with devices.autocast(), torch.no_grad():
+ sd_model.cond_stage_model_empty_prompt = sd_model.cond_stage_model([""])
+
+ timer.record("calculate empty prompt")
+
print(f"Model loaded in {timer.summary()}.")
return sd_model
diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index 59982fc9b..638e0ac92 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -125,6 +125,16 @@ class CFGDenoiser(torch.nn.Module):
x_in = x_in[:-batch_size]
sigma_in = sigma_in[:-batch_size]
+ # TODO add infotext entry
+ if shared.opts.pad_cond_uncond and tensor.shape[1] != uncond.shape[1]:
+ empty = shared.sd_model.cond_stage_model_empty_prompt
+ num_repeats = (tensor.shape[1] - uncond.shape[1]) // empty.shape[1]
+
+ if num_repeats < 0:
+ tensor = torch.cat([tensor, empty.repeat((tensor.shape[0], -num_repeats, 1))], axis=1)
+ elif num_repeats > 0:
+ uncond = torch.cat([uncond, empty.repeat((uncond.shape[0], num_repeats, 1))], axis=1)
+
if tensor.shape[1] == uncond.shape[1] or skip_uncond:
if is_edit_model:
cond_in = torch.cat([tensor, uncond, uncond])
diff --git a/modules/shared.py b/modules/shared.py
index 3099d1d2e..e1a743d64 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -423,6 +423,7 @@ options_templates.update(options_section(('optimizations', "Optimizations"), {
"token_merging_ratio": OptionInfo(0.0, "Token merging ratio", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9256").info("0=disable, higher=faster"),
"token_merging_ratio_img2img": OptionInfo(0.0, "Token merging ratio for img2img", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"),
"token_merging_ratio_hr": OptionInfo(0.0, "Token merging ratio for high-res pass", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"),
+ "pad_cond_uncond": OptionInfo(False, "Pad prompt/negative prompt to be same length").info("improves performance when prompt and negative prompt have different lengths; changes seeds"),
}))
options_templates.update(options_section(('compatibility', "Compatibility"), {
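The new pad_cond_uncond branch only ever pads with whole empty-prompt chunks: num_repeats counts how many chunks the shorter conditioning is missing, and that many copies of the cached empty-prompt embedding are appended to it so cond and uncond can be batched together. A standalone sketch with dummy tensors (the 77-token, 768-dimensional CLIP shapes are assumptions for illustration):

    import torch

    empty = torch.zeros((1, 77, 768))     # stand-in for sd_model.cond_stage_model_empty_prompt
    tensor = torch.zeros((2, 154, 768))   # prompt conditioning: two 77-token chunks, batch of 2
    uncond = torch.zeros((2, 77, 768))    # negative prompt conditioning: one chunk

    num_repeats = (tensor.shape[1] - uncond.shape[1]) // empty.shape[1]   # here: 1

    if num_repeats < 0:
        tensor = torch.cat([tensor, empty.repeat((tensor.shape[0], -num_repeats, 1))], axis=1)
    elif num_repeats > 0:
        uncond = torch.cat([uncond, empty.repeat((uncond.shape[0], num_repeats, 1))], axis=1)

    assert tensor.shape[1] == uncond.shape[1]   # both are now 154 tokens long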
From a862428902c4aecde8852761c3a4d95c196885cb Mon Sep 17 00:00:00 2001
From: missionfloyd
Date: Sun, 21 May 2023 18:17:32 -0600
Subject: [PATCH 006/168] Fix dragging text to prompt
---
javascript/dragdrop.js | 4 ++--
javascript/imageParams.js | 2 +-
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/javascript/dragdrop.js b/javascript/dragdrop.js
index fe0089248..90cdd5070 100644
--- a/javascript/dragdrop.js
+++ b/javascript/dragdrop.js
@@ -51,7 +51,7 @@ function dropReplaceImage( imgWrap, files ) {
window.document.addEventListener('dragover', e => {
const target = e.composedPath()[0];
const imgWrap = target.closest('[data-testid="image"]');
- if ( !imgWrap && target.placeholder && target.placeholder.indexOf("Prompt") == -1) {
+ if (e.dataTransfer.files.length == 0 || (!imgWrap && target.placeholder && target.placeholder.indexOf("Prompt") == -1)) {
return;
}
e.stopPropagation();
@@ -61,7 +61,7 @@ window.document.addEventListener('dragover', e => {
window.document.addEventListener('drop', e => {
const target = e.composedPath()[0];
- if (target.placeholder.indexOf("Prompt") == -1) {
+ if (e.dataTransfer.files.length == 0 || target.placeholder.indexOf("Prompt") == -1) {
return;
}
const imgWrap = target.closest('[data-testid="image"]');
diff --git a/javascript/imageParams.js b/javascript/imageParams.js
index 64aee93b7..ed0df939c 100644
--- a/javascript/imageParams.js
+++ b/javascript/imageParams.js
@@ -1,7 +1,7 @@
window.onload = (function(){
window.addEventListener('drop', e => {
const target = e.composedPath()[0];
- if (target.placeholder.indexOf("Prompt") == -1) return;
+ if (e.dataTransfer.files.length == 0 || target.placeholder.indexOf("Prompt") == -1) return;
let prompt_target = get_tab_index('tabs') == 1 ? "img2img_prompt_image" : "txt2img_prompt_image";
From ee65e729319e4184fc0d49552657e98aa0e28f17 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Mon, 22 May 2023 09:49:51 +0300
Subject: [PATCH 007/168] repair file paste for Firefox from #10615; remove
animation when pasting files into prompt; rework two dragdrop js files into
one
---
javascript/dragdrop.js | 55 ++++++++++++++++++++++++++++++---------
javascript/imageParams.js | 18 -------------
modules/ui.py | 6 +++--
3 files changed, 46 insertions(+), 33 deletions(-)
delete mode 100644 javascript/imageParams.js
diff --git a/javascript/dragdrop.js b/javascript/dragdrop.js
index fd6b31540..aa79871ab 100644
--- a/javascript/dragdrop.js
+++ b/javascript/dragdrop.js
@@ -48,12 +48,27 @@ function dropReplaceImage(imgWrap, files) {
}
}
+function eventHasFiles(e) {
+ if (!e.dataTransfer || !e.dataTransfer.files) return false;
+ if (e.dataTransfer.files.length > 0) return true;
+ if (e.dataTransfer.items.length > 0 && e.dataTransfer.items[0].kind == "file") return true;
+
+ return false;
+}
+
+function dragDropTargetIsPrompt(target) {
+ if (!(target?.placeholder?.indexOf("Prompt") >= 0)) return true;
+ if (target?.parentNode?.parentNode) return true;
+ return false;
+}
+
window.document.addEventListener('dragover', e => {
const target = e.composedPath()[0];
- const imgWrap = target.closest('[data-testid="image"]');
- if (e.dataTransfer.files.length == 0 || (!imgWrap && target.placeholder && target.placeholder.indexOf("Prompt") == -1)) {
- return;
- }
+ if (!eventHasFiles(e)) return;
+
+ var targetImage = target.closest('[data-testid="image"]');
+ if (!dragDropTargetIsPrompt(target) && !targetImage) return;
+
e.stopPropagation();
e.preventDefault();
e.dataTransfer.dropEffect = 'copy';
@@ -61,17 +76,31 @@ window.document.addEventListener('dragover', e => {
window.document.addEventListener('drop', e => {
const target = e.composedPath()[0];
- if (e.dataTransfer.files.length == 0 || target.placeholder.indexOf("Prompt") == -1) {
+ if (!eventHasFiles(e)) return;
+
+ if (dragDropTargetIsPrompt(target)) {
+ e.stopPropagation();
+ e.preventDefault();
+
+ let prompt_target = get_tab_index('tabs') == 1 ? "img2img_prompt_image" : "txt2img_prompt_image";
+
+ const imgParent = gradioApp().getElementById(prompt_target);
+ const files = e.dataTransfer.files;
+ const fileInput = imgParent.querySelector('input[type="file"]');
+ if (fileInput) {
+ fileInput.files = files;
+ fileInput.dispatchEvent(new Event('change'));
+ }
+ }
+
+ var targetImage = target.closest('[data-testid="image"]');
+ if (targetImage) {
+ e.stopPropagation();
+ e.preventDefault();
+ const files = e.dataTransfer.files;
+ dropReplaceImage(targetImage, files);
return;
}
- const imgWrap = target.closest('[data-testid="image"]');
- if (!imgWrap) {
- return;
- }
- e.stopPropagation();
- e.preventDefault();
- const files = e.dataTransfer.files;
- dropReplaceImage(imgWrap, files);
});
window.addEventListener('paste', e => {
diff --git a/javascript/imageParams.js b/javascript/imageParams.js
deleted file mode 100644
index 0cdd717a9..000000000
--- a/javascript/imageParams.js
+++ /dev/null
@@ -1,18 +0,0 @@
-window.onload = (function() {
- window.addEventListener('drop', e => {
- const target = e.composedPath()[0];
- if (e.dataTransfer.files.length == 0 || target.placeholder.indexOf("Prompt") == -1) return;
-
- let prompt_target = get_tab_index('tabs') == 1 ? "img2img_prompt_image" : "txt2img_prompt_image";
-
- e.stopPropagation();
- e.preventDefault();
- const imgParent = gradioApp().getElementById(prompt_target);
- const files = e.dataTransfer.files;
- const fileInput = imgParent.querySelector('input[type="file"]');
- if (fileInput) {
- fileInput.files = files;
- fileInput.dispatchEvent(new Event('change'));
- }
- });
-});
diff --git a/modules/ui.py b/modules/ui.py
index e62182daa..001b97923 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -616,7 +616,8 @@ def create_ui():
outputs=[
txt2img_prompt,
txt_prompt_img
- ]
+ ],
+ show_progress=False,
)
enable_hr.change(
@@ -902,7 +903,8 @@ def create_ui():
outputs=[
img2img_prompt,
img2img_prompt_img
- ]
+ ],
+ show_progress=False,
)
img2img_args = dict(
From 47b669bc9ff3df73f58b675abaffbdfd84771a67 Mon Sep 17 00:00:00 2001
From: Aarni Koskela
Date: Mon, 22 May 2023 09:53:24 +0300
Subject: [PATCH 008/168] Upgrade Gradio, remove docs URL hack
---
requirements.txt | 2 +-
requirements_versions.txt | 2 +-
webui.py | 15 ++++-----------
3 files changed, 6 insertions(+), 13 deletions(-)
diff --git a/requirements.txt b/requirements.txt
index 34e4520d6..a464447bc 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -3,7 +3,7 @@ blendmodes
accelerate
basicsr
gfpgan
-gradio==3.31.0
+gradio==3.32.0
numpy
omegaconf
opencv-contrib-python
diff --git a/requirements_versions.txt b/requirements_versions.txt
index de501fda5..31b179a9e 100644
--- a/requirements_versions.txt
+++ b/requirements_versions.txt
@@ -3,7 +3,7 @@ transformers==4.25.1
accelerate==0.18.0
basicsr==1.4.2
gfpgan==1.3.8
-gradio==3.31.0
+gradio==3.32.0
numpy==1.23.5
Pillow==9.5.0
realesrgan==0.3.0
diff --git a/webui.py b/webui.py
index d4402f55e..2d2c1134b 100644
--- a/webui.py
+++ b/webui.py
@@ -370,17 +370,6 @@ def webui():
gradio_auth_creds = list(get_gradio_auth_creds()) or None
- # this restores the missing /docs endpoint
- if launch_api and not hasattr(FastAPI, 'original_setup'):
- # TODO: replace this with `launch(app_kwargs=...)` if https://github.com/gradio-app/gradio/pull/4282 gets merged
- def fastapi_setup(self):
- self.docs_url = "/docs"
- self.redoc_url = "/redoc"
- self.original_setup()
-
- FastAPI.original_setup = FastAPI.setup
- FastAPI.setup = fastapi_setup
-
app, local_url, share_url = shared.demo.launch(
share=cmd_opts.share,
server_name=server_name,
@@ -393,6 +382,10 @@ def webui():
inbrowser=cmd_opts.autolaunch,
prevent_thread_lock=True,
allowed_paths=cmd_opts.gradio_allowed_path,
+ app_kwargs={
+ "docs_url": "/docs",
+ "redoc_url": "/redoc",
+ },
)
if cmd_opts.add_stop_route:
app.add_route("/_stop", stop_route, methods=["POST"])
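The removed monkeypatch existed only to restore the /docs endpoint that the Gradio-created FastAPI app did not expose; Gradio 3.32.0 adds an app_kwargs parameter to launch() that forwards these settings to the underlying FastAPI app, which is what the patch uses instead. A minimal sketch of the same pattern in isolation (a hypothetical one-component app, not the webui's):

    import gradio as gr

    with gr.Blocks() as demo:
        gr.Markdown("hello")

    # gradio >= 3.32 accepts app_kwargs in launch() and passes them on to its FastAPI app,
    # so /docs and /redoc are available without patching FastAPI.setup
    demo.launch(
        prevent_thread_lock=True,
        app_kwargs={"docs_url": "/docs", "redoc_url": "/redoc"},
    )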
From cc2f6e3b7b8d720a8e0fb2732751e34b7f41c2e9 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Mon, 22 May 2023 15:40:10 +0300
Subject: [PATCH 009/168] fix error in dragdrop logic
---
javascript/dragdrop.js | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/javascript/dragdrop.js b/javascript/dragdrop.js
index aa79871ab..5803daea5 100644
--- a/javascript/dragdrop.js
+++ b/javascript/dragdrop.js
@@ -57,8 +57,8 @@ function eventHasFiles(e) {
}
function dragDropTargetIsPrompt(target) {
- if (!(target?.placeholder?.indexOf("Prompt") >= 0)) return true;
- if (target?.parentNode?.parentNode) return true;
+ if (target?.placeholder && target?.placeholder.indexOf("Prompt") >= 0) return true;
+ if (target?.parentNode?.parentNode?.className?.indexOf("prompt") > 0) return true;
return false;
}
From a10487986925ca8fd07ee7ae7fc5034752298551 Mon Sep 17 00:00:00 2001
From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com>
Date: Mon, 22 May 2023 21:52:46 +0800
Subject: [PATCH 010/168] Add custom karras scheduler
---
modules/img2img.py | 6 +++-
modules/processing.py | 10 ++++++-
modules/sd_samplers_kdiffusion.py | 6 ++++
modules/shared.py | 1 +
modules/txt2img.py | 6 +++-
modules/ui.py | 46 +++++++++++++++++++++++++++++++
scripts/xyz_grid.py | 3 ++
7 files changed, 75 insertions(+), 3 deletions(-)
diff --git a/modules/img2img.py b/modules/img2img.py
index d704bf900..898c3dc10 100644
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -78,7 +78,7 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args):
processed_image.save(os.path.join(output_dir, filename))
-def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, *args):
+def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, enable_k_sched, sigma_min, sigma_max, rho, *args):
override_settings = create_override_settings_dict(override_settings_texts)
is_batch = mode == 5
@@ -155,6 +155,10 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s
inpaint_full_res_padding=inpaint_full_res_padding,
inpainting_mask_invert=inpainting_mask_invert,
override_settings=override_settings,
+ enable_karras=enable_k_sched,
+ sigma_min=sigma_min,
+ sigma_max=sigma_max,
+ rho=rho
)
p.scripts = modules.scripts.scripts_img2img
diff --git a/modules/processing.py b/modules/processing.py
index 29a3743f5..b26f79986 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -106,7 +106,7 @@ class StableDiffusionProcessing:
"""
The first set of parameters: sd_models -> do_not_reload_embeddings represents the minimum required to create a StableDiffusionProcessing
"""
- def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = False, tiling: bool = False, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_min_uncond: float = 0.0, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = 1.0, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, script_args: list = None):
+ def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = False, tiling: bool = False, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_min_uncond: float = 0.0, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = 1.0, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, script_args: list = None, enable_karras: bool = False, sigma_min: float=0.1, sigma_max: float=10.0, rho: float=7.0):
if sampler_index is not None:
print("sampler_index argument for StableDiffusionProcessing does not do anything; use sampler_name", file=sys.stderr)
@@ -146,6 +146,10 @@ class StableDiffusionProcessing:
self.s_tmin = s_tmin or opts.s_tmin
self.s_tmax = s_tmax or float('inf') # not representable as a standard ui option
self.s_noise = s_noise or opts.s_noise
+ self.enable_karras = enable_karras
+ self.sigma_max = sigma_max
+ self.sigma_min = sigma_min
+ self.rho = rho
self.override_settings = {k: v for k, v in (override_settings or {}).items() if k not in shared.restricted_opts}
self.override_settings_restore_afterwards = override_settings_restore_afterwards
self.is_using_inpainting_conditioning = False
@@ -558,6 +562,10 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter
generation_params = {
"Steps": p.steps,
"Sampler": p.sampler_name,
+ "Enable Custom Karras Schedule": p.enable_karras,
+ "Karras Scheduler sigma_max": p.sigma_max,
+ "Karras Scheduler sigma_min": p.sigma_min,
+ "Karras Scheduler rho": p.rho,
"CFG scale": p.cfg_scale,
"Image CFG scale": getattr(p, 'image_cfg_scale', None),
"Seed": all_seeds[index],
diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index 638e0ac92..eb6c760c2 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -304,6 +304,12 @@ class KDiffusionSampler:
if p.sampler_noise_scheduler_override:
sigmas = p.sampler_noise_scheduler_override(steps)
+ elif p.enable_karras:
+ sigma_max = p.sigma_max
+ sigma_min = p.sigma_min
+ rho = p.rho
+ print(f"\nsigma_min: {sigma_min}, sigma_max: {sigma_max}, rho: {rho}")
+ sigmas = k_diffusion.sampling.get_sigmas_karras(n=steps, sigma_min=sigma_min, sigma_max=sigma_max, rho=rho, device=shared.device)
elif self.config is not None and self.config.options.get('scheduler', None) == 'karras':
sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item())
diff --git a/modules/shared.py b/modules/shared.py
index 0897f937a..dbba0824d 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -47,6 +47,7 @@ ui_reorder_categories = [
"inpaint",
"sampler",
"checkboxes",
+ "karras_scheduler",
"hires_fix",
"dimensions",
"cfg",
diff --git a/modules/txt2img.py b/modules/txt2img.py
index 2e7d202d7..9f6340071 100644
--- a/modules/txt2img.py
+++ b/modules/txt2img.py
@@ -7,7 +7,7 @@ from modules.ui import plaintext_to_html
-def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, hr_sampler_index: int, hr_prompt: str, hr_negative_prompt, override_settings_texts, *args):
+def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, hr_sampler_index: int, hr_prompt: str, hr_negative_prompt, override_settings_texts, enable_k_sched, sigma_min, sigma_max, rho, *args):
override_settings = create_override_settings_dict(override_settings_texts)
p = processing.StableDiffusionProcessingTxt2Img(
@@ -43,6 +43,10 @@ def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, step
hr_prompt=hr_prompt,
hr_negative_prompt=hr_negative_prompt,
override_settings=override_settings,
+ enable_karras=enable_k_sched,
+ sigma_min=sigma_min,
+ sigma_max=sigma_max,
+ rho=rho
)
p.scripts = modules.scripts.scripts_txt2img
diff --git a/modules/ui.py b/modules/ui.py
index 001b97923..a65f8d857 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -484,6 +484,7 @@ def create_ui():
with FormRow(elem_classes="checkboxes-row", variant="compact"):
restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="txt2img_restore_faces")
tiling = gr.Checkbox(label='Tiling', value=False, elem_id="txt2img_tiling")
+ t2i_enable_k_sched = gr.Checkbox(label='Custom Karras Scheduler', value=False, elem_id="txt2img_enable_k_sched")
enable_hr = gr.Checkbox(label='Hires. fix', value=False, elem_id="txt2img_enable_hr")
hr_final_resolution = FormHTML(value="", elem_id="txtimg_hr_finalres", label="Upscaled resolution", interactive=False)
@@ -510,6 +511,13 @@ def create_ui():
with gr.Row():
hr_negative_prompt = gr.Textbox(label="Negative prompt", elem_id="hires_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt for hires fix pass.\nLeave empty to use the same negative prompt as in first pass.", elem_classes=["prompt"])
+ elif category == "karras_scheduler":
+ with FormGroup(visible=False, elem_id="txt2img_karras_scheduler") as t2i_k_sched_options:
+ with FormRow(elem_id="txt2img_karras_scheduler_row1", variant="compact"):
+ t2i_k_sched_sigma_max = gr.Slider(minimum=0.0, maximum=0.5, step=0.05, label='sigma min', value=0.1, elem_id="txt2img_sigma_min")
+ t2i_k_sched_sigma_min = gr.Slider(minimum=5.0, maximum=50.0, step=0.1, label='sigma max', value=10.0, elem_id="txt2img_sigma_max")
+ t2i_k_sched_rho = gr.Slider(minimum=3.0, maximum=10.0, step=0.5, label='rho', value=7.0, elem_id="txt2img_rho")
+
elif category == "batch":
if not opts.dimensions_and_batch_together:
with FormRow(elem_id="txt2img_column_batch"):
@@ -578,6 +586,10 @@ def create_ui():
hr_prompt,
hr_negative_prompt,
override_settings,
+ t2i_enable_k_sched,
+ t2i_k_sched_sigma_max,
+ t2i_k_sched_sigma_min,
+ t2i_k_sched_rho
] + custom_inputs,
@@ -627,6 +639,13 @@ def create_ui():
show_progress = False,
)
+ t2i_enable_k_sched.change(
+ fn=lambda x: gr_show(x),
+ inputs=[t2i_enable_k_sched],
+ outputs=[t2i_k_sched_options],
+ show_progress=False
+ )
+
txt2img_paste_fields = [
(txt2img_prompt, "Prompt"),
(txt2img_negative_prompt, "Negative prompt"),
@@ -655,6 +674,10 @@ def create_ui():
(hr_prompt, "Hires prompt"),
(hr_negative_prompt, "Hires negative prompt"),
(hr_prompts_container, lambda d: gr.update(visible=True) if d.get("Hires prompt", "") != "" or d.get("Hires negative prompt", "") != "" else gr.update()),
+ (t2i_enable_k_sched, "Enable CustomKarras Schedule"),
+ (t2i_k_sched_sigma_max, "Karras Scheduler sigma_max"),
+ (t2i_k_sched_sigma_min, "Karras Scheduler sigma_min"),
+ (t2i_k_sched_rho, "Karras Scheduler rho"),
*modules.scripts.scripts_txt2img.infotext_fields
]
parameters_copypaste.add_paste_fields("txt2img", None, txt2img_paste_fields, override_settings)
@@ -846,6 +869,14 @@ def create_ui():
with FormRow(elem_classes="checkboxes-row", variant="compact"):
restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="img2img_restore_faces")
tiling = gr.Checkbox(label='Tiling', value=False, elem_id="img2img_tiling")
+ i2i_enable_k_sched = gr.Checkbox(label='Custom Karras Scheduler', value=False, elem_id="txt2img_enable_k_sched")
+
+ elif category == "karras_scheduler":
+ with FormGroup(visible=False, elem_id="img2img_karras_scheduler") as i2i_k_sched_options:
+ with FormRow(elem_id="img2img_karras_scheduler_row1", variant="compact"):
+ i2i_k_sched_sigma_max = gr.Slider(minimum=0.0, maximum=0.5, step=0.05, label='sigma min', value=0.1, elem_id="txt2img_sigma_min")
+ i2i_k_sched_sigma_min = gr.Slider(minimum=5.0, maximum=50.0, step=0.1, label='sigma max', value=10.0, elem_id="txt2img_sigma_max")
+ i2i_k_sched_rho = gr.Slider(minimum=3.0, maximum=10.0, step=0.5, label='rho', value=7.0, elem_id="txt2img_rho")
elif category == "batch":
if not opts.dimensions_and_batch_together:
@@ -949,6 +980,10 @@ def create_ui():
img2img_batch_output_dir,
img2img_batch_inpaint_mask_dir,
override_settings,
+ i2i_enable_k_sched,
+ i2i_k_sched_sigma_max,
+ i2i_k_sched_sigma_min,
+ i2i_k_sched_rho
] + custom_inputs,
outputs=[
img2img_gallery,
@@ -1032,6 +1067,13 @@ def create_ui():
outputs=[prompt, negative_prompt, styles],
)
+ i2i_enable_k_sched.change(
+ fn=lambda x: gr_show(x),
+ inputs=[i2i_enable_k_sched],
+ outputs=[i2i_k_sched_options],
+ show_progress=False
+ )
+
token_button.click(fn=update_token_counter, inputs=[img2img_prompt, steps], outputs=[token_counter])
negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[img2img_negative_prompt, steps], outputs=[negative_token_counter])
@@ -1043,6 +1085,10 @@ def create_ui():
(steps, "Steps"),
(sampler_index, "Sampler"),
(restore_faces, "Face restoration"),
+ (i2i_enable_k_sched, "Enable Karras Schedule"),
+ (i2i_k_sched_sigma_max, "Karras Scheduler sigma_max"),
+ (i2i_k_sched_sigma_min, "Karras Scheduler sigma_min"),
+ (i2i_k_sched_rho, "Karras Scheduler rho"),
(cfg_scale, "CFG scale"),
(image_cfg_scale, "Image CFG scale"),
(seed, "Seed"),
diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py
index da820b394..6ea049ee8 100644
--- a/scripts/xyz_grid.py
+++ b/scripts/xyz_grid.py
@@ -220,6 +220,9 @@ axis_options = [
AxisOption("Sigma min", float, apply_field("s_tmin")),
AxisOption("Sigma max", float, apply_field("s_tmax")),
AxisOption("Sigma noise", float, apply_field("s_noise")),
+ AxisOption("Karras Scheduler Sigma Min", float, apply_field("sigma_min")),
+ AxisOption("Karras Scheduler Sigma Max", float, apply_field("sigma_max")),
+ AxisOption("Karras Scheduler rho", float, apply_field("rho")),
AxisOption("Eta", float, apply_field("eta")),
AxisOption("Clip skip", int, apply_clip_skip),
AxisOption("Denoising", float, apply_field("denoising_strength")),
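The three new sliders feed k_diffusion.sampling.get_sigmas_karras, which interpolates between sigma_max and sigma_min in sigma^(1/rho) space. A minimal re-implementation for reference, using the UI defaults as arguments:

    import torch

    def get_sigmas_karras(n, sigma_min=0.1, sigma_max=10.0, rho=7.0, device="cpu"):
        ramp = torch.linspace(0, 1, n, device=device)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return torch.cat([sigmas, sigmas.new_zeros([1])])   # descending noise levels, ending at 0

    print(get_sigmas_karras(n=20))   # 20 sampling steps with the slider defaults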
From 90ec557d60289a89b4ea6cd9b311658fbe682dc3 Mon Sep 17 00:00:00 2001
From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com>
Date: Mon, 22 May 2023 22:06:13 +0800
Subject: [PATCH 011/168] remove debug print
---
modules/sd_samplers_kdiffusion.py | 1 -
1 file changed, 1 deletion(-)
diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index eb6c760c2..d428551d3 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -308,7 +308,6 @@ class KDiffusionSampler:
sigma_max = p.sigma_max
sigma_min = p.sigma_min
rho = p.rho
- print(f"\nsigma_min: {sigma_min}, sigma_max: {sigma_max}, rho: {rho}")
sigmas = k_diffusion.sampling.get_sigmas_karras(n=steps, sigma_min=sigma_min, sigma_max=sigma_max, rho=rho, device=shared.device)
elif self.config is not None and self.config.options.get('scheduler', None) == 'karras':
sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item())
From efc98530595443d31c773526c6b760b722019d62 Mon Sep 17 00:00:00 2001
From: Monty Anderson
Date: Mon, 22 May 2023 15:52:44 +0100
Subject: [PATCH 012/168] `modules/api/api.py`: disable `timeout_keep_alive`
---
modules/api/api.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/api/api.py b/modules/api/api.py
index 9bb95dfd1..bfeec3856 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -682,4 +682,4 @@ class Api:
def launch(self, server_name, port):
self.app.include_router(self.router)
- uvicorn.run(self.app, host=server_name, port=port)
+ uvicorn.run(self.app, host=server_name, port=port, timeout_keep_alive=0)
From e6269cba7fd84a76b2bd0012cb954f947a79b6a5 Mon Sep 17 00:00:00 2001
From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com>
Date: Mon, 22 May 2023 23:02:05 +0800
Subject: [PATCH 013/168] Add dropdown for scheduler type
---
modules/img2img.py | 3 ++-
modules/processing.py | 4 +++-
modules/sd_samplers_kdiffusion.py | 19 +++++++++++++++----
modules/txt2img.py | 3 ++-
modules/ui.py | 26 ++++++++++++++++----------
scripts/xyz_grid.py | 3 ++-
6 files changed, 40 insertions(+), 18 deletions(-)
diff --git a/modules/img2img.py b/modules/img2img.py
index 898c3dc10..73af5acbb 100644
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -78,7 +78,7 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args):
processed_image.save(os.path.join(output_dir, filename))
-def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, enable_k_sched, sigma_min, sigma_max, rho, *args):
+def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, enable_k_sched, k_sched_type, sigma_min, sigma_max, rho, *args):
override_settings = create_override_settings_dict(override_settings_texts)
is_batch = mode == 5
@@ -156,6 +156,7 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s
inpainting_mask_invert=inpainting_mask_invert,
override_settings=override_settings,
enable_karras=enable_k_sched,
+ k_sched_type=k_sched_type,
sigma_min=sigma_min,
sigma_max=sigma_max,
rho=rho
diff --git a/modules/processing.py b/modules/processing.py
index b26f79986..ad5d59607 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -106,7 +106,7 @@ class StableDiffusionProcessing:
"""
The first set of parameters: sd_models -> do_not_reload_embeddings represents the minimum required to create a StableDiffusionProcessing
"""
- def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = False, tiling: bool = False, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_min_uncond: float = 0.0, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = 1.0, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, script_args: list = None, enable_karras: bool = False, sigma_min: float=0.1, sigma_max: float=10.0, rho: float=7.0):
+ def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = False, tiling: bool = False, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_min_uncond: float = 0.0, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = 1.0, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, script_args: list = None, enable_karras: bool = False, k_sched_type: str = "karras", sigma_min: float=0.1, sigma_max: float=10.0, rho: float=7.0):
if sampler_index is not None:
print("sampler_index argument for StableDiffusionProcessing does not do anything; use sampler_name", file=sys.stderr)
@@ -147,6 +147,7 @@ class StableDiffusionProcessing:
self.s_tmax = s_tmax or float('inf') # not representable as a standard ui option
self.s_noise = s_noise or opts.s_noise
self.enable_karras = enable_karras
+ self.k_sched_type = k_sched_type
self.sigma_max = sigma_max
self.sigma_min = sigma_min
self.rho = rho
@@ -563,6 +564,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter
"Steps": p.steps,
"Sampler": p.sampler_name,
"Enable Custom Karras Schedule": p.enable_karras,
+ "Karras Scheduler Type": p.k_sched_type,
"Karras Scheduler sigma_max": p.sigma_max,
"Karras Scheduler sigma_min": p.sigma_min,
"Karras Scheduler rho": p.rho,
diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index d428551d3..441c040e4 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -44,6 +44,12 @@ sampler_extra_params = {
'sample_dpm_2': ['s_churn', 's_tmin', 's_tmax', 's_noise'],
}
+k_diffusion_scheduler = {
+ 'karras': k_diffusion.sampling.get_sigmas_karras,
+ 'exponential': k_diffusion.sampling.get_sigmas_exponential,
+ 'polyexponential': k_diffusion.sampling.get_sigmas_polyexponential
+}
+
class CFGDenoiser(torch.nn.Module):
"""
@@ -305,10 +311,15 @@ class KDiffusionSampler:
if p.sampler_noise_scheduler_override:
sigmas = p.sampler_noise_scheduler_override(steps)
elif p.enable_karras:
- sigma_max = p.sigma_max
- sigma_min = p.sigma_min
- rho = p.rho
- sigmas = k_diffusion.sampling.get_sigmas_karras(n=steps, sigma_min=sigma_min, sigma_max=sigma_max, rho=rho, device=shared.device)
+ print(p.k_sched_type, p.sigma_min, p.sigma_max, p.rho)
+ sigmas_func = k_diffusion_scheduler[p.k_sched_type]
+ sigmas_kwargs = {
+ 'sigma_min': p.sigma_min,
+ 'sigma_max': p.sigma_max
+ }
+ if p.k_sched_type != 'exponential':
+ sigmas_kwargs['rho'] = p.rho
+ sigmas = sigmas_func(n=steps, **sigmas_kwargs, device=shared.device)
elif self.config is not None and self.config.options.get('scheduler', None) == 'karras':
sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item())
diff --git a/modules/txt2img.py b/modules/txt2img.py
index 9f6340071..28d305688 100644
--- a/modules/txt2img.py
+++ b/modules/txt2img.py
@@ -7,7 +7,7 @@ from modules.ui import plaintext_to_html
-def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, hr_sampler_index: int, hr_prompt: str, hr_negative_prompt, override_settings_texts, enable_k_sched, sigma_min, sigma_max, rho, *args):
+def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, hr_sampler_index: int, hr_prompt: str, hr_negative_prompt, override_settings_texts, enable_k_sched, k_sched_type, sigma_min, sigma_max, rho, *args):
override_settings = create_override_settings_dict(override_settings_texts)
p = processing.StableDiffusionProcessingTxt2Img(
@@ -44,6 +44,7 @@ def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, step
hr_negative_prompt=hr_negative_prompt,
override_settings=override_settings,
enable_karras=enable_k_sched,
+ k_sched_type=k_sched_type,
sigma_min=sigma_min,
sigma_max=sigma_max,
rho=rho
diff --git a/modules/ui.py b/modules/ui.py
index a65f8d857..28d4f1d1e 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -514,9 +514,10 @@ def create_ui():
elif category == "karras_scheduler":
with FormGroup(visible=False, elem_id="txt2img_karras_scheduler") as t2i_k_sched_options:
with FormRow(elem_id="txt2img_karras_scheduler_row1", variant="compact"):
- t2i_k_sched_sigma_max = gr.Slider(minimum=0.0, maximum=0.5, step=0.05, label='sigma min', value=0.1, elem_id="txt2img_sigma_min")
- t2i_k_sched_sigma_min = gr.Slider(minimum=5.0, maximum=50.0, step=0.1, label='sigma max', value=10.0, elem_id="txt2img_sigma_max")
- t2i_k_sched_rho = gr.Slider(minimum=3.0, maximum=10.0, step=0.5, label='rho', value=7.0, elem_id="txt2img_rho")
+ t2i_k_sched_type = gr.Dropdown(label="Type", elem_id="t2i_k_sched_type", choices=['karras', 'exponential', 'polyexponential'], value='karras')
+ t2i_k_sched_sigma_min = gr.Slider(minimum=0.0, maximum=0.5, step=0.05, label='sigma min', value=0.1, elem_id="txt2img_sigma_min")
+ t2i_k_sched_sigma_max = gr.Slider(minimum=5.0, maximum=50.0, step=0.1, label='sigma max', value=10.0, elem_id="txt2img_sigma_max")
+ t2i_k_sched_rho = gr.Slider(minimum=0.5, maximum=10.0, step=0.1, label='rho', value=7.0, elem_id="txt2img_rho")
elif category == "batch":
if not opts.dimensions_and_batch_together:
@@ -587,8 +588,9 @@ def create_ui():
hr_negative_prompt,
override_settings,
t2i_enable_k_sched,
- t2i_k_sched_sigma_max,
+ t2i_k_sched_type,
t2i_k_sched_sigma_min,
+ t2i_k_sched_sigma_max,
t2i_k_sched_rho
] + custom_inputs,
@@ -674,7 +676,8 @@ def create_ui():
(hr_prompt, "Hires prompt"),
(hr_negative_prompt, "Hires negative prompt"),
(hr_prompts_container, lambda d: gr.update(visible=True) if d.get("Hires prompt", "") != "" or d.get("Hires negative prompt", "") != "" else gr.update()),
- (t2i_enable_k_sched, "Enable CustomKarras Schedule"),
+ (t2i_enable_k_sched, "Enable Custom Karras Schedule"),
+ (t2i_k_sched_type, "Karras Scheduler Type"),
(t2i_k_sched_sigma_max, "Karras Scheduler sigma_max"),
(t2i_k_sched_sigma_min, "Karras Scheduler sigma_min"),
(t2i_k_sched_rho, "Karras Scheduler rho"),
@@ -874,9 +877,10 @@ def create_ui():
elif category == "karras_scheduler":
with FormGroup(visible=False, elem_id="img2img_karras_scheduler") as i2i_k_sched_options:
with FormRow(elem_id="img2img_karras_scheduler_row1", variant="compact"):
- i2i_k_sched_sigma_max = gr.Slider(minimum=0.0, maximum=0.5, step=0.05, label='sigma min', value=0.1, elem_id="txt2img_sigma_min")
- i2i_k_sched_sigma_min = gr.Slider(minimum=5.0, maximum=50.0, step=0.1, label='sigma max', value=10.0, elem_id="txt2img_sigma_max")
- i2i_k_sched_rho = gr.Slider(minimum=3.0, maximum=10.0, step=0.5, label='rho', value=7.0, elem_id="txt2img_rho")
+ i2i_k_sched_type = gr.Dropdown(label="Type", elem_id="t2i_k_sched_type", choices=['karras', 'exponential', 'polyexponential'], value='karras')
+ i2i_k_sched_sigma_min = gr.Slider(minimum=0.0, maximum=0.5, step=0.05, label='sigma min', value=0.1, elem_id="txt2img_sigma_min")
+ i2i_k_sched_sigma_max = gr.Slider(minimum=5.0, maximum=50.0, step=0.1, label='sigma max', value=10.0, elem_id="txt2img_sigma_max")
+ i2i_k_sched_rho = gr.Slider(minimum=0.5, maximum=10.0, step=0.1, label='rho', value=7.0, elem_id="txt2img_rho")
elif category == "batch":
if not opts.dimensions_and_batch_together:
@@ -981,8 +985,9 @@ def create_ui():
img2img_batch_inpaint_mask_dir,
override_settings,
i2i_enable_k_sched,
- i2i_k_sched_sigma_max,
+ i2i_k_sched_type,
i2i_k_sched_sigma_min,
+ i2i_k_sched_sigma_max,
i2i_k_sched_rho
] + custom_inputs,
outputs=[
@@ -1085,7 +1090,8 @@ def create_ui():
(steps, "Steps"),
(sampler_index, "Sampler"),
(restore_faces, "Face restoration"),
- (i2i_enable_k_sched, "Enable Karras Schedule"),
+ (i2i_enable_k_sched, "Enable Custom Karras Schedule"),
+ (i2i_k_sched_type, "Karras Scheduler Type"),
(i2i_k_sched_sigma_max, "Karras Scheduler sigma_max"),
(i2i_k_sched_sigma_min, "Karras Scheduler sigma_min"),
(i2i_k_sched_rho, "Karras Scheduler rho"),
diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py
index 6ea049ee8..cea43c188 100644
--- a/scripts/xyz_grid.py
+++ b/scripts/xyz_grid.py
@@ -10,7 +10,7 @@ import numpy as np
import modules.scripts as scripts
import gradio as gr
-from modules import images, sd_samplers, processing, sd_models, sd_vae
+from modules import images, sd_samplers, processing, sd_models, sd_vae, sd_samplers_kdiffusion
from modules.processing import process_images, Processed, StableDiffusionProcessingTxt2Img
from modules.shared import opts, state
import modules.shared as shared
@@ -220,6 +220,7 @@ axis_options = [
AxisOption("Sigma min", float, apply_field("s_tmin")),
AxisOption("Sigma max", float, apply_field("s_tmax")),
AxisOption("Sigma noise", float, apply_field("s_noise")),
+ AxisOption("Karras Scheduler Type", str, apply_field("k_sched_type"), choices=lambda: [x for x in sd_samplers_kdiffusion.k_diffusion_scheduler]),
AxisOption("Karras Scheduler Sigma Min", float, apply_field("sigma_min")),
AxisOption("Karras Scheduler Sigma Max", float, apply_field("sigma_max")),
AxisOption("Karras Scheduler rho", float, apply_field("rho")),
From f82105144319fef7e973339062e972b6688fae11 Mon Sep 17 00:00:00 2001
From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com>
Date: Mon, 22 May 2023 23:09:03 +0800
Subject: [PATCH 014/168] Change karras to kdiffusion
---
modules/processing.py | 8 ++++----
modules/shared.py | 2 +-
modules/ui.py | 32 ++++++++++++++++----------------
scripts/xyz_grid.py | 8 ++++----
4 files changed, 25 insertions(+), 25 deletions(-)
diff --git a/modules/processing.py b/modules/processing.py
index ad5d59607..3fb05d79b 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -564,10 +564,10 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter
"Steps": p.steps,
"Sampler": p.sampler_name,
"Enable Custom Karras Schedule": p.enable_karras,
- "Karras Scheduler Type": p.k_sched_type,
- "Karras Scheduler sigma_max": p.sigma_max,
- "Karras Scheduler sigma_min": p.sigma_min,
- "Karras Scheduler rho": p.rho,
+ "kdiffusion Scheduler Type": p.k_sched_type,
+ "kdiffusion Scheduler sigma_max": p.sigma_max,
+ "kdiffusion Scheduler sigma_min": p.sigma_min,
+ "kdiffusion Scheduler rho": p.rho,
"CFG scale": p.cfg_scale,
"Image CFG scale": getattr(p, 'image_cfg_scale', None),
"Seed": all_seeds[index],
diff --git a/modules/shared.py b/modules/shared.py
index dbba0824d..069b37d83 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -47,7 +47,7 @@ ui_reorder_categories = [
"inpaint",
"sampler",
"checkboxes",
- "karras_scheduler",
+ "kdiffusion_scheduler",
"hires_fix",
"dimensions",
"cfg",
diff --git a/modules/ui.py b/modules/ui.py
index 28d4f1d1e..fd5c07995 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -484,7 +484,7 @@ def create_ui():
with FormRow(elem_classes="checkboxes-row", variant="compact"):
restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="txt2img_restore_faces")
tiling = gr.Checkbox(label='Tiling', value=False, elem_id="txt2img_tiling")
- t2i_enable_k_sched = gr.Checkbox(label='Custom Karras Scheduler', value=False, elem_id="txt2img_enable_k_sched")
+ t2i_enable_k_sched = gr.Checkbox(label='Custom KDiffusion Scheduler', value=False, elem_id="txt2img_enable_k_sched")
enable_hr = gr.Checkbox(label='Hires. fix', value=False, elem_id="txt2img_enable_hr")
hr_final_resolution = FormHTML(value="", elem_id="txtimg_hr_finalres", label="Upscaled resolution", interactive=False)
@@ -511,9 +511,9 @@ def create_ui():
with gr.Row():
hr_negative_prompt = gr.Textbox(label="Negative prompt", elem_id="hires_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt for hires fix pass.\nLeave empty to use the same negative prompt as in first pass.", elem_classes=["prompt"])
- elif category == "karras_scheduler":
- with FormGroup(visible=False, elem_id="txt2img_karras_scheduler") as t2i_k_sched_options:
- with FormRow(elem_id="txt2img_karras_scheduler_row1", variant="compact"):
+ elif category == "kdiffusion_scheduler":
+ with FormGroup(visible=False, elem_id="txt2img_kdiffusion_scheduler") as t2i_k_sched_options:
+ with FormRow(elem_id="txt2img_kdiffusion_scheduler_row1", variant="compact"):
t2i_k_sched_type = gr.Dropdown(label="Type", elem_id="t2i_k_sched_type", choices=['karras', 'exponential', 'polyexponential'], value='karras')
t2i_k_sched_sigma_min = gr.Slider(minimum=0.0, maximum=0.5, step=0.05, label='sigma min', value=0.1, elem_id="txt2img_sigma_min")
t2i_k_sched_sigma_max = gr.Slider(minimum=5.0, maximum=50.0, step=0.1, label='sigma max', value=10.0, elem_id="txt2img_sigma_max")
@@ -677,10 +677,10 @@ def create_ui():
(hr_negative_prompt, "Hires negative prompt"),
(hr_prompts_container, lambda d: gr.update(visible=True) if d.get("Hires prompt", "") != "" or d.get("Hires negative prompt", "") != "" else gr.update()),
(t2i_enable_k_sched, "Enable Custom Karras Schedule"),
- (t2i_k_sched_type, "Karras Scheduler Type"),
- (t2i_k_sched_sigma_max, "Karras Scheduler sigma_max"),
- (t2i_k_sched_sigma_min, "Karras Scheduler sigma_min"),
- (t2i_k_sched_rho, "Karras Scheduler rho"),
+ (t2i_k_sched_type, "KDiffusion Scheduler Type"),
+ (t2i_k_sched_sigma_max, "KDiffusion Scheduler sigma_max"),
+ (t2i_k_sched_sigma_min, "KDiffusion Scheduler sigma_min"),
+ (t2i_k_sched_rho, "KDiffusion Scheduler rho"),
*modules.scripts.scripts_txt2img.infotext_fields
]
parameters_copypaste.add_paste_fields("txt2img", None, txt2img_paste_fields, override_settings)
@@ -872,11 +872,11 @@ def create_ui():
with FormRow(elem_classes="checkboxes-row", variant="compact"):
restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="img2img_restore_faces")
tiling = gr.Checkbox(label='Tiling', value=False, elem_id="img2img_tiling")
- i2i_enable_k_sched = gr.Checkbox(label='Custom Karras Scheduler', value=False, elem_id="txt2img_enable_k_sched")
+ i2i_enable_k_sched = gr.Checkbox(label='Custom KDiffusion Scheduler', value=False, elem_id="txt2img_enable_k_sched")
- elif category == "karras_scheduler":
- with FormGroup(visible=False, elem_id="img2img_karras_scheduler") as i2i_k_sched_options:
- with FormRow(elem_id="img2img_karras_scheduler_row1", variant="compact"):
+ elif category == "kdiffusion_scheduler":
+ with FormGroup(visible=False, elem_id="img2img_kdiffusion_scheduler") as i2i_k_sched_options:
+ with FormRow(elem_id="img2img_kdiffusion_scheduler_row1", variant="compact"):
i2i_k_sched_type = gr.Dropdown(label="Type", elem_id="t2i_k_sched_type", choices=['karras', 'exponential', 'polyexponential'], value='karras')
i2i_k_sched_sigma_min = gr.Slider(minimum=0.0, maximum=0.5, step=0.05, label='sigma min', value=0.1, elem_id="txt2img_sigma_min")
i2i_k_sched_sigma_max = gr.Slider(minimum=5.0, maximum=50.0, step=0.1, label='sigma max', value=10.0, elem_id="txt2img_sigma_max")
@@ -1091,10 +1091,10 @@ def create_ui():
(sampler_index, "Sampler"),
(restore_faces, "Face restoration"),
(i2i_enable_k_sched, "Enable Custom Karras Schedule"),
- (i2i_k_sched_type, "Karras Scheduler Type"),
- (i2i_k_sched_sigma_max, "Karras Scheduler sigma_max"),
- (i2i_k_sched_sigma_min, "Karras Scheduler sigma_min"),
- (i2i_k_sched_rho, "Karras Scheduler rho"),
+ (i2i_k_sched_type, "KDiffusion Scheduler Type"),
+ (i2i_k_sched_sigma_max, "KDiffusion Scheduler sigma_max"),
+ (i2i_k_sched_sigma_min, "KDiffusion Scheduler sigma_min"),
+ (i2i_k_sched_rho, "KDiffusion Scheduler rho"),
(cfg_scale, "CFG scale"),
(image_cfg_scale, "Image CFG scale"),
(seed, "Seed"),
diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py
index cea43c188..74ece2527 100644
--- a/scripts/xyz_grid.py
+++ b/scripts/xyz_grid.py
@@ -220,10 +220,10 @@ axis_options = [
AxisOption("Sigma min", float, apply_field("s_tmin")),
AxisOption("Sigma max", float, apply_field("s_tmax")),
AxisOption("Sigma noise", float, apply_field("s_noise")),
- AxisOption("Karras Scheduler Type", str, apply_field("k_sched_type"), choices=lambda: [x for x in sd_samplers_kdiffusion.k_diffusion_scheduler]),
- AxisOption("Karras Scheduler Sigma Min", float, apply_field("sigma_min")),
- AxisOption("Karras Scheduler Sigma Max", float, apply_field("sigma_max")),
- AxisOption("Karras Scheduler rho", float, apply_field("rho")),
+ AxisOption("KDiffusion Scheduler Type", str, apply_field("k_sched_type"), choices=lambda: [x for x in sd_samplers_kdiffusion.k_diffusion_scheduler]),
+ AxisOption("KDiffusion Scheduler Sigma Min", float, apply_field("sigma_min")),
+ AxisOption("KDiffusion Scheduler Sigma Max", float, apply_field("sigma_max")),
+ AxisOption("KDiffusion Scheduler rho", float, apply_field("rho")),
AxisOption("Eta", float, apply_field("eta")),
AxisOption("Clip skip", int, apply_clip_skip),
AxisOption("Denoising", float, apply_field("denoising_strength")),
From 7882f76da45de7279c7db0dd17b6aca82b7ddf46 Mon Sep 17 00:00:00 2001
From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com>
Date: Mon, 22 May 2023 23:26:28 +0800
Subject: [PATCH 015/168] Replace karras with k_diffusion, fix gen info
---
modules/img2img.py | 2 +-
modules/processing.py | 14 +++++++-------
modules/sd_hijack.py | 1 +
modules/sd_samplers_kdiffusion.py | 3 ++-
modules/txt2img.py | 2 +-
5 files changed, 12 insertions(+), 10 deletions(-)
diff --git a/modules/img2img.py b/modules/img2img.py
index 73af5acbb..bec4354f6 100644
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -155,7 +155,7 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s
inpaint_full_res_padding=inpaint_full_res_padding,
inpainting_mask_invert=inpainting_mask_invert,
override_settings=override_settings,
- enable_karras=enable_k_sched,
+ enable_custom_k_sched=enable_k_sched,
k_sched_type=k_sched_type,
sigma_min=sigma_min,
sigma_max=sigma_max,
diff --git a/modules/processing.py b/modules/processing.py
index 3fb05d79b..260a573a1 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -106,7 +106,7 @@ class StableDiffusionProcessing:
"""
The first set of paramaters: sd_models -> do_not_reload_embeddings represent the minimum required to create a StableDiffusionProcessing
"""
- def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = False, tiling: bool = False, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_min_uncond: float = 0.0, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = 1.0, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, script_args: list = None, enable_karras: bool = False, k_sched_type: str = "karras", sigma_min: float=0.1, sigma_max: float=10.0, rho: float=7.0):
+ def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = False, tiling: bool = False, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_min_uncond: float = 0.0, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = 1.0, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, script_args: list = None, enable_custom_k_sched: bool = False, k_sched_type: str = "karras", sigma_min: float=0.1, sigma_max: float=10.0, rho: float=7.0):
if sampler_index is not None:
print("sampler_index argument for StableDiffusionProcessing does not do anything; use sampler_name", file=sys.stderr)
@@ -146,7 +146,7 @@ class StableDiffusionProcessing:
self.s_tmin = s_tmin or opts.s_tmin
self.s_tmax = s_tmax or float('inf') # not representable as a standard ui option
self.s_noise = s_noise or opts.s_noise
- self.enable_karras = enable_karras
+ self.enable_custom_k_sched = enable_custom_k_sched
self.k_sched_type = k_sched_type
self.sigma_max = sigma_max
self.sigma_min = sigma_min
@@ -563,11 +563,11 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter
generation_params = {
"Steps": p.steps,
"Sampler": p.sampler_name,
- "Enable Custom Karras Schedule": p.enable_karras,
- "kdiffusion Scheduler Type": p.k_sched_type,
- "kdiffusion Scheduler sigma_max": p.sigma_max,
- "kdiffusion Scheduler sigma_min": p.sigma_min,
- "kdiffusion Scheduler rho": p.rho,
+ "Enable Custom Karras Schedule": p.enable_custom_k_sched or None,
+ "kdiffusion Scheduler Type": p.k_sched_type if p.enable_custom_k_sched else None,
+ "kdiffusion Scheduler sigma_max": p.sigma_max if p.enable_custom_k_sched else None,
+ "kdiffusion Scheduler sigma_min": p.sigma_min if p.enable_custom_k_sched else None,
+ "kdiffusion Scheduler rho": p.rho if p.enable_custom_k_sched else None,
"CFG scale": p.cfg_scale,
"Image CFG scale": getattr(p, 'image_cfg_scale', None),
"Seed": all_seeds[index],
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index 08d31080f..9e157db88 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -41,6 +41,7 @@ def list_optimizers():
optimizers.clear()
optimizers.extend(new_optimizers)
+ print(optimizers)
def apply_optimizations():
diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index 441c040e4..4d8f57a79 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -44,6 +44,7 @@ sampler_extra_params = {
'sample_dpm_2': ['s_churn', 's_tmin', 's_tmax', 's_noise'],
}
+k_diffusion_samplers_map = {x.name: x for x in samplers_data_k_diffusion}
k_diffusion_scheduler = {
'karras': k_diffusion.sampling.get_sigmas_karras,
'exponential': k_diffusion.sampling.get_sigmas_exponential,
@@ -310,7 +311,7 @@ class KDiffusionSampler:
if p.sampler_noise_scheduler_override:
sigmas = p.sampler_noise_scheduler_override(steps)
- elif p.enable_karras:
+ elif p.enable_custom_k_sched:
print(p.k_sched_type, p.sigma_min, p.sigma_max, p.rho)
sigmas_func = k_diffusion_scheduler[p.k_sched_type]
sigmas_kwargs = {
diff --git a/modules/txt2img.py b/modules/txt2img.py
index 28d305688..dd52e710d 100644
--- a/modules/txt2img.py
+++ b/modules/txt2img.py
@@ -43,7 +43,7 @@ def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, step
hr_prompt=hr_prompt,
hr_negative_prompt=hr_negative_prompt,
override_settings=override_settings,
- enable_karras=enable_k_sched,
+ enable_custom_k_sched=enable_k_sched,
k_sched_type=k_sched_type,
sigma_min=sigma_min,
sigma_max=sigma_max,
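Setting the scheduler entries to `... if p.enable_custom_k_sched else None` relies on create_infotext dropping None-valued parameters when it joins the text, so a disabled feature leaves no trace in the saved infotext. A simplified, self-contained sketch of that filtering (the join below stands in for webui's real quoting logic):

enable_custom_k_sched = False

generation_params = {
    "Steps": 20,
    "Sampler": "DPM++ 2M",
    "Enable Custom Karras Schedule": enable_custom_k_sched or None,
    "kdiffusion Scheduler Type": "karras" if enable_custom_k_sched else None,
}

# None-valued entries are skipped, so nothing scheduler-related is written
# unless the custom schedule was actually enabled.
infotext = ", ".join(f"{k}: {v}" for k, v in generation_params.items() if v is not None)
print(infotext)  # Steps: 20, Sampler: DPM++ 2M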
From 7dc9d9e27e157ccd7a0c59405a3aaf4f1ca7e194 Mon Sep 17 00:00:00 2001
From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com>
Date: Mon, 22 May 2023 23:34:16 +0800
Subject: [PATCH 016/168] Only add metadata when k_sched is actually used
---
modules/processing.py | 14 ++++++++------
1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/modules/processing.py b/modules/processing.py
index 260a573a1..5e50f1d63 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -13,7 +13,7 @@ from skimage import exposure
from typing import Any, Dict, List
import modules.sd_hijack
-from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, extra_networks, sd_vae_approx, scripts, sd_samplers_common
+from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, extra_networks, sd_vae_approx, scripts, sd_samplers_common, sd_samplers_kdiffusion
from modules.sd_hijack import model_hijack
from modules.shared import opts, cmd_opts, state
import modules.shared as shared
@@ -560,14 +560,16 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter
if uses_ensd:
uses_ensd = sd_samplers_common.is_sampler_using_eta_noise_seed_delta(p)
+ use_custom_k_sched = p.enable_custom_k_sched and p.sampler_name in sd_samplers_kdiffusion.k_diffusion_samplers_map
+
generation_params = {
"Steps": p.steps,
"Sampler": p.sampler_name,
- "Enable Custom Karras Schedule": p.enable_custom_k_sched or None,
- "kdiffusion Scheduler Type": p.k_sched_type if p.enable_custom_k_sched else None,
- "kdiffusion Scheduler sigma_max": p.sigma_max if p.enable_custom_k_sched else None,
- "kdiffusion Scheduler sigma_min": p.sigma_min if p.enable_custom_k_sched else None,
- "kdiffusion Scheduler rho": p.rho if p.enable_custom_k_sched else None,
+ "Enable Custom Karras Schedule": use_custom_k_sched or None,
+ "kdiffusion Scheduler Type": p.k_sched_type if use_custom_k_sched else None,
+ "kdiffusion Scheduler sigma_max": p.sigma_max if use_custom_k_sched else None,
+ "kdiffusion Scheduler sigma_min": p.sigma_min if use_custom_k_sched else None,
+ "kdiffusion Scheduler rho": p.rho if use_custom_k_sched else None,
"CFG scale": p.cfg_scale,
"Image CFG scale": getattr(p, 'image_cfg_scale', None),
"Seed": all_seeds[index],
From 5dfb1f597b47b1028ee010df2ed8642e2beb6c1c Mon Sep 17 00:00:00 2001
From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com>
Date: Mon, 22 May 2023 23:36:16 +0800
Subject: [PATCH 017/168] remove not related code
---
modules/sd_hijack.py | 1 -
modules/sd_samplers_kdiffusion.py | 1 -
2 files changed, 2 deletions(-)
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index 9e157db88..08d31080f 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -41,7 +41,6 @@ def list_optimizers():
optimizers.clear()
optimizers.extend(new_optimizers)
- print(optimizers)
def apply_optimizations():
diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index 4d8f57a79..ba0cf08e5 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -312,7 +312,6 @@ class KDiffusionSampler:
if p.sampler_noise_scheduler_override:
sigmas = p.sampler_noise_scheduler_override(steps)
elif p.enable_custom_k_sched:
- print(p.k_sched_type, p.sigma_min, p.sigma_max, p.rho)
sigmas_func = k_diffusion_scheduler[p.k_sched_type]
sigmas_kwargs = {
'sigma_min': p.sigma_min,
From 4365c35bf9e2a777e2ebddc15c5ea94000407071 Mon Sep 17 00:00:00 2001
From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com>
Date: Mon, 22 May 2023 23:41:14 +0800
Subject: [PATCH 018/168] Avoid loop import
---
modules/processing.py | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/modules/processing.py b/modules/processing.py
index 5e50f1d63..d9703fe8a 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -13,7 +13,7 @@ from skimage import exposure
from typing import Any, Dict, List
import modules.sd_hijack
-from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, extra_networks, sd_vae_approx, scripts, sd_samplers_common, sd_samplers_kdiffusion
+from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, extra_networks, sd_vae_approx, scripts, sd_samplers_common
from modules.sd_hijack import model_hijack
from modules.shared import opts, cmd_opts, state
import modules.shared as shared
@@ -560,6 +560,8 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter
if uses_ensd:
uses_ensd = sd_samplers_common.is_sampler_using_eta_noise_seed_delta(p)
+ # avoid loop import
+ from modules import sd_samplers_kdiffusion
use_custom_k_sched = p.enable_custom_k_sched and p.sampler_name in sd_samplers_kdiffusion.k_diffusion_samplers_map
generation_params = {
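Moving the import of sd_samplers_kdiffusion inside create_infotext breaks the import cycle at module-load time: the module is only resolved on the first call, when both modules are fully initialized. A minimal two-file sketch of the same trick, with a.py and b.py as hypothetical stand-ins for the two webui modules (save the two parts as separate files and run: python -c "import b; print(b.describe('DPM++ 2M'))"):

# a.py -- imports b at module level; this direction of the cycle is unproblematic
import b

def uses_k_diffusion(name):
    return name in b.samplers_map

# b.py -- defers its import of a into the function body
samplers_map = {"Euler a": None, "DPM++ 2M": None}

def describe(name):
    from a import uses_k_diffusion  # deferred: avoids a circular import while b is still loading
    return "k-diffusion" if uses_k_diffusion(name) else "other"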
From 302d95c72697ecaf436445817f4676e70ba68f20 Mon Sep 17 00:00:00 2001
From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com>
Date: Mon, 22 May 2023 23:43:06 +0800
Subject: [PATCH 019/168] Minor naming fixes
---
modules/processing.py | 10 +++++-----
modules/ui.py | 4 ++--
2 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/modules/processing.py b/modules/processing.py
index d9703fe8a..68f7f1682 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -567,11 +567,11 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter
generation_params = {
"Steps": p.steps,
"Sampler": p.sampler_name,
- "Enable Custom Karras Schedule": use_custom_k_sched or None,
- "kdiffusion Scheduler Type": p.k_sched_type if use_custom_k_sched else None,
- "kdiffusion Scheduler sigma_max": p.sigma_max if use_custom_k_sched else None,
- "kdiffusion Scheduler sigma_min": p.sigma_min if use_custom_k_sched else None,
- "kdiffusion Scheduler rho": p.rho if use_custom_k_sched else None,
+ "Enable Custom KDiffusion Schedule": use_custom_k_sched or None,
+ "KDiffusion Scheduler Type": p.k_sched_type if use_custom_k_sched else None,
+ "KDiffusion Scheduler sigma_max": p.sigma_max if use_custom_k_sched else None,
+ "KDiffusion Scheduler sigma_min": p.sigma_min if use_custom_k_sched else None,
+ "KDiffusion Scheduler rho": p.rho if use_custom_k_sched else None,
"CFG scale": p.cfg_scale,
"Image CFG scale": getattr(p, 'image_cfg_scale', None),
"Seed": all_seeds[index],
diff --git a/modules/ui.py b/modules/ui.py
index fd5c07995..6d53bdc66 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -676,7 +676,7 @@ def create_ui():
(hr_prompt, "Hires prompt"),
(hr_negative_prompt, "Hires negative prompt"),
(hr_prompts_container, lambda d: gr.update(visible=True) if d.get("Hires prompt", "") != "" or d.get("Hires negative prompt", "") != "" else gr.update()),
- (t2i_enable_k_sched, "Enable Custom Karras Schedule"),
+ (t2i_enable_k_sched, "Enable Custom KDiffusion Schedule"),
(t2i_k_sched_type, "KDiffusion Scheduler Type"),
(t2i_k_sched_sigma_max, "KDiffusion Scheduler sigma_max"),
(t2i_k_sched_sigma_min, "KDiffusion Scheduler sigma_min"),
@@ -1090,7 +1090,7 @@ def create_ui():
(steps, "Steps"),
(sampler_index, "Sampler"),
(restore_faces, "Face restoration"),
- (i2i_enable_k_sched, "Enable Custom Karras Schedule"),
+ (i2i_enable_k_sched, "Enable Custom KDiffusion Schedule"),
(i2i_k_sched_type, "KDiffusion Scheduler Type"),
(i2i_k_sched_sigma_max, "KDiffusion Scheduler sigma_max"),
(i2i_k_sched_sigma_min, "KDiffusion Scheduler sigma_min"),
From 65a87ccc9bf92a0fd24a453e2837dd2d19bbf5ce Mon Sep 17 00:00:00 2001
From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com>
Date: Tue, 23 May 2023 00:09:49 +0800
Subject: [PATCH 020/168] Add error information for recursion error
---
modules/sd_samplers_kdiffusion.py | 6 ++++++
1 file changed, 6 insertions(+)
diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index ba0cf08e5..e2f18b542 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -272,6 +272,12 @@ class KDiffusionSampler:
try:
return func()
+ except RecursionError:
+ print(
+ 'rho>5 with polyexponential scheduler may cause this error.'
+ 'You should try to use smaller rho instead.'
+ )
+ return self.last_latent
except sd_samplers_common.InterruptedException:
return self.last_latent
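The try/except added above keeps a RecursionError from aborting the whole generation and hands back the last latent instead. A standalone sketch of that fallback; run_sampler and last_latent are placeholders for the real sampler state, and boom is only a stand-in that forces the error:

def launch_sampling(run_sampler, last_latent):
    try:
        return run_sampler()
    except RecursionError:
        print('rho>5 with polyexponential scheduler may cause this error. '
              'You should try to use smaller rho instead.')
        return last_latent

def boom(depth=0):
    # deliberately recurses forever so the except branch is exercised
    return boom(depth + 1)

print(launch_sampling(boom, last_latent="previous latent"))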
From 403b304162b670597f20b01f147bb042eb78ee5c Mon Sep 17 00:00:00 2001
From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com>
Date: Tue, 23 May 2023 00:29:38 +0800
Subject: [PATCH 021/168] Use the model's sigma_max/min if sigma_max/min is 0
---
modules/sd_samplers_kdiffusion.py | 5 +++--
modules/ui.py | 4 ++--
2 files changed, 5 insertions(+), 4 deletions(-)
diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index e2f18b542..7364ed44e 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -318,10 +318,11 @@ class KDiffusionSampler:
if p.sampler_noise_scheduler_override:
sigmas = p.sampler_noise_scheduler_override(steps)
elif p.enable_custom_k_sched:
+ sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item())
sigmas_func = k_diffusion_scheduler[p.k_sched_type]
sigmas_kwargs = {
- 'sigma_min': p.sigma_min,
- 'sigma_max': p.sigma_max
+ 'sigma_min': p.sigma_min or sigma_min,
+ 'sigma_max': p.sigma_max or sigma_max
}
if p.k_sched_type != 'exponential':
sigmas_kwargs['rho'] = p.rho
diff --git a/modules/ui.py b/modules/ui.py
index 6d53bdc66..fa3a41eb8 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -516,7 +516,7 @@ def create_ui():
with FormRow(elem_id="txt2img_kdiffusion_scheduler_row1", variant="compact"):
t2i_k_sched_type = gr.Dropdown(label="Type", elem_id="t2i_k_sched_type", choices=['karras', 'exponential', 'polyexponential'], value='karras')
t2i_k_sched_sigma_min = gr.Slider(minimum=0.0, maximum=0.5, step=0.05, label='sigma min', value=0.1, elem_id="txt2img_sigma_min")
- t2i_k_sched_sigma_max = gr.Slider(minimum=5.0, maximum=50.0, step=0.1, label='sigma max', value=10.0, elem_id="txt2img_sigma_max")
+ t2i_k_sched_sigma_max = gr.Slider(minimum=0.0, maximum=50.0, step=0.1, label='sigma max', value=10.0, elem_id="txt2img_sigma_max")
t2i_k_sched_rho = gr.Slider(minimum=0.5, maximum=10.0, step=0.1, label='rho', value=7.0, elem_id="txt2img_rho")
elif category == "batch":
@@ -879,7 +879,7 @@ def create_ui():
with FormRow(elem_id="img2img_kdiffusion_scheduler_row1", variant="compact"):
i2i_k_sched_type = gr.Dropdown(label="Type", elem_id="t2i_k_sched_type", choices=['karras', 'exponential', 'polyexponential'], value='karras')
i2i_k_sched_sigma_min = gr.Slider(minimum=0.0, maximum=0.5, step=0.05, label='sigma min', value=0.1, elem_id="txt2img_sigma_min")
- i2i_k_sched_sigma_max = gr.Slider(minimum=5.0, maximum=50.0, step=0.1, label='sigma max', value=10.0, elem_id="txt2img_sigma_max")
+ i2i_k_sched_sigma_max = gr.Slider(minimum=0.0, maximum=50.0, step=0.1, label='sigma max', value=10.0, elem_id="txt2img_sigma_max")
i2i_k_sched_rho = gr.Slider(minimum=0.5, maximum=10.0, step=0.1, label='rho', value=7.0, elem_id="txt2img_rho")
elif category == "batch":
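Because 0.0 is falsy in Python, `p.sigma_min or sigma_min` means "use the model's own value when the slider is left at 0", which is why the sigma max sliders above now start at 0.0. A tiny sketch of that resolution; the model range values are placeholders:

def resolve_sigmas(user_sigma_min, user_sigma_max,
                   model_sigma_min=0.0292, model_sigma_max=14.61):
    # 0.0 falls through to the model's own sigma range, any other value wins
    return (user_sigma_min or model_sigma_min,
            user_sigma_max or model_sigma_max)

print(resolve_sigmas(0.0, 0.0))    # -> model's own range
print(resolve_sigmas(0.1, 10.0))   # -> user overrides kept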
From 51d672890d168fe46dd152305d2fde3f1bb9b531 Mon Sep 17 00:00:00 2001
From: catboxanon <122327233+catboxanon@users.noreply.github.com>
Date: Mon, 22 May 2023 13:06:57 -0400
Subject: [PATCH 022/168] Revert #10586
---
modules/sd_samplers_kdiffusion.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index 638e0ac92..dcec9e0e4 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -20,7 +20,7 @@ samplers_k_diffusion = [
('DPM++ 2S a', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a'], {"uses_ensd": True, "second_order": True}),
('DPM++ 2M', 'sample_dpmpp_2m', ['k_dpmpp_2m'], {}),
('DPM++ SDE', 'sample_dpmpp_sde', ['k_dpmpp_sde'], {"second_order": True, "brownian_noise": True}),
- ('DPM++ 2M SDE', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {"brownian_noise": True, 'discard_next_to_last_sigma': True}),
+ ('DPM++ 2M SDE', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {"brownian_noise": True}),
('DPM fast', 'sample_dpm_fast', ['k_dpm_fast'], {"uses_ensd": True}),
('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad'], {"uses_ensd": True}),
('LMS Karras', 'sample_lms', ['k_lms_ka'], {'scheduler': 'karras'}),
@@ -29,7 +29,7 @@ samplers_k_diffusion = [
('DPM++ 2S a Karras', 'sample_dpmpp_2s_ancestral', ['k_dpmpp_2s_a_ka'], {'scheduler': 'karras', "uses_ensd": True, "second_order": True}),
('DPM++ 2M Karras', 'sample_dpmpp_2m', ['k_dpmpp_2m_ka'], {'scheduler': 'karras'}),
('DPM++ SDE Karras', 'sample_dpmpp_sde', ['k_dpmpp_sde_ka'], {'scheduler': 'karras', "second_order": True, "brownian_noise": True}),
- ('DPM++ 2M SDE Karras', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {'scheduler': 'karras', "brownian_noise": True, 'discard_next_to_last_sigma': True}),
+ ('DPM++ 2M SDE Karras', 'sample_dpmpp_2m_sde', ['k_dpmpp_2m_sde_ka'], {'scheduler': 'karras', "brownian_noise": True}),
]
samplers_data_k_diffusion = [
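For context on the reverted option: discard_next_to_last_sigma is generally understood to drop the penultimate sigma from the schedule so the sampler skips that step. A hedged sketch of that effect, with a plain list standing in for the real torch tensor:

def discard_next_to_last_sigma(sigmas):
    # keep everything except sigmas[-2]; the trailing 0.0 stays in place
    return sigmas[:-2] + sigmas[-1:]

print(discard_next_to_last_sigma([14.6, 7.0, 3.1, 1.2, 0.4, 0.0]))
# -> [14.6, 7.0, 3.1, 1.2, 0.0]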
From e1c44267ea239d0903202c41f9e75c864e31227f Mon Sep 17 00:00:00 2001
From: ArthurHeitmann <37270165+ArthurHeitmann@users.noreply.github.com>
Date: Mon, 22 May 2023 21:56:26 +0200
Subject: [PATCH 023/168] Fix for #10643 (pixel noise in webui inpainting
canvas breaking inpainting, so that it behaves like plain img2img)
---
modules/img2img.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/modules/img2img.py b/modules/img2img.py
index d704bf900..4c12c2c5a 100644
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -92,7 +92,8 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s
elif mode == 2: # inpaint
image, mask = init_img_with_mask["image"], init_img_with_mask["mask"]
alpha_mask = ImageOps.invert(image.split()[-1]).convert('L').point(lambda x: 255 if x > 0 else 0, mode='1')
- mask = ImageChops.lighter(alpha_mask, mask.convert('L')).convert('L')
+ mask = mask.convert('L').point(lambda x: 255 if x > 128 else 0, mode='1')
+ mask = ImageChops.lighter(alpha_mask, mask).convert('L')
image = image.convert("RGB")
elif mode == 3: # inpaint sketch
image = inpaint_color_sketch
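The fix binarises the drawn mask with a 128 threshold before merging it with the image's alpha mask, so faint canvas pixel noise no longer counts as masked area. A self-contained PIL sketch of the same steps on tiny synthetic images:

from PIL import Image, ImageChops, ImageOps

image = Image.new('RGBA', (4, 4), (255, 0, 0, 255))   # fully opaque init image
mask = Image.new('L', (4, 4), 3)                      # faint canvas noise everywhere
mask.putpixel((1, 1), 200)                            # one real brush stroke

alpha_mask = ImageOps.invert(image.split()[-1]).convert('L').point(lambda x: 255 if x > 0 else 0, mode='1')
mask = mask.convert('L').point(lambda x: 255 if x > 128 else 0, mode='1')   # threshold removes the noise
mask = ImageChops.lighter(alpha_mask, mask).convert('L')
image = image.convert("RGB")

print(list(mask.getdata()))  # 255 only at index 5, i.e. the (1, 1) brush stroke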
From 38aaad654bec640e99beb42964d09357878179bd Mon Sep 17 00:00:00 2001
From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com>
Date: Tue, 23 May 2023 09:38:30 +0800
Subject: [PATCH 024/168] Better hint for user
Co-authored-by: catboxanon <122327233+catboxanon@users.noreply.github.com>
---
modules/sd_samplers_kdiffusion.py | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index 7364ed44e..969ef02b1 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -274,8 +274,9 @@ class KDiffusionSampler:
return func()
except RecursionError:
print(
- 'rho>5 with polyexponential scheduler may cause this error.'
- 'You should try to use smaller rho instead.'
+ 'Encountered RecursionError during sampling, returning last latent. '
+ 'rho >5 with a polyexponential scheduler may cause this error. '
+ 'You should try to use a smaller rho value instead.'
)
return self.last_latent
except sd_samplers_common.InterruptedException:
From 89c44bbc15488a3d42eb63adee867b77ec432b09 Mon Sep 17 00:00:00 2001
From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com>
Date: Tue, 23 May 2023 09:52:15 +0800
Subject: [PATCH 025/168] Add hint for custom k_diffusion scheduler
---
javascript/hints.js | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/javascript/hints.js b/javascript/hints.js
index 46f342cb9..9583c7dc1 100644
--- a/javascript/hints.js
+++ b/javascript/hints.js
@@ -113,7 +113,12 @@ var titles = {
"Multiplier for extra networks": "When adding extra network such as Hypernetwork or Lora to prompt, use this multiplier for it.",
"Discard weights with matching name": "Regular expression; if weights's name matches it, the weights is not written to the resulting checkpoint. Use ^model_ema to discard EMA weights.",
"Extra networks tab order": "Comma-separated list of tab names; tabs listed here will appear in the extra networks UI first and in order lsited.",
- "Negative Guidance minimum sigma": "Skip negative prompt for steps where image is already mostly denoised; the higher this value, the more skips there will be; provides increased performance in exchange for minor quality reduction."
+ "Negative Guidance minimum sigma": "Skip negative prompt for steps where image is already mostly denoised; the higher this value, the more skips there will be; provides increased performance in exchange for minor quality reduction.",
+
+ "Custom KDiffusion Scheduler": "Custom noise scheduler to use for KDiffusion. See https://arxiv.org/abs/2206.00364",
+ "sigma min": "the minimum noise strength for the scheduler. Set to 0 to use the same value which 'xxx karras' samplers use.",
+ "sigma max": "the maximum noise strength for the scheduler. Set to 0 to use the same value which 'xxx karras' samplers use.",
+ "rho": "higher will make a more steep noise scheduler (decrease faster). default for karras is 7.0, for polyexponential is 1.0"
};
function updateTooltipForSpan(span) {
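The sigma min/max and rho tooltips describe the rho-parameterised schedule from Karras et al. (arXiv:2206.00364), which is what k_diffusion.sampling.get_sigmas_karras computes: sigmas are interpolated between sigma_max and sigma_min in 1/rho space, and a larger rho makes them decrease faster early on, as the rho hint says. A small numpy sketch of that formula (the trailing zero mirrors k-diffusion's convention):

import numpy as np

def karras_sigmas(n, sigma_min=0.1, sigma_max=10.0, rho=7.0):
    ramp = np.linspace(0, 1, n)
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
    return np.append(sigmas, 0.0)  # k-diffusion appends a final zero

print(karras_sigmas(8, rho=1.0).round(3))  # near-linear decay
print(karras_sigmas(8, rho=7.0).round(3))  # steeper early drop, more low-sigma steps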
From 1846ad36a3bd2a60bc9dc59a60e16d3ca7a559fe Mon Sep 17 00:00:00 2001
From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com>
Date: Tue, 23 May 2023 10:58:57 +0800
Subject: [PATCH 026/168] Use settings instead of main interface
---
javascript/hints.js | 7 +--
modules/generation_parameters_copypaste.py | 5 +++
modules/img2img.py | 7 +--
modules/processing.py | 12 ++---
modules/shared.py | 6 ++-
modules/txt2img.py | 7 +--
modules/ui.py | 52 ----------------------
7 files changed, 19 insertions(+), 77 deletions(-)
diff --git a/javascript/hints.js b/javascript/hints.js
index 9583c7dc1..46f342cb9 100644
--- a/javascript/hints.js
+++ b/javascript/hints.js
@@ -113,12 +113,7 @@ var titles = {
"Multiplier for extra networks": "When adding extra network such as Hypernetwork or Lora to prompt, use this multiplier for it.",
"Discard weights with matching name": "Regular expression; if weights's name matches it, the weights is not written to the resulting checkpoint. Use ^model_ema to discard EMA weights.",
"Extra networks tab order": "Comma-separated list of tab names; tabs listed here will appear in the extra networks UI first and in order lsited.",
- "Negative Guidance minimum sigma": "Skip negative prompt for steps where image is already mostly denoised; the higher this value, the more skips there will be; provides increased performance in exchange for minor quality reduction.",
-
- "Custom KDiffusion Scheduler": "Custom noise scheduler to use for KDiffusion. See https://arxiv.org/abs/2206.00364",
- "sigma min": "the minimum noise strength for the scheduler. Set to 0 to use the same value which 'xxx karras' samplers use.",
- "sigma max": "the maximum noise strength for the scheduler. Set to 0 to use the same value which 'xxx karras' samplers use.",
- "rho": "higher will make a more steep noise scheduler (decrease faster). default for karras is 7.0, for polyexponential is 1.0"
+ "Negative Guidance minimum sigma": "Skip negative prompt for steps where image is already mostly denoised; the higher this value, the more skips there will be; provides increased performance in exchange for minor quality reduction."
};
function updateTooltipForSpan(span) {
diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py
index d5f0a49b2..c92fb0fbf 100644
--- a/modules/generation_parameters_copypaste.py
+++ b/modules/generation_parameters_copypaste.py
@@ -318,6 +318,11 @@ infotext_to_setting_name_mapping = [
('Conditional mask weight', 'inpainting_mask_weight'),
('Model hash', 'sd_model_checkpoint'),
('ENSD', 'eta_noise_seed_delta'),
+ ('Enable Custom KDiffusion Schedule', 'custom_k_sched'),
+ ('KDiffusion Scheduler Type', 'k_sched_type'),
+ ('KDiffusion Scheduler sigma_max', 'sigma_max'),
+ ('KDiffusion Scheduler sigma_min', 'sigma_min'),
+ ('KDiffusion Scheduler rho', 'rho'),
('Noise multiplier', 'initial_noise_multiplier'),
('Eta', 'eta_ancestral'),
('Eta DDIM', 'eta_ddim'),
diff --git a/modules/img2img.py b/modules/img2img.py
index bec4354f6..d704bf900 100644
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -78,7 +78,7 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args):
processed_image.save(os.path.join(output_dir, filename))
-def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, enable_k_sched, k_sched_type, sigma_min, sigma_max, rho, *args):
+def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, steps: int, sampler_index: int, mask_blur: int, mask_alpha: float, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, *args):
override_settings = create_override_settings_dict(override_settings_texts)
is_batch = mode == 5
@@ -155,11 +155,6 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s
inpaint_full_res_padding=inpaint_full_res_padding,
inpainting_mask_invert=inpainting_mask_invert,
override_settings=override_settings,
- enable_custom_k_sched=enable_k_sched,
- k_sched_type=k_sched_type,
- sigma_min=sigma_min,
- sigma_max=sigma_max,
- rho=rho
)
p.scripts = modules.scripts.scripts_img2img
diff --git a/modules/processing.py b/modules/processing.py
index 68f7f1682..0a0181de8 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -106,7 +106,7 @@ class StableDiffusionProcessing:
"""
The first set of paramaters: sd_models -> do_not_reload_embeddings represent the minimum required to create a StableDiffusionProcessing
"""
- def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = False, tiling: bool = False, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_min_uncond: float = 0.0, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = 1.0, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, script_args: list = None, enable_custom_k_sched: bool = False, k_sched_type: str = "karras", sigma_min: float=0.1, sigma_max: float=10.0, rho: float=7.0):
+ def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = False, tiling: bool = False, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_min_uncond: float = 0.0, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = 1.0, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, script_args: list = None, enable_custom_k_sched: bool = False, k_sched_type: str = "", sigma_min: float=0.0, sigma_max: float=0.0, rho: float=0.0):
if sampler_index is not None:
print("sampler_index argument for StableDiffusionProcessing does not do anything; use sampler_name", file=sys.stderr)
@@ -146,11 +146,11 @@ class StableDiffusionProcessing:
self.s_tmin = s_tmin or opts.s_tmin
self.s_tmax = s_tmax or float('inf') # not representable as a standard ui option
self.s_noise = s_noise or opts.s_noise
- self.enable_custom_k_sched = enable_custom_k_sched
- self.k_sched_type = k_sched_type
- self.sigma_max = sigma_max
- self.sigma_min = sigma_min
- self.rho = rho
+ self.enable_custom_k_sched = opts.custom_k_sched
+ self.k_sched_type = k_sched_type or opts.k_sched_type
+ self.sigma_max = sigma_max or opts.sigma_max
+ self.sigma_min = sigma_min or opts.sigma_min
+ self.rho = rho or opts.rho
self.override_settings = {k: v for k, v in (override_settings or {}).items() if k not in shared.restricted_opts}
self.override_settings_restore_afterwards = override_settings_restore_afterwards
self.is_using_inpainting_conditioning = False
diff --git a/modules/shared.py b/modules/shared.py
index 069b37d83..a0e762d2a 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -47,7 +47,6 @@ ui_reorder_categories = [
"inpaint",
"sampler",
"checkboxes",
- "kdiffusion_scheduler",
"hires_fix",
"dimensions",
"cfg",
@@ -518,6 +517,11 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters"
's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
+ 'custom_k_sched': OptionInfo(False, "Enable Custom KDiffusion Scheduler"),
+ 'k_sched_type': OptionInfo("karras", "scheduler type", gr.Dropdown, {"choices": ["karras", "exponential", "polyexponential"]}),
+ 'sigma_max': OptionInfo(0.0, "sigma max", gr.Number).info("the maximum noise strength for the scheduler. Set to 0 to use the same value which 'xxx karras' samplers use."),
+ 'sigma_min': OptionInfo(0.0, "sigma min", gr.Number).info("the minimum noise strength for the scheduler. Set to 0 to use the same value which 'xxx karras' samplers use."),
+ 'rho': OptionInfo(7.0, "rho", gr.Number).info("higher will make a more steep noise scheduler (decrease faster). default for karras is 7.0, for polyexponential is 1.0"),
'eta_noise_seed_delta': OptionInfo(0, "Eta noise seed delta", gr.Number, {"precision": 0}).info("ENSD; does not improve anything, just produces different results for ancestral samplers - only useful for reproducing images"),
'always_discard_next_to_last_sigma': OptionInfo(False, "Always discard next-to-last sigma").link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/6044"),
'uni_pc_variant': OptionInfo("bh1", "UniPC variant", gr.Radio, {"choices": ["bh1", "bh2", "vary_coeff"]}),
diff --git a/modules/txt2img.py b/modules/txt2img.py
index dd52e710d..2e7d202d7 100644
--- a/modules/txt2img.py
+++ b/modules/txt2img.py
@@ -7,7 +7,7 @@ from modules.ui import plaintext_to_html
-def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, hr_sampler_index: int, hr_prompt: str, hr_negative_prompt, override_settings_texts, enable_k_sched, k_sched_type, sigma_min, sigma_max, rho, *args):
+def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, hr_sampler_index: int, hr_prompt: str, hr_negative_prompt, override_settings_texts, *args):
override_settings = create_override_settings_dict(override_settings_texts)
p = processing.StableDiffusionProcessingTxt2Img(
@@ -43,11 +43,6 @@ def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, step
hr_prompt=hr_prompt,
hr_negative_prompt=hr_negative_prompt,
override_settings=override_settings,
- enable_custom_k_sched=enable_k_sched,
- k_sched_type=k_sched_type,
- sigma_min=sigma_min,
- sigma_max=sigma_max,
- rho=rho
)
p.scripts = modules.scripts.scripts_txt2img
diff --git a/modules/ui.py b/modules/ui.py
index fa3a41eb8..001b97923 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -484,7 +484,6 @@ def create_ui():
with FormRow(elem_classes="checkboxes-row", variant="compact"):
restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="txt2img_restore_faces")
tiling = gr.Checkbox(label='Tiling', value=False, elem_id="txt2img_tiling")
- t2i_enable_k_sched = gr.Checkbox(label='Custom KDiffusion Scheduler', value=False, elem_id="txt2img_enable_k_sched")
enable_hr = gr.Checkbox(label='Hires. fix', value=False, elem_id="txt2img_enable_hr")
hr_final_resolution = FormHTML(value="", elem_id="txtimg_hr_finalres", label="Upscaled resolution", interactive=False)
@@ -511,14 +510,6 @@ def create_ui():
with gr.Row():
hr_negative_prompt = gr.Textbox(label="Negative prompt", elem_id="hires_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt for hires fix pass.\nLeave empty to use the same negative prompt as in first pass.", elem_classes=["prompt"])
- elif category == "kdiffusion_scheduler":
- with FormGroup(visible=False, elem_id="txt2img_kdiffusion_scheduler") as t2i_k_sched_options:
- with FormRow(elem_id="txt2img_kdiffusion_scheduler_row1", variant="compact"):
- t2i_k_sched_type = gr.Dropdown(label="Type", elem_id="t2i_k_sched_type", choices=['karras', 'exponential', 'polyexponential'], value='karras')
- t2i_k_sched_sigma_min = gr.Slider(minimum=0.0, maximum=0.5, step=0.05, label='sigma min', value=0.1, elem_id="txt2img_sigma_min")
- t2i_k_sched_sigma_max = gr.Slider(minimum=0.0, maximum=50.0, step=0.1, label='sigma max', value=10.0, elem_id="txt2img_sigma_max")
- t2i_k_sched_rho = gr.Slider(minimum=0.5, maximum=10.0, step=0.1, label='rho', value=7.0, elem_id="txt2img_rho")
-
elif category == "batch":
if not opts.dimensions_and_batch_together:
with FormRow(elem_id="txt2img_column_batch"):
@@ -587,11 +578,6 @@ def create_ui():
hr_prompt,
hr_negative_prompt,
override_settings,
- t2i_enable_k_sched,
- t2i_k_sched_type,
- t2i_k_sched_sigma_min,
- t2i_k_sched_sigma_max,
- t2i_k_sched_rho
] + custom_inputs,
@@ -641,13 +627,6 @@ def create_ui():
show_progress = False,
)
- t2i_enable_k_sched.change(
- fn=lambda x: gr_show(x),
- inputs=[t2i_enable_k_sched],
- outputs=[t2i_k_sched_options],
- show_progress=False
- )
-
txt2img_paste_fields = [
(txt2img_prompt, "Prompt"),
(txt2img_negative_prompt, "Negative prompt"),
@@ -676,11 +655,6 @@ def create_ui():
(hr_prompt, "Hires prompt"),
(hr_negative_prompt, "Hires negative prompt"),
(hr_prompts_container, lambda d: gr.update(visible=True) if d.get("Hires prompt", "") != "" or d.get("Hires negative prompt", "") != "" else gr.update()),
- (t2i_enable_k_sched, "Enable Custom KDiffusion Schedule"),
- (t2i_k_sched_type, "KDiffusion Scheduler Type"),
- (t2i_k_sched_sigma_max, "KDiffusion Scheduler sigma_max"),
- (t2i_k_sched_sigma_min, "KDiffusion Scheduler sigma_min"),
- (t2i_k_sched_rho, "KDiffusion Scheduler rho"),
*modules.scripts.scripts_txt2img.infotext_fields
]
parameters_copypaste.add_paste_fields("txt2img", None, txt2img_paste_fields, override_settings)
@@ -872,15 +846,6 @@ def create_ui():
with FormRow(elem_classes="checkboxes-row", variant="compact"):
restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="img2img_restore_faces")
tiling = gr.Checkbox(label='Tiling', value=False, elem_id="img2img_tiling")
- i2i_enable_k_sched = gr.Checkbox(label='Custom KDiffusion Scheduler', value=False, elem_id="txt2img_enable_k_sched")
-
- elif category == "kdiffusion_scheduler":
- with FormGroup(visible=False, elem_id="img2img_kdiffusion_scheduler") as i2i_k_sched_options:
- with FormRow(elem_id="img2img_kdiffusion_scheduler_row1", variant="compact"):
- i2i_k_sched_type = gr.Dropdown(label="Type", elem_id="t2i_k_sched_type", choices=['karras', 'exponential', 'polyexponential'], value='karras')
- i2i_k_sched_sigma_min = gr.Slider(minimum=0.0, maximum=0.5, step=0.05, label='sigma min', value=0.1, elem_id="txt2img_sigma_min")
- i2i_k_sched_sigma_max = gr.Slider(minimum=0.0, maximum=50.0, step=0.1, label='sigma max', value=10.0, elem_id="txt2img_sigma_max")
- i2i_k_sched_rho = gr.Slider(minimum=0.5, maximum=10.0, step=0.1, label='rho', value=7.0, elem_id="txt2img_rho")
elif category == "batch":
if not opts.dimensions_and_batch_together:
@@ -984,11 +949,6 @@ def create_ui():
img2img_batch_output_dir,
img2img_batch_inpaint_mask_dir,
override_settings,
- i2i_enable_k_sched,
- i2i_k_sched_type,
- i2i_k_sched_sigma_min,
- i2i_k_sched_sigma_max,
- i2i_k_sched_rho
] + custom_inputs,
outputs=[
img2img_gallery,
@@ -1072,13 +1032,6 @@ def create_ui():
outputs=[prompt, negative_prompt, styles],
)
- i2i_enable_k_sched.change(
- fn=lambda x: gr_show(x),
- inputs=[i2i_enable_k_sched],
- outputs=[i2i_k_sched_options],
- show_progress=False
- )
-
token_button.click(fn=update_token_counter, inputs=[img2img_prompt, steps], outputs=[token_counter])
negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[img2img_negative_prompt, steps], outputs=[negative_token_counter])
@@ -1090,11 +1043,6 @@ def create_ui():
(steps, "Steps"),
(sampler_index, "Sampler"),
(restore_faces, "Face restoration"),
- (i2i_enable_k_sched, "Enable Custom KDiffusion Schedule"),
- (i2i_k_sched_type, "KDiffusion Scheduler Type"),
- (i2i_k_sched_sigma_max, "KDiffusion Scheduler sigma_max"),
- (i2i_k_sched_sigma_min, "KDiffusion Scheduler sigma_min"),
- (i2i_k_sched_rho, "KDiffusion Scheduler rho"),
(cfg_scale, "CFG scale"),
(image_cfg_scale, "Image CFG scale"),
(seed, "Seed"),
From 70650f87a42615a62568a896403156d0065621b4 Mon Sep 17 00:00:00 2001
From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com>
Date: Tue, 23 May 2023 11:34:51 +0800
Subject: [PATCH 027/168] Use better way to impl
---
modules/processing.py | 16 +---------------
modules/sd_samplers_kdiffusion.py | 19 +++++++++++++------
2 files changed, 14 insertions(+), 21 deletions(-)
diff --git a/modules/processing.py b/modules/processing.py
index 0a0181de8..29a3743f5 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -106,7 +106,7 @@ class StableDiffusionProcessing:
"""
The first set of paramaters: sd_models -> do_not_reload_embeddings represent the minimum required to create a StableDiffusionProcessing
"""
- def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = False, tiling: bool = False, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_min_uncond: float = 0.0, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = 1.0, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, script_args: list = None, enable_custom_k_sched: bool = False, k_sched_type: str = "", sigma_min: float=0.0, sigma_max: float=0.0, rho: float=0.0):
+ def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = False, tiling: bool = False, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_min_uncond: float = 0.0, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = 1.0, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, script_args: list = None):
if sampler_index is not None:
print("sampler_index argument for StableDiffusionProcessing does not do anything; use sampler_name", file=sys.stderr)
@@ -146,11 +146,6 @@ class StableDiffusionProcessing:
self.s_tmin = s_tmin or opts.s_tmin
self.s_tmax = s_tmax or float('inf') # not representable as a standard ui option
self.s_noise = s_noise or opts.s_noise
- self.enable_custom_k_sched = opts.custom_k_sched
- self.k_sched_type = k_sched_type or opts.k_sched_type
- self.sigma_max = sigma_max or opts.sigma_max
- self.sigma_min = sigma_min or opts.sigma_min
- self.rho = rho or opts.rho
self.override_settings = {k: v for k, v in (override_settings or {}).items() if k not in shared.restricted_opts}
self.override_settings_restore_afterwards = override_settings_restore_afterwards
self.is_using_inpainting_conditioning = False
@@ -560,18 +555,9 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter
if uses_ensd:
uses_ensd = sd_samplers_common.is_sampler_using_eta_noise_seed_delta(p)
- # avoid loop import
- from modules import sd_samplers_kdiffusion
- use_custom_k_sched = p.enable_custom_k_sched and p.sampler_name in sd_samplers_kdiffusion.k_diffusion_samplers_map
-
generation_params = {
"Steps": p.steps,
"Sampler": p.sampler_name,
- "Enable Custom KDiffusion Schedule": use_custom_k_sched or None,
- "KDiffusion Scheduler Type": p.k_sched_type if use_custom_k_sched else None,
- "KDiffusion Scheduler sigma_max": p.sigma_max if use_custom_k_sched else None,
- "KDiffusion Scheduler sigma_min": p.sigma_min if use_custom_k_sched else None,
- "KDiffusion Scheduler rho": p.rho if use_custom_k_sched else None,
"CFG scale": p.cfg_scale,
"Image CFG scale": getattr(p, 'image_cfg_scale', None),
"Seed": all_seeds[index],
diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index 969ef02b1..5fea08b0c 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -295,6 +295,13 @@ class KDiffusionSampler:
k_diffusion.sampling.torch = TorchHijack(self.sampler_noises if self.sampler_noises is not None else [])
+ if opts.custom_k_sched:
+ p.extra_generation_params["Enable Custom KDiffusion Schedule"] = True
+ p.extra_generation_params["KDiffusion Scheduler Type"] = opts.k_sched_type
+ p.extra_generation_params["KDiffusion Scheduler sigma_max"] = opts.sigma_max
+ p.extra_generation_params["KDiffusion Scheduler sigma_min"] = opts.sigma_min
+ p.extra_generation_params["KDiffusion Scheduler rho"] = opts.rho
+
extra_params_kwargs = {}
for param_name in self.extra_params:
if hasattr(p, param_name) and param_name in inspect.signature(self.func).parameters:
@@ -318,15 +325,15 @@ class KDiffusionSampler:
if p.sampler_noise_scheduler_override:
sigmas = p.sampler_noise_scheduler_override(steps)
- elif p.enable_custom_k_sched:
+ elif opts.custom_k_sched:
sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item())
- sigmas_func = k_diffusion_scheduler[p.k_sched_type]
+ sigmas_func = k_diffusion_scheduler[opts.k_sched_type]
sigmas_kwargs = {
- 'sigma_min': p.sigma_min or sigma_min,
- 'sigma_max': p.sigma_max or sigma_max
+ 'sigma_min': opts.sigma_min or sigma_min,
+ 'sigma_max': opts.sigma_max or sigma_max
}
- if p.k_sched_type != 'exponential':
- sigmas_kwargs['rho'] = p.rho
+ if opts.k_sched_type != 'exponential':
+ sigmas_kwargs['rho'] = opts.rho
sigmas = sigmas_func(n=steps, **sigmas_kwargs, device=shared.device)
elif self.config is not None and self.config.options.get('scheduler', None) == 'karras':
sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item())
From 78aed1fa4a984b2714ad11f33cbb20007aec2a34 Mon Sep 17 00:00:00 2001
From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com>
Date: Tue, 23 May 2023 11:47:32 +0800
Subject: [PATCH 028/168] Fix xyz
---
scripts/xyz_grid.py | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py
index 74ece2527..cb618e180 100644
--- a/scripts/xyz_grid.py
+++ b/scripts/xyz_grid.py
@@ -220,10 +220,10 @@ axis_options = [
AxisOption("Sigma min", float, apply_field("s_tmin")),
AxisOption("Sigma max", float, apply_field("s_tmax")),
AxisOption("Sigma noise", float, apply_field("s_noise")),
- AxisOption("KDiffusion Scheduler Type", str, apply_field("k_sched_type"), choices=lambda: [x for x in sd_samplers_kdiffusion.k_diffusion_scheduler]),
- AxisOption("KDiffusion Scheduler Sigma Min", float, apply_field("sigma_min")),
- AxisOption("KDiffusion Scheduler Sigma Max", float, apply_field("sigma_max")),
- AxisOption("KDiffusion Scheduler rho", float, apply_field("rho")),
+ AxisOption("KDiffusion Scheduler Type", str, apply_override("k_sched_type"), choices=lambda: [x for x in sd_samplers_kdiffusion.k_diffusion_scheduler]),
+ AxisOption("KDiffusion Scheduler Sigma Min", float, apply_override("sigma_min")),
+ AxisOption("KDiffusion Scheduler Sigma Max", float, apply_override("sigma_max")),
+ AxisOption("KDiffusion Scheduler rho", float, apply_override("rho")),
AxisOption("Eta", float, apply_field("eta")),
AxisOption("Clip skip", int, apply_clip_skip),
AxisOption("Denoising", float, apply_field("denoising_strength")),
From 1db7d212836e0fd8a4eff6922c13a54a372175b2 Mon Sep 17 00:00:00 2001
From: "fumitaka.yano"
Date: Tue, 23 May 2023 15:56:08 +0900
Subject: [PATCH 029/168] Improvements to handle VAE filenames in
 generated image filenames
Body:
1) Added a new import of the sd_vae module at line 24.
2) Added a new method, get_vae_filename, at lines 340-349 that obtains the VAE filename used for image generation and extracts only its base name by splitting on the dot symbol.
3) Added a new lambda, 'vae_filename', at line 373 so the VAE filename can be used in filename patterns.
Reason:
A way was needed to obtain the VAE filename and use it when building generated image filenames.
Test:
We verified that the new functionality produces the expected names for the following commonly distributed VAE files.
vae-ft-mse-840000-ema-pruned.safetensors -> vae-ft-mse-840000-ema-pruned
anything-v4.0.vae.pt -> anything-v4.0
ruff report:
There were no problems with the added code.
A minor issue was reported on a line I did not modify; it was left unchanged because it is unrelated to this change.
Logged:
images.py:426:56: F841 [*] Local variable `_` is assigned to but never used
images.py:432:43: F841 [*] Local variable `_` is assigned to but never used
Impact:
This change makes it easier to retrieve the VAE filename used for image generation and use it in the program.
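For reference, the dot-splitting rule described above can be sketched as a small standalone Python function. This is illustrative only; the actual method lives on FilenameGenerator and reads sd_vae.loaded_vae_file, as shown in the diff below, and vae_basename is a hypothetical helper name.

    import os

    def vae_basename(vae_path):
        # No VAE loaded -> literal "NoneType", matching the patched method.
        if vae_path is None:
            return "NoneType"
        parts = os.path.basename(vae_path).split('.')
        # If the filename starts with ".", the base name is the second part.
        if len(parts) > 1 and parts[0] == '':
            return parts[1]
        return parts[0]

    print(vae_basename("vae-ft-mse-840000-ema-pruned.safetensors"))  # vae-ft-mse-840000-ema-pruned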
---
modules/images.py | 14 ++++++++++++++
1 file changed, 14 insertions(+)
diff --git a/modules/images.py b/modules/images.py
index a41965ab6..3abaf4121 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -21,6 +21,8 @@ import hashlib
from modules import sd_samplers, shared, script_callbacks, errors
from modules.shared import opts, cmd_opts
+import modules.sd_vae as sd_vae
+
LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS)
@@ -335,6 +337,16 @@ def sanitize_filename_part(text, replace_spaces=True):
class FilenameGenerator:
+ def get_vae_filename(self): #get the name of the VAE file.
+ if sd_vae.loaded_vae_file is None:
+ return "NoneType"
+ file_name = os.path.basename(sd_vae.loaded_vae_file)
+ split_file_name = file_name.split('.')
+ if len(split_file_name) > 1 and split_file_name[0] == '':
+ return split_file_name[1] # if the first character of the filename is "." then [1] is obtained.
+ else:
+ return split_file_name[0]
+
replacements = {
'seed': lambda self: self.seed if self.seed is not None else '',
'steps': lambda self: self.p and self.p.steps,
@@ -358,6 +370,8 @@ class FilenameGenerator:
'hasprompt': lambda self, *args: self.hasprompt(*args), # accepts formats:[hasprompt..]
'clip_skip': lambda self: opts.data["CLIP_stop_at_last_layers"],
'denoising': lambda self: self.p.denoising_strength if self.p and self.p.denoising_strength else NOTHING_AND_SKIP_PREVIOUS_TEXT,
+ 'vae_filename': lambda self: self.get_vae_filename(),
+
}
default_time_format = '%Y%m%d%H%M%S'
From 72377b02518f96051a01a7e0ea30a6a14d8ec1de Mon Sep 17 00:00:00 2001
From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com>
Date: Tue, 23 May 2023 23:48:23 +0800
Subject: [PATCH 030/168] Use type to determine if it is enabled
---
modules/generation_parameters_copypaste.py | 1 -
modules/sd_samplers_kdiffusion.py | 6 +++---
modules/shared.py | 3 +--
3 files changed, 4 insertions(+), 6 deletions(-)
diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py
index c92fb0fbf..e98866fce 100644
--- a/modules/generation_parameters_copypaste.py
+++ b/modules/generation_parameters_copypaste.py
@@ -318,7 +318,6 @@ infotext_to_setting_name_mapping = [
('Conditional mask weight', 'inpainting_mask_weight'),
('Model hash', 'sd_model_checkpoint'),
('ENSD', 'eta_noise_seed_delta'),
- ('Enable Custom KDiffusion Schedule', 'custom_k_sched'),
('KDiffusion Scheduler Type', 'k_sched_type'),
('KDiffusion Scheduler sigma_max', 'sigma_max'),
('KDiffusion Scheduler sigma_min', 'sigma_min'),
diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index 5fea08b0c..eff2e32d4 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -46,6 +46,7 @@ sampler_extra_params = {
k_diffusion_samplers_map = {x.name: x for x in samplers_data_k_diffusion}
k_diffusion_scheduler = {
+ 'None': None,
'karras': k_diffusion.sampling.get_sigmas_karras,
'exponential': k_diffusion.sampling.get_sigmas_exponential,
'polyexponential': k_diffusion.sampling.get_sigmas_polyexponential
@@ -295,8 +296,7 @@ class KDiffusionSampler:
k_diffusion.sampling.torch = TorchHijack(self.sampler_noises if self.sampler_noises is not None else [])
- if opts.custom_k_sched:
- p.extra_generation_params["Enable Custom KDiffusion Schedule"] = True
+ if opts.k_sched_type != "None":
p.extra_generation_params["KDiffusion Scheduler Type"] = opts.k_sched_type
p.extra_generation_params["KDiffusion Scheduler sigma_max"] = opts.sigma_max
p.extra_generation_params["KDiffusion Scheduler sigma_min"] = opts.sigma_min
@@ -325,7 +325,7 @@ class KDiffusionSampler:
if p.sampler_noise_scheduler_override:
sigmas = p.sampler_noise_scheduler_override(steps)
- elif opts.custom_k_sched:
+ elif opts.k_sched_type != "None":
sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item())
sigmas_func = k_diffusion_scheduler[opts.k_sched_type]
sigmas_kwargs = {
diff --git a/modules/shared.py b/modules/shared.py
index a0e762d2a..b24f52dd4 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -517,8 +517,7 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters"
's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
- 'custom_k_sched': OptionInfo(False, "Enable Custom KDiffusion Scheduler"),
- 'k_sched_type': OptionInfo("karras", "scheduler type", gr.Dropdown, {"choices": ["karras", "exponential", "polyexponential"]}),
+ 'k_sched_type': OptionInfo("default", "scheduler type", gr.Dropdown, {"choices": ["None", "karras", "exponential", "polyexponential"]}),
'sigma_max': OptionInfo(0.0, "sigma max", gr.Number).info("the maximum noise strength for the scheduler. Set to 0 to use the same value which 'xxx karras' samplers use."),
'sigma_min': OptionInfo(0.0, "sigma min", gr.Number).info("the minimum noise strength for the scheduler. Set to 0 to use the same value which 'xxx karras' samplers use."),
'rho': OptionInfo(7.0, "rho", gr.Number).info("higher will make a more steep noise scheduler (decrease faster). default for karras is 7.0, for polyexponential is 1.0"),
From 0e1c41998af53250a1c8ddc59225b50cbe18e770 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Tue, 23 May 2023 14:07:00 +0300
Subject: [PATCH 031/168] fix bad styling for thumbs view in extra networks
#10639
---
style.css | 18 +++++++++++++++++-
1 file changed, 17 insertions(+), 1 deletion(-)
diff --git a/style.css b/style.css
index ba12723a2..571f4cf45 100644
--- a/style.css
+++ b/style.css
@@ -756,13 +756,22 @@ footer {
.extra-network-cards .card .metadata-button, .extra-network-thumbs .card .metadata-button{
display: none;
position: absolute;
- right: 0;
color: white;
+ right: 0;
+}
+.extra-network-cards .card .metadata-button {
text-shadow: 2px 2px 3px black;
padding: 0.25em;
font-size: 22pt;
width: 1.5em;
}
+.extra-network-thumbs .card .metadata-button {
+ text-shadow: 1px 1px 2px black;
+ padding: 0;
+ font-size: 16pt;
+ width: 1em;
+ top: -0.25em;
+}
.extra-network-cards .card:hover .metadata-button, .extra-network-thumbs .card:hover .metadata-button{
display: inline-block;
}
@@ -787,6 +796,13 @@ footer {
position: relative;
}
+.extra-network-thumbs .card .preview{
+ position: absolute;
+ object-fit: cover;
+ width: 100%;
+ height:100%;
+}
+
.extra-network-thumbs .card:hover .additional a {
display: inline-block;
}
From a6e653be26cc05f4438145fa0082816e9fbbf5fc Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Tue, 23 May 2023 18:02:09 +0300
Subject: [PATCH 032/168] possible fix for empty list of optimizations #10605
---
modules/sd_hijack.py | 21 +++++++++++++++------
webui.py | 17 ++++++++++++++---
2 files changed, 29 insertions(+), 9 deletions(-)
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index 08d31080f..f93df0a63 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -48,6 +48,11 @@ def apply_optimizations():
undo_optimizations()
+ if len(optimizers) == 0:
+ # a script can access the model very early, and optimizations would not be filled by then
+ current_optimizer = None
+ return ''
+
ldm.modules.diffusionmodules.model.nonlinearity = silu
ldm.modules.diffusionmodules.openaimodel.th = sd_hijack_unet.th
@@ -67,8 +72,9 @@ def apply_optimizations():
matching_optimizer = optimizers[0]
if matching_optimizer is not None:
- print(f"Applying optimization: {matching_optimizer.name}")
+ print(f"Applying optimization: {matching_optimizer.name}... ", end='')
matching_optimizer.apply()
+ print("done.")
current_optimizer = matching_optimizer
return current_optimizer.name
else:
@@ -149,6 +155,13 @@ class StableDiffusionModelHijack:
def __init__(self):
self.embedding_db.add_embedding_dir(cmd_opts.embeddings_dir)
+ def apply_optimizations(self):
+ try:
+ self.optimization_method = apply_optimizations()
+ except Exception as e:
+ errors.display(e, "applying cross attention optimization")
+ undo_optimizations()
+
def hijack(self, m):
if type(m.cond_stage_model) == xlmr.BertSeriesModelWithTransformation:
model_embeddings = m.cond_stage_model.roberta.embeddings
@@ -168,11 +181,7 @@ class StableDiffusionModelHijack:
if m.cond_stage_key == "edit":
sd_hijack_unet.hijack_ddpm_edit()
- try:
- self.optimization_method = apply_optimizations()
- except Exception as e:
- errors.display(e, "applying cross attention optimization")
- undo_optimizations()
+ self.apply_optimizations()
self.clip = m.cond_stage_model
diff --git a/webui.py b/webui.py
index 6933473d5..f9210f41b 100644
--- a/webui.py
+++ b/webui.py
@@ -291,9 +291,20 @@ def initialize_rest(*, reload_script_modules=False):
modules.sd_hijack.list_optimizers()
startup_timer.record("scripts list_optimizers")
- # load model in parallel to other startup stuff
- # (when reloading, this does nothing)
- Thread(target=lambda: shared.sd_model).start()
+ def load_model():
+ """
+ Accesses shared.sd_model property to load model.
+ After it's available, if it has been loaded before this access by some extension,
+ its optimization may be None because the list of optimizers has not been filled
+ by that time, so we apply optimization again.
+ """
+
+ shared.sd_model # noqa: B018
+
+ if modules.sd_hijack.current_optimizer is None:
+ modules.sd_hijack.apply_optimizations()
+
+ Thread(target=load_model).start()
Thread(target=devices.first_time_calculation).start()
From 27962ded4a5303548559d14fe2cae373d7a5e5ac Mon Sep 17 00:00:00 2001
From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com>
Date: Tue, 23 May 2023 23:50:19 +0800
Subject: [PATCH 033/168] Fix ruff error
---
scripts/xyz_grid.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py
index cb618e180..a4126e789 100644
--- a/scripts/xyz_grid.py
+++ b/scripts/xyz_grid.py
@@ -220,7 +220,7 @@ axis_options = [
AxisOption("Sigma min", float, apply_field("s_tmin")),
AxisOption("Sigma max", float, apply_field("s_tmax")),
AxisOption("Sigma noise", float, apply_field("s_noise")),
- AxisOption("KDiffusion Scheduler Type", str, apply_override("k_sched_type"), choices=lambda: [x for x in sd_samplers_kdiffusion.k_diffusion_scheduler]),
+ AxisOption("KDiffusion Scheduler Type", str, apply_override("k_sched_type"), choices=lambda: list(sd_samplers_kdiffusion.k_diffusion_scheduler)),
AxisOption("KDiffusion Scheduler Sigma Min", float, apply_override("sigma_min")),
AxisOption("KDiffusion Scheduler Sigma Max", float, apply_override("sigma_max")),
AxisOption("KDiffusion Scheduler rho", float, apply_override("rho")),
From 1601fccebca2dc5a806a0d2f0d33aa2da81a28fb Mon Sep 17 00:00:00 2001
From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com>
Date: Wed, 24 May 2023 00:18:09 +0800
Subject: [PATCH 034/168] Use automatic instead of None/default
---
modules/sd_samplers_kdiffusion.py | 6 +++---
modules/shared.py | 2 +-
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index eff2e32d4..a4c797c6d 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -46,7 +46,7 @@ sampler_extra_params = {
k_diffusion_samplers_map = {x.name: x for x in samplers_data_k_diffusion}
k_diffusion_scheduler = {
- 'None': None,
+ 'Automatic': None,
'karras': k_diffusion.sampling.get_sigmas_karras,
'exponential': k_diffusion.sampling.get_sigmas_exponential,
'polyexponential': k_diffusion.sampling.get_sigmas_polyexponential
@@ -296,7 +296,7 @@ class KDiffusionSampler:
k_diffusion.sampling.torch = TorchHijack(self.sampler_noises if self.sampler_noises is not None else [])
- if opts.k_sched_type != "None":
+ if opts.k_sched_type != "Automatic":
p.extra_generation_params["KDiffusion Scheduler Type"] = opts.k_sched_type
p.extra_generation_params["KDiffusion Scheduler sigma_max"] = opts.sigma_max
p.extra_generation_params["KDiffusion Scheduler sigma_min"] = opts.sigma_min
@@ -325,7 +325,7 @@ class KDiffusionSampler:
if p.sampler_noise_scheduler_override:
sigmas = p.sampler_noise_scheduler_override(steps)
- elif opts.k_sched_type != "None":
+ elif opts.k_sched_type != "Automatic":
sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item())
sigmas_func = k_diffusion_scheduler[opts.k_sched_type]
sigmas_kwargs = {
diff --git a/modules/shared.py b/modules/shared.py
index b24f52dd4..da7f7cfb7 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -517,7 +517,7 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters"
's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
- 'k_sched_type': OptionInfo("default", "scheduler type", gr.Dropdown, {"choices": ["None", "karras", "exponential", "polyexponential"]}),
+ 'k_sched_type': OptionInfo("Automatic", "scheduler type", gr.Dropdown, {"choices": ["Automatic", "karras", "exponential", "polyexponential"]}),
'sigma_max': OptionInfo(0.0, "sigma max", gr.Number).info("the maximum noise strength for the scheduler. Set to 0 to use the same value which 'xxx karras' samplers use."),
'sigma_min': OptionInfo(0.0, "sigma min", gr.Number).info("the minimum noise strength for the scheduler. Set to 0 to use the same value which 'xxx karras' samplers use."),
'rho': OptionInfo(7.0, "rho", gr.Number).info("higher will make a more steep noise scheduler (decrease faster). default for karras is 7.0, for polyexponential is 1.0"),
From 4b88e24ebe776680b327e33fe96d7fcf38e2e5d2 Mon Sep 17 00:00:00 2001
From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com>
Date: Wed, 24 May 2023 20:35:58 +0800
Subject: [PATCH 035/168] Improvements to KDiff schedule infotext and sigma defaults
See:
https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/10649#issuecomment-1561047723
---
modules/generation_parameters_copypaste.py | 20 ++++++++++++----
modules/sd_samplers_kdiffusion.py | 27 ++++++++++++++--------
modules/shared.py | 4 ++--
scripts/xyz_grid.py | 8 +++----
4 files changed, 39 insertions(+), 20 deletions(-)
diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py
index e98866fce..4f827a6f0 100644
--- a/modules/generation_parameters_copypaste.py
+++ b/modules/generation_parameters_copypaste.py
@@ -306,6 +306,18 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model
if "RNG" not in res:
res["RNG"] = "GPU"
+ if "KDiff Sched Type" not in res:
+ res["KDiff Sched Type"] = "Automatic"
+
+ if "KDiff Sched max sigma" not in res:
+ res["KDiff Sched max sigma"] = 14.6
+
+ if "KDiff Sched min sigma" not in res:
+ res["KDiff Sched min sigma"] = 0.3
+
+ if "KDiff Sched rho" not in res:
+ res["KDiff Sched rho"] = 7.0
+
return res
@@ -318,10 +330,10 @@ infotext_to_setting_name_mapping = [
('Conditional mask weight', 'inpainting_mask_weight'),
('Model hash', 'sd_model_checkpoint'),
('ENSD', 'eta_noise_seed_delta'),
- ('KDiffusion Scheduler Type', 'k_sched_type'),
- ('KDiffusion Scheduler sigma_max', 'sigma_max'),
- ('KDiffusion Scheduler sigma_min', 'sigma_min'),
- ('KDiffusion Scheduler rho', 'rho'),
+ ('KDiff Sched Type', 'k_sched_type'),
+ ('KDiff Sched max sigma', 'sigma_max'),
+ ('KDiff Sched min sigma', 'sigma_min'),
+ ('KDiff Sched rho', 'rho'),
('Noise multiplier', 'initial_noise_multiplier'),
('Eta', 'eta_ancestral'),
('Eta DDIM', 'eta_ddim'),
diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index a4c797c6d..d2d172e4c 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -296,12 +296,6 @@ class KDiffusionSampler:
k_diffusion.sampling.torch = TorchHijack(self.sampler_noises if self.sampler_noises is not None else [])
- if opts.k_sched_type != "Automatic":
- p.extra_generation_params["KDiffusion Scheduler Type"] = opts.k_sched_type
- p.extra_generation_params["KDiffusion Scheduler sigma_max"] = opts.sigma_max
- p.extra_generation_params["KDiffusion Scheduler sigma_min"] = opts.sigma_min
- p.extra_generation_params["KDiffusion Scheduler rho"] = opts.rho
-
extra_params_kwargs = {}
for param_name in self.extra_params:
if hasattr(p, param_name) and param_name in inspect.signature(self.func).parameters:
@@ -326,14 +320,27 @@ class KDiffusionSampler:
if p.sampler_noise_scheduler_override:
sigmas = p.sampler_noise_scheduler_override(steps)
elif opts.k_sched_type != "Automatic":
- sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item())
- sigmas_func = k_diffusion_scheduler[opts.k_sched_type]
+ m_sigma_min, m_sigma_max = (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item())
+ sigma_min, sigma_max = (0.1, 10)
sigmas_kwargs = {
- 'sigma_min': opts.sigma_min or sigma_min,
- 'sigma_max': opts.sigma_max or sigma_max
+ 'sigma_min': sigma_min if opts.use_old_karras_scheduler_sigmas else m_sigma_min,
+ 'sigma_max': sigma_max if opts.use_old_karras_scheduler_sigmas else m_sigma_max
}
+
+ sigmas_func = k_diffusion_scheduler[opts.k_sched_type]
+ p.extra_generation_params["KDiff Sched Type"] = opts.k_sched_type
+
+ if opts.sigma_min != 0.3:
+ # take 0.0 as model default
+ sigmas_kwargs['sigma_min'] = opts.sigma_min or m_sigma_min
+ p.extra_generation_params["KDiff Sched min sigma"] = opts.sigma_min
+ if opts.sigma_max != 14.6:
+ sigmas_kwargs['sigma_max'] = opts.sigma_max or m_sigma_max
+ p.extra_generation_params["KDiff Sched max sigma"] = opts.sigma_max
if opts.k_sched_type != 'exponential':
sigmas_kwargs['rho'] = opts.rho
+ p.extra_generation_params["KDiff Sched rho"] = opts.rho
+
sigmas = sigmas_func(n=steps, **sigmas_kwargs, device=shared.device)
elif self.config is not None and self.config.options.get('scheduler', None) == 'karras':
sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item())
diff --git a/modules/shared.py b/modules/shared.py
index da7f7cfb7..00fcced89 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -518,8 +518,8 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters"
's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
'k_sched_type': OptionInfo("Automatic", "scheduler type", gr.Dropdown, {"choices": ["Automatic", "karras", "exponential", "polyexponential"]}),
- 'sigma_max': OptionInfo(0.0, "sigma max", gr.Number).info("the maximum noise strength for the scheduler. Set to 0 to use the same value which 'xxx karras' samplers use."),
- 'sigma_min': OptionInfo(0.0, "sigma min", gr.Number).info("the minimum noise strength for the scheduler. Set to 0 to use the same value which 'xxx karras' samplers use."),
+ 'sigma_max': OptionInfo(14.6, "sigma max", gr.Number).info("the maximum noise strength for the scheduler. Set to 0 to use the same value which 'xxx karras' samplers use."),
+ 'sigma_min': OptionInfo(0.3, "sigma min", gr.Number).info("the minimum noise strength for the scheduler. Set to 0 to use the same value which 'xxx karras' samplers use."),
'rho': OptionInfo(7.0, "rho", gr.Number).info("higher will make a more steep noise scheduler (decrease faster). default for karras is 7.0, for polyexponential is 1.0"),
'eta_noise_seed_delta': OptionInfo(0, "Eta noise seed delta", gr.Number, {"precision": 0}).info("ENSD; does not improve anything, just produces different results for ancestral samplers - only useful for reproducing images"),
'always_discard_next_to_last_sigma': OptionInfo(False, "Always discard next-to-last sigma").link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/6044"),
diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py
index a4126e789..41fc21070 100644
--- a/scripts/xyz_grid.py
+++ b/scripts/xyz_grid.py
@@ -220,10 +220,10 @@ axis_options = [
AxisOption("Sigma min", float, apply_field("s_tmin")),
AxisOption("Sigma max", float, apply_field("s_tmax")),
AxisOption("Sigma noise", float, apply_field("s_noise")),
- AxisOption("KDiffusion Scheduler Type", str, apply_override("k_sched_type"), choices=lambda: list(sd_samplers_kdiffusion.k_diffusion_scheduler)),
- AxisOption("KDiffusion Scheduler Sigma Min", float, apply_override("sigma_min")),
- AxisOption("KDiffusion Scheduler Sigma Max", float, apply_override("sigma_max")),
- AxisOption("KDiffusion Scheduler rho", float, apply_override("rho")),
+ AxisOption("KDiff Sched Type", str, apply_override("k_sched_type"), choices=lambda: list(sd_samplers_kdiffusion.k_diffusion_scheduler)),
+ AxisOption("KDiff Sched min sigma", float, apply_override("sigma_min")),
+ AxisOption("KDiff Sched max sigma", float, apply_override("sigma_max")),
+ AxisOption("KDiff Sched rho", float, apply_override("rho")),
AxisOption("Eta", float, apply_field("eta")),
AxisOption("Clip skip", int, apply_clip_skip),
AxisOption("Denoising", float, apply_field("denoising_strength")),
From a69b71a37f1fd32a60fbd87beed13f4f280400bd Mon Sep 17 00:00:00 2001
From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com>
Date: Wed, 24 May 2023 20:40:37 +0800
Subject: [PATCH 036/168] use Schedule instead of Sched
---
modules/generation_parameters_copypaste.py | 24 +++++++++++-----------
modules/sd_samplers_kdiffusion.py | 8 ++++----
scripts/xyz_grid.py | 8 ++++----
3 files changed, 20 insertions(+), 20 deletions(-)
diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py
index 4f827a6f0..1443c5cd9 100644
--- a/modules/generation_parameters_copypaste.py
+++ b/modules/generation_parameters_copypaste.py
@@ -306,17 +306,17 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model
if "RNG" not in res:
res["RNG"] = "GPU"
- if "KDiff Sched Type" not in res:
- res["KDiff Sched Type"] = "Automatic"
+ if "KDiff Schedule Type" not in res:
+ res["KDiff Schedule Type"] = "Automatic"
- if "KDiff Sched max sigma" not in res:
- res["KDiff Sched max sigma"] = 14.6
+ if "KDiff Schedule max sigma" not in res:
+ res["KDiff Schedule max sigma"] = 14.6
- if "KDiff Sched min sigma" not in res:
- res["KDiff Sched min sigma"] = 0.3
+ if "KDiff Schedule min sigma" not in res:
+ res["KDiff Schedule min sigma"] = 0.3
- if "KDiff Sched rho" not in res:
- res["KDiff Sched rho"] = 7.0
+ if "KDiff Schedule rho" not in res:
+ res["KDiff Schedule rho"] = 7.0
return res
@@ -330,10 +330,10 @@ infotext_to_setting_name_mapping = [
('Conditional mask weight', 'inpainting_mask_weight'),
('Model hash', 'sd_model_checkpoint'),
('ENSD', 'eta_noise_seed_delta'),
- ('KDiff Sched Type', 'k_sched_type'),
- ('KDiff Sched max sigma', 'sigma_max'),
- ('KDiff Sched min sigma', 'sigma_min'),
- ('KDiff Sched rho', 'rho'),
+ ('KDiff Schedule Type', 'k_sched_type'),
+ ('KDiff Schedule max sigma', 'sigma_max'),
+ ('KDiff Schedule min sigma', 'sigma_min'),
+ ('KDiff Schedule rho', 'rho'),
('Noise multiplier', 'initial_noise_multiplier'),
('Eta', 'eta_ancestral'),
('Eta DDIM', 'eta_ddim'),
diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index d2d172e4c..9c9d9f179 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -328,18 +328,18 @@ class KDiffusionSampler:
}
sigmas_func = k_diffusion_scheduler[opts.k_sched_type]
- p.extra_generation_params["KDiff Sched Type"] = opts.k_sched_type
+ p.extra_generation_params["KDiff Schedule Type"] = opts.k_sched_type
if opts.sigma_min != 0.3:
# take 0.0 as model default
sigmas_kwargs['sigma_min'] = opts.sigma_min or m_sigma_min
- p.extra_generation_params["KDiff Sched min sigma"] = opts.sigma_min
+ p.extra_generation_params["KDiff Schedule min sigma"] = opts.sigma_min
if opts.sigma_max != 14.6:
sigmas_kwargs['sigma_max'] = opts.sigma_max or m_sigma_max
- p.extra_generation_params["KDiff Sched max sigma"] = opts.sigma_max
+ p.extra_generation_params["KDiff Schedule max sigma"] = opts.sigma_max
if opts.k_sched_type != 'exponential':
sigmas_kwargs['rho'] = opts.rho
- p.extra_generation_params["KDiff Sched rho"] = opts.rho
+ p.extra_generation_params["KDiff Schedule rho"] = opts.rho
sigmas = sigmas_func(n=steps, **sigmas_kwargs, device=shared.device)
elif self.config is not None and self.config.options.get('scheduler', None) == 'karras':
diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py
index 41fc21070..089d375ea 100644
--- a/scripts/xyz_grid.py
+++ b/scripts/xyz_grid.py
@@ -220,10 +220,10 @@ axis_options = [
AxisOption("Sigma min", float, apply_field("s_tmin")),
AxisOption("Sigma max", float, apply_field("s_tmax")),
AxisOption("Sigma noise", float, apply_field("s_noise")),
- AxisOption("KDiff Sched Type", str, apply_override("k_sched_type"), choices=lambda: list(sd_samplers_kdiffusion.k_diffusion_scheduler)),
- AxisOption("KDiff Sched min sigma", float, apply_override("sigma_min")),
- AxisOption("KDiff Sched max sigma", float, apply_override("sigma_max")),
- AxisOption("KDiff Sched rho", float, apply_override("rho")),
+ AxisOption("KDiff Schedule Type", str, apply_override("k_sched_type"), choices=lambda: list(sd_samplers_kdiffusion.k_diffusion_scheduler)),
+ AxisOption("KDiff Schedule min sigma", float, apply_override("sigma_min")),
+ AxisOption("KDiff Schedule max sigma", float, apply_override("sigma_max")),
+ AxisOption("KDiff Schedule rho", float, apply_override("rho")),
AxisOption("Eta", float, apply_field("eta")),
AxisOption("Clip skip", int, apply_clip_skip),
AxisOption("Denoising", float, apply_field("denoising_strength")),
From fb5d0ef2090b06001195ee22e86964059c223d06 Mon Sep 17 00:00:00 2001
From: strelokhalfer
Date: Wed, 24 May 2023 18:17:02 +0300
Subject: [PATCH 037/168] Changed 'images.zip' to generation by pattern
---
modules/images.py | 10 +++++++---
modules/shared.py | 1 +
modules/ui_common.py | 9 +++++++--
3 files changed, 15 insertions(+), 5 deletions(-)
diff --git a/modules/images.py b/modules/images.py
index 4e8cd9934..d619c7194 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -338,6 +338,8 @@ def sanitize_filename_part(text, replace_spaces=True):
class FilenameGenerator:
replacements = {
'seed': lambda self: self.seed if self.seed is not None else '',
+ 'seed_first': lambda self: self.seed if self.p.batch_size == 1 else self.p.all_seeds[0],
+ 'seed_last': lambda self: NOTHING_AND_SKIP_PREVIOUS_TEXT if self.p.batch_size == 1 else self.p.all_seeds[-1],
'steps': lambda self: self.p and self.p.steps,
'cfg': lambda self: self.p and self.p.cfg_scale,
'width': lambda self: self.image.width,
@@ -354,19 +356,21 @@ class FilenameGenerator:
'prompt_no_styles': lambda self: self.prompt_no_style(),
'prompt_spaces': lambda self: sanitize_filename_part(self.prompt, replace_spaces=False),
'prompt_words': lambda self: self.prompt_words(),
- 'batch_number': lambda self: NOTHING_AND_SKIP_PREVIOUS_TEXT if self.p.batch_size == 1 else self.p.batch_index + 1,
- 'generation_number': lambda self: NOTHING_AND_SKIP_PREVIOUS_TEXT if self.p.n_iter == 1 and self.p.batch_size == 1 else self.p.iteration * self.p.batch_size + self.p.batch_index + 1,
+ 'batch_number': lambda self: NOTHING_AND_SKIP_PREVIOUS_TEXT if self.p.batch_size == 1 or self.zip else self.p.batch_index + 1,
+ 'batch_size': lambda self: self.p.batch_size,
+ 'generation_number': lambda self: NOTHING_AND_SKIP_PREVIOUS_TEXT if (self.p.n_iter == 1 and self.p.batch_size == 1) or self.zip else self.p.iteration * self.p.batch_size + self.p.batch_index + 1,
'hasprompt': lambda self, *args: self.hasprompt(*args), # accepts formats:[hasprompt..]
'clip_skip': lambda self: opts.data["CLIP_stop_at_last_layers"],
'denoising': lambda self: self.p.denoising_strength if self.p and self.p.denoising_strength else NOTHING_AND_SKIP_PREVIOUS_TEXT,
}
default_time_format = '%Y%m%d%H%M%S'
- def __init__(self, p, seed, prompt, image):
+ def __init__(self, p, seed, prompt, image, zip=False):
self.p = p
self.seed = seed
self.prompt = prompt
self.image = image
+ self.zip = zip
def hasprompt(self, *args):
lower = self.prompt.lower()
diff --git a/modules/shared.py b/modules/shared.py
index 0897f937a..fbb10c2ac 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -314,6 +314,7 @@ options_templates.update(options_section(('saving-images', "Saving images/grids"
"grid_extended_filename": OptionInfo(False, "Add extended info (seed, prompt) to filename when saving grid"),
"grid_only_if_multiple": OptionInfo(True, "Do not save grids consisting of one picture"),
"grid_prevent_empty_spots": OptionInfo(False, "Prevent empty spots in grid (when set to autodetect)"),
+ "grid_zip_filename_pattern": OptionInfo("", "Archive filename pattern", component_args=hide_dirs).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Images-Filename-Name-and-Subdirectory"),
"n_rows": OptionInfo(-1, "Grid row count; use -1 for autodetect and 0 for it to be same as batch size", gr.Slider, {"minimum": -1, "maximum": 16, "step": 1}),
"enable_pnginfo": OptionInfo(True, "Save text information about generation parameters as chunks to png files"),
diff --git a/modules/ui_common.py b/modules/ui_common.py
index 27ab3ebb6..5a9204a4d 100644
--- a/modules/ui_common.py
+++ b/modules/ui_common.py
@@ -50,9 +50,10 @@ def save_files(js_data, images, do_make_zip, index):
save_to_dirs = shared.opts.use_save_to_dirs_for_ui
extension: str = shared.opts.samples_format
start_index = 0
+ only_one = False
if index > -1 and shared.opts.save_selected_only and (index >= data["index_of_first_image"]): # ensures we are looking at a specific non-grid picture, and we have save_selected_only
-
+ only_one = True
images = [images[index]]
start_index = index
@@ -70,6 +71,7 @@ def save_files(js_data, images, do_make_zip, index):
is_grid = image_index < p.index_of_first_image
i = 0 if is_grid else (image_index - p.index_of_first_image)
+ p.batch_index = image_index-1
fullfn, txt_fullfn = modules.images.save_image(image, path, "", seed=p.all_seeds[i], prompt=p.all_prompts[i], extension=extension, info=p.infotexts[image_index], grid=is_grid, p=p, save_to_dirs=save_to_dirs)
filename = os.path.relpath(fullfn, path)
@@ -83,7 +85,10 @@ def save_files(js_data, images, do_make_zip, index):
# Make Zip
if do_make_zip:
- zip_filepath = os.path.join(path, "images.zip")
+ zip_fileseed = p.all_seeds[index-1] if only_one else p.all_seeds[0]
+ namegen = modules.images.FilenameGenerator(p, zip_fileseed, p.all_prompts[0], image, True)
+ zip_filename = namegen.apply(shared.opts.grid_zip_filename_pattern or "[datetime]_[[model_name]]_[seed]-[seed_last]")
+ zip_filepath = os.path.join(path, f"{zip_filename}.zip")
from zipfile import ZipFile
with ZipFile(zip_filepath, "w") as zip_file:
From d66c64b9d76553a9518ae6a3141714519d65d796 Mon Sep 17 00:00:00 2001
From: Aarni Koskela
Date: Wed, 24 May 2023 20:19:16 +0300
Subject: [PATCH 038/168] Optimize tooltip checks
* Instead of traversing tens of thousands of text nodes, only look at elements and their children
* Debounce the checks so that they run at most once per second
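The change batches candidate elements into a set and debounces processing with a timer, so the expensive scan runs only after updates have settled. The same collect-and-debounce pattern is sketched below in Python for illustration only; the actual change is the JavaScript in hints.js shown in the diff, and the names here are hypothetical.

    import threading

    pending = set()
    debounce_timer = None

    def process_pending():
        # Handle all collected items in one pass, then reset the batch.
        for item in list(pending):
            print("update tooltip for", item)
        pending.clear()

    def schedule_check(item, delay=1.0):
        # Add the item and restart the timer; processing happens only after
        # no new items have arrived for `delay` seconds.
        global debounce_timer
        pending.add(item)
        if debounce_timer is not None:
            debounce_timer.cancel()
        debounce_timer = threading.Timer(delay, process_pending)
        debounce_timer.start()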
---
javascript/hints.js | 61 ++++++++++++++++++++++++---------------------
1 file changed, 33 insertions(+), 28 deletions(-)
diff --git a/javascript/hints.js b/javascript/hints.js
index 46f342cb9..a8c729760 100644
--- a/javascript/hints.js
+++ b/javascript/hints.js
@@ -116,17 +116,17 @@ var titles = {
"Negative Guidance minimum sigma": "Skip negative prompt for steps where image is already mostly denoised; the higher this value, the more skips there will be; provides increased performance in exchange for minor quality reduction."
};
-function updateTooltipForSpan(span) {
- if (span.title) return; // already has a title
+function updateTooltip(element) {
+ if (element.title) return; // already has a title
- let tooltip = localization[titles[span.textContent]] || titles[span.textContent];
+ let tooltip = localization[titles[element.textContent]] || titles[element.textContent];
if (!tooltip) {
- tooltip = localization[titles[span.value]] || titles[span.value];
+ tooltip = localization[titles[element.value]] || titles[element.value];
}
if (!tooltip) {
- for (const c of span.classList) {
+ for (const c of element.classList) {
if (c in titles) {
tooltip = localization[titles[c]] || titles[c];
break;
@@ -135,34 +135,39 @@ function updateTooltipForSpan(span) {
}
if (tooltip) {
- span.title = tooltip;
+ element.title = tooltip;
}
}
-function updateTooltipForSelect(select) {
- if (select.onchange != null) return;
+// Nodes to check for adding tooltips.
+const tooltipCheckNodes = new Set();
+// Timer for debouncing tooltip check.
+let tooltipCheckTimer = null;
- select.onchange = function() {
- select.title = localization[titles[select.value]] || titles[select.value] || "";
- };
+function processTooltipCheckNodes() {
+ for (const node of tooltipCheckNodes) {
+ updateTooltip(node);
+ }
+ tooltipCheckNodes.clear();
}
-var observedTooltipElements = {SPAN: 1, BUTTON: 1, SELECT: 1, P: 1};
-
-onUiUpdate(function(m) {
- m.forEach(function(record) {
- record.addedNodes.forEach(function(node) {
- if (observedTooltipElements[node.tagName]) {
- updateTooltipForSpan(node);
+onUiUpdate(function(mutationRecords) {
+ for (const record of mutationRecords) {
+ for (const node of record.addedNodes) {
+ if (node.nodeType === Node.ELEMENT_NODE && !node.classList.contains("hide")) {
+ if (
+ node.tagName === "SPAN" ||
+ node.tagName === "BUTTON" ||
+ node.tagName === "P"
+ ) {
+ tooltipCheckNodes.add(node);
+ }
+ node.querySelectorAll('span, button, p').forEach(n => tooltipCheckNodes.add(n));
}
- if (node.tagName == "SELECT") {
- updateTooltipForSelect(node);
- }
-
- if (node.querySelectorAll) {
- node.querySelectorAll('span, button, select, p').forEach(updateTooltipForSpan);
- node.querySelectorAll('select').forEach(updateTooltipForSelect);
- }
- });
- });
+ }
+ }
+ if (tooltipCheckNodes.size) {
+ clearTimeout(tooltipCheckTimer);
+ tooltipCheckTimer = setTimeout(processTooltipCheckNodes, 1000);
+ }
});
From b82d4a65fe9b025e9da1b8c7a72ed9d56b96315d Mon Sep 17 00:00:00 2001
From: Aarni Koskela
Date: Wed, 24 May 2023 20:34:57 +0300
Subject: [PATCH 039/168] Restore support for dropdown tooltips
---
javascript/hints.js | 25 +++++++++++++++++++------
1 file changed, 19 insertions(+), 6 deletions(-)
diff --git a/javascript/hints.js b/javascript/hints.js
index a8c729760..7f8885bcb 100644
--- a/javascript/hints.js
+++ b/javascript/hints.js
@@ -153,14 +153,27 @@ function processTooltipCheckNodes() {
onUiUpdate(function(mutationRecords) {
for (const record of mutationRecords) {
+ if (record.type === "childList" && record.target.classList.contains("options")) {
+ // This smells like a Gradio dropdown menu having changed,
+ // so let's enqueue an update for the input element that shows the current value.
+ let wrap = record.target.parentNode;
+ let input = wrap?.querySelector("input");
+ if (input) {
+ input.title = ""; // So we'll even have a chance to update it.
+ tooltipCheckNodes.add(input);
+ }
+ }
for (const node of record.addedNodes) {
if (node.nodeType === Node.ELEMENT_NODE && !node.classList.contains("hide")) {
- if (
- node.tagName === "SPAN" ||
- node.tagName === "BUTTON" ||
- node.tagName === "P"
- ) {
- tooltipCheckNodes.add(node);
+ if (!node.title) {
+ if (
+ node.tagName === "SPAN" ||
+ node.tagName === "BUTTON" ||
+ node.tagName === "P" ||
+ node.tagName === "INPUT"
+ ) {
+ tooltipCheckNodes.add(node);
+ }
}
node.querySelectorAll('span, button, p').forEach(n => tooltipCheckNodes.add(n));
}
From 32b0f7c9bbb908b870c2e0d488bd63a9c71ba078 Mon Sep 17 00:00:00 2001
From: Aarni Koskela
Date: Wed, 24 May 2023 20:45:05 +0300
Subject: [PATCH 040/168] Add support for tooltips on dropdown options
---
javascript/hints.js | 15 ++++++++++++---
1 file changed, 12 insertions(+), 3 deletions(-)
diff --git a/javascript/hints.js b/javascript/hints.js
index 7f8885bcb..05ae5f22c 100644
--- a/javascript/hints.js
+++ b/javascript/hints.js
@@ -119,10 +119,18 @@ var titles = {
function updateTooltip(element) {
if (element.title) return; // already has a title
- let tooltip = localization[titles[element.textContent]] || titles[element.textContent];
+ let text = element.textContent;
+ let tooltip = localization[titles[text]] || titles[text];
if (!tooltip) {
- tooltip = localization[titles[element.value]] || titles[element.value];
+ let value = element.value;
+ if (value) tooltip = localization[titles[value]] || titles[value];
+ }
+
+ if (!tooltip) {
+ // Gradio dropdown options have `data-value`.
+ let dataValue = element.dataset.value;
+ if (dataValue) tooltip = localization[titles[dataValue]] || titles[dataValue];
}
if (!tooltip) {
@@ -170,7 +178,8 @@ onUiUpdate(function(mutationRecords) {
node.tagName === "SPAN" ||
node.tagName === "BUTTON" ||
node.tagName === "P" ||
- node.tagName === "INPUT"
+ node.tagName === "INPUT" ||
+ (node.tagName === "LI" && node.classList.contains("item")) // Gradio dropdown item
) {
tooltipCheckNodes.add(node);
}
From 7a1bbf99da7c9bad866a50890221e4e539e1025d Mon Sep 17 00:00:00 2001
From: catboxanon <122327233+catboxanon@users.noreply.github.com>
Date: Wed, 24 May 2023 16:41:22 -0400
Subject: [PATCH 041/168] Cleaner image metadata read
---
modules/images.py | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/modules/images.py b/modules/images.py
index 4e8cd9934..d1801355e 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -665,9 +665,13 @@ def read_info_from_image(image):
items['exif comment'] = exif_comment
geninfo = exif_comment
- for field in ['jfif', 'jfif_version', 'jfif_unit', 'jfif_density', 'dpi', 'exif',
- 'loop', 'background', 'timestamp', 'duration']:
+ for field in ['jfif', 'jfif_version', 'jfif_unit', 'jfif_density', 'dpi', 'exif',
+ 'loop', 'background', 'timestamp', 'duration', 'progressive', 'progression',
+ 'icc_profile', 'chromaticity']:
+ try:
items.pop(field, None)
+ except KeyError:
+ pass
if items.get("Software", None) == "NovelAI":
try:
From f661fb0fd39cceca121b455cb0133e829cfe72aa Mon Sep 17 00:00:00 2001
From: Aarni Koskela
Date: Thu, 25 May 2023 09:00:45 +0300
Subject: [PATCH 042/168] Just use console.error, it's in all browsers
---
script.js | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/script.js b/script.js
index f76127799..12f00cb14 100644
--- a/script.js
+++ b/script.js
@@ -41,7 +41,7 @@ function runCallback(x, m) {
try {
x(m);
} catch (e) {
- (console.error || console.log).call(console, e.message, e);
+ console.error("error running callback", x, ":", e);
}
}
function executeCallbacks(queue, m) {
From 9574ebe2128ae3a1b04935c13c3067e4c9a54e63 Mon Sep 17 00:00:00 2001
From: Aarni Koskela
Date: Thu, 25 May 2023 09:02:38 +0300
Subject: [PATCH 043/168] Merge executeCallbacks and runCallback, simplify +
optimize
---
script.js | 16 +++++++---------
1 file changed, 7 insertions(+), 9 deletions(-)
diff --git a/script.js b/script.js
index 12f00cb14..777585711 100644
--- a/script.js
+++ b/script.js
@@ -37,17 +37,15 @@ function onOptionsChanged(callback) {
optionsChangedCallbacks.push(callback);
}
-function runCallback(x, m) {
- try {
- x(m);
- } catch (e) {
- console.error("error running callback", x, ":", e);
+function executeCallbacks(queue, arg) {
+ for (const callback of queue) {
+ try {
+ callback(arg);
+ } catch (e) {
+ console.error("error running callback", callback, ":", e);
+ }
}
}
-function executeCallbacks(queue, m) {
- queue.forEach(function(x) {
- runCallback(x, m);
- });
}
var executedOnLoaded = false;
From 54696dce056ece694bbca3f6c0252532fdd05bbd Mon Sep 17 00:00:00 2001
From: Aarni Koskela
Date: Thu, 25 May 2023 09:03:14 +0300
Subject: [PATCH 044/168] Document on* handlers (for extension authors' sake)
---
script.js | 20 ++++++++++++++++++++
1 file changed, 20 insertions(+)
diff --git a/script.js b/script.js
index 777585711..46310f357 100644
--- a/script.js
+++ b/script.js
@@ -24,15 +24,35 @@ var uiTabChangeCallbacks = [];
var optionsChangedCallbacks = [];
var uiCurrentTab = null;
+/**
+ * Register callback to be called at each UI update.
+ * The callback receives an array of MutationRecords as an argument.
+ */
function onUiUpdate(callback) {
uiUpdateCallbacks.push(callback);
}
+
+/**
+ * Register callback to be called when the UI is loaded.
+ * The callback receives no arguments.
+ */
function onUiLoaded(callback) {
uiLoadedCallbacks.push(callback);
}
+
+/**
+ * Register callback to be called when the UI tab is changed.
+ * The callback receives no arguments.
+ */
function onUiTabChange(callback) {
uiTabChangeCallbacks.push(callback);
}
+
+/**
+ * Register callback to be called when the options are changed.
+ * The callback receives no arguments.
+ * @param callback
+ */
function onOptionsChanged(callback) {
optionsChangedCallbacks.push(callback);
}
From bc53ecf298478ecd9d01a78ece50fea06a609d6a Mon Sep 17 00:00:00 2001
From: Aarni Koskela
Date: Thu, 25 May 2023 09:05:06 +0300
Subject: [PATCH 045/168] Add onAfterUiUpdate callback
---
.eslintrc.js | 7 ++++---
script.js | 27 +++++++++++++++++++++++++++
2 files changed, 31 insertions(+), 3 deletions(-)
diff --git a/.eslintrc.js b/.eslintrc.js
index 218f56098..f33aca09f 100644
--- a/.eslintrc.js
+++ b/.eslintrc.js
@@ -50,13 +50,14 @@ module.exports = {
globals: {
//script.js
gradioApp: "readonly",
+ executeCallbacks: "readonly",
+ onAfterUiUpdate: "readonly",
+ onOptionsChanged: "readonly",
onUiLoaded: "readonly",
onUiUpdate: "readonly",
- onOptionsChanged: "readonly",
uiCurrentTab: "writable",
- uiElementIsVisible: "readonly",
uiElementInSight: "readonly",
- executeCallbacks: "readonly",
+ uiElementIsVisible: "readonly",
//ui.js
opts: "writable",
all_gallery_buttons: "readonly",
diff --git a/script.js b/script.js
index 46310f357..de9d7e22a 100644
--- a/script.js
+++ b/script.js
@@ -19,9 +19,11 @@ function get_uiCurrentTabContent() {
}
var uiUpdateCallbacks = [];
+var uiAfterUpdateCallbacks = [];
var uiLoadedCallbacks = [];
var uiTabChangeCallbacks = [];
var optionsChangedCallbacks = [];
+var uiAfterUpdateTimeout = null;
var uiCurrentTab = null;
/**
@@ -32,6 +34,18 @@ function onUiUpdate(callback) {
uiUpdateCallbacks.push(callback);
}
+/**
+ * Register callback to be called soon after UI updates.
+ * The callback receives no arguments.
+ *
+ * This is preferred over `onUiUpdate` if you don't need
+ * access to the MutationRecords, as your function will
+ * not be called quite as often.
+ */
+function onAfterUiUpdate(callback) {
+ uiAfterUpdateCallbacks.push(callback);
+}
+
/**
* Register callback to be called when the UI is loaded.
* The callback receives no arguments.
@@ -66,6 +80,18 @@ function executeCallbacks(queue, arg) {
}
}
}
+
+/**
+ * Schedule the execution of the callbacks registered with onAfterUiUpdate.
+ * The callbacks are executed after a short while, unless another call to this function
+ * is made before that time. IOW, the callbacks are executed only once, even
+ * when there are multiple mutations observed.
+ */
+function scheduleAfterUiUpdateCallbacks() {
+ clearTimeout(uiAfterUpdateTimeout);
+ uiAfterUpdateTimeout = setTimeout(function() {
+ executeCallbacks(uiAfterUpdateCallbacks);
+ }, 200);
}
var executedOnLoaded = false;
@@ -78,6 +104,7 @@ document.addEventListener("DOMContentLoaded", function() {
}
executeCallbacks(uiUpdateCallbacks, m);
+ scheduleAfterUiUpdateCallbacks();
const newTab = get_uiCurrentTab();
if (newTab && (newTab !== uiCurrentTab)) {
uiCurrentTab = newTab;
From dc7a1bbb1c70ba7585ca64c0a96e1bcba4d2302f Mon Sep 17 00:00:00 2001
From: Aarni Koskela
Date: Thu, 25 May 2023 09:09:13 +0300
Subject: [PATCH 046/168] Use onAfterUiUpdate where possible
---
javascript/aspectRatioOverlay.js | 2 +-
javascript/contextMenus.js | 4 +---
javascript/generationParams.js | 2 +-
javascript/imageMaskFix.js | 2 +-
javascript/imageviewer.js | 2 +-
javascript/notification.js | 2 +-
javascript/ui.js | 2 +-
7 files changed, 7 insertions(+), 9 deletions(-)
diff --git a/javascript/aspectRatioOverlay.js b/javascript/aspectRatioOverlay.js
index 1c08a1a97..2cf2d571f 100644
--- a/javascript/aspectRatioOverlay.js
+++ b/javascript/aspectRatioOverlay.js
@@ -81,7 +81,7 @@ function dimensionChange(e, is_width, is_height) {
}
-onUiUpdate(function() {
+onAfterUiUpdate(function() {
var arPreviewRect = gradioApp().querySelector('#imageARPreview');
if (arPreviewRect) {
arPreviewRect.style.display = 'none';
diff --git a/javascript/contextMenus.js b/javascript/contextMenus.js
index f14af1d42..d60a10c4c 100644
--- a/javascript/contextMenus.js
+++ b/javascript/contextMenus.js
@@ -167,6 +167,4 @@ var addContextMenuEventListener = initResponse[2];
})();
//End example Context Menu Items
-onUiUpdate(function() {
- addContextMenuEventListener();
-});
+onAfterUiUpdate(addContextMenuEventListener);
diff --git a/javascript/generationParams.js b/javascript/generationParams.js
index a877f8a54..7c0fd221d 100644
--- a/javascript/generationParams.js
+++ b/javascript/generationParams.js
@@ -1,7 +1,7 @@
// attaches listeners to the txt2img and img2img galleries to update displayed generation param text when the image changes
let txt2img_gallery, img2img_gallery, modal = undefined;
-onUiUpdate(function() {
+onAfterUiUpdate(function() {
if (!txt2img_gallery) {
txt2img_gallery = attachGalleryListeners("txt2img");
}
diff --git a/javascript/imageMaskFix.js b/javascript/imageMaskFix.js
index 3c9b8a6fd..900c56f32 100644
--- a/javascript/imageMaskFix.js
+++ b/javascript/imageMaskFix.js
@@ -39,5 +39,5 @@ function imageMaskResize() {
});
}
-onUiUpdate(imageMaskResize);
+onAfterUiUpdate(imageMaskResize);
window.addEventListener('resize', imageMaskResize);
diff --git a/javascript/imageviewer.js b/javascript/imageviewer.js
index 78e24eb9e..677e95c1b 100644
--- a/javascript/imageviewer.js
+++ b/javascript/imageviewer.js
@@ -170,7 +170,7 @@ function modalTileImageToggle(event) {
event.stopPropagation();
}
-onUiUpdate(function() {
+onAfterUiUpdate(function() {
var fullImg_preview = gradioApp().querySelectorAll('.gradio-gallery > div > img');
if (fullImg_preview != null) {
fullImg_preview.forEach(setupImageForLightbox);
diff --git a/javascript/notification.js b/javascript/notification.js
index a68a76f25..76c5715da 100644
--- a/javascript/notification.js
+++ b/javascript/notification.js
@@ -4,7 +4,7 @@ let lastHeadImg = null;
let notificationButton = null;
-onUiUpdate(function() {
+onAfterUiUpdate(function() {
if (notificationButton == null) {
notificationButton = gradioApp().getElementById('request_notifications');
diff --git a/javascript/ui.js b/javascript/ui.js
index 800a2ae67..d70a681bf 100644
--- a/javascript/ui.js
+++ b/javascript/ui.js
@@ -249,7 +249,7 @@ function confirm_clear_prompt(prompt, negative_prompt) {
var opts = {};
-onUiUpdate(function() {
+onAfterUiUpdate(function() {
if (Object.keys(opts).length != 0) return;
var json_elem = gradioApp().getElementById('settings_json');
From 60062b51d821411e8830f321a39ee473431c4535 Mon Sep 17 00:00:00 2001
From: catboxanon <122327233+catboxanon@users.noreply.github.com>
Date: Thu, 25 May 2023 08:33:40 -0400
Subject: [PATCH 047/168] Remove try/except in img metadata read
---
modules/images.py | 5 +----
1 file changed, 1 insertion(+), 4 deletions(-)
diff --git a/modules/images.py b/modules/images.py
index d1801355e..93252f417 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -668,10 +668,7 @@ def read_info_from_image(image):
for field in ['jfif', 'jfif_version', 'jfif_unit', 'jfif_density', 'dpi', 'exif',
'loop', 'background', 'timestamp', 'duration', 'progressive', 'progression',
'icc_profile', 'chromaticity']:
- try:
- items.pop(field, None)
- except KeyError:
- pass
+ items.pop(field, None)
if items.get("Software", None) == "NovelAI":
try:
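For reference, dict.pop() with a default argument never raises KeyError, which is what makes the try/except above redundant; a minimal standalone illustration:

# Not part of the patch: dict.pop with a default never raises.
items = {'exif': b'...', 'dpi': (72, 72)}
items.pop('exif', None)       # key present: removed, value returned
items.pop('jfif_unit', None)  # key absent: returns the default None, no exception
print(items)                  # {'dpi': (72, 72)}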
From d29fe44e467c6ecc435f561a776ead8f5116f077 Mon Sep 17 00:00:00 2001
From: kernelmethod <17100608+kernelmethod@users.noreply.github.com>
Date: Thu, 25 May 2023 14:51:47 -0400
Subject: [PATCH 048/168] Small fixes to prepare_tcmalloc for Debian/Ubuntu
compatibility
- /usr/sbin (where ldconfig is usually located) is not typically on users' PATHs by default, so we set that variable before trying to run ldconfig.
- The libtcmalloc library is called libtcmalloc_minimal on Debian/Ubuntu systems. We now check whether libtcmalloc_minimal exists when running prepare_tcmalloc.
---
webui.sh | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/webui.sh b/webui.sh
index 19cf2f78f..7f26d6bf8 100755
--- a/webui.sh
+++ b/webui.sh
@@ -183,7 +183,7 @@ fi
# Try using TCMalloc on Linux
prepare_tcmalloc() {
if [[ "${OSTYPE}" == "linux"* ]] && [[ -z "${NO_TCMALLOC}" ]] && [[ -z "${LD_PRELOAD}" ]]; then
- TCMALLOC="$(ldconfig -p | grep -Po "libtcmalloc.so.\d" | head -n 1)"
+ TCMALLOC="$(PATH=/usr/sbin:$PATH ldconfig -p | grep -Po "libtcmalloc(_minimal|)\.so\.\d" | head -n 1)"
if [[ ! -z "${TCMALLOC}" ]]; then
echo "Using TCMalloc: ${TCMALLOC}"
export LD_PRELOAD="${TCMALLOC}"
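As a sanity check outside the launcher, the same lookup can be reproduced in Python; this is an illustrative sketch mirroring the shell logic above (the find_tcmalloc helper is made up, not part of webui.sh):

import os
import re
import subprocess

def find_tcmalloc():
    # Mirror webui.sh: put /usr/sbin on PATH so ldconfig resolves, then
    # accept either libtcmalloc or libtcmalloc_minimal (Debian/Ubuntu).
    env = dict(os.environ, PATH="/usr/sbin:" + os.environ.get("PATH", ""))
    out = subprocess.run(["ldconfig", "-p"], capture_output=True, text=True, env=env).stdout
    match = re.search(r"libtcmalloc(_minimal|)\.so\.\d", out)
    return match.group(0) if match else None

print(find_tcmalloc())  # e.g. "libtcmalloc_minimal.so.4" on Ubuntu, None if absent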
From 09d9c3d287ee4543d285e0fde8b81603c9751a7e Mon Sep 17 00:00:00 2001

From: Roman Beltiukov
Date: Thu, 25 May 2023 14:45:05 -0700
Subject: [PATCH 049/168] change to AMD only if NVIDIA is not present
---
webui.sh | 11 +++++++----
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/webui.sh b/webui.sh
index 19cf2f78f..999fafe48 100755
--- a/webui.sh
+++ b/webui.sh
@@ -116,11 +116,14 @@ case "$gpu_info" in
*)
;;
esac
-if echo "$gpu_info" | grep -q "AMD" && [[ -z "${TORCH_COMMAND}" ]]
+if ! echo "$gpu_info" | grep -q "NVIDIA";
then
- # AMD users will still use torch 1.13 because 2.0 does not seem to work.
- export TORCH_COMMAND="pip install torch==1.13.1+rocm5.2 torchvision==0.14.1+rocm5.2 --index-url https://download.pytorch.org/whl/rocm5.2"
-fi
+ if echo "$gpu_info" | grep -q "AMD" && [[ -z "${TORCH_COMMAND}" ]]
+ then
+ # AMD users will still use torch 1.13 because 2.0 does not seem to work.
+ export TORCH_COMMAND="pip install torch==1.13.1+rocm5.2 torchvision==0.14.1+rocm5.2 --index-url https://download.pytorch.org/whl/rocm5.2"
+ fi
+fi
for preq in "${GIT}" "${python_cmd}"
do
From bdc371983ea817547378fd2232ff0eb22bb315fa Mon Sep 17 00:00:00 2001
From: Roman Beltiukov
Date: Fri, 26 May 2023 02:09:09 -0700
Subject: [PATCH 050/168] Update webui.sh
---
webui.sh | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/webui.sh b/webui.sh
index d8b57b143..31296e08c 100755
--- a/webui.sh
+++ b/webui.sh
@@ -128,8 +128,7 @@ if ! echo "$gpu_info" | grep -q "NVIDIA";
then
if echo "$gpu_info" | grep -q "AMD" && [[ -z "${TORCH_COMMAND}" ]]
then
- # AMD users will still use torch 1.13 because 2.0 does not seem to work.
- export TORCH_COMMAND="pip install torch==1.13.1+rocm5.2 torchvision==0.14.1+rocm5.2 --index-url https://download.pytorch.org/whl/rocm5.2"
+ export TORCH_COMMAND="pip install torch==2.0.1+rocm5.4.2 torchvision==0.15.2+rocm5.4.2 --index-url https://download.pytorch.org/whl/rocm5.4.2"
fi
fi
From 3829afec365b748e330da33b00a0e363f8c8ab71 Mon Sep 17 00:00:00 2001
From: linkoid <36754150+linkoid@users.noreply.github.com>
Date: Fri, 26 May 2023 15:08:53 -0400
Subject: [PATCH 051/168] Remove exit() from select_checkpoint()
Raising a FileNotFoundError instead.
---
modules/sd_models.py | 13 +++++++------
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/modules/sd_models.py b/modules/sd_models.py
index 91b3eb115..1871cc977 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -164,6 +164,7 @@ def model_hash(filename):
def select_checkpoint():
+ """Raises `FileNotFoundError` if no checkpoints are found."""
model_checkpoint = shared.opts.sd_model_checkpoint
checkpoint_info = checkpoint_alisases.get(model_checkpoint, None)
@@ -171,14 +172,14 @@ def select_checkpoint():
return checkpoint_info
if len(checkpoints_list) == 0:
- print("No checkpoints found. When searching for checkpoints, looked at:", file=sys.stderr)
+ error_message = "No checkpoints found. When searching for checkpoints, looked at:"
if shared.cmd_opts.ckpt is not None:
- print(f" - file {os.path.abspath(shared.cmd_opts.ckpt)}", file=sys.stderr)
- print(f" - directory {model_path}", file=sys.stderr)
+ error_message += f"\n - file {os.path.abspath(shared.cmd_opts.ckpt)}"
+ error_message += f"\n - directory {model_path}"
if shared.cmd_opts.ckpt_dir is not None:
- print(f" - directory {os.path.abspath(shared.cmd_opts.ckpt_dir)}", file=sys.stderr)
- print("Can't run without a checkpoint. Find and place a .ckpt or .safetensors file into any of those locations. The program will exit.", file=sys.stderr)
- exit(1)
+ error_message += f"\n - directory {os.path.abspath(shared.cmd_opts.ckpt_dir)}"
+ error_message += "Can't run without a checkpoint. Find and place a .ckpt or .safetensors file into any of those locations."
+ raise FileNotFoundError(error_message)
checkpoint_info = next(iter(checkpoints_list.values()))
if model_checkpoint is not None:
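A hedged sketch of what this change enables on the caller side; the handling below is illustrative only and not code from the repository, but the exception type matches the patch:

def load_or_report(select_checkpoint):
    # With exit(1) replaced by FileNotFoundError, callers and extensions can
    # recover (or at least report) instead of the whole process terminating.
    try:
        return select_checkpoint()
    except FileNotFoundError as e:
        print(f"Checkpoint selection failed:\n{e}")
        return None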
From 1f0fdede176989f151da6b97bd9a140b7f0af6e5 Mon Sep 17 00:00:00 2001
From: linkoid <36754150+linkoid@users.noreply.github.com>
Date: Fri, 26 May 2023 15:15:59 -0400
Subject: [PATCH 052/168] Show full traceback in get_sd_model()
to reveal if an error is caused by an extension
---
modules/errors.py | 8 ++++++--
modules/sd_models.py | 2 +-
2 files changed, 7 insertions(+), 3 deletions(-)
diff --git a/modules/errors.py b/modules/errors.py
index f6b80dbbd..da4694f85 100644
--- a/modules/errors.py
+++ b/modules/errors.py
@@ -12,9 +12,13 @@ def print_error_explanation(message):
print('=' * max_len, file=sys.stderr)
-def display(e: Exception, task):
+def display(e: Exception, task, *, full_traceback=False):
print(f"{task or 'error'}: {type(e).__name__}", file=sys.stderr)
- print(traceback.format_exc(), file=sys.stderr)
+ te = traceback.TracebackException.from_exception(e)
+ if full_traceback:
+ # include frames leading up to the try-catch block
+ te.stack = traceback.StackSummary(traceback.extract_stack()[:-2] + te.stack)
+ print(*te.format(), sep="", file=sys.stderr)
message = str(e)
if "copying a param with shape torch.Size([640, 1024]) from checkpoint, the shape in current model is torch.Size([640, 768])" in message:
diff --git a/modules/sd_models.py b/modules/sd_models.py
index 1871cc977..3e7fc7e32 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -424,7 +424,7 @@ class SdModelData:
try:
load_model()
except Exception as e:
- errors.display(e, "loading stable diffusion model")
+ errors.display(e, "loading stable diffusion model", full_traceback=True)
print("", file=sys.stderr)
print("Stable diffusion model failed to load", file=sys.stderr)
self.sd_model = None
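A minimal, self-contained sketch of the traceback technique introduced above (a standalone example, not webui code): TracebackException.from_exception() only captures frames below the try/except, so the patch prepends the current stack to show where the failing call originated.

import sys
import traceback

def show_full_traceback(e):
    # Rebuild the traceback of an already-caught exception and prepend the
    # frames leading up to the try/except, like full_traceback=True above.
    te = traceback.TracebackException.from_exception(e)
    te.stack = traceback.StackSummary(traceback.extract_stack()[:-2] + te.stack)
    print(*te.format(), sep="", file=sys.stderr)

def inner():
    raise ValueError("boom")

def outer():
    try:
        inner()
    except Exception as e:
        show_full_traceback(e)  # prints <module> -> outer -> inner

outer()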
From 339b5315700a469f4a9f0d5afc08ca2aca60c579 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 27 May 2023 15:47:33 +0300
Subject: [PATCH 053/168] custom unet support
---
modules/processing.py | 4 +-
modules/script_callbacks.py | 20 ++++++++
modules/sd_hijack.py | 20 +++++---
modules/sd_models.py | 4 +-
modules/sd_unet.py | 92 +++++++++++++++++++++++++++++++++++++
modules/shared.py | 1 +
modules/shared_items.py | 11 +++++
webui.py | 4 ++
8 files changed, 148 insertions(+), 8 deletions(-)
create mode 100644 modules/sd_unet.py
diff --git a/modules/processing.py b/modules/processing.py
index 29a3743f5..b75f25157 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -13,7 +13,7 @@ from skimage import exposure
from typing import Any, Dict, List
import modules.sd_hijack
-from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, extra_networks, sd_vae_approx, scripts, sd_samplers_common
+from modules import devices, prompt_parser, masking, sd_samplers, lowvram, generation_parameters_copypaste, extra_networks, sd_vae_approx, scripts, sd_samplers_common, sd_unet
from modules.sd_hijack import model_hijack
from modules.shared import opts, cmd_opts, state
import modules.shared as shared
@@ -674,6 +674,8 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
if shared.opts.live_previews_enable and opts.show_progress_type == "Approx NN":
sd_vae_approx.model()
+ sd_unet.apply_unet()
+
if state.job_count == -1:
state.job_count = p.n_iter
diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py
index 40f388a59..d2728e12c 100644
--- a/modules/script_callbacks.py
+++ b/modules/script_callbacks.py
@@ -111,6 +111,7 @@ callback_map = dict(
callbacks_before_ui=[],
callbacks_on_reload=[],
callbacks_list_optimizers=[],
+ callbacks_list_unets=[],
)
@@ -271,6 +272,18 @@ def list_optimizers_callback():
return res
+def list_unets_callback():
+ res = []
+
+ for c in callback_map['callbacks_list_unets']:
+ try:
+ c.callback(res)
+ except Exception:
+ report_exception(c, 'list_unets')
+
+ return res
+
+
def add_callback(callbacks, fun):
stack = [x for x in inspect.stack() if x.filename != __file__]
filename = stack[0].filename if len(stack) > 0 else 'unknown file'
@@ -430,3 +443,10 @@ def on_list_optimizers(callback):
to it."""
add_callback(callback_map['callbacks_list_optimizers'], callback)
+
+
+def on_list_unets(callback):
+ """register a function to be called when UI is making a list of alternative options for unet.
+ The function will be called with one argument, a list, and shall add objects of type modules.sd_unet.SdUnetOption to it."""
+
+ add_callback(callback_map['callbacks_list_unets'], callback)
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index f93df0a63..487dfd600 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -3,7 +3,7 @@ from torch.nn.functional import silu
from types import MethodType
import modules.textual_inversion.textual_inversion
-from modules import devices, sd_hijack_optimizations, shared, script_callbacks, errors
+from modules import devices, sd_hijack_optimizations, shared, script_callbacks, errors, sd_unet
from modules.hypernetworks import hypernetwork
from modules.shared import cmd_opts
from modules import sd_hijack_clip, sd_hijack_open_clip, sd_hijack_unet, sd_hijack_xlmr, xlmr
@@ -43,7 +43,7 @@ def list_optimizers():
optimizers.extend(new_optimizers)
-def apply_optimizations():
+def apply_optimizations(option=None):
global current_optimizer
undo_optimizations()
@@ -60,7 +60,7 @@ def apply_optimizations():
current_optimizer.undo()
current_optimizer = None
- selection = shared.opts.cross_attention_optimization
+ selection = option or shared.opts.cross_attention_optimization
if selection == "Automatic" and len(optimizers) > 0:
matching_optimizer = next(iter([x for x in optimizers if x.cmd_opt and getattr(shared.cmd_opts, x.cmd_opt, False)]), optimizers[0])
else:
@@ -72,12 +72,13 @@ def apply_optimizations():
matching_optimizer = optimizers[0]
if matching_optimizer is not None:
- print(f"Applying optimization: {matching_optimizer.name}... ", end='')
+ print(f"Applying attention optimization: {matching_optimizer.name}... ", end='')
matching_optimizer.apply()
print("done.")
current_optimizer = matching_optimizer
return current_optimizer.name
else:
+ print("Disabling attention optimization")
return ''
@@ -155,9 +156,9 @@ class StableDiffusionModelHijack:
def __init__(self):
self.embedding_db.add_embedding_dir(cmd_opts.embeddings_dir)
- def apply_optimizations(self):
+ def apply_optimizations(self, option=None):
try:
- self.optimization_method = apply_optimizations()
+ self.optimization_method = apply_optimizations(option)
except Exception as e:
errors.display(e, "applying cross attention optimization")
undo_optimizations()
@@ -194,6 +195,11 @@ class StableDiffusionModelHijack:
self.layers = flatten(m)
+ if not hasattr(ldm.modules.diffusionmodules.openaimodel, 'copy_of_UNetModel_forward_for_webui'):
+ ldm.modules.diffusionmodules.openaimodel.copy_of_UNetModel_forward_for_webui = ldm.modules.diffusionmodules.openaimodel.UNetModel.forward
+
+ ldm.modules.diffusionmodules.openaimodel.UNetModel.forward = sd_unet.UNetModel_forward
+
def undo_hijack(self, m):
if type(m.cond_stage_model) == xlmr.BertSeriesModelWithTransformation:
m.cond_stage_model = m.cond_stage_model.wrapped
@@ -215,6 +221,8 @@ class StableDiffusionModelHijack:
self.layers = None
self.clip = None
+ ldm.modules.diffusionmodules.openaimodel.UNetModel.forward = ldm.modules.diffusionmodules.openaimodel.copy_of_UNetModel_forward_for_webui
+
def apply_circular(self, enable):
if self.circular_enabled == enable:
return
diff --git a/modules/sd_models.py b/modules/sd_models.py
index 91b3eb115..835bc016e 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -14,7 +14,7 @@ import ldm.modules.midas as midas
from ldm.util import instantiate_from_config
-from modules import paths, shared, modelloader, devices, script_callbacks, sd_vae, sd_disable_initialization, errors, hashes, sd_models_config
+from modules import paths, shared, modelloader, devices, script_callbacks, sd_vae, sd_disable_initialization, errors, hashes, sd_models_config, sd_unet
from modules.sd_hijack_inpainting import do_inpainting_hijack
from modules.timer import Timer
import tomesd
@@ -532,6 +532,8 @@ def reload_model_weights(sd_model=None, info=None):
if sd_model.sd_model_checkpoint == checkpoint_info.filename:
return
+ sd_unet.apply_unet("None")
+
if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
lowvram.send_everything_to_cpu()
else:
diff --git a/modules/sd_unet.py b/modules/sd_unet.py
new file mode 100644
index 000000000..6d708ad29
--- /dev/null
+++ b/modules/sd_unet.py
@@ -0,0 +1,92 @@
+import torch.nn
+import ldm.modules.diffusionmodules.openaimodel
+
+from modules import script_callbacks, shared, devices
+
+unet_options = []
+current_unet_option = None
+current_unet = None
+
+
+def list_unets():
+ new_unets = script_callbacks.list_unets_callback()
+
+ unet_options.clear()
+ unet_options.extend(new_unets)
+
+
+def get_unet_option(option=None):
+ option = option or shared.opts.sd_unet
+
+ if option == "None":
+ return None
+
+ if option == "Automatic":
+ name = shared.sd_model.sd_checkpoint_info.model_name
+
+ options = [x for x in unet_options if x.model_name == name]
+
+ option = options[0].label if options else "None"
+
+ return next(iter([x for x in unet_options if x.label == option]), None)
+
+
+def apply_unet(option=None):
+ global current_unet_option
+ global current_unet
+
+ new_option = get_unet_option(option)
+ if new_option == current_unet_option:
+ return
+
+ if current_unet is not None:
+ print(f"Dectivating unet: {current_unet.option.label}")
+ current_unet.deactivate()
+
+ current_unet_option = new_option
+ if current_unet_option is None:
+ current_unet = None
+
+ if not (shared.cmd_opts.lowvram or shared.cmd_opts.medvram):
+ shared.sd_model.model.diffusion_model.to(devices.device)
+
+ return
+
+ shared.sd_model.model.diffusion_model.to(devices.cpu)
+ devices.torch_gc()
+
+ current_unet = current_unet_option.create_unet()
+ current_unet.option = current_unet_option
+ print(f"Activating unet: {current_unet.option.label}")
+ current_unet.activate()
+
+
+class SdUnetOption:
+ model_name = None
+ """name of related checkpoint - this option will be selected automatically for unet if the name of checkpoint matches this"""
+
+ label = None
+ """name of the unet in UI"""
+
+ def create_unet(self):
+ """returns SdUnet object to be used as a Unet instead of built-in unet when making pictures"""
+ raise NotImplementedError()
+
+
+class SdUnet(torch.nn.Module):
+ def forward(self, x, timesteps, context, *args, **kwargs):
+ raise NotImplementedError()
+
+ def activate(self):
+ pass
+
+ def deactivate(self):
+ pass
+
+
+def UNetModel_forward(self, x, timesteps=None, context=None, *args, **kwargs):
+ if current_unet is not None:
+ return current_unet.forward(x, timesteps, context, *args, **kwargs)
+
+ return ldm.modules.diffusionmodules.openaimodel.copy_of_UNetModel_forward_for_webui(self, x, timesteps, context, *args, **kwargs)
+
diff --git a/modules/shared.py b/modules/shared.py
index 0897f937a..a5e7824ab 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -403,6 +403,7 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), {
"sd_vae_checkpoint_cache": OptionInfo(0, "VAE Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}),
"sd_vae": OptionInfo("Automatic", "SD VAE", gr.Dropdown, lambda: {"choices": shared_items.sd_vae_items()}, refresh=shared_items.refresh_vae_list).info("choose VAE model: Automatic = use one with same filename as checkpoint; None = use VAE from checkpoint"),
"sd_vae_as_default": OptionInfo(True, "Ignore selected VAE for stable diffusion checkpoints that have their own .vae.pt next to them"),
+ "sd_unet": OptionInfo("Automatic", "SD Unet", gr.Dropdown, lambda: {"choices": shared_items.sd_unet_items()}, refresh=shared_items.refresh_unet_list).info("choose Unet model: Automatic = use one with same filename as checkpoint; None = use Unet from checkpoint"),
"inpainting_mask_weight": OptionInfo(1.0, "Inpainting conditioning mask strength", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
"initial_noise_multiplier": OptionInfo(1.0, "Noise multiplier for img2img", gr.Slider, {"minimum": 0.5, "maximum": 1.5, "step": 0.01}),
"img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."),
diff --git a/modules/shared_items.py b/modules/shared_items.py
index 2a8713c87..7f306a06f 100644
--- a/modules/shared_items.py
+++ b/modules/shared_items.py
@@ -29,3 +29,14 @@ def cross_attention_optimizations():
return ["Automatic"] + [x.title() for x in modules.sd_hijack.optimizers] + ["None"]
+def sd_unet_items():
+ import modules.sd_unet
+
+ return ["Automatic"] + [x.label for x in modules.sd_unet.unet_options] + ["None"]
+
+
+def refresh_unet_list():
+ import modules.sd_unet
+
+ modules.sd_unet.list_unets()
+
diff --git a/webui.py b/webui.py
index f9210f41b..1e3ff0615 100644
--- a/webui.py
+++ b/webui.py
@@ -58,6 +58,7 @@ import modules.sd_hijack
import modules.sd_hijack_optimizations
import modules.sd_models
import modules.sd_vae
+import modules.sd_unet
import modules.txt2img
import modules.script_callbacks
import modules.textual_inversion.textual_inversion
@@ -291,6 +292,9 @@ def initialize_rest(*, reload_script_modules=False):
modules.sd_hijack.list_optimizers()
startup_timer.record("scripts list_optimizers")
+ modules.sd_unet.list_unets()
+ startup_timer.record("scripts list_unets")
+
def load_model():
"""
Accesses shared.sd_model property to load model.
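A hedged sketch of how an extension might use the new callback; the class names below are hypothetical, and only on_list_unets, SdUnetOption and SdUnet come from the patch itself:

# Hypothetical extension code, not part of the patch.
import torch

from modules import script_callbacks, sd_unet

class MyUnet(sd_unet.SdUnet):
    def forward(self, x, timesteps, context, *args, **kwargs):
        # placeholder: a real implementation returns the predicted noise from
        # whatever backend the extension wraps (TensorRT, ONNX, ...)
        return torch.zeros_like(x)

class MyUnetOption(sd_unet.SdUnetOption):
    model_name = "my-checkpoint"  # auto-selected when the loaded checkpoint has this name
    label = "My custom unet"      # shown in the "SD Unet" dropdown

    def create_unet(self):
        return MyUnet()

def list_unets(unets):
    unets.append(MyUnetOption())

script_callbacks.on_list_unets(list_unets)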
From 633867ecc6061c547dd62533dad2077fa0d6ac08 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 27 May 2023 19:06:49 +0300
Subject: [PATCH 054/168] fix the function for serving already-saved images
 without temp files, which broke after updating gradio
---
modules/ui_tempdir.py | 14 +++++++++-----
1 file changed, 9 insertions(+), 5 deletions(-)
diff --git a/modules/ui_tempdir.py b/modules/ui_tempdir.py
index f05049e1f..7f6b42aea 100644
--- a/modules/ui_tempdir.py
+++ b/modules/ui_tempdir.py
@@ -4,6 +4,7 @@ from collections import namedtuple
from pathlib import Path
import gradio as gr
+import gradio.components
from PIL import PngImagePlugin
@@ -31,13 +32,16 @@ def check_tmp_file(gradio, filename):
return False
-def save_pil_to_file(pil_image, dir=None):
+def save_pil_to_file(self, pil_image, dir=None):
already_saved_as = getattr(pil_image, 'already_saved_as', None)
if already_saved_as and os.path.isfile(already_saved_as):
register_tmp_file(shared.demo, already_saved_as)
+ filename = already_saved_as
- file_obj = Savedfile(f'{already_saved_as}?{os.path.getmtime(already_saved_as)}')
- return file_obj
+ if not shared.opts.save_images_add_number:
+ filename += f'?{os.path.getmtime(already_saved_as)}'
+
+ return filename
if shared.opts.temp_dir != "":
dir = shared.opts.temp_dir
@@ -51,11 +55,11 @@ def save_pil_to_file(pil_image, dir=None):
file_obj = tempfile.NamedTemporaryFile(delete=False, suffix=".png", dir=dir)
pil_image.save(file_obj, pnginfo=(metadata if use_metadata else None))
- return file_obj
+ return file_obj.name
# override save to file function so that it also writes PNG info
-gr.processing_utils.save_pil_to_file = save_pil_to_file
+gradio.components.IOComponent.pil_to_temp_file = save_pil_to_file
def on_tmpdir_changed():
From e8e7fe11e903115a706187f8301df2e06fa018f8 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 27 May 2023 19:53:09 +0300
Subject: [PATCH 055/168] updates for the noise schedule settings
---
modules/generation_parameters_copypaste.py | 24 +++++++++----------
modules/sd_samplers_kdiffusion.py | 28 ++++++++++++----------
modules/shared.py | 8 +++----
scripts/xyz_grid.py | 8 +++----
4 files changed, 35 insertions(+), 33 deletions(-)
diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py
index 1443c5cd9..81aef5026 100644
--- a/modules/generation_parameters_copypaste.py
+++ b/modules/generation_parameters_copypaste.py
@@ -306,17 +306,17 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model
if "RNG" not in res:
res["RNG"] = "GPU"
- if "KDiff Schedule Type" not in res:
- res["KDiff Schedule Type"] = "Automatic"
+ if "Schedule type" not in res:
+ res["Schedule type"] = "Automatic"
- if "KDiff Schedule max sigma" not in res:
- res["KDiff Schedule max sigma"] = 14.6
+ if "Schedule max sigma" not in res:
+ res["Schedule max sigma"] = 0
- if "KDiff Schedule min sigma" not in res:
- res["KDiff Schedule min sigma"] = 0.3
+ if "Schedule min sigma" not in res:
+ res["Schedule min sigma"] = 0
- if "KDiff Schedule rho" not in res:
- res["KDiff Schedule rho"] = 7.0
+ if "Schedule rho" not in res:
+ res["Schedule rho"] = 0
return res
@@ -330,10 +330,10 @@ infotext_to_setting_name_mapping = [
('Conditional mask weight', 'inpainting_mask_weight'),
('Model hash', 'sd_model_checkpoint'),
('ENSD', 'eta_noise_seed_delta'),
- ('KDiff Schedule Type', 'k_sched_type'),
- ('KDiff Schedule max sigma', 'sigma_max'),
- ('KDiff Schedule min sigma', 'sigma_min'),
- ('KDiff Schedule rho', 'rho'),
+ ('Schedule type', 'k_sched_type'),
+ ('Schedule max sigma', 'sigma_max'),
+ ('Schedule min sigma', 'sigma_min'),
+ ('Schedule rho', 'rho'),
('Noise multiplier', 'initial_noise_multiplier'),
('Eta', 'eta_ancestral'),
('Eta DDIM', 'eta_ddim'),
diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py
index 9c9d9f179..e9ba2c61f 100644
--- a/modules/sd_samplers_kdiffusion.py
+++ b/modules/sd_samplers_kdiffusion.py
@@ -321,25 +321,27 @@ class KDiffusionSampler:
sigmas = p.sampler_noise_scheduler_override(steps)
elif opts.k_sched_type != "Automatic":
m_sigma_min, m_sigma_max = (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item())
- sigma_min, sigma_max = (0.1, 10)
+ sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (m_sigma_min, m_sigma_max)
sigmas_kwargs = {
- 'sigma_min': sigma_min if opts.use_old_karras_scheduler_sigmas else m_sigma_min,
- 'sigma_max': sigma_max if opts.use_old_karras_scheduler_sigmas else m_sigma_max
+ 'sigma_min': sigma_min,
+ 'sigma_max': sigma_max,
}
sigmas_func = k_diffusion_scheduler[opts.k_sched_type]
- p.extra_generation_params["KDiff Schedule Type"] = opts.k_sched_type
+ p.extra_generation_params["Schedule type"] = opts.k_sched_type
- if opts.sigma_min != 0.3:
- # take 0.0 as model default
- sigmas_kwargs['sigma_min'] = opts.sigma_min or m_sigma_min
- p.extra_generation_params["KDiff Schedule min sigma"] = opts.sigma_min
- if opts.sigma_max != 14.6:
- sigmas_kwargs['sigma_max'] = opts.sigma_max or m_sigma_max
- p.extra_generation_params["KDiff Schedule max sigma"] = opts.sigma_max
- if opts.k_sched_type != 'exponential':
+ if opts.sigma_min != m_sigma_min and opts.sigma_min != 0:
+ sigmas_kwargs['sigma_min'] = opts.sigma_min
+ p.extra_generation_params["Schedule min sigma"] = opts.sigma_min
+ if opts.sigma_max != m_sigma_max and opts.sigma_max != 0:
+ sigmas_kwargs['sigma_max'] = opts.sigma_max
+ p.extra_generation_params["Schedule max sigma"] = opts.sigma_max
+
+ default_rho = 1. if opts.k_sched_type == "polyexponential" else 7.
+
+ if opts.k_sched_type != 'exponential' and opts.rho != 0 and opts.rho != default_rho:
sigmas_kwargs['rho'] = opts.rho
- p.extra_generation_params["KDiff Schedule rho"] = opts.rho
+ p.extra_generation_params["Schedule rho"] = opts.rho
sigmas = sigmas_func(n=steps, **sigmas_kwargs, device=shared.device)
elif self.config is not None and self.config.options.get('scheduler', None) == 'karras':
diff --git a/modules/shared.py b/modules/shared.py
index 364a59917..daab38dcc 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -518,10 +518,10 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters"
's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
- 'k_sched_type': OptionInfo("Automatic", "scheduler type", gr.Dropdown, {"choices": ["Automatic", "karras", "exponential", "polyexponential"]}),
- 'sigma_max': OptionInfo(14.6, "sigma max", gr.Number).info("the maximum noise strength for the scheduler. Set to 0 to use the same value which 'xxx karras' samplers use."),
- 'sigma_min': OptionInfo(0.3, "sigma min", gr.Number).info("the minimum noise strength for the scheduler. Set to 0 to use the same value which 'xxx karras' samplers use."),
- 'rho': OptionInfo(7.0, "rho", gr.Number).info("higher will make a more steep noise scheduler (decrease faster). default for karras is 7.0, for polyexponential is 1.0"),
+ 'k_sched_type': OptionInfo("Automatic", "scheduler type", gr.Dropdown, {"choices": ["Automatic", "karras", "exponential", "polyexponential"]}).info("lets you override the noise schedule for k-diffusion samplers; choosing Automatic disables the three parameters below"),
+ 'sigma_min': OptionInfo(0.0, "sigma min", gr.Number).info("0 = default (~0.03); minimum noise strength for k-diffusion noise scheduler"),
+ 'sigma_max': OptionInfo(0.0, "sigma max", gr.Number).info("0 = default (~14.6); maximum noise strength for k-diffusion noise schedule"),
+ 'rho': OptionInfo(0.0, "rho", gr.Number).info("0 = default (7 for karras, 1 for polyexponential); higher values result in a more steep noise schedule (decreases faster)"),
'eta_noise_seed_delta': OptionInfo(0, "Eta noise seed delta", gr.Number, {"precision": 0}).info("ENSD; does not improve anything, just produces different results for ancestral samplers - only useful for reproducing images"),
'always_discard_next_to_last_sigma': OptionInfo(False, "Always discard next-to-last sigma").link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/6044"),
'uni_pc_variant': OptionInfo("bh1", "UniPC variant", gr.Radio, {"choices": ["bh1", "bh2", "vary_coeff"]}),
diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py
index 089d375ea..7821cc655 100644
--- a/scripts/xyz_grid.py
+++ b/scripts/xyz_grid.py
@@ -220,10 +220,10 @@ axis_options = [
AxisOption("Sigma min", float, apply_field("s_tmin")),
AxisOption("Sigma max", float, apply_field("s_tmax")),
AxisOption("Sigma noise", float, apply_field("s_noise")),
- AxisOption("KDiff Schedule Type", str, apply_override("k_sched_type"), choices=lambda: list(sd_samplers_kdiffusion.k_diffusion_scheduler)),
- AxisOption("KDiff Schedule min sigma", float, apply_override("sigma_min")),
- AxisOption("KDiff Schedule max sigma", float, apply_override("sigma_max")),
- AxisOption("KDiff Schedule rho", float, apply_override("rho")),
+ AxisOption("Schedule type", str, apply_override("k_sched_type"), choices=lambda: list(sd_samplers_kdiffusion.k_diffusion_scheduler)),
+ AxisOption("Schedule min sigma", float, apply_override("sigma_min")),
+ AxisOption("Schedule max sigma", float, apply_override("sigma_max")),
+ AxisOption("Schedule rho", float, apply_override("rho")),
AxisOption("Eta", float, apply_field("eta")),
AxisOption("Clip skip", int, apply_clip_skip),
AxisOption("Denoising", float, apply_field("denoising_strength")),
From 662af75973984d493be9300eaa965b73d3008d38 Mon Sep 17 00:00:00 2001
From: Danil Boldyrev
Date: Sat, 27 May 2023 22:54:45 +0300
Subject: [PATCH 056/168] Ability to zoom and move the canvas
---
javascript/zoom.js | 312 +++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 312 insertions(+)
create mode 100644 javascript/zoom.js
diff --git a/javascript/zoom.js b/javascript/zoom.js
new file mode 100644
index 000000000..4958ddcf0
--- /dev/null
+++ b/javascript/zoom.js
@@ -0,0 +1,312 @@
+// Main
+onUiLoaded(async () => {
+ const hotkeysConfig = {
+ resetZoom: "KeyR",
+ fitToScreen: "KeyS",
+ moveKey: "KeyF",
+ overlap: "KeyO",
+ };
+
+ let isMoving = false;
+ let mouseX, mouseY;
+
+ const elementIDs = {
+ sketch: "#img2img_sketch",
+ inpaint: "#img2maskimg",
+ inpaintSketch: "#inpaint_sketch",
+ img2imgTabs: "#mode_img2img .tab-nav",
+ };
+
+ async function getElements() {
+ const elements = await Promise.all(
+ Object.values(elementIDs).map((id) => document.querySelector(id))
+ );
+ return Object.fromEntries(
+ Object.keys(elementIDs).map((key, index) => [key, elements[index]])
+ );
+ }
+
+ const elements = await getElements();
+
+ function applyZoomAndPan(targetElement, elemId) {
+ targetElement.style.transformOrigin = "0 0";
+ let [zoomLevel, panX, panY] = [1, 0, 0];
+ let fullScreenMode = false;
+
+ // Reset the zoom level and pan position of the target element to their initial values
+ function resetZoom() {
+ zoomLevel = 1;
+ panX = 0;
+ panY = 0;
+
+ targetElement.style.transform = `scale(${zoomLevel}) translate(${panX}px, ${panY}px)`;
+
+ const canvas = document.querySelector(`${elemId} canvas[key="interface"]`);
+
+ toggleOverlap("off");
+ fullScreenMode = false;
+
+ targetElement.style.width = "";
+ if (canvas) {
+ targetElement.style.height = canvas.style.height;
+ }
+ }
+
+ // Toggle the zIndex of the target element between two values, allowing it to overlap or be overlapped by other elements
+ function toggleOverlap(forced = "") {
+ const zIndex1 = "0";
+ const zIndex2 = "998";
+
+ targetElement.style.zIndex =
+ targetElement.style.zIndex !== zIndex2 ? zIndex2 : zIndex1;
+
+ if (forced === "off") {
+ targetElement.style.zIndex = zIndex1;
+ } else if (forced === "on") {
+ targetElement.style.zIndex = zIndex2;
+ }
+ }
+
+ // Adjust the brush size based on the deltaY value from a mouse wheel event
+ function adjustBrushSize(
+ elemId,
+ deltaY,
+ withoutValue = false,
+ percentage = 5
+ ) {
+ const input =
+ document.querySelector(`${elemId} input[aria-label='Brush radius']`) ||
+ document.querySelector(`${elemId} button[aria-label="Use brush"]`);
+
+ if (input) {
+ input.click();
+ if (!withoutValue) {
+ const maxValue = parseFloat(input.getAttribute("max")) || 100;
+ const changeAmount = maxValue * (percentage / 100);
+ const newValue =
+ parseFloat(input.value) +
+ (deltaY > 0 ? -changeAmount : changeAmount);
+ input.value = Math.min(Math.max(newValue, 0), maxValue);
+ input.dispatchEvent(new Event("change"));
+ }
+ }
+ }
+
+ // Reset zoom when uploading a new image
+ fileInput = document.querySelector(
+ `${elemId} input[type="file"][accept="image/*"].svelte-116rqfv`
+ );
+ fileInput.addEventListener("click", resetZoom);
+
+ // Update the zoom level and pan position of the target element based on the values of the zoomLevel, panX and panY variables
+ function updateZoom(newZoomLevel, mouseX, mouseY) {
+ newZoomLevel = Math.max(0.5, Math.min(newZoomLevel, 15));
+ panX += mouseX - (mouseX * newZoomLevel) / zoomLevel;
+ panY += mouseY - (mouseY * newZoomLevel) / zoomLevel;
+
+ targetElement.style.transformOrigin = "0 0";
+ targetElement.style.transform = `translate(${panX}px, ${panY}px) scale(${newZoomLevel})`;
+
+ toggleOverlap("on");
+ return newZoomLevel;
+ }
+
+ // Change the zoom level based on user interaction
+ function changeZoomLevel(operation, e) {
+ if (e.shiftKey) {
+ e.preventDefault();
+
+ let zoomPosX, zoomPosY;
+ let delta = 0.2;
+ if (zoomLevel > 7) {
+ delta = 0.9;
+ } else if (zoomLevel > 2) {
+ delta = 0.6;
+ }
+
+ zoomPosX = e.clientX;
+ zoomPosY = e.clientY;
+
+ fullScreenMode = false;
+ zoomLevel = updateZoom(
+ zoomLevel + (operation === "+" ? delta : -delta),
+ zoomPosX - targetElement.getBoundingClientRect().left,
+ zoomPosY - targetElement.getBoundingClientRect().top
+ );
+ }
+ }
+
+ /**
+ * This function fits the target element to the screen by calculating
+ * the required scale and offsets. It also updates the global variables
+ * zoomLevel, panX, and panY to reflect the new state.
+ */
+
+ // Fullscreen mode
+ function fitToScreen() {
+ const canvas = document.querySelector(`${elemId} canvas[key="interface"]`);
+
+ if (!canvas) return;
+
+ if (fullScreenMode) {
+ resetZoom();
+ fullScreenMode = false;
+ return;
+ }
+
+ resetZoom();
+
+ // Get element and screen dimensions
+ const elementWidth = targetElement.offsetWidth;
+ const elementHeight = targetElement.offsetHeight;
+ const screenWidth = window.innerWidth;
+ const screenHeight = window.innerHeight;
+
+ // Get element's coordinates relative to the page
+ const elementRect = targetElement.getBoundingClientRect();
+ const elementY = elementRect.y;
+ const elementX = elementRect.x;
+
+ // Calculate scale and offsets
+ const scaleX = screenWidth / elementWidth;
+ const scaleY = screenHeight / elementHeight;
+ const scale = Math.min(scaleX, scaleY);
+
+ // Get the current transformOrigin
+ const computedStyle = window.getComputedStyle(targetElement);
+ const transformOrigin = computedStyle.transformOrigin;
+ const [originX, originY] = transformOrigin.split(" ");
+ const originXValue = parseFloat(originX);
+ const originYValue = parseFloat(originY);
+
+ // Calculate offsets with respect to the transformOrigin
+ const offsetX = (screenWidth - elementWidth * scale) / 2 - elementX - originXValue * (1 - scale);
+ const offsetY = (screenHeight - elementHeight * scale) / 2 - elementY - originYValue * (1 - scale);
+
+ // Apply scale and offsets to the element
+ targetElement.style.transform = `translate(${offsetX}px, ${offsetY}px) scale(${scale})`;
+
+ // Update global variables
+ zoomLevel = scale;
+ panX = offsetX;
+ panY = offsetY;
+
+ toggleOverlap("on");
+ fullScreenMode = true;
+ }
+
+ // Handle keydown events
+ function handleKeyDown(event) {
+ const hotkeyActions = {
+ [hotkeysConfig.resetZoom]: resetZoom,
+ [hotkeysConfig.overlap]: toggleOverlap,
+ [hotkeysConfig.fitToScreen]: fitToScreen,
+ // [hotkeysConfig.moveKey] : moveCanvas,
+ };
+
+ const action = hotkeyActions[event.code];
+ if (action) {
+ event.preventDefault();
+ action(event);
+ }
+
+ }
+
+ // Get Mouse position
+ function getMousePosition(e) {
+ mouseX = e.offsetX;
+ mouseY = e.offsetY;
+ }
+
+ targetElement.addEventListener("mousemove", getMousePosition);
+
+ // Handle events only inside the targetElement
+ let isKeyDownHandlerAttached = false;
+
+ function handleMouseMove() {
+ if (!isKeyDownHandlerAttached) {
+ document.addEventListener("keydown", handleKeyDown);
+ isKeyDownHandlerAttached = true;
+ }
+ }
+
+ function handleMouseLeave() {
+ if (isKeyDownHandlerAttached) {
+ document.removeEventListener("keydown", handleKeyDown);
+ isKeyDownHandlerAttached = false;
+ }
+ }
+
+ // Add mouse event handlers
+ targetElement.addEventListener("mousemove", handleMouseMove);
+ targetElement.addEventListener("mouseleave", handleMouseLeave);
+
+ // Reset zoom when click on another tab
+ elements.img2imgTabs.addEventListener("click", resetZoom);
+
+ targetElement.addEventListener("wheel", (e) => {
+ // change zoom level
+ const operation = e.deltaY > 0 ? "-" : "+";
+ changeZoomLevel(operation, e);
+
+ // Handle brush size adjustment with ctrl key pressed
+ if (e.ctrlKey || e.metaKey) {
+ e.preventDefault();
+
+ // Increase or decrease brush size based on scroll direction
+ adjustBrushSize(elemId, e.deltaY);
+ }
+ });
+
+ /**
+ * Handle the move event for pan functionality. Updates the panX and panY variables and applies the new transform to the target element.
+ * @param {MouseEvent} e - The mouse event.
+ */
+ function handleMoveKeyDown(e) {
+ if (e.code === hotkeysConfig.moveKey) {
+ if(!e.ctrlKey && !e.metaKey){
+ isMoving = true;
+ }
+ }
+ }
+
+ function handleMoveKeyUp(e) {
+ if (e.code === hotkeysConfig.moveKey) {
+ isMoving = false;
+ }
+ }
+
+ document.addEventListener("keydown", handleMoveKeyDown);
+ document.addEventListener("keyup", handleMoveKeyUp);
+
+ // Detect zoom level and update the pan speed.
+ function updatePanPosition(movementX, movementY) {
+ let panSpeed = 1.5;
+
+ if (zoomLevel > 8) {
+ panSpeed = 2.5;
+ }
+
+ panX = panX + movementX * panSpeed;
+ panY = panY + movementY * panSpeed;
+
+ targetElement.style.transform = `translate(${panX}px, ${panY}px) scale(${zoomLevel})`;
+ toggleOverlap("on");
+ }
+
+ function handleMoveByKey(e) {
+ if (isMoving) {
+ updatePanPosition(e.movementX, e.movementY);
+ targetElement.style.pointerEvents = "none";
+ } else {
+ targetElement.style.pointerEvents = "auto";
+ }
+ }
+
+ document.addEventListener("mousemove", handleMoveByKey);
+ }
+
+ applyZoomAndPan(elements.sketch, elementIDs.sketch);
+ applyZoomAndPan(elements.inpaint, elementIDs.inpaint);
+ applyZoomAndPan(elements.inpaintSketch, elementIDs.inpaintSketch);
+});
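For reference, the fit-to-screen arithmetic above can be written out compactly; this is a standalone Python sketch of the same formula, not webui code:

def fit_to_screen(elem_w, elem_h, elem_x, elem_y, screen_w, screen_h,
                  origin_x=0.0, origin_y=0.0):
    # Same math as fitToScreen() in zoom.js: scale by the limiting axis, then
    # offset so the element is centered, compensating for the transform origin.
    scale = min(screen_w / elem_w, screen_h / elem_h)
    offset_x = (screen_w - elem_w * scale) / 2 - elem_x - origin_x * (1 - scale)
    offset_y = (screen_h - elem_h * scale) / 2 - elem_y - origin_y * (1 - scale)
    return scale, offset_x, offset_y

# a 512x512 canvas at (100, 50) on a 1920x1080 viewport scales by 1080/512
print(fit_to_screen(512, 512, 100, 50, 1920, 1080))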
From 433c70b403e8fe948f5286a5a3fc686765e40b08 Mon Sep 17 00:00:00 2001
From: Danil Boldyrev
Date: Sun, 28 May 2023 01:31:23 +0300
Subject: [PATCH 057/168] Format with Prettier; add fullscreen-mode canvas
 expansion function
---
javascript/zoom.js | 109 ++++++++++++++++++++++++++++++++++++++-------
1 file changed, 93 insertions(+), 16 deletions(-)
diff --git a/javascript/zoom.js b/javascript/zoom.js
index 4958ddcf0..189b1d8ff 100644
--- a/javascript/zoom.js
+++ b/javascript/zoom.js
@@ -41,11 +41,22 @@ onUiLoaded(async () => {
targetElement.style.transform = `scale(${zoomLevel}) translate(${panX}px, ${panY}px)`;
- const canvas = document.querySelector(`${elemId} canvas[key="interface"]`);
+ const canvas = document.querySelector(
+ `${elemId} canvas[key="interface"]`
+ );
toggleOverlap("off");
fullScreenMode = false;
+ if (
+ canvas &&
+ parseFloat(canvas.style.width) > 865 &&
+ parseFloat(targetElement.style.width) > 865
+ ) {
+ fitToElement();
+ return;
+ }
+
targetElement.style.width = "";
if (canvas) {
targetElement.style.height = canvas.style.height;
@@ -137,24 +148,82 @@ onUiLoaded(async () => {
}
/**
- * This function fits the target element to the screen by calculating
- * the required scale and offsets. It also updates the global variables
- * zoomLevel, panX, and panY to reflect the new state.
- */
+ * This function fits the target element to the screen by calculating
+ * the required scale and offsets. It also updates the global variables
+ * zoomLevel, panX, and panY to reflect the new state.
+ */
+
+ function fitToElement() {
+ //Reset Zoom
+ targetElement.style.transform = `translate(${0}px, ${0}px) scale(${1})`;
+
+ // Get element and screen dimensions
+ const elementWidth = targetElement.offsetWidth;
+ const elementHeight = targetElement.offsetHeight;
+ const parentElement = targetElement.parentElement;
+ const screenWidth = parentElement.clientWidth;
+ const screenHeight = parentElement.clientHeight;
+
+ // Get element's coordinates relative to the parent element
+ const elementRect = targetElement.getBoundingClientRect();
+ const parentRect = parentElement.getBoundingClientRect();
+ const elementX = elementRect.x - parentRect.x;
+
+ // Calculate scale and offsets
+ const scaleX = screenWidth / elementWidth;
+ const scaleY = screenHeight / elementHeight;
+ const scale = Math.min(scaleX, scaleY);
+
+ const transformOrigin =
+ window.getComputedStyle(targetElement).transformOrigin;
+ const [originX, originY] = transformOrigin.split(" ");
+ const originXValue = parseFloat(originX);
+ const originYValue = parseFloat(originY);
+
+ const offsetX =
+ (screenWidth - elementWidth * scale) / 2 - originXValue * (1 - scale);
+ const offsetY =
+ (screenHeight - elementHeight * scale) / 2.5 -
+ originYValue * (1 - scale);
+
+ // Apply scale and offsets to the element
+ targetElement.style.transform = `translate(${offsetX}px, ${offsetY}px) scale(${scale})`;
+
+ // Update global variables
+ zoomLevel = scale;
+ panX = offsetX;
+ panY = offsetY;
+
+ fullScreenMode = false;
+ toggleOverlap("off");
+ }
+
+ /**
+ * This function fits the target element to the screen by calculating
+ * the required scale and offsets. It also updates the global variables
+ * zoomLevel, panX, and panY to reflect the new state.
+ */
// Fullscreen mode
function fitToScreen() {
- const canvas = document.querySelector(`${elemId} canvas[key="interface"]`);
+ const canvas = document.querySelector(
+ `${elemId} canvas[key="interface"]`
+ );
if (!canvas) return;
+ if (canvas.offsetWidth > 862) {
+ targetElement.style.width = canvas.offsetWidth + "px";
+ }
+
if (fullScreenMode) {
resetZoom();
fullScreenMode = false;
return;
}
- resetZoom();
+ //Reset Zoom
+ targetElement.style.transform = `translate(${0}px, ${0}px) scale(${1})`;
// Get element and screen dimensions
const elementWidth = targetElement.offsetWidth;
@@ -180,8 +249,14 @@ onUiLoaded(async () => {
const originYValue = parseFloat(originY);
// Calculate offsets with respect to the transformOrigin
- const offsetX = (screenWidth - elementWidth * scale) / 2 - elementX - originXValue * (1 - scale);
- const offsetY = (screenHeight - elementHeight * scale) / 2 - elementY - originYValue * (1 - scale);
+ const offsetX =
+ (screenWidth - elementWidth * scale) / 2 -
+ elementX -
+ originXValue * (1 - scale);
+ const offsetY =
+ (screenHeight - elementHeight * scale) / 2 -
+ elementY -
+ originYValue * (1 - scale);
// Apply scale and offsets to the element
targetElement.style.transform = `translate(${offsetX}px, ${offsetY}px) scale(${scale})`;
@@ -191,8 +266,8 @@ onUiLoaded(async () => {
panX = offsetX;
panY = offsetY;
- toggleOverlap("on");
fullScreenMode = true;
+ toggleOverlap("on");
}
// Handle keydown events
@@ -208,8 +283,7 @@ onUiLoaded(async () => {
if (action) {
event.preventDefault();
action(event);
- }
-
+ }
}
// Get Mouse position
@@ -243,6 +317,9 @@ onUiLoaded(async () => {
// Reset zoom when click on another tab
elements.img2imgTabs.addEventListener("click", resetZoom);
+ elements.img2imgTabs.addEventListener("click", () => {
+ targetElement.style.width = "";
+ });
targetElement.addEventListener("wheel", (e) => {
// change zoom level
@@ -259,12 +336,12 @@ onUiLoaded(async () => {
});
/**
- * Handle the move event for pan functionality. Updates the panX and panY variables and applies the new transform to the target element.
- * @param {MouseEvent} e - The mouse event.
- */
+ * Handle the move event for pan functionality. Updates the panX and panY variables and applies the new transform to the target element.
+ * @param {MouseEvent} e - The mouse event.
+ */
function handleMoveKeyDown(e) {
if (e.code === hotkeysConfig.moveKey) {
- if(!e.ctrlKey && !e.metaKey){
+ if (!e.ctrlKey && !e.metaKey) {
isMoving = true;
}
}
From 9e69009d1b35afb65c9e07c210149fa4f98fd57d Mon Sep 17 00:00:00 2001
From: Danil Boldyrev
Date: Sun, 28 May 2023 01:56:48 +0300
Subject: [PATCH 058/168] Improve zoom reset when toggling tabs
---
javascript/zoom.js | 35 ++++++++++++++++++++++++++++++++++-
1 file changed, 34 insertions(+), 1 deletion(-)
diff --git a/javascript/zoom.js b/javascript/zoom.js
index 189b1d8ff..0f1f9e096 100644
--- a/javascript/zoom.js
+++ b/javascript/zoom.js
@@ -1,4 +1,19 @@
// Main
+
+// Helper functions
+// Get active tab
+function getActiveTab(elements, all = false) {
+ const tabs = elements.img2imgTabs.querySelectorAll("button");
+
+ if (all) return tabs;
+
+ for (let tab of tabs) {
+ if (tab.classList.contains("selected")) {
+ return tab;
+ }
+ }
+}
+
onUiLoaded(async () => {
const hotkeysConfig = {
resetZoom: "KeyR",
@@ -33,12 +48,27 @@ onUiLoaded(async () => {
let [zoomLevel, panX, panY] = [1, 0, 0];
let fullScreenMode = false;
+ // In the course of research, it was found that the tag img is very harmful when zooming and creates white canvases. This hack allows you to almost never think about this problem, it has no effect on webui.
+ function fixCanvas() {
+ const activeTab = getActiveTab(elements).textContent.trim();
+
+ if (activeTab !== "img2img") {
+ const img = targetElement.querySelector(`${elemId} img`);
+
+ if (img && img.style.display !== "none") {
+ img.style.display = "none";
+ img.style.visibility = "hidden";
+ }
+ }
+ }
+
// Reset the zoom level and pan position of the target element to their initial values
function resetZoom() {
zoomLevel = 1;
panX = 0;
panY = 0;
+ fixCanvas();
targetElement.style.transform = `scale(${zoomLevel}) translate(${panX}px, ${panY}px)`;
const canvas = document.querySelector(
@@ -318,7 +348,10 @@ onUiLoaded(async () => {
// Reset zoom when click on another tab
elements.img2imgTabs.addEventListener("click", resetZoom);
elements.img2imgTabs.addEventListener("click", () => {
- targetElement.style.width = "";
+ // targetElement.style.width = "";
+ if (parseInt(targetElement.style.width) > 865) {
+ setTimeout(fitToElement, 0);
+ }
});
targetElement.addEventListener("wheel", (e) => {
From b957dcfece29c84ac0cfcd5a69475ff8684c531f Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sun, 28 May 2023 10:39:57 +0300
Subject: [PATCH 059/168] add quoting for infotext values that have a colon in
them
---
modules/generation_parameters_copypaste.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py
index 81aef5026..071bd9ead 100644
--- a/modules/generation_parameters_copypaste.py
+++ b/modules/generation_parameters_copypaste.py
@@ -35,7 +35,7 @@ def reset():
def quote(text):
- if ',' not in str(text) and '\n' not in str(text):
+ if ',' not in str(text) and '\n' not in str(text) and ':' not in str(text):
return text
return json.dumps(text, ensure_ascii=False)
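A minimal standalone check of what the tightened quote() does: values containing a comma, a newline, or now a colon are JSON-quoted so the infotext parser can split the parameter string safely.

import json

def quote(text):  # same logic as the patched function above
    if ',' not in str(text) and '\n' not in str(text) and ':' not in str(text):
        return text
    return json.dumps(text, ensure_ascii=False)

print(quote("DPM++ 2M Karras"))  # unchanged: DPM++ 2M Karras
print(quote("lora:detail:1.2"))  # now quoted: "lora:detail:1.2"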
From 10137589336199d6185ff3f255d611ff8f3edb88 Mon Sep 17 00:00:00 2001
From: Aarni Koskela
Date: Sun, 28 May 2023 14:41:44 +0300
Subject: [PATCH 060/168] Mark caption_image_overlay's textfont as deprecated;
fix #10778
---
modules/textual_inversion/image_embedding.py | 21 ++++++++++++--------
1 file changed, 13 insertions(+), 8 deletions(-)
diff --git a/modules/textual_inversion/image_embedding.py b/modules/textual_inversion/image_embedding.py
index 5858a55f5..81cff7bf1 100644
--- a/modules/textual_inversion/image_embedding.py
+++ b/modules/textual_inversion/image_embedding.py
@@ -1,8 +1,10 @@
import base64
import json
+import warnings
+
import numpy as np
import zlib
-from PIL import Image, ImageDraw, ImageFont
+from PIL import Image, ImageDraw
import torch
@@ -129,14 +131,17 @@ def extract_image_data_embed(image):
def caption_image_overlay(srcimage, title, footerLeft, footerMid, footerRight, textfont=None):
+ from modules.images import get_font
+ if textfont:
+ warnings.warn(
+ 'passing in a textfont to caption_image_overlay is deprecated and does nothing',
+ DeprecationWarning,
+ stacklevel=2,
+ )
from math import cos
image = srcimage.copy()
fontsize = 32
- if textfont is None:
- from modules.images import get_font
- textfont = get_font(fontsize)
-
factor = 1.5
gradient = Image.new('RGBA', (1, image.size[1]), color=(0, 0, 0, 0))
for y in range(image.size[1]):
@@ -147,12 +152,12 @@ def caption_image_overlay(srcimage, title, footerLeft, footerMid, footerRight, t
draw = ImageDraw.Draw(image)
- font = ImageFont.truetype(textfont, fontsize)
+ font = get_font(fontsize)
padding = 10
_, _, w, h = draw.textbbox((0, 0), title, font=font)
fontsize = min(int(fontsize * (((image.size[0]*0.75)-(padding*4))/w)), 72)
- font = ImageFont.truetype(textfont, fontsize)
+ font = get_font(fontsize)
_, _, w, h = draw.textbbox((0, 0), title, font=font)
draw.text((padding, padding), title, anchor='lt', font=font, fill=(255, 255, 255, 230))
@@ -163,7 +168,7 @@ def caption_image_overlay(srcimage, title, footerLeft, footerMid, footerRight, t
_, _, w, h = draw.textbbox((0, 0), footerRight, font=font)
fontsize_right = min(int(fontsize * (((image.size[0]/3)-(padding))/w)), 72)
- font = ImageFont.truetype(textfont, min(fontsize_left, fontsize_mid, fontsize_right))
+ font = get_font(min(fontsize_left, fontsize_mid, fontsize_right))
draw.text((padding, image.size[1]-padding), footerLeft, anchor='ls', font=font, fill=(255, 255, 255, 230))
draw.text((image.size[0]/2, image.size[1]-padding), footerMid, anchor='ms', font=font, fill=(255, 255, 255, 230))
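For context, a small self-contained sketch of the deprecation pattern used above: stacklevel=2 attributes the warning to the caller that still passes textfont, rather than to image_embedding.py itself.

import warnings

def caption(title, textfont=None):
    if textfont is not None:
        warnings.warn(
            'passing textfont is deprecated and does nothing',
            DeprecationWarning,
            stacklevel=2,  # point the warning at the caller's line
        )
    return f"caption: {title}"

warnings.simplefilter("always")
caption("hello", textfont="arial.ttf")  # emits DeprecationWarning here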
From 3d42411c3dbab7996584402d219c40753a285e64 Mon Sep 17 00:00:00 2001
From: Aarni Koskela
Date: Sun, 28 May 2023 15:36:43 +0300
Subject: [PATCH 061/168] Sort requirements files
---
requirements.txt | 38 ++++++++++++++++++-------------------
requirements_versions.txt | 40 +++++++++++++++++++--------------------
2 files changed, 39 insertions(+), 39 deletions(-)
diff --git a/requirements.txt b/requirements.txt
index a464447bc..2f70a0d32 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,32 +1,32 @@
-astunparse
-blendmodes
+GitPython
+Pillow
accelerate
+astunparse
basicsr
+blendmodes
+clean-fid
+einops
gfpgan
gradio==3.32.0
+inflection
+jsonmerge
+kornia
+lark
numpy
omegaconf
opencv-contrib-python
-requests
piexif
-Pillow
+psutil
pytorch_lightning==1.7.7
realesrgan
+requests
+resize-right
+rich
+safetensors
scikit-image>=0.19
timm==0.4.12
-transformers==4.25.1
-torch
-einops
-jsonmerge
-clean-fid
-resize-right
-torchdiffeq
-kornia
-lark
-inflection
-GitPython
-torchsde
-safetensors
-psutil
-rich
tomesd
+torch
+torchdiffeq
+torchsde
+transformers==4.25.1
diff --git a/requirements_versions.txt b/requirements_versions.txt
index 31b179a9e..43bced5a8 100644
--- a/requirements_versions.txt
+++ b/requirements_versions.txt
@@ -1,29 +1,29 @@
-blendmodes==2022
-transformers==4.25.1
+GitPython==3.1.30
+Pillow==9.5.0
accelerate==0.18.0
basicsr==1.4.2
+blendmodes==2022
+clean-fid==0.1.35
+einops==0.4.1
+fastapi==0.94.0
gfpgan==1.3.8
gradio==3.32.0
-numpy==1.23.5
-Pillow==9.5.0
-realesrgan==0.3.0
-torch
-omegaconf==2.2.3
-pytorch_lightning==1.9.4
-scikit-image==0.20.0
-timm==0.6.7
-piexif==1.1.3
-einops==0.4.1
+httpcore<=0.15
+inflection==0.5.1
jsonmerge==1.8.0
-clean-fid==0.1.35
-resize-right==0.0.2
-torchdiffeq==0.2.3
kornia==0.6.7
lark==1.1.2
-inflection==0.5.1
-GitPython==3.1.30
-torchsde==0.2.5
+numpy==1.23.5
+omegaconf==2.2.3
+piexif==1.1.3
+pytorch_lightning==1.9.4
+realesrgan==0.3.0
+resize-right==0.0.2
safetensors==0.3.1
-httpcore<=0.15
-fastapi==0.94.0
+scikit-image==0.20.0
+timm==0.6.7
tomesd==0.1.2
+torch
+torchdiffeq==0.2.3
+torchsde==0.2.5
+transformers==4.25.1
From cf07983a6e5aa2cf131a75e5b974c25c171a7126 Mon Sep 17 00:00:00 2001
From: Sakura-Luna <53183413+Sakura-Luna@users.noreply.github.com>
Date: Sun, 28 May 2023 20:42:19 +0800
Subject: [PATCH 062/168] Upgrade xformers
---
modules/launch_utils.py | 2 +-
webui.py | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/modules/launch_utils.py b/modules/launch_utils.py
index 35a52310b..6eb3ea116 100644
--- a/modules/launch_utils.py
+++ b/modules/launch_utils.py
@@ -223,7 +223,7 @@ def prepare_environment():
torch_command = os.environ.get('TORCH_COMMAND', f"pip install torch==2.0.1 torchvision==0.15.2 --extra-index-url {torch_index_url}")
requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt")
- xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.17')
+ xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.20')
gfpgan_package = os.environ.get('GFPGAN_PACKAGE', "https://github.com/TencentARC/GFPGAN/archive/8d2447a2d918f8eba5a4a01463fd48e45126a379.zip")
clip_package = os.environ.get('CLIP_PACKAGE', "https://github.com/openai/CLIP/archive/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1.zip")
openclip_package = os.environ.get('OPENCLIP_PACKAGE', "https://github.com/mlfoundations/open_clip/archive/bb6e834e9c70d9c27d0dc3ecedeebeaeb1ffad6b.zip")
diff --git a/webui.py b/webui.py
index 1e3ff0615..3df2cd1a7 100644
--- a/webui.py
+++ b/webui.py
@@ -135,7 +135,7 @@ there are reports of issues with training tab on the latest version.
Use --skip-version-check commandline argument to disable this check.
""".strip())
- expected_xformers_version = "0.0.17"
+ expected_xformers_version = "0.0.20"
if shared.xformers_available:
import xformers
From c1a5068ebea127412dfaaa6598795196a64200f1 Mon Sep 17 00:00:00 2001
From: Aarni Koskela
Date: Sun, 28 May 2023 15:40:04 +0300
Subject: [PATCH 063/168] Synchronize requirements/requirements_versions
* Remove deps not listed in _versions from requirements
* Omit versions when they don't match _versions
---
requirements.txt | 10 +++++-----
requirements_versions.txt | 1 +
2 files changed, 6 insertions(+), 5 deletions(-)
diff --git a/requirements.txt b/requirements.txt
index 2f70a0d32..3142085ea 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,7 +1,7 @@
GitPython
Pillow
accelerate
-astunparse
+
basicsr
blendmodes
clean-fid
@@ -14,17 +14,17 @@ kornia
lark
numpy
omegaconf
-opencv-contrib-python
+
piexif
psutil
-pytorch_lightning==1.7.7
+pytorch_lightning
realesrgan
requests
resize-right
-rich
+
safetensors
scikit-image>=0.19
-timm==0.4.12
+timm
tomesd
torch
torchdiffeq
diff --git a/requirements_versions.txt b/requirements_versions.txt
index 43bced5a8..f71b9d6c5 100644
--- a/requirements_versions.txt
+++ b/requirements_versions.txt
@@ -16,6 +16,7 @@ lark==1.1.2
numpy==1.23.5
omegaconf==2.2.3
piexif==1.1.3
+psutil~=5.9.5
pytorch_lightning==1.9.4
realesrgan==0.3.0
resize-right==0.0.2
From bae2fca52332cef3248d68a6d578eb67d9ced50f Mon Sep 17 00:00:00 2001
From: nyqui <67160376+nyqui@users.noreply.github.com>
Date: Sun, 28 May 2023 22:59:29 +0900
Subject: [PATCH 064/168] fix "hires. fix" prompt/neg sharing same labels as
txt2img_prompt/negative_prompt
---
modules/ui.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/modules/ui.py b/modules/ui.py
index e62182daa..361f596eb 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -505,10 +505,10 @@ def create_ui():
with FormRow(elem_id="txt2img_hires_fix_row4", variant="compact", visible=opts.hires_fix_show_prompts) as hr_prompts_container:
with gr.Column(scale=80):
with gr.Row():
- hr_prompt = gr.Textbox(label="Prompt", elem_id="hires_prompt", show_label=False, lines=3, placeholder="Prompt for hires fix pass.\nLeave empty to use the same prompt as in first pass.", elem_classes=["prompt"])
+ hr_prompt = gr.Textbox(label="Hires prompt", elem_id="hires_prompt", show_label=False, lines=3, placeholder="Prompt for hires fix pass.\nLeave empty to use the same prompt as in first pass.", elem_classes=["prompt"])
with gr.Column(scale=80):
with gr.Row():
- hr_negative_prompt = gr.Textbox(label="Negative prompt", elem_id="hires_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt for hires fix pass.\nLeave empty to use the same negative prompt as in first pass.", elem_classes=["prompt"])
+ hr_negative_prompt = gr.Textbox(label="Hires negative prompt", elem_id="hires_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt for hires fix pass.\nLeave empty to use the same negative prompt as in first pass.", elem_classes=["prompt"])
elif category == "batch":
if not opts.dimensions_and_batch_together:
From 905c3fe23ee7a07925dd1c652c82659d83062c88 Mon Sep 17 00:00:00 2001
From: yoinked
Date: Sun, 28 May 2023 08:39:00 -0700
Subject: [PATCH 065/168] typo
vidocard -> videocard
---
modules/shared.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/shared.py b/modules/shared.py
index 3099d1d2e..b9f280365 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -414,7 +414,7 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), {
"comma_padding_backtrack": OptionInfo(20, "Prompt word wrap length limit", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1}).info("in tokens - for texts shorter than specified, if they don't fit into 75 token limit, move them to the next 75 token chunk"),
"CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}).link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#clip-skip").info("ignore last layers of CLIP nrtwork; 1 ignores none, 2 ignores one layer"),
"upcast_attn": OptionInfo(False, "Upcast cross attention layer to float32"),
- "randn_source": OptionInfo("GPU", "Random number generator source.", gr.Radio, {"choices": ["GPU", "CPU"]}).info("changes seeds drastically; use CPU to produce the same picture across different vidocard vendors"),
+ "randn_source": OptionInfo("GPU", "Random number generator source.", gr.Radio, {"choices": ["GPU", "CPU"]}).info("changes seeds drastically; use CPU to produce the same picture across different videocard vendors"),
}))
options_templates.update(options_section(('optimizations', "Optimizations"), {
From f48bce5f688f5fd31732db90c3a5e157309e4141 Mon Sep 17 00:00:00 2001
From: Danil Boldyrev
Date: Sun, 28 May 2023 20:22:35 +0300
Subject: [PATCH 066/168] Corrected the code according to Code style
---
javascript/zoom.js | 802 +++++++++++++++++++++++----------------------
1 file changed, 404 insertions(+), 398 deletions(-)
diff --git a/javascript/zoom.js b/javascript/zoom.js
index 0f1f9e096..519b76f5f 100644
--- a/javascript/zoom.js
+++ b/javascript/zoom.js
@@ -3,420 +3,426 @@
// Helper functions
// Get active tab
function getActiveTab(elements, all = false) {
- const tabs = elements.img2imgTabs.querySelectorAll("button");
+ const tabs = elements.img2imgTabs.querySelectorAll("button");
- if (all) return tabs;
+ if (all) return tabs;
- for (let tab of tabs) {
- if (tab.classList.contains("selected")) {
- return tab;
+ for (let tab of tabs) {
+ if (tab.classList.contains("selected")) {
+ return tab;
+ }
}
- }
}
-onUiLoaded(async () => {
- const hotkeysConfig = {
- resetZoom: "KeyR",
- fitToScreen: "KeyS",
- moveKey: "KeyF",
- overlap: "KeyO",
- };
+onUiLoaded(async() => {
+ const hotkeysConfig = {
+ resetZoom: "KeyR",
+ fitToScreen: "KeyS",
+ moveKey: "KeyF",
+ overlap: "KeyO"
+ };
- let isMoving = false;
- let mouseX, mouseY;
+ let isMoving = false;
+ let mouseX, mouseY;
- const elementIDs = {
- sketch: "#img2img_sketch",
- inpaint: "#img2maskimg",
- inpaintSketch: "#inpaint_sketch",
- img2imgTabs: "#mode_img2img .tab-nav",
- };
+ const elementIDs = {
+ sketch: "#img2img_sketch",
+ inpaint: "#img2maskimg",
+ inpaintSketch: "#inpaint_sketch",
+ img2imgTabs: "#mode_img2img .tab-nav"
+ };
- async function getElements() {
- const elements = await Promise.all(
- Object.values(elementIDs).map((id) => document.querySelector(id))
- );
- return Object.fromEntries(
- Object.keys(elementIDs).map((key, index) => [key, elements[index]])
- );
- }
-
- const elements = await getElements();
-
- function applyZoomAndPan(targetElement, elemId) {
- targetElement.style.transformOrigin = "0 0";
- let [zoomLevel, panX, panY] = [1, 0, 0];
- let fullScreenMode = false;
-
- // In the course of research, it was found that the tag img is very harmful when zooming and creates white canvases. This hack allows you to almost never think about this problem, it has no effect on webui.
- function fixCanvas() {
- const activeTab = getActiveTab(elements).textContent.trim();
-
- if (activeTab !== "img2img") {
- const img = targetElement.querySelector(`${elemId} img`);
-
- if (img && img.style.display !== "none") {
- img.style.display = "none";
- img.style.visibility = "hidden";
- }
- }
- }
-
- // Reset the zoom level and pan position of the target element to their initial values
- function resetZoom() {
- zoomLevel = 1;
- panX = 0;
- panY = 0;
-
- fixCanvas();
- targetElement.style.transform = `scale(${zoomLevel}) translate(${panX}px, ${panY}px)`;
-
- const canvas = document.querySelector(
- `${elemId} canvas[key="interface"]`
- );
-
- toggleOverlap("off");
- fullScreenMode = false;
-
- if (
- canvas &&
- parseFloat(canvas.style.width) > 865 &&
- parseFloat(targetElement.style.width) > 865
- ) {
- fitToElement();
- return;
- }
-
- targetElement.style.width = "";
- if (canvas) {
- targetElement.style.height = canvas.style.height;
- }
- }
-
- // Toggle the zIndex of the target element between two values, allowing it to overlap or be overlapped by other elements
- function toggleOverlap(forced = "") {
- const zIndex1 = "0";
- const zIndex2 = "998";
-
- targetElement.style.zIndex =
- targetElement.style.zIndex !== zIndex2 ? zIndex2 : zIndex1;
-
- if (forced === "off") {
- targetElement.style.zIndex = zIndex1;
- } else if (forced === "on") {
- targetElement.style.zIndex = zIndex2;
- }
- }
-
- // Adjust the brush size based on the deltaY value from a mouse wheel event
- function adjustBrushSize(
- elemId,
- deltaY,
- withoutValue = false,
- percentage = 5
- ) {
- const input =
- document.querySelector(`${elemId} input[aria-label='Brush radius']`) ||
- document.querySelector(`${elemId} button[aria-label="Use brush"]`);
-
- if (input) {
- input.click();
- if (!withoutValue) {
- const maxValue = parseFloat(input.getAttribute("max")) || 100;
- const changeAmount = maxValue * (percentage / 100);
- const newValue =
- parseFloat(input.value) +
- (deltaY > 0 ? -changeAmount : changeAmount);
- input.value = Math.min(Math.max(newValue, 0), maxValue);
- input.dispatchEvent(new Event("change"));
- }
- }
- }
-
- // Reset zoom when uploading a new image
- fileInput = document.querySelector(
- `${elemId} input[type="file"][accept="image/*"].svelte-116rqfv`
- );
- fileInput.addEventListener("click", resetZoom);
-
- // Update the zoom level and pan position of the target element based on the values of the zoomLevel, panX and panY variables
- function updateZoom(newZoomLevel, mouseX, mouseY) {
- newZoomLevel = Math.max(0.5, Math.min(newZoomLevel, 15));
- panX += mouseX - (mouseX * newZoomLevel) / zoomLevel;
- panY += mouseY - (mouseY * newZoomLevel) / zoomLevel;
-
- targetElement.style.transformOrigin = "0 0";
- targetElement.style.transform = `translate(${panX}px, ${panY}px) scale(${newZoomLevel})`;
-
- toggleOverlap("on");
- return newZoomLevel;
- }
-
- // Change the zoom level based on user interaction
- function changeZoomLevel(operation, e) {
- if (e.shiftKey) {
- e.preventDefault();
-
- let zoomPosX, zoomPosY;
- let delta = 0.2;
- if (zoomLevel > 7) {
- delta = 0.9;
- } else if (zoomLevel > 2) {
- delta = 0.6;
- }
-
- zoomPosX = e.clientX;
- zoomPosY = e.clientY;
-
- fullScreenMode = false;
- zoomLevel = updateZoom(
- zoomLevel + (operation === "+" ? delta : -delta),
- zoomPosX - targetElement.getBoundingClientRect().left,
- zoomPosY - targetElement.getBoundingClientRect().top
+ async function getElements() {
+ const elements = await Promise.all(
+ Object.values(elementIDs).map(id => document.querySelector(id))
+ );
+ return Object.fromEntries(
+ Object.keys(elementIDs).map((key, index) => [key, elements[index]])
);
- }
}
- /**
- * This function fits the target element to the screen by calculating
- * the required scale and offsets. It also updates the global variables
- * zoomLevel, panX, and panY to reflect the new state.
- */
+ const elements = await getElements();
- function fitToElement() {
- //Reset Zoom
- targetElement.style.transform = `translate(${0}px, ${0}px) scale(${1})`;
+ function applyZoomAndPan(targetElement, elemId) {
+ targetElement.style.transformOrigin = "0 0";
+ let [zoomLevel, panX, panY] = [1, 0, 0];
+ let fullScreenMode = false;
- // Get element and screen dimensions
- const elementWidth = targetElement.offsetWidth;
- const elementHeight = targetElement.offsetHeight;
- const parentElement = targetElement.parentElement;
- const screenWidth = parentElement.clientWidth;
- const screenHeight = parentElement.clientHeight;
+ // In the course of research, it was found that the tag img is very harmful when zooming and creates white canvases. This hack allows you to almost never think about this problem, it has no effect on webui.
+ function fixCanvas() {
+ const activeTab = getActiveTab(elements).textContent.trim();
- // Get element's coordinates relative to the parent element
- const elementRect = targetElement.getBoundingClientRect();
- const parentRect = parentElement.getBoundingClientRect();
- const elementX = elementRect.x - parentRect.x;
+ if (activeTab !== "img2img") {
+ const img = targetElement.querySelector(`${elemId} img`);
- // Calculate scale and offsets
- const scaleX = screenWidth / elementWidth;
- const scaleY = screenHeight / elementHeight;
- const scale = Math.min(scaleX, scaleY);
-
- const transformOrigin =
- window.getComputedStyle(targetElement).transformOrigin;
- const [originX, originY] = transformOrigin.split(" ");
- const originXValue = parseFloat(originX);
- const originYValue = parseFloat(originY);
-
- const offsetX =
- (screenWidth - elementWidth * scale) / 2 - originXValue * (1 - scale);
- const offsetY =
- (screenHeight - elementHeight * scale) / 2.5 -
- originYValue * (1 - scale);
-
- // Apply scale and offsets to the element
- targetElement.style.transform = `translate(${offsetX}px, ${offsetY}px) scale(${scale})`;
-
- // Update global variables
- zoomLevel = scale;
- panX = offsetX;
- panY = offsetY;
-
- fullScreenMode = false;
- toggleOverlap("off");
- }
-
- /**
- * This function fits the target element to the screen by calculating
- * the required scale and offsets. It also updates the global variables
- * zoomLevel, panX, and panY to reflect the new state.
- */
-
- // Fullscreen mode
- function fitToScreen() {
- const canvas = document.querySelector(
- `${elemId} canvas[key="interface"]`
- );
-
- if (!canvas) return;
-
- if (canvas.offsetWidth > 862) {
- targetElement.style.width = canvas.offsetWidth + "px";
- }
-
- if (fullScreenMode) {
- resetZoom();
- fullScreenMode = false;
- return;
- }
-
- //Reset Zoom
- targetElement.style.transform = `translate(${0}px, ${0}px) scale(${1})`;
-
- // Get element and screen dimensions
- const elementWidth = targetElement.offsetWidth;
- const elementHeight = targetElement.offsetHeight;
- const screenWidth = window.innerWidth;
- const screenHeight = window.innerHeight;
-
- // Get element's coordinates relative to the page
- const elementRect = targetElement.getBoundingClientRect();
- const elementY = elementRect.y;
- const elementX = elementRect.x;
-
- // Calculate scale and offsets
- const scaleX = screenWidth / elementWidth;
- const scaleY = screenHeight / elementHeight;
- const scale = Math.min(scaleX, scaleY);
-
- // Get the current transformOrigin
- const computedStyle = window.getComputedStyle(targetElement);
- const transformOrigin = computedStyle.transformOrigin;
- const [originX, originY] = transformOrigin.split(" ");
- const originXValue = parseFloat(originX);
- const originYValue = parseFloat(originY);
-
- // Calculate offsets with respect to the transformOrigin
- const offsetX =
- (screenWidth - elementWidth * scale) / 2 -
- elementX -
- originXValue * (1 - scale);
- const offsetY =
- (screenHeight - elementHeight * scale) / 2 -
- elementY -
- originYValue * (1 - scale);
-
- // Apply scale and offsets to the element
- targetElement.style.transform = `translate(${offsetX}px, ${offsetY}px) scale(${scale})`;
-
- // Update global variables
- zoomLevel = scale;
- panX = offsetX;
- panY = offsetY;
-
- fullScreenMode = true;
- toggleOverlap("on");
- }
-
- // Handle keydown events
- function handleKeyDown(event) {
- const hotkeyActions = {
- [hotkeysConfig.resetZoom]: resetZoom,
- [hotkeysConfig.overlap]: toggleOverlap,
- [hotkeysConfig.fitToScreen]: fitToScreen,
- // [hotkeysConfig.moveKey] : moveCanvas,
- };
-
- const action = hotkeyActions[event.code];
- if (action) {
- event.preventDefault();
- action(event);
- }
- }
-
- // Get Mouse position
- function getMousePosition(e) {
- mouseX = e.offsetX;
- mouseY = e.offsetY;
- }
-
- targetElement.addEventListener("mousemove", getMousePosition);
-
- // Handle events only inside the targetElement
- let isKeyDownHandlerAttached = false;
-
- function handleMouseMove() {
- if (!isKeyDownHandlerAttached) {
- document.addEventListener("keydown", handleKeyDown);
- isKeyDownHandlerAttached = true;
- }
- }
-
- function handleMouseLeave() {
- if (isKeyDownHandlerAttached) {
- document.removeEventListener("keydown", handleKeyDown);
- isKeyDownHandlerAttached = false;
- }
- }
-
- // Add mouse event handlers
- targetElement.addEventListener("mousemove", handleMouseMove);
- targetElement.addEventListener("mouseleave", handleMouseLeave);
-
- // Reset zoom when click on another tab
- elements.img2imgTabs.addEventListener("click", resetZoom);
- elements.img2imgTabs.addEventListener("click", () => {
- // targetElement.style.width = "";
- if (parseInt(targetElement.style.width) > 865) {
- setTimeout(fitToElement, 0);
- }
- });
-
- targetElement.addEventListener("wheel", (e) => {
- // change zoom level
- const operation = e.deltaY > 0 ? "-" : "+";
- changeZoomLevel(operation, e);
-
- // Handle brush size adjustment with ctrl key pressed
- if (e.ctrlKey || e.metaKey) {
- e.preventDefault();
-
- // Increase or decrease brush size based on scroll direction
- adjustBrushSize(elemId, e.deltaY);
- }
- });
-
- /**
- * Handle the move event for pan functionality. Updates the panX and panY variables and applies the new transform to the target element.
- * @param {MouseEvent} e - The mouse event.
- */
- function handleMoveKeyDown(e) {
- if (e.code === hotkeysConfig.moveKey) {
- if (!e.ctrlKey && !e.metaKey) {
- isMoving = true;
+ if (img && img.style.display !== "none") {
+ img.style.display = "none";
+ img.style.visibility = "hidden";
+ }
+ }
}
- }
+
+ // Reset the zoom level and pan position of the target element to their initial values
+ function resetZoom() {
+ zoomLevel = 1;
+ panX = 0;
+ panY = 0;
+
+ fixCanvas();
+ targetElement.style.transform = `scale(${zoomLevel}) translate(${panX}px, ${panY}px)`;
+
+ const canvas = document.querySelector(
+ `${elemId} canvas[key="interface"]`
+ );
+
+ toggleOverlap("off");
+ fullScreenMode = false;
+
+ if (
+ canvas &&
+ parseFloat(canvas.style.width) > 865 &&
+ parseFloat(targetElement.style.width) > 865
+ ) {
+ fitToElement();
+ return;
+ }
+
+ targetElement.style.width = "";
+ if (canvas) {
+ targetElement.style.height = canvas.style.height;
+ }
+ }
+
+ // Toggle the zIndex of the target element between two values, allowing it to overlap or be overlapped by other elements
+ function toggleOverlap(forced = "") {
+ const zIndex1 = "0";
+ const zIndex2 = "998";
+
+ targetElement.style.zIndex =
+ targetElement.style.zIndex !== zIndex2 ? zIndex2 : zIndex1;
+
+ if (forced === "off") {
+ targetElement.style.zIndex = zIndex1;
+ } else if (forced === "on") {
+ targetElement.style.zIndex = zIndex2;
+ }
+ }
+
+ // Adjust the brush size based on the deltaY value from a mouse wheel event
+ function adjustBrushSize(
+ elemId,
+ deltaY,
+ withoutValue = false,
+ percentage = 5
+ ) {
+ const input =
+ document.querySelector(
+ `${elemId} input[aria-label='Brush radius']`
+ ) ||
+ document.querySelector(
+ `${elemId} button[aria-label="Use brush"]`
+ );
+
+ if (input) {
+ input.click();
+ if (!withoutValue) {
+ const maxValue =
+ parseFloat(input.getAttribute("max")) || 100;
+ const changeAmount = maxValue * (percentage / 100);
+ const newValue =
+ parseFloat(input.value) +
+ (deltaY > 0 ? -changeAmount : changeAmount);
+ input.value = Math.min(Math.max(newValue, 0), maxValue);
+ input.dispatchEvent(new Event("change"));
+ }
+ }
+ }
+
+ // Reset zoom when uploading a new image
+ const fileInput = document.querySelector(
+ `${elemId} input[type="file"][accept="image/*"].svelte-116rqfv`
+ );
+ fileInput.addEventListener("click", resetZoom);
+
+ // Update the zoom level and pan position of the target element based on the values of the zoomLevel, panX and panY variables
+ function updateZoom(newZoomLevel, mouseX, mouseY) {
+ newZoomLevel = Math.max(0.5, Math.min(newZoomLevel, 15));
+ panX += mouseX - (mouseX * newZoomLevel) / zoomLevel;
+ panY += mouseY - (mouseY * newZoomLevel) / zoomLevel;
+
+ targetElement.style.transformOrigin = "0 0";
+ targetElement.style.transform = `translate(${panX}px, ${panY}px) scale(${newZoomLevel})`;
+
+ toggleOverlap("on");
+ return newZoomLevel;
+ }
+
+ // Change the zoom level based on user interaction
+ function changeZoomLevel(operation, e) {
+ if (e.shiftKey) {
+ e.preventDefault();
+
+ let zoomPosX, zoomPosY;
+ let delta = 0.2;
+ if (zoomLevel > 7) {
+ delta = 0.9;
+ } else if (zoomLevel > 2) {
+ delta = 0.6;
+ }
+
+ zoomPosX = e.clientX;
+ zoomPosY = e.clientY;
+
+ fullScreenMode = false;
+ zoomLevel = updateZoom(
+ zoomLevel + (operation === "+" ? delta : -delta),
+ zoomPosX - targetElement.getBoundingClientRect().left,
+ zoomPosY - targetElement.getBoundingClientRect().top
+ );
+ }
+ }
+
+ /**
+ * This function fits the target element to the screen by calculating
+ * the required scale and offsets. It also updates the global variables
+ * zoomLevel, panX, and panY to reflect the new state.
+ */
+
+ function fitToElement() {
+ //Reset Zoom
+ targetElement.style.transform = `translate(${0}px, ${0}px) scale(${1})`;
+
+ // Get element and screen dimensions
+ const elementWidth = targetElement.offsetWidth;
+ const elementHeight = targetElement.offsetHeight;
+ const parentElement = targetElement.parentElement;
+ const screenWidth = parentElement.clientWidth;
+ const screenHeight = parentElement.clientHeight;
+
+ // Get element's coordinates relative to the parent element
+ const elementRect = targetElement.getBoundingClientRect();
+ const parentRect = parentElement.getBoundingClientRect();
+ const elementX = elementRect.x - parentRect.x;
+
+ // Calculate scale and offsets
+ const scaleX = screenWidth / elementWidth;
+ const scaleY = screenHeight / elementHeight;
+ const scale = Math.min(scaleX, scaleY);
+
+ const transformOrigin =
+ window.getComputedStyle(targetElement).transformOrigin;
+ const [originX, originY] = transformOrigin.split(" ");
+ const originXValue = parseFloat(originX);
+ const originYValue = parseFloat(originY);
+
+ const offsetX =
+ (screenWidth - elementWidth * scale) / 2 -
+ originXValue * (1 - scale);
+ const offsetY =
+ (screenHeight - elementHeight * scale) / 2.5 -
+ originYValue * (1 - scale);
+
+ // Apply scale and offsets to the element
+ targetElement.style.transform = `translate(${offsetX}px, ${offsetY}px) scale(${scale})`;
+
+ // Update global variables
+ zoomLevel = scale;
+ panX = offsetX;
+ panY = offsetY;
+
+ fullScreenMode = false;
+ toggleOverlap("off");
+ }
+
+ /**
+ * This function fits the target element to the screen by calculating
+ * the required scale and offsets. It also updates the global variables
+ * zoomLevel, panX, and panY to reflect the new state.
+ */
+
+ // Fullscreen mode
+ function fitToScreen() {
+ const canvas = document.querySelector(
+ `${elemId} canvas[key="interface"]`
+ );
+
+ if (!canvas) return;
+
+ if (canvas.offsetWidth > 862) {
+ targetElement.style.width = canvas.offsetWidth + "px";
+ }
+
+ if (fullScreenMode) {
+ resetZoom();
+ fullScreenMode = false;
+ return;
+ }
+
+ //Reset Zoom
+ targetElement.style.transform = `translate(${0}px, ${0}px) scale(${1})`;
+
+ // Get element and screen dimensions
+ const elementWidth = targetElement.offsetWidth;
+ const elementHeight = targetElement.offsetHeight;
+ const screenWidth = window.innerWidth;
+ const screenHeight = window.innerHeight;
+
+ // Get element's coordinates relative to the page
+ const elementRect = targetElement.getBoundingClientRect();
+ const elementY = elementRect.y;
+ const elementX = elementRect.x;
+
+ // Calculate scale and offsets
+ const scaleX = screenWidth / elementWidth;
+ const scaleY = screenHeight / elementHeight;
+ const scale = Math.min(scaleX, scaleY);
+
+ // Get the current transformOrigin
+ const computedStyle = window.getComputedStyle(targetElement);
+ const transformOrigin = computedStyle.transformOrigin;
+ const [originX, originY] = transformOrigin.split(" ");
+ const originXValue = parseFloat(originX);
+ const originYValue = parseFloat(originY);
+
+ // Calculate offsets with respect to the transformOrigin
+ const offsetX =
+ (screenWidth - elementWidth * scale) / 2 -
+ elementX -
+ originXValue * (1 - scale);
+ const offsetY =
+ (screenHeight - elementHeight * scale) / 2 -
+ elementY -
+ originYValue * (1 - scale);
+
+ // Apply scale and offsets to the element
+ targetElement.style.transform = `translate(${offsetX}px, ${offsetY}px) scale(${scale})`;
+
+ // Update global variables
+ zoomLevel = scale;
+ panX = offsetX;
+ panY = offsetY;
+
+ fullScreenMode = true;
+ toggleOverlap("on");
+ }
+
+ // Handle keydown events
+ function handleKeyDown(event) {
+ const hotkeyActions = {
+ [hotkeysConfig.resetZoom]: resetZoom,
+ [hotkeysConfig.overlap]: toggleOverlap,
+ [hotkeysConfig.fitToScreen]: fitToScreen
+ // [hotkeysConfig.moveKey] : moveCanvas,
+ };
+
+ const action = hotkeyActions[event.code];
+ if (action) {
+ event.preventDefault();
+ action(event);
+ }
+ }
+
+ // Get Mouse position
+ function getMousePosition(e) {
+ mouseX = e.offsetX;
+ mouseY = e.offsetY;
+ }
+
+ targetElement.addEventListener("mousemove", getMousePosition);
+
+ // Handle events only inside the targetElement
+ let isKeyDownHandlerAttached = false;
+
+ function handleMouseMove() {
+ if (!isKeyDownHandlerAttached) {
+ document.addEventListener("keydown", handleKeyDown);
+ isKeyDownHandlerAttached = true;
+ }
+ }
+
+ function handleMouseLeave() {
+ if (isKeyDownHandlerAttached) {
+ document.removeEventListener("keydown", handleKeyDown);
+ isKeyDownHandlerAttached = false;
+ }
+ }
+
+ // Add mouse event handlers
+ targetElement.addEventListener("mousemove", handleMouseMove);
+ targetElement.addEventListener("mouseleave", handleMouseLeave);
+
+ // Reset zoom when click on another tab
+ elements.img2imgTabs.addEventListener("click", resetZoom);
+ elements.img2imgTabs.addEventListener("click", () => {
+ // targetElement.style.width = "";
+ if (parseInt(targetElement.style.width) > 865) {
+ setTimeout(fitToElement, 0);
+ }
+ });
+
+ targetElement.addEventListener("wheel", e => {
+ // change zoom level
+ const operation = e.deltaY > 0 ? "-" : "+";
+ changeZoomLevel(operation, e);
+
+ // Handle brush size adjustment with ctrl key pressed
+ if (e.ctrlKey || e.metaKey) {
+ e.preventDefault();
+
+ // Increase or decrease brush size based on scroll direction
+ adjustBrushSize(elemId, e.deltaY);
+ }
+ });
+
+ /**
+ * Handle the move event for pan functionality. Updates the panX and panY variables and applies the new transform to the target element.
+ * @param {MouseEvent} e - The mouse event.
+ */
+ function handleMoveKeyDown(e) {
+ if (e.code === hotkeysConfig.moveKey) {
+ if (!e.ctrlKey && !e.metaKey) {
+ isMoving = true;
+ }
+ }
+ }
+
+ function handleMoveKeyUp(e) {
+ if (e.code === hotkeysConfig.moveKey) {
+ isMoving = false;
+ }
+ }
+
+ document.addEventListener("keydown", handleMoveKeyDown);
+ document.addEventListener("keyup", handleMoveKeyUp);
+
+ // Detect zoom level and update the pan speed.
+ function updatePanPosition(movementX, movementY) {
+ let panSpeed = 1.5;
+
+ if (zoomLevel > 8) {
+ panSpeed = 2.5;
+ }
+
+ panX = panX + movementX * panSpeed;
+ panY = panY + movementY * panSpeed;
+
+ targetElement.style.transform = `translate(${panX}px, ${panY}px) scale(${zoomLevel})`;
+ toggleOverlap("on");
+ }
+
+ function handleMoveByKey(e) {
+ if (isMoving) {
+ updatePanPosition(e.movementX, e.movementY);
+ targetElement.style.pointerEvents = "none";
+ } else {
+ targetElement.style.pointerEvents = "auto";
+ }
+ }
+
+ document.addEventListener("mousemove", handleMoveByKey);
}
- function handleMoveKeyUp(e) {
- if (e.code === hotkeysConfig.moveKey) {
- isMoving = false;
- }
- }
-
- document.addEventListener("keydown", handleMoveKeyDown);
- document.addEventListener("keyup", handleMoveKeyUp);
-
- // Detect zoom level and update the pan speed.
- function updatePanPosition(movementX, movementY) {
- let panSpeed = 1.5;
-
- if (zoomLevel > 8) {
- panSpeed = 2.5;
- }
-
- panX = panX + movementX * panSpeed;
- panY = panY + movementY * panSpeed;
-
- targetElement.style.transform = `translate(${panX}px, ${panY}px) scale(${zoomLevel})`;
- toggleOverlap("on");
- }
-
- function handleMoveByKey(e) {
- if (isMoving) {
- updatePanPosition(e.movementX, e.movementY);
- targetElement.style.pointerEvents = "none";
- } else {
- targetElement.style.pointerEvents = "auto";
- }
- }
-
- document.addEventListener("mousemove", handleMoveByKey);
- }
-
- applyZoomAndPan(elements.sketch, elementIDs.sketch);
- applyZoomAndPan(elements.inpaint, elementIDs.inpaint);
- applyZoomAndPan(elements.inpaintSketch, elementIDs.inpaintSketch);
+ applyZoomAndPan(elements.sketch, elementIDs.sketch);
+ applyZoomAndPan(elements.inpaint, elementIDs.inpaint);
+ applyZoomAndPan(elements.inpaintSketch, elementIDs.inpaintSketch);
});
From 4d7b63f489c9a4c2a5e872aa214052102987dac8 Mon Sep 17 00:00:00 2001
From: Danil Boldyrev
Date: Sun, 28 May 2023 20:32:21 +0300
Subject: [PATCH 067/168] changed the document to gradioApp()
---
javascript/zoom.js | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/javascript/zoom.js b/javascript/zoom.js
index 519b76f5f..4bbec34f7 100644
--- a/javascript/zoom.js
+++ b/javascript/zoom.js
@@ -71,7 +71,7 @@ onUiLoaded(async() => {
fixCanvas();
targetElement.style.transform = `scale(${zoomLevel}) translate(${panX}px, ${panY}px)`;
- const canvas = document.querySelector(
+ const canvas = gradioApp().querySelector(
`${elemId} canvas[key="interface"]`
);
@@ -116,10 +116,10 @@ onUiLoaded(async() => {
percentage = 5
) {
const input =
- document.querySelector(
+ gradioApp().querySelector(
`${elemId} input[aria-label='Brush radius']`
) ||
- document.querySelector(
+ gradioApp().querySelector(
`${elemId} button[aria-label="Use brush"]`
);
@@ -139,7 +139,7 @@ onUiLoaded(async() => {
}
// Reset zoom when uploading a new image
- const fileInput = document.querySelector(
+ const fileInput = gradioApp().querySelector(
`${elemId} input[type="file"][accept="image/*"].svelte-116rqfv`
);
fileInput.addEventListener("click", resetZoom);
@@ -242,7 +242,7 @@ onUiLoaded(async() => {
// Fullscreen mode
function fitToScreen() {
- const canvas = document.querySelector(
+ const canvas = gradioApp().querySelector(
`${elemId} canvas[key="interface"]`
);
@@ -419,7 +419,7 @@ onUiLoaded(async() => {
}
}
- document.addEventListener("mousemove", handleMoveByKey);
+ gradioApp().addEventListener("mousemove", handleMoveByKey);
}
applyZoomAndPan(elements.sketch, elementIDs.sketch);
From 3539885f0ecd75876191e5df4578fac0654b70c0 Mon Sep 17 00:00:00 2001
From: ramyma
Date: Sun, 28 May 2023 21:24:39 +0300
Subject: [PATCH 068/168] Round down scale destination dimensions to nearest
multiple of 8
---
modules/upscaler.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/modules/upscaler.py b/modules/upscaler.py
index 7b1046d64..3c82861d7 100644
--- a/modules/upscaler.py
+++ b/modules/upscaler.py
@@ -53,8 +53,8 @@ class Upscaler:
def upscale(self, img: PIL.Image, scale, selected_model: str = None):
self.scale = scale
- dest_w = int(img.width * scale)
- dest_h = int(img.height * scale)
+ dest_w = round((img.width * scale - 4) / 8) * 8
+ dest_h = round((img.height * scale - 4) / 8) * 8
for _ in range(3):
shape = (img.width, img.height)
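A quick sanity check of the new arithmetic (standalone sketch, not part of the patch): subtracting 4 before round() makes it act as a floor for anything that is not already a multiple of 8, so the destination size never rounds up past what the scale factor asked for.

    def snap_to_multiple_of_8(size: float) -> int:
        # same expression the patch uses for dest_w / dest_h
        return round((size - 4) / 8) * 8

    # upscaling 512x768 by 1.3: int() previously gave 665x998 (not divisible
    # by 8); the new expression gives 664x992
    print(snap_to_multiple_of_8(512 * 1.3), snap_to_multiple_of_8(768 * 1.3))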
From 4635f31270d1b5d41ad63815cb400b1ca73ea859 Mon Sep 17 00:00:00 2001
From: klimaleksus
Date: Mon, 29 May 2023 01:09:59 +0500
Subject: [PATCH 069/168] Refactor EmbeddingDatabase.register_embedding() to
allow unregistering
---
.../textual_inversion/textual_inversion.py | 25 ++++++++++++++-----
1 file changed, 19 insertions(+), 6 deletions(-)
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index d489ed1e0..cbf944989 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -120,16 +120,29 @@ class EmbeddingDatabase:
self.embedding_dirs.clear()
def register_embedding(self, embedding, model):
- self.word_embeddings[embedding.name] = embedding
-
- ids = model.cond_stage_model.tokenize([embedding.name])[0]
+ return self.register_embedding_by_name(embedding, model, embedding.name)
+ def register_embedding_by_name(self, embedding, model, name):
+ ids = model.cond_stage_model.tokenize([name])[0]
first_id = ids[0]
if first_id not in self.ids_lookup:
self.ids_lookup[first_id] = []
-
- self.ids_lookup[first_id] = sorted(self.ids_lookup[first_id] + [(ids, embedding)], key=lambda x: len(x[0]), reverse=True)
-
+ if name in self.word_embeddings:
+ # remove old one from the lookup list
+ lookup = [x for x in self.ids_lookup[first_id] if x[1].name!=name]
+ else:
+ lookup = self.ids_lookup[first_id]
+ if embedding is not None:
+ lookup += [(ids, embedding)]
+ self.ids_lookup[first_id] = sorted(lookup, key=lambda x: len(x[0]), reverse=True)
+ if embedding is None:
+ # unregister embedding with specified name
+ if name in self.word_embeddings:
+ del self.word_embeddings[name]
+ if len(self.ids_lookup[first_id])==0:
+ del self.ids_lookup[first_id]
+ return None
+ self.word_embeddings[name] = embedding
return embedding
def get_expected_shape(self):
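After the refactor, register_embedding() is a thin wrapper and passing embedding=None means "unregister this name". A hedged usage sketch, assuming db is an EmbeddingDatabase, sd_model a loaded model and emb a loaded embedding:

    db.register_embedding(emb, sd_model)                    # unchanged behaviour
    db.register_embedding_by_name(emb, sd_model, "alias")   # register under another name
    db.register_embedding_by_name(None, sd_model, "alias")  # unregister: removes "alias"
    # from word_embeddings and from the ids_lookup bucket of its first token id,
    # dropping the bucket entirely if it becomes empty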
From edd766e70ae3ea8039e7b9e24d55fbd26792ef10 Mon Sep 17 00:00:00 2001
From: w-e-w <40751091+w-e-w@users.noreply.github.com>
Date: Mon, 29 May 2023 05:40:38 +0900
Subject: [PATCH 070/168] fix xyz clip
---
scripts/xyz_grid.py | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py
index 7821cc655..00a81060a 100644
--- a/scripts/xyz_grid.py
+++ b/scripts/xyz_grid.py
@@ -94,7 +94,10 @@ def confirm_checkpoints(p, xs):
def apply_clip_skip(p, x, xs):
- opts.data["CLIP_stop_at_last_layers"] = x
+ if opts.data["CLIP_stop_at_last_layers"] != x:
+ opts.data["CLIP_stop_at_last_layers"] = x
+ p.cached_c = [None, None]
+ p.cached_uc = [None, None]
def apply_upscale_latent_space(p, x, xs):
From 018f77f0b859937656e98300ea7a416cff92f22a Mon Sep 17 00:00:00 2001
From: Aarni Koskela
Date: Mon, 29 May 2023 00:58:52 +0300
Subject: [PATCH 071/168] Upgrade transformers
Refs https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/9035#issuecomment-1485461039
---
requirements_versions.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements_versions.txt b/requirements_versions.txt
index 31b179a9e..b2a3a3276 100644
--- a/requirements_versions.txt
+++ b/requirements_versions.txt
@@ -1,5 +1,5 @@
blendmodes==2022
-transformers==4.25.1
+transformers==4.29.2
accelerate==0.18.0
basicsr==1.4.2
gfpgan==1.3.8
From 2aca613a61ac98bb008f6dca2594295743593804 Mon Sep 17 00:00:00 2001
From: w-e-w <40751091+w-e-w@users.noreply.github.com>
Date: Mon, 29 May 2023 07:30:32 +0900
Subject: [PATCH 072/168] fix disable png info
---
modules/images.py | 9 ++++++---
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/modules/images.py b/modules/images.py
index e21e554cf..6f91c52c7 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -511,9 +511,12 @@ def save_image_with_geninfo(image, geninfo, filename, extension=None, existing_p
existing_pnginfo['parameters'] = geninfo
if extension.lower() == '.png':
- pnginfo_data = PngImagePlugin.PngInfo()
- for k, v in (existing_pnginfo or {}).items():
- pnginfo_data.add_text(k, str(v))
+ if opts.enable_pnginfo:
+ pnginfo_data = PngImagePlugin.PngInfo()
+ for k, v in (existing_pnginfo or {}).items():
+ pnginfo_data.add_text(k, str(v))
+ else:
+ pnginfo_data = None
image.save(filename, format=image_format, quality=opts.jpeg_quality, pnginfo=pnginfo_data)
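The fix leans on PIL's behaviour: passing pnginfo=None to Image.save() simply writes no text chunks. A minimal sketch of the two branches (file names made up):

    from PIL import Image, PngImagePlugin

    img = Image.new("RGB", (64, 64))

    info = PngImagePlugin.PngInfo()
    info.add_text("parameters", "prompt, steps, seed, ...")
    img.save("with_info.png", pnginfo=info)     # generation parameters embedded
    img.save("without_info.png", pnginfo=None)  # what the patch does when
                                                # opts.enable_pnginfo is off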
From 7dfee8a3bd7cb8b1d89ceeb971e33be5f4067453 Mon Sep 17 00:00:00 2001
From: w-e-w <40751091+w-e-w@users.noreply.github.com>
Date: Mon, 29 May 2023 11:01:58 +0900
Subject: [PATCH 073/168] clarify issue template
---
.github/ISSUE_TEMPLATE/bug_report.yml | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml
index 3a8b99535..9cc16d017 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.yml
+++ b/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -43,8 +43,8 @@ body:
- type: input
id: commit
attributes:
- label: Commit where the problem happens
- description: Which commit are you running ? (Do not write *Latest version/repo/commit*, as this means nothing and will have changed by the time we read your issue. Rather, copy the **Commit** link at the bottom of the UI, or from the cmd/terminal if you can't launch it.)
+ label: Version or Commit where the problem happens
+ description: "Which webui version or commit are you running ? (Do not write *Latest Version/repo/commit*, as this means nothing and will have changed by the time we read your issue. Rather, copy the **Version: v1.2.3** link at the bottom of the UI, or from the cmd/terminal if you can't launch it.)"
validations:
required: true
- type: dropdown
From df59b74cedf6db12419de43f3b45c71cf457896e Mon Sep 17 00:00:00 2001
From: missionfloyd
Date: Sun, 28 May 2023 20:42:47 -0600
Subject: [PATCH 074/168] Only poll gamepads while connected
---
javascript/imageviewerGamepad.js | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/javascript/imageviewerGamepad.js b/javascript/imageviewerGamepad.js
index 31d226dee..0510c02a2 100644
--- a/javascript/imageviewerGamepad.js
+++ b/javascript/imageviewerGamepad.js
@@ -1,7 +1,9 @@
+let gamepads = [];
+
window.addEventListener('gamepadconnected', (e) => {
const index = e.gamepad.index;
let isWaiting = false;
- setInterval(async() => {
+ gamepads[index] = setInterval(async() => {
if (!opts.js_modal_lightbox_gamepad || isWaiting) return;
const gamepad = navigator.getGamepads()[index];
const xValue = gamepad.axes[0];
@@ -22,6 +24,7 @@ window.addEventListener('gamepadconnected', (e) => {
isWaiting = false;
}
}, 10);
+ window.addEventListener('gamepaddisconnected', (e) => clearInterval(gamepads[e.gamepad.index]))
});
/*
From 679e8738759b9aa2678a40d518bf6d67915e98fe Mon Sep 17 00:00:00 2001
From: missionfloyd
Date: Sun, 28 May 2023 20:49:46 -0600
Subject: [PATCH 075/168] Update imageviewerGamepad.js
---
javascript/imageviewerGamepad.js | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/javascript/imageviewerGamepad.js b/javascript/imageviewerGamepad.js
index 0510c02a2..45cc4cbf9 100644
--- a/javascript/imageviewerGamepad.js
+++ b/javascript/imageviewerGamepad.js
@@ -24,7 +24,7 @@ window.addEventListener('gamepadconnected', (e) => {
isWaiting = false;
}
}, 10);
- window.addEventListener('gamepaddisconnected', (e) => clearInterval(gamepads[e.gamepad.index]))
+ window.addEventListener('gamepaddisconnected', (e) => clearInterval(gamepads[e.gamepad.index]));
});
/*
From 77a10c62c9a44a27e8030eff6e5b3fb182be55ae Mon Sep 17 00:00:00 2001
From: Aarni Koskela
Date: Mon, 29 May 2023 00:41:12 +0300
Subject: [PATCH 076/168] Patch GitPython to not use leaky persistent processes
---
modules/extensions.py | 9 ++++-----
modules/gitpython_hack.py | 42 +++++++++++++++++++++++++++++++++++++++
modules/ui_extensions.py | 6 ++++++
3 files changed, 52 insertions(+), 5 deletions(-)
create mode 100644 modules/gitpython_hack.py
diff --git a/modules/extensions.py b/modules/extensions.py
index 624832a00..fb7250e6a 100644
--- a/modules/extensions.py
+++ b/modules/extensions.py
@@ -3,9 +3,8 @@ import sys
import threading
import traceback
-import git
-
from modules import shared
+from modules.gitpython_hack import Repo
from modules.paths_internal import extensions_dir, extensions_builtin_dir, script_path # noqa: F401
extensions = []
@@ -54,7 +53,7 @@ class Extension:
repo = None
try:
if os.path.exists(os.path.join(self.path, ".git")):
- repo = git.Repo(self.path)
+ repo = Repo(self.path)
except Exception:
print(f"Error reading github repository info from {self.path}:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
@@ -94,7 +93,7 @@ class Extension:
return res
def check_updates(self):
- repo = git.Repo(self.path)
+ repo = Repo(self.path)
for fetch in repo.remote().fetch(dry_run=True):
if fetch.flags != fetch.HEAD_UPTODATE:
self.can_update = True
@@ -116,7 +115,7 @@ class Extension:
self.status = "latest"
def fetch_and_reset_hard(self, commit='origin'):
- repo = git.Repo(self.path)
+ repo = Repo(self.path)
# Fix: `error: Your local changes to the following files would be overwritten by merge`,
# because WSL2 Docker set 755 file permissions instead of 644, this results to the error.
repo.git.fetch(all=True)
diff --git a/modules/gitpython_hack.py b/modules/gitpython_hack.py
new file mode 100644
index 000000000..e537c1df9
--- /dev/null
+++ b/modules/gitpython_hack.py
@@ -0,0 +1,42 @@
+from __future__ import annotations
+
+import io
+import subprocess
+
+import git
+
+
+class Git(git.Git):
+ """
+ Git subclassed to never use persistent processes.
+ """
+
+ def _get_persistent_cmd(self, attr_name, cmd_name, *args, **kwargs):
+ raise NotImplementedError(f"Refusing to use persistent process: {attr_name} ({cmd_name} {args} {kwargs})")
+
+ def get_object_header(self, ref: str | bytes) -> tuple[str, str, int]:
+ ret = subprocess.check_output(
+ [self.GIT_PYTHON_GIT_EXECUTABLE, "cat-file", "--batch-check"],
+ input=self._prepare_ref(ref),
+ cwd=self._working_dir,
+ timeout=2,
+ )
+ return self._parse_object_header(ret)
+
+ def stream_object_data(self, ref: str) -> tuple[str, str, int, "Git.CatFileContentStream"]:
+ # Not really streaming, per se; this buffers the entire object in memory.
+ # Shouldn't be a problem for our use case, since we're only using this for
+ # object headers (commit objects).
+ ret = subprocess.check_output(
+ [self.GIT_PYTHON_GIT_EXECUTABLE, "cat-file", "--batch"],
+ input=self._prepare_ref(ref),
+ cwd=self._working_dir,
+ timeout=30,
+ )
+ bio = io.BytesIO(ret)
+ hexsha, typename, size = self._parse_object_header(bio.readline())
+ return (hexsha, typename, size, self.CatFileContentStream(size, bio))
+
+
+class Repo(git.Repo):
+ GitCommandWrapperType = Git
diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py
index 515ec2622..1c3f5ed93 100644
--- a/modules/ui_extensions.py
+++ b/modules/ui_extensions.py
@@ -490,8 +490,14 @@ def refresh_available_extensions_from_data(hide_tags, sort_column, filter_text="
def preload_extensions_git_metadata():
+ t0 = time.time()
for extension in extensions.extensions:
extension.read_info_from_repo()
+ print(
+ f"preload_extensions_git_metadata for "
+ f"{len(extensions.extensions)} extensions took "
+ f"{time.time() - t0:.2f}s"
+ )
def create_ui():
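The subclass replaces GitPython's long-lived git cat-file --batch / --batch-check processes with one-shot subprocess calls, so nothing keeps running after extension metadata has been read. Call sites don't change; a sketch with a hypothetical extension path:

    from modules.gitpython_hack import Repo

    repo = Repo("extensions/some-extension")        # hypothetical path
    commit = repo.head.commit                       # object lookups go through
    print(commit.hexsha[:8], commit.authored_date)  # one-shot cat-file calls
    # Anything that would have spawned a persistent cat-file process now raises
    # NotImplementedError instead (see Git._get_persistent_cmd above).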
From 00dfe27f59727407c5b408a80ff2a262934df495 Mon Sep 17 00:00:00 2001
From: Aarni Koskela
Date: Mon, 29 May 2023 08:54:13 +0300
Subject: [PATCH 077/168] Add & use modules.errors.print_error where currently
printing exception info by hand
---
extensions-builtin/LDSR/scripts/ldsr_model.py | 7 ++--
.../ScuNET/scripts/scunet_model.py | 6 ++--
modules/api/api.py | 7 ++--
modules/call_queue.py | 22 +++++-------
modules/codeformer_model.py | 10 +++---
modules/config_states.py | 12 +++----
modules/errors.py | 16 +++++++++
modules/extensions.py | 10 +++---
modules/gfpgan_model.py | 6 ++--
modules/hypernetworks/hypernetwork.py | 14 +++-----
modules/images.py | 9 ++---
modules/interrogate.py | 5 ++-
modules/launch_utils.py | 7 ++--
modules/localization.py | 6 ++--
modules/processing.py | 2 +-
modules/realesrgan_model.py | 14 +++-----
modules/safe.py | 26 +++++++-------
modules/script_callbacks.py | 9 +++--
modules/script_loading.py | 7 ++--
modules/scripts.py | 35 +++++++------------
modules/sd_hijack_optimizations.py | 6 ++--
.../textual_inversion/textual_inversion.py | 9 ++---
modules/ui.py | 10 +++---
modules/ui_extensions.py | 9 ++---
scripts/prompts_from_file.py | 6 ++--
25 files changed, 117 insertions(+), 153 deletions(-)
diff --git a/extensions-builtin/LDSR/scripts/ldsr_model.py b/extensions-builtin/LDSR/scripts/ldsr_model.py
index c4da79f31..95f1669d1 100644
--- a/extensions-builtin/LDSR/scripts/ldsr_model.py
+++ b/extensions-builtin/LDSR/scripts/ldsr_model.py
@@ -1,9 +1,8 @@
import os
-import sys
-import traceback
from basicsr.utils.download_util import load_file_from_url
+from modules.errors import print_error
from modules.upscaler import Upscaler, UpscalerData
from ldsr_model_arch import LDSR
from modules import shared, script_callbacks
@@ -51,10 +50,8 @@ class UpscalerLDSR(Upscaler):
try:
return LDSR(model, yaml)
-
except Exception:
- print("Error importing LDSR:", file=sys.stderr)
- print(traceback.format_exc(), file=sys.stderr)
+ print_error("Error importing LDSR", exc_info=True)
return None
def do_upscale(self, img, path):
diff --git a/extensions-builtin/ScuNET/scripts/scunet_model.py b/extensions-builtin/ScuNET/scripts/scunet_model.py
index 45d9297b6..dd1b822ed 100644
--- a/extensions-builtin/ScuNET/scripts/scunet_model.py
+++ b/extensions-builtin/ScuNET/scripts/scunet_model.py
@@ -1,6 +1,5 @@
import os.path
import sys
-import traceback
import PIL.Image
import numpy as np
@@ -12,6 +11,8 @@ from basicsr.utils.download_util import load_file_from_url
import modules.upscaler
from modules import devices, modelloader, script_callbacks
from scunet_model_arch import SCUNet as net
+
+from modules.errors import print_error
from modules.shared import opts
@@ -38,8 +39,7 @@ class UpscalerScuNET(modules.upscaler.Upscaler):
scaler_data = modules.upscaler.UpscalerData(name, file, self, 4)
scalers.append(scaler_data)
except Exception:
- print(f"Error loading ScuNET model: {file}", file=sys.stderr)
- print(traceback.format_exc(), file=sys.stderr)
+ print_error(f"Error loading ScuNET model: {file}", exc_info=True)
if add_model2:
scaler_data2 = modules.upscaler.UpscalerData(self.model_name2, self.model_url2, self)
scalers.append(scaler_data2)
diff --git a/modules/api/api.py b/modules/api/api.py
index 6a4568619..79ce9228f 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -16,6 +16,7 @@ from secrets import compare_digest
import modules.shared as shared
from modules import sd_samplers, deepbooru, sd_hijack, images, scripts, ui, postprocessing
from modules.api import models
+from modules.errors import print_error
from modules.shared import opts
from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images
from modules.textual_inversion.textual_inversion import create_embedding, train_embedding
@@ -108,7 +109,6 @@ def api_middleware(app: FastAPI):
from rich.console import Console
console = Console()
except Exception:
- import traceback
rich_available = False
@app.middleware("http")
@@ -139,11 +139,12 @@ def api_middleware(app: FastAPI):
"errors": str(e),
}
if not isinstance(e, HTTPException): # do not print backtrace on known httpexceptions
- print(f"API error: {request.method}: {request.url} {err}")
+ message = f"API error: {request.method}: {request.url} {err}"
if rich_available:
+ print(message)
console.print_exception(show_locals=True, max_frames=2, extra_lines=1, suppress=[anyio, starlette], word_wrap=False, width=min([console.width, 200]))
else:
- traceback.print_exc()
+ print_error(message, exc_info=True)
return JSONResponse(status_code=vars(e).get('status_code', 500), content=jsonable_encoder(err))
@app.middleware("http")
diff --git a/modules/call_queue.py b/modules/call_queue.py
index 447bb7644..dba2a9b4d 100644
--- a/modules/call_queue.py
+++ b/modules/call_queue.py
@@ -1,10 +1,9 @@
import html
-import sys
import threading
-import traceback
import time
from modules import shared, progress
+from modules.errors import print_error
queue_lock = threading.Lock()
@@ -56,16 +55,14 @@ def wrap_gradio_call(func, extra_outputs=None, add_stats=False):
try:
res = list(func(*args, **kwargs))
except Exception as e:
- # When printing out our debug argument list, do not print out more than a MB of text
- max_debug_str_len = 131072 # (1024*1024)/8
-
- print("Error completing request", file=sys.stderr)
- argStr = f"Arguments: {args} {kwargs}"
- print(argStr[:max_debug_str_len], file=sys.stderr)
- if len(argStr) > max_debug_str_len:
- print(f"(Argument list truncated at {max_debug_str_len}/{len(argStr)} characters)", file=sys.stderr)
-
- print(traceback.format_exc(), file=sys.stderr)
+ # When printing out our debug argument list,
+ # do not print out more than a 100 KB of text
+ max_debug_str_len = 131072
+ message = "Error completing request"
+ arg_str = f"Arguments: {args} {kwargs}"[:max_debug_str_len]
+ if len(arg_str) > max_debug_str_len:
+ arg_str += f" (Argument list truncated at {max_debug_str_len}/{len(arg_str)} characters)"
+ print_error(f"{message}\n{arg_str}", exc_info=True)
shared.state.job = ""
shared.state.job_count = 0
@@ -108,4 +105,3 @@ def wrap_gradio_call(func, extra_outputs=None, add_stats=False):
return tuple(res)
return f
-
diff --git a/modules/codeformer_model.py b/modules/codeformer_model.py
index ececdbae4..76143e9f2 100644
--- a/modules/codeformer_model.py
+++ b/modules/codeformer_model.py
@@ -1,6 +1,4 @@
import os
-import sys
-import traceback
import cv2
import torch
@@ -8,6 +6,7 @@ import torch
import modules.face_restoration
import modules.shared
from modules import shared, devices, modelloader
+from modules.errors import print_error
from modules.paths import models_path
# codeformer people made a choice to include modified basicsr library to their project which makes
@@ -105,8 +104,8 @@ def setup_model(dirname):
restored_face = tensor2img(output, rgb2bgr=True, min_max=(-1, 1))
del output
torch.cuda.empty_cache()
- except Exception as error:
- print(f'\tFailed inference for CodeFormer: {error}', file=sys.stderr)
+ except Exception:
+ print_error('Failed inference for CodeFormer', exc_info=True)
restored_face = tensor2img(cropped_face_t, rgb2bgr=True, min_max=(-1, 1))
restored_face = restored_face.astype('uint8')
@@ -135,7 +134,6 @@ def setup_model(dirname):
shared.face_restorers.append(codeformer)
except Exception:
- print("Error setting up CodeFormer:", file=sys.stderr)
- print(traceback.format_exc(), file=sys.stderr)
+ print_error("Error setting up CodeFormer", exc_info=True)
# sys.path = stored_sys_path
diff --git a/modules/config_states.py b/modules/config_states.py
index db65bcdbf..faeaf28bd 100644
--- a/modules/config_states.py
+++ b/modules/config_states.py
@@ -3,8 +3,6 @@ Supports saving and restoring webui and extensions from a known working set of c
"""
import os
-import sys
-import traceback
import json
import time
import tqdm
@@ -14,6 +12,7 @@ from collections import OrderedDict
import git
from modules import shared, extensions
+from modules.errors import print_error
from modules.paths_internal import script_path, config_states_dir
@@ -53,8 +52,7 @@ def get_webui_config():
if os.path.exists(os.path.join(script_path, ".git")):
webui_repo = git.Repo(script_path)
except Exception:
- print(f"Error reading webui git info from {script_path}:", file=sys.stderr)
- print(traceback.format_exc(), file=sys.stderr)
+ print_error(f"Error reading webui git info from {script_path}", exc_info=True)
webui_remote = None
webui_commit_hash = None
@@ -134,8 +132,7 @@ def restore_webui_config(config):
if os.path.exists(os.path.join(script_path, ".git")):
webui_repo = git.Repo(script_path)
except Exception:
- print(f"Error reading webui git info from {script_path}:", file=sys.stderr)
- print(traceback.format_exc(), file=sys.stderr)
+ print_error(f"Error reading webui git info from {script_path}", exc_info=True)
return
try:
@@ -143,8 +140,7 @@ def restore_webui_config(config):
webui_repo.git.reset(webui_commit_hash, hard=True)
print(f"* Restored webui to commit {webui_commit_hash}.")
except Exception:
- print(f"Error restoring webui to commit {webui_commit_hash}:", file=sys.stderr)
- print(traceback.format_exc(), file=sys.stderr)
+ print_error(f"Error restoring webui to commit{webui_commit_hash}")
def restore_extension_config(config):
diff --git a/modules/errors.py b/modules/errors.py
index da4694f85..41d8dc933 100644
--- a/modules/errors.py
+++ b/modules/errors.py
@@ -1,7 +1,23 @@
import sys
+import textwrap
import traceback
+def print_error(
+ message: str,
+ *,
+ exc_info: bool = False,
+) -> None:
+ """
+ Print an error message to stderr, with optional traceback.
+ """
+ for line in message.splitlines():
+ print("***", line, file=sys.stderr)
+ if exc_info:
+ print(textwrap.indent(traceback.format_exc(), " "), file=sys.stderr)
+ print("---")
+
+
def print_error_explanation(message):
lines = message.strip().split("\n")
max_len = max([len(x) for x in lines])
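The rest of the patch is mechanical: each hand-rolled pair of print(..., file=sys.stderr) and traceback.format_exc() becomes a single call to the helper above. Roughly what it produces (message text made up):

    from modules.errors import print_error

    try:
        1 / 0
    except Exception:
        print_error("Error doing the thing", exc_info=True)
    # stderr gets "*** Error doing the thing" followed by the indented
    # traceback, and then a "---" separator is printed.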
diff --git a/modules/extensions.py b/modules/extensions.py
index 624832a00..369d25842 100644
--- a/modules/extensions.py
+++ b/modules/extensions.py
@@ -1,11 +1,10 @@
import os
-import sys
import threading
-import traceback
import git
from modules import shared
+from modules.errors import print_error
from modules.paths_internal import extensions_dir, extensions_builtin_dir, script_path # noqa: F401
extensions = []
@@ -56,8 +55,7 @@ class Extension:
if os.path.exists(os.path.join(self.path, ".git")):
repo = git.Repo(self.path)
except Exception:
- print(f"Error reading github repository info from {self.path}:", file=sys.stderr)
- print(traceback.format_exc(), file=sys.stderr)
+ print_error(f"Error reading github repository info from {self.path}", exc_info=True)
if repo is None or repo.bare:
self.remote = None
@@ -72,8 +70,8 @@ class Extension:
self.commit_hash = commit.hexsha
self.version = self.commit_hash[:8]
- except Exception as ex:
- print(f"Failed reading extension data from Git repository ({self.name}): {ex}", file=sys.stderr)
+ except Exception:
+ print_error(f"Failed reading extension data from Git repository ({self.name})", exc_info=True)
self.remote = None
self.have_info_from_repo = True
diff --git a/modules/gfpgan_model.py b/modules/gfpgan_model.py
index 0131dea42..d2f647fe3 100644
--- a/modules/gfpgan_model.py
+++ b/modules/gfpgan_model.py
@@ -1,12 +1,11 @@
import os
-import sys
-import traceback
import facexlib
import gfpgan
import modules.face_restoration
from modules import paths, shared, devices, modelloader
+from modules.errors import print_error
model_dir = "GFPGAN"
user_path = None
@@ -112,5 +111,4 @@ def setup_model(dirname):
shared.face_restorers.append(FaceRestorerGFPGAN())
except Exception:
- print("Error setting up GFPGAN:", file=sys.stderr)
- print(traceback.format_exc(), file=sys.stderr)
+ print_error("Error setting up GFPGAN", exc_info=True)
diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index 570b5603d..fcc1ef209 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -2,8 +2,6 @@ import datetime
import glob
import html
import os
-import sys
-import traceback
import inspect
import modules.textual_inversion.dataset
@@ -12,6 +10,7 @@ import tqdm
from einops import rearrange, repeat
from ldm.util import default
from modules import devices, processing, sd_models, shared, sd_samplers, hashes, sd_hijack_checkpoint
+from modules.errors import print_error
from modules.textual_inversion import textual_inversion, logging
from modules.textual_inversion.learn_schedule import LearnRateScheduler
from torch import einsum
@@ -325,17 +324,14 @@ def load_hypernetwork(name):
if path is None:
return None
- hypernetwork = Hypernetwork()
-
try:
+ hypernetwork = Hypernetwork()
hypernetwork.load(path)
+ return hypernetwork
except Exception:
- print(f"Error loading hypernetwork {path}", file=sys.stderr)
- print(traceback.format_exc(), file=sys.stderr)
+ print_error(f"Error loading hypernetwork {path}", exc_info=True)
return None
- return hypernetwork
-
def load_hypernetworks(names, multipliers=None):
already_loaded = {}
@@ -770,7 +766,7 @@ Last saved image: {html.escape(last_saved_image)}
"""
except Exception:
- print(traceback.format_exc(), file=sys.stderr)
+ print_error("Exception in training hypernetwork", exc_info=True)
finally:
pbar.leave = False
pbar.close()
diff --git a/modules/images.py b/modules/images.py
index e21e554cf..69151becd 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -1,6 +1,4 @@
import datetime
-import sys
-import traceback
import pytz
import io
@@ -18,6 +16,7 @@ import json
import hashlib
from modules import sd_samplers, shared, script_callbacks, errors
+from modules.errors import print_error
from modules.paths_internal import roboto_ttf_file
from modules.shared import opts
@@ -464,8 +463,7 @@ class FilenameGenerator:
replacement = fun(self, *pattern_args)
except Exception:
replacement = None
- print(f"Error adding [{pattern}] to filename", file=sys.stderr)
- print(traceback.format_exc(), file=sys.stderr)
+ print_error(f"Error adding [{pattern}] to filename", exc_info=True)
if replacement == NOTHING_AND_SKIP_PREVIOUS_TEXT:
continue
@@ -697,8 +695,7 @@ def read_info_from_image(image):
Negative prompt: {json_info["uc"]}
Steps: {json_info["steps"]}, Sampler: {sampler}, CFG scale: {json_info["scale"]}, Seed: {json_info["seed"]}, Size: {image.width}x{image.height}, Clip skip: 2, ENSD: 31337"""
except Exception:
- print("Error parsing NovelAI image generation parameters:", file=sys.stderr)
- print(traceback.format_exc(), file=sys.stderr)
+ print_error("Error parsing NovelAI image generation parameters", exc_info=True)
return geninfo, items
diff --git a/modules/interrogate.py b/modules/interrogate.py
index 111b1322c..d36e1a5ab 100644
--- a/modules/interrogate.py
+++ b/modules/interrogate.py
@@ -1,6 +1,5 @@
import os
import sys
-import traceback
from collections import namedtuple
from pathlib import Path
import re
@@ -12,6 +11,7 @@ from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from modules import devices, paths, shared, lowvram, modelloader, errors
+from modules.errors import print_error
blip_image_eval_size = 384
clip_model_name = 'ViT-L/14'
@@ -216,8 +216,7 @@ class InterrogateModels:
res += f", {match}"
except Exception:
- print("Error interrogating", file=sys.stderr)
- print(traceback.format_exc(), file=sys.stderr)
+ print_error("Error interrogating", exc_info=True)
res += ""
self.unload()
diff --git a/modules/launch_utils.py b/modules/launch_utils.py
index 35a52310b..22edc1064 100644
--- a/modules/launch_utils.py
+++ b/modules/launch_utils.py
@@ -8,6 +8,7 @@ import json
from functools import lru_cache
from modules import cmd_args
+from modules.errors import print_error
from modules.paths_internal import script_path, extensions_dir
args, _ = cmd_args.parser.parse_known_args()
@@ -188,7 +189,7 @@ def run_extension_installer(extension_dir):
print(run(f'"{python}" "{path_installer}"', errdesc=f"Error running install.py for extension {extension_dir}", custom_env=env))
except Exception as e:
- print(e, file=sys.stderr)
+ print_error(str(e))
def list_extensions(settings_file):
@@ -198,8 +199,8 @@ def list_extensions(settings_file):
if os.path.isfile(settings_file):
with open(settings_file, "r", encoding="utf8") as file:
settings = json.load(file)
- except Exception as e:
- print(e, file=sys.stderr)
+ except Exception:
+ print_error("Could not load settings", exc_info=True)
disabled_extensions = set(settings.get('disabled_extensions', []))
disable_all_extensions = settings.get('disable_all_extensions', 'none')
diff --git a/modules/localization.py b/modules/localization.py
index ee9c65e7d..9a1df343b 100644
--- a/modules/localization.py
+++ b/modules/localization.py
@@ -1,8 +1,7 @@
import json
import os
-import sys
-import traceback
+from modules.errors import print_error
localizations = {}
@@ -31,7 +30,6 @@ def localization_js(current_localization_name: str) -> str:
with open(fn, "r", encoding="utf8") as file:
data = json.load(file)
except Exception:
- print(f"Error loading localization from {fn}:", file=sys.stderr)
- print(traceback.format_exc(), file=sys.stderr)
+ print_error(f"Error loading localization from {fn}", exc_info=True)
return f"window.localization = {json.dumps(data)}"
diff --git a/modules/processing.py b/modules/processing.py
index b75f25157..5c9bcce8e 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -1,4 +1,5 @@
import json
+import logging
import math
import os
import sys
@@ -23,7 +24,6 @@ import modules.images as images
import modules.styles
import modules.sd_models as sd_models
import modules.sd_vae as sd_vae
-import logging
from ldm.data.util import AddMiDaS
from ldm.models.diffusion.ddpm import LatentDepth2ImageDiffusion
diff --git a/modules/realesrgan_model.py b/modules/realesrgan_model.py
index 99983678d..c8d0c64f7 100644
--- a/modules/realesrgan_model.py
+++ b/modules/realesrgan_model.py
@@ -1,12 +1,11 @@
import os
-import sys
-import traceback
import numpy as np
from PIL import Image
from basicsr.utils.download_util import load_file_from_url
from realesrgan import RealESRGANer
+from modules.errors import print_error
from modules.upscaler import Upscaler, UpscalerData
from modules.shared import cmd_opts, opts
from modules import modelloader
@@ -36,8 +35,7 @@ class UpscalerRealESRGAN(Upscaler):
self.scalers.append(scaler)
except Exception:
- print("Error importing Real-ESRGAN:", file=sys.stderr)
- print(traceback.format_exc(), file=sys.stderr)
+ print_error("Error importing Real-ESRGAN", exc_info=True)
self.enable = False
self.scalers = []
@@ -76,9 +74,8 @@ class UpscalerRealESRGAN(Upscaler):
info.local_data_path = load_file_from_url(url=info.data_path, model_dir=self.model_download_path, progress=True)
return info
- except Exception as e:
- print(f"Error making Real-ESRGAN models list: {e}", file=sys.stderr)
- print(traceback.format_exc(), file=sys.stderr)
+ except Exception:
+ print_error("Error making Real-ESRGAN models list", exc_info=True)
return None
def load_models(self, _):
@@ -135,5 +132,4 @@ def get_realesrgan_models(scaler):
]
return models
except Exception:
- print("Error making Real-ESRGAN models list:", file=sys.stderr)
- print(traceback.format_exc(), file=sys.stderr)
+ print_error("Error making Real-ESRGAN models list", exc_info=True)
diff --git a/modules/safe.py b/modules/safe.py
index e8f507743..b596f5658 100644
--- a/modules/safe.py
+++ b/modules/safe.py
@@ -2,8 +2,6 @@
import pickle
import collections
-import sys
-import traceback
import torch
import numpy
@@ -11,6 +9,8 @@ import _codecs
import zipfile
import re
+from modules.errors import print_error
+
# PyTorch 1.13 and later have _TypedStorage renamed to TypedStorage
TypedStorage = torch.storage.TypedStorage if hasattr(torch.storage, 'TypedStorage') else torch.storage._TypedStorage
@@ -136,17 +136,20 @@ def load_with_extra(filename, extra_handler=None, *args, **kwargs):
check_pt(filename, extra_handler)
except pickle.UnpicklingError:
- print(f"Error verifying pickled file from {filename}:", file=sys.stderr)
- print(traceback.format_exc(), file=sys.stderr)
- print("-----> !!!! The file is most likely corrupted !!!! <-----", file=sys.stderr)
- print("You can skip this check with --disable-safe-unpickle commandline argument, but that is not going to help you.\n\n", file=sys.stderr)
+ print_error(
+ f"Error verifying pickled file from {filename}\n"
+ "-----> !!!! The file is most likely corrupted !!!! <-----\n"
+ "You can skip this check with --disable-safe-unpickle commandline argument, but that is not going to help you.\n\n",
+ exc_info=True,
+ )
return None
-
except Exception:
- print(f"Error verifying pickled file from {filename}:", file=sys.stderr)
- print(traceback.format_exc(), file=sys.stderr)
- print("\nThe file may be malicious, so the program is not going to read it.", file=sys.stderr)
- print("You can skip this check with --disable-safe-unpickle commandline argument.\n\n", file=sys.stderr)
+ print_error(
+ f"Error verifying pickled file from {filename}\n"
+ f"The file may be malicious, so the program is not going to read it.\n"
+ f"You can skip this check with --disable-safe-unpickle commandline argument.\n\n",
+ exc_info=True,
+ )
return None
return unsafe_torch_load(filename, *args, **kwargs)
@@ -190,4 +193,3 @@ with safe.Extra(handler):
unsafe_torch_load = torch.load
torch.load = load
global_extra_handler = None
-
diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py
index d2728e12c..6aa9c3b63 100644
--- a/modules/script_callbacks.py
+++ b/modules/script_callbacks.py
@@ -1,16 +1,15 @@
-import sys
-import traceback
-from collections import namedtuple
import inspect
+from collections import namedtuple
from typing import Optional, Dict, Any
from fastapi import FastAPI
from gradio import Blocks
+from modules.errors import print_error
+
def report_exception(c, job):
- print(f"Error executing callback {job} for {c.script}", file=sys.stderr)
- print(traceback.format_exc(), file=sys.stderr)
+ print_error(f"Error executing callback {job} for {c.script}", exc_info=True)
class ImageSaveParams:
diff --git a/modules/script_loading.py b/modules/script_loading.py
index 57b158624..26efffcb3 100644
--- a/modules/script_loading.py
+++ b/modules/script_loading.py
@@ -1,8 +1,8 @@
import os
-import sys
-import traceback
import importlib.util
+from modules.errors import print_error
+
def load_module(path):
module_spec = importlib.util.spec_from_file_location(os.path.basename(path), path)
@@ -27,5 +27,4 @@ def preload_extensions(extensions_dir, parser):
module.preload(parser)
except Exception:
- print(f"Error running preload() for {preload_script}", file=sys.stderr)
- print(traceback.format_exc(), file=sys.stderr)
+ print_error(f"Error running preload() for {preload_script}", exc_info=True)
diff --git a/modules/scripts.py b/modules/scripts.py
index c902804b6..a7168fd12 100644
--- a/modules/scripts.py
+++ b/modules/scripts.py
@@ -1,12 +1,12 @@
import os
import re
import sys
-import traceback
from collections import namedtuple
import gradio as gr
from modules import shared, paths, script_callbacks, extensions, script_loading, scripts_postprocessing
+from modules.errors import print_error
AlwaysVisible = object()
@@ -264,8 +264,7 @@ def load_scripts():
register_scripts_from_module(script_module)
except Exception:
- print(f"Error loading script: {scriptfile.filename}", file=sys.stderr)
- print(traceback.format_exc(), file=sys.stderr)
+ print_error(f"Error loading script: {scriptfile.filename}", exc_info=True)
finally:
sys.path = syspath
@@ -280,11 +279,9 @@ def load_scripts():
def wrap_call(func, filename, funcname, *args, default=None, **kwargs):
try:
- res = func(*args, **kwargs)
- return res
+ return func(*args, **kwargs)
except Exception:
- print(f"Error calling: {filename}/{funcname}", file=sys.stderr)
- print(traceback.format_exc(), file=sys.stderr)
+ print_error(f"Error calling: {filename}/{funcname}", exc_info=True)
return default
@@ -450,8 +447,7 @@ class ScriptRunner:
script_args = p.script_args[script.args_from:script.args_to]
script.process(p, *script_args)
except Exception:
- print(f"Error running process: {script.filename}", file=sys.stderr)
- print(traceback.format_exc(), file=sys.stderr)
+ print_error(f"Error running process: {script.filename}", exc_info=True)
def before_process_batch(self, p, **kwargs):
for script in self.alwayson_scripts:
@@ -459,8 +455,7 @@ class ScriptRunner:
script_args = p.script_args[script.args_from:script.args_to]
script.before_process_batch(p, *script_args, **kwargs)
except Exception:
- print(f"Error running before_process_batch: {script.filename}", file=sys.stderr)
- print(traceback.format_exc(), file=sys.stderr)
+ print_error(f"Error running before_process_batch: {script.filename}", exc_info=True)
def process_batch(self, p, **kwargs):
for script in self.alwayson_scripts:
@@ -468,8 +463,7 @@ class ScriptRunner:
script_args = p.script_args[script.args_from:script.args_to]
script.process_batch(p, *script_args, **kwargs)
except Exception:
- print(f"Error running process_batch: {script.filename}", file=sys.stderr)
- print(traceback.format_exc(), file=sys.stderr)
+ print_error(f"Error running process_batch: {script.filename}", exc_info=True)
def postprocess(self, p, processed):
for script in self.alwayson_scripts:
@@ -477,8 +471,7 @@ class ScriptRunner:
script_args = p.script_args[script.args_from:script.args_to]
script.postprocess(p, processed, *script_args)
except Exception:
- print(f"Error running postprocess: {script.filename}", file=sys.stderr)
- print(traceback.format_exc(), file=sys.stderr)
+ print_error(f"Error running postprocess: {script.filename}", exc_info=True)
def postprocess_batch(self, p, images, **kwargs):
for script in self.alwayson_scripts:
@@ -486,8 +479,7 @@ class ScriptRunner:
script_args = p.script_args[script.args_from:script.args_to]
script.postprocess_batch(p, *script_args, images=images, **kwargs)
except Exception:
- print(f"Error running postprocess_batch: {script.filename}", file=sys.stderr)
- print(traceback.format_exc(), file=sys.stderr)
+ print_error(f"Error running postprocess_batch: {script.filename}", exc_info=True)
def postprocess_image(self, p, pp: PostprocessImageArgs):
for script in self.alwayson_scripts:
@@ -495,24 +487,21 @@ class ScriptRunner:
script_args = p.script_args[script.args_from:script.args_to]
script.postprocess_image(p, pp, *script_args)
except Exception:
- print(f"Error running postprocess_batch: {script.filename}", file=sys.stderr)
- print(traceback.format_exc(), file=sys.stderr)
+ print_error(f"Error running postprocess_image: {script.filename}", exc_info=True)
def before_component(self, component, **kwargs):
for script in self.scripts:
try:
script.before_component(component, **kwargs)
except Exception:
- print(f"Error running before_component: {script.filename}", file=sys.stderr)
- print(traceback.format_exc(), file=sys.stderr)
+ print_error(f"Error running before_component: {script.filename}", exc_info=True)
def after_component(self, component, **kwargs):
for script in self.scripts:
try:
script.after_component(component, **kwargs)
except Exception:
- print(f"Error running after_component: {script.filename}", file=sys.stderr)
- print(traceback.format_exc(), file=sys.stderr)
+ print_error(f"Error running after_component: {script.filename}", exc_info=True)
def reload_sources(self, cache):
for si, script in list(enumerate(self.scripts)):
diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py
index 2ec0b0490..fd186fa26 100644
--- a/modules/sd_hijack_optimizations.py
+++ b/modules/sd_hijack_optimizations.py
@@ -1,7 +1,5 @@
from __future__ import annotations
import math
-import sys
-import traceback
import psutil
import torch
@@ -11,6 +9,7 @@ from ldm.util import default
from einops import rearrange
from modules import shared, errors, devices, sub_quadratic_attention
+from modules.errors import print_error
from modules.hypernetworks import hypernetwork
import ldm.modules.attention
@@ -140,8 +139,7 @@ if shared.cmd_opts.xformers or shared.cmd_opts.force_enable_xformers:
import xformers.ops
shared.xformers_available = True
except Exception:
- print("Cannot import xformers", file=sys.stderr)
- print(traceback.format_exc(), file=sys.stderr)
+ print_error("Cannot import xformers", exc_info=True)
def get_available_vram():
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index d489ed1e0..a040a9884 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -1,6 +1,4 @@
import os
-import sys
-import traceback
from collections import namedtuple
import torch
@@ -16,6 +14,7 @@ from torch.utils.tensorboard import SummaryWriter
from modules import shared, devices, sd_hijack, processing, sd_models, images, sd_samplers, sd_hijack_checkpoint
import modules.textual_inversion.dataset
+from modules.errors import print_error
from modules.textual_inversion.learn_schedule import LearnRateScheduler
from modules.textual_inversion.image_embedding import embedding_to_b64, embedding_from_b64, insert_image_data_embed, extract_image_data_embed, caption_image_overlay
@@ -207,8 +206,7 @@ class EmbeddingDatabase:
self.load_from_file(fullfn, fn)
except Exception:
- print(f"Error loading embedding {fn}:", file=sys.stderr)
- print(traceback.format_exc(), file=sys.stderr)
+ print_error(f"Error loading embedding {fn}", exc_info=True)
continue
def load_textual_inversion_embeddings(self, force_reload=False):
@@ -632,8 +630,7 @@ Last saved image: {html.escape(last_saved_image)}
filename = os.path.join(shared.cmd_opts.embeddings_dir, f'{embedding_name}.pt')
save_embedding(embedding, optimizer, checkpoint, embedding_name, filename, remove_cached_checksum=True)
except Exception:
- print(traceback.format_exc(), file=sys.stderr)
- pass
+ print_error("Error training embedding", exc_info=True)
finally:
pbar.leave = False
pbar.close()
diff --git a/modules/ui.py b/modules/ui.py
index 001b97923..1ad94f027 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -2,7 +2,6 @@ import json
import mimetypes
import os
import sys
-import traceback
from functools import reduce
import warnings
@@ -14,6 +13,7 @@ from PIL import Image, PngImagePlugin # noqa: F401
from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call
from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave
+from modules.errors import print_error
from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML
from modules.paths import script_path, data_path
@@ -231,9 +231,8 @@ def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info:
res = all_seeds[index if 0 <= index < len(all_seeds) else 0]
except json.decoder.JSONDecodeError:
- if gen_info_string != '':
- print("Error parsing JSON generation info:", file=sys.stderr)
- print(gen_info_string, file=sys.stderr)
+ if gen_info_string:
+ print_error(f"Error parsing JSON generation info: {gen_info_string}")
return [res, gr_show(False)]
@@ -1753,8 +1752,7 @@ def create_ui():
try:
results = modules.extras.run_modelmerger(*args)
except Exception as e:
- print("Error loading/saving model file:", file=sys.stderr)
- print(traceback.format_exc(), file=sys.stderr)
+ print_error("Error loading/saving model file", exc_info=True)
modules.sd_models.list_models() # to remove the potentially missing models from the list
return [*[gr.Dropdown.update(choices=modules.sd_models.checkpoint_tiles()) for _ in range(4)], f"Error merging checkpoints: {e}"]
return results
diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py
index 515ec2622..cadf56be2 100644
--- a/modules/ui_extensions.py
+++ b/modules/ui_extensions.py
@@ -1,10 +1,8 @@
import json
import os.path
-import sys
import threading
import time
from datetime import datetime
-import traceback
import git
@@ -14,6 +12,7 @@ import shutil
import errno
from modules import extensions, shared, paths, config_states
+from modules.errors import print_error
from modules.paths_internal import config_states_dir
from modules.call_queue import wrap_gradio_gpu_call
@@ -46,8 +45,7 @@ def apply_and_restart(disable_list, update_list, disable_all):
try:
ext.fetch_and_reset_hard()
except Exception:
- print(f"Error getting updates for {ext.name}:", file=sys.stderr)
- print(traceback.format_exc(), file=sys.stderr)
+ print_error(f"Error getting updates for {ext.name}", exc_info=True)
shared.opts.disabled_extensions = disabled
shared.opts.disable_all_extensions = disable_all
@@ -113,8 +111,7 @@ def check_updates(id_task, disable_list):
if 'FETCH_HEAD' not in str(e):
raise
except Exception:
- print(f"Error checking updates for {ext.name}:", file=sys.stderr)
- print(traceback.format_exc(), file=sys.stderr)
+ print_error(f"Error checking updates for {ext.name}", exc_info=True)
shared.state.nextjob()
diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py
index b918a764e..4dc24615a 100644
--- a/scripts/prompts_from_file.py
+++ b/scripts/prompts_from_file.py
@@ -1,13 +1,12 @@
import copy
import random
-import sys
-import traceback
import shlex
import modules.scripts as scripts
import gradio as gr
from modules import sd_samplers
+from modules.errors import print_error
from modules.processing import Processed, process_images
from modules.shared import state
@@ -136,8 +135,7 @@ class Script(scripts.Script):
try:
args = cmdargs(line)
except Exception:
- print(f"Error parsing line {line} as commandline:", file=sys.stderr)
- print(traceback.format_exc(), file=sys.stderr)
+ print_error(f"Error parsing line {line} as commandline", exc_info=True)
args = {"prompt": line}
else:
args = {"prompt": line}
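For context, a minimal sketch of the call-site conversion these hunks apply throughout the codebase, assuming the `print_error` helper from `modules/errors.py` introduced earlier in this series; `do_something` and the surrounding scaffolding are illustrative only:

```python
import sys
import traceback

from modules.errors import print_error  # helper assumed to exist at this point in the series


def do_something():  # stand-in for any operation that may fail
    raise RuntimeError("boom")


try:
    do_something()
except Exception:
    # old pattern, duplicated at every call site:
    print("Error doing something", file=sys.stderr)
    print(traceback.format_exc(), file=sys.stderr)
    # new pattern the patch converges on: one call, message plus optional traceback
    print_error("Error doing something", exc_info=True)
```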
From 123641e4ec93c0cd99f7ab03f4f39ca8ae4b4cef Mon Sep 17 00:00:00 2001
From: w-e-w <40751091+w-e-w@users.noreply.github.com>
Date: Tue, 30 May 2023 01:06:23 +0900
Subject: [PATCH 078/168] Revert "fix xyz clip"
This reverts commit edd766e70ae3ea8039e7b9e24d55fbd26792ef10.
---
scripts/xyz_grid.py | 5 +----
1 file changed, 1 insertion(+), 4 deletions(-)
diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py
index 00a81060a..7821cc655 100644
--- a/scripts/xyz_grid.py
+++ b/scripts/xyz_grid.py
@@ -94,10 +94,7 @@ def confirm_checkpoints(p, xs):
def apply_clip_skip(p, x, xs):
- if opts.data["CLIP_stop_at_last_layers"] != x:
- opts.data["CLIP_stop_at_last_layers"] = x
- p.cached_c = [None, None]
- p.cached_uc = [None, None]
+ opts.data["CLIP_stop_at_last_layers"] = x
def apply_upscale_latent_space(p, x, xs):
From 4a449375a20267035402eed5d93074c2f0a91bc8 Mon Sep 17 00:00:00 2001
From: w-e-w <40751091+w-e-w@users.noreply.github.com>
Date: Tue, 30 May 2023 01:07:35 +0900
Subject: [PATCH 079/168] fix get_conds_with_caching()
---
modules/processing.py | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/modules/processing.py b/modules/processing.py
index b75f25157..395c851f1 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -321,14 +321,13 @@ class StableDiffusionProcessing:
have been used before. The second element is where the previously
computed result is stored.
"""
-
- if cache[0] is not None and (required_prompts, steps) == cache[0]:
+ if cache[0] is not None and (required_prompts, steps, opts.CLIP_stop_at_last_layers, shared.sd_model.sd_checkpoint_info) == cache[0]:
return cache[1]
with devices.autocast():
cache[1] = function(shared.sd_model, required_prompts, steps)
- cache[0] = (required_prompts, steps)
+ cache[0] = (required_prompts, steps, opts.CLIP_stop_at_last_layers, shared.sd_model.sd_checkpoint_info)
return cache[1]
def setup_conds(self):
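A toy sketch of the caching rule this patch restores, with illustrative stand-ins for the real prompt, option and checkpoint objects: a cached conditioning is only reused when everything that can affect it (prompts, step count, clip-skip setting and the loaded checkpoint) matches the stored key.

```python
def get_conds_with_caching(function, required_prompts, steps, clip_skip, checkpoint, cache):
    key = (required_prompts, steps, clip_skip, checkpoint)
    if cache[0] is not None and key == cache[0]:
        return cache[1]                               # nothing relevant changed: reuse
    cache[1] = function(required_prompts, steps)      # recompute the conditioning
    cache[0] = key
    return cache[1]


cache = [None, None]
compute = lambda prompts, steps: ("conds", prompts, steps)
get_conds_with_caching(compute, ("a cat",), 20, 2, "model-A", cache)  # computes
get_conds_with_caching(compute, ("a cat",), 20, 2, "model-A", cache)  # cache hit
get_conds_with_caching(compute, ("a cat",), 20, 1, "model-A", cache)  # clip skip changed: recompute
```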
From c8e67b67320f5d090b758303e675c1e5586575a5 Mon Sep 17 00:00:00 2001
From: Artem Kotov
Date: Mon, 29 May 2023 20:39:24 +0400
Subject: [PATCH 080/168] improve filename matching for mask
We should not rely on the mask filename having the same extension
as the image filename, so better pattern matching is added.
---
modules/img2img.py | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/modules/img2img.py b/modules/img2img.py
index d704bf900..bc79ea1f9 100644
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -1,4 +1,5 @@
import os
+from pathlib import Path
import numpy as np
from PIL import Image, ImageOps, ImageFilter, ImageEnhance, ImageChops, UnidentifiedImageError
@@ -53,7 +54,11 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args):
if is_inpaint_batch:
# try to find corresponding mask for an image using simple filename matching
- mask_image_path = os.path.join(inpaint_mask_dir, os.path.basename(image))
+ path = Path(os.path.join(inpaint_mask_dir, os.path.basename(image)))
+ mask_image_path = list(path.parent.glob(f"**/{path.stem}*"))
+ if len(mask_image_path) > 0:
+ mask_image_path = str(mask_image_path[0])
+
# if not found use first one ("same mask for all images" use-case)
if mask_image_path not in inpaint_masks:
mask_image_path = inpaint_masks[0]
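A sketch of the lookup this hunk introduces, pulled out into a hypothetical helper for readability: the mask is matched by the image's filename stem, so `photo.png` can be paired with `photo.jpg` or `photo_mask.png` in the mask directory.

```python
import os
from pathlib import Path


def find_mask_for_image(image_path: str, inpaint_mask_dir: str) -> str | None:
    # match on the stem rather than the full filename, so the mask may use a
    # different extension (or a suffix) than the source image
    candidate = Path(os.path.join(inpaint_mask_dir, os.path.basename(image_path)))
    matches = list(candidate.parent.glob(f"**/{candidate.stem}*"))
    return str(matches[0]) if matches else None
```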
From 6c610a8a951ea8141024dbb659c2528bf24225ec Mon Sep 17 00:00:00 2001
From: Artem Kotov
Date: Mon, 29 May 2023 20:47:20 +0400
Subject: [PATCH 081/168] add scale_by to batch processing
---
modules/img2img.py | 11 ++++++++---
1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/modules/img2img.py b/modules/img2img.py
index bc79ea1f9..5d6ad5200 100644
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -14,7 +14,7 @@ from modules.ui import plaintext_to_html
import modules.scripts
-def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args):
+def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=False, scale_by=1.0):
processing.fix_seed(p)
images = shared.listfiles(input_dir)
@@ -50,6 +50,11 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args):
continue
# Use the EXIF orientation of photos taken by smartphones.
img = ImageOps.exif_transpose(img)
+
+ if to_scale:
+ p.width = int(img.width * scale_by)
+ p.height = int(img.height * scale_by)
+
p.init_images = [img] * p.batch_size
if is_inpaint_batch:
@@ -119,7 +124,7 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s
if image is not None:
image = ImageOps.exif_transpose(image)
- if selected_scale_tab == 1:
+ if selected_scale_tab == 1 and not is_batch:
assert image, "Can't scale by because no image is selected"
width = int(image.width * scale_by)
@@ -174,7 +179,7 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s
if is_batch:
assert not shared.cmd_opts.hide_ui_dir_config, "Launched with --hide-ui-dir-config, batch img2img disabled"
- process_batch(p, img2img_batch_input_dir, img2img_batch_output_dir, img2img_batch_inpaint_mask_dir, args)
+ process_batch(p, img2img_batch_input_dir, img2img_batch_output_dir, img2img_batch_inpaint_mask_dir, args, to_scale=selected_scale_tab == 1, scale_by=scale_by)
processed = Processed(p, [], p.seed, "")
else:
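A brief sketch of the per-image sizing rule the batch path now applies when the "scale by" tab is selected, assuming a PIL image as input: each image is resized relative to its own dimensions, so a folder of mixed-size inputs keeps its aspect ratios.

```python
from PIL import Image


def target_size(img: Image.Image, scale_by: float) -> tuple[int, int]:
    # mirrors the p.width / p.height assignment in process_batch when to_scale is set
    return int(img.width * scale_by), int(img.height * scale_by)
```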
From 23314a6e27a24fc8bf98717ddc180aec06674abd Mon Sep 17 00:00:00 2001
From: Artem Kotov
Date: Mon, 29 May 2023 21:38:49 +0400
Subject: [PATCH 082/168] ruffed
---
modules/img2img.py | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/modules/img2img.py b/modules/img2img.py
index 5d6ad5200..a29add3a0 100644
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -50,11 +50,11 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=Fal
continue
# Use the EXIF orientation of photos taken by smartphones.
img = ImageOps.exif_transpose(img)
-
+
if to_scale:
p.width = int(img.width * scale_by)
p.height = int(img.height * scale_by)
-
+
p.init_images = [img] * p.batch_size
if is_inpaint_batch:
@@ -63,7 +63,7 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=Fal
mask_image_path = list(path.parent.glob(f"**/{path.stem}*"))
if len(mask_image_path) > 0:
mask_image_path = str(mask_image_path[0])
-
+
# if not found use first one ("same mask for all images" use-case)
if mask_image_path not in inpaint_masks:
mask_image_path = inpaint_masks[0]
From 8ab4e55fe3a7f953201eeb887de664f0db3d9e93 Mon Sep 17 00:00:00 2001
From: Danil Boldyrev
Date: Mon, 29 May 2023 21:39:10 +0300
Subject: [PATCH 083/168] Moved the script to the built-in extension
---
.../canvas-zoom-and-pan/javascript}/zoom.js | 0
1 file changed, 0 insertions(+), 0 deletions(-)
rename {javascript => extensions-builtin/canvas-zoom-and-pan/javascript}/zoom.js (100%)
diff --git a/javascript/zoom.js b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
similarity index 100%
rename from javascript/zoom.js
rename to extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
From 42e020c1c1c31b29ee7fc2e493a60613459642a1 Mon Sep 17 00:00:00 2001
From: James
Date: Mon, 29 May 2023 22:25:43 +0100
Subject: [PATCH 084/168] Added VAE listing to web API.
---
modules/api/api.py | 5 +++++
modules/api/models.py | 4 ++++
2 files changed, 9 insertions(+)
diff --git a/modules/api/api.py b/modules/api/api.py
index eee99bbb2..31b9d90ca 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -23,6 +23,7 @@ from modules.textual_inversion.preprocess import preprocess
from modules.hypernetworks.hypernetwork import create_hypernetwork, train_hypernetwork
from PIL import PngImagePlugin,Image
from modules.sd_models import checkpoints_list, unload_model_weights, reload_model_weights
+from modules.sd_vae import vae_dict
from modules.sd_models_config import find_checkpoint_config_near_filename
from modules.realesrgan_model import get_realesrgan_models
from modules import devices
@@ -189,6 +190,7 @@ class Api:
self.add_api_route("/sdapi/v1/samplers", self.get_samplers, methods=["GET"], response_model=List[models.SamplerItem])
self.add_api_route("/sdapi/v1/upscalers", self.get_upscalers, methods=["GET"], response_model=List[models.UpscalerItem])
self.add_api_route("/sdapi/v1/sd-models", self.get_sd_models, methods=["GET"], response_model=List[models.SDModelItem])
+ self.add_api_route("/sdapi/v1/sd-vae", self.get_sd_vaes, methods=["GET"], response_model=List[models.SDVaeItem])
self.add_api_route("/sdapi/v1/hypernetworks", self.get_hypernetworks, methods=["GET"], response_model=List[models.HypernetworkItem])
self.add_api_route("/sdapi/v1/face-restorers", self.get_face_restorers, methods=["GET"], response_model=List[models.FaceRestorerItem])
self.add_api_route("/sdapi/v1/realesrgan-models", self.get_realesrgan_models, methods=["GET"], response_model=List[models.RealesrganItem])
@@ -541,6 +543,9 @@ class Api:
def get_sd_models(self):
return [{"title": x.title, "model_name": x.model_name, "hash": x.shorthash, "sha256": x.sha256, "filename": x.filename, "config": find_checkpoint_config_near_filename(x)} for x in checkpoints_list.values()]
+ def get_sd_vaes(self):
+ return [{"model_name": x, "filename": vae_dict[x]} for x in vae_dict.keys()]
+
def get_hypernetworks(self):
return [{"name": name, "path": shared.hypernetworks[name]} for name in shared.hypernetworks]
diff --git a/modules/api/models.py b/modules/api/models.py
index 1ff2fb338..47fdede2c 100644
--- a/modules/api/models.py
+++ b/modules/api/models.py
@@ -249,6 +249,10 @@ class SDModelItem(BaseModel):
filename: str = Field(title="Filename")
config: Optional[str] = Field(title="Config file")
+class SDVaeItem(BaseModel):
+ model_name: str = Field(title="Model Name")
+ filename: str = Field(title="Filename")
+
class HypernetworkItem(BaseModel):
name: str = Field(title="Name")
path: Optional[str] = Field(title="Path")
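A hedged usage sketch of the new endpoint, assuming the webui API is enabled and served at its default local address (adjust host and port to your setup):

```python
import requests

# list the VAE checkpoints the server knows about
resp = requests.get("http://127.0.0.1:7860/sdapi/v1/sd-vae", timeout=10)
resp.raise_for_status()
for vae in resp.json():
    print(vae["model_name"], "->", vae["filename"])
```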
From 3fc8aeb48d10257bb36771330e9c53a944cff792 Mon Sep 17 00:00:00 2001
From: David Chuang
Date: Mon, 29 May 2023 20:17:25 -0400
Subject: [PATCH 085/168] Fix s_min_uncond default type int
---
modules/shared.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/shared.py b/modules/shared.py
index 3099d1d2e..32c001f2b 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -419,7 +419,7 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), {
options_templates.update(options_section(('optimizations', "Optimizations"), {
"cross_attention_optimization": OptionInfo("Automatic", "Cross attention optimization", gr.Dropdown, lambda: {"choices": shared_items.cross_attention_optimizations()}),
- "s_min_uncond": OptionInfo(0, "Negative Guidance minimum sigma", gr.Slider, {"minimum": 0.0, "maximum": 4.0, "step": 0.01}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9177").info("skip negative prompt for some steps when the image is almost ready; 0=disable, higher=faster"),
+ "s_min_uncond": OptionInfo(0.0, "Negative Guidance minimum sigma", gr.Slider, {"minimum": 0.0, "maximum": 4.0, "step": 0.01}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9177").info("skip negative prompt for some steps when the image is almost ready; 0=disable, higher=faster"),
"token_merging_ratio": OptionInfo(0.0, "Token merging ratio", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9256").info("0=disable, higher=faster"),
"token_merging_ratio_img2img": OptionInfo(0.0, "Token merging ratio for img2img", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"),
"token_merging_ratio_hr": OptionInfo(0.0, "Token merging ratio for high-res pass", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"),
From baa81126c485434211cb6d58292f77de00dc2432 Mon Sep 17 00:00:00 2001
From: missionfloyd
Date: Mon, 29 May 2023 23:52:19 -0600
Subject: [PATCH 086/168] Move gamepaddisconnected listener
---
javascript/imageviewerGamepad.js | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/javascript/imageviewerGamepad.js b/javascript/imageviewerGamepad.js
index 45cc4cbf9..a22c7e6e6 100644
--- a/javascript/imageviewerGamepad.js
+++ b/javascript/imageviewerGamepad.js
@@ -24,7 +24,10 @@ window.addEventListener('gamepadconnected', (e) => {
isWaiting = false;
}
}, 10);
- window.addEventListener('gamepaddisconnected', (e) => clearInterval(gamepads[e.gamepad.index]));
+});
+
+window.addEventListener('gamepaddisconnected', (e) => {
+ clearInterval(gamepads[e.gamepad.index]);
});
/*
From 5fcdaa6a7f19d083a6393cc0d2b933ff5080f5b3 Mon Sep 17 00:00:00 2001
From: Aarni Koskela
Date: Tue, 30 May 2023 12:36:55 +0300
Subject: [PATCH 087/168] Vendor in the single module used from
taming_transformers; remove taming_transformers dependency
(and fix the two ruff complaints)
---
.../LDSR/sd_hijack_autoencoder.py | 2 +-
extensions-builtin/LDSR/vqvae_quantize.py | 147 ++++++++++++++++++
modules/launch_utils.py | 3 -
modules/paths.py | 1 -
webui-user.sh | 1 -
5 files changed, 148 insertions(+), 6 deletions(-)
create mode 100644 extensions-builtin/LDSR/vqvae_quantize.py
diff --git a/extensions-builtin/LDSR/sd_hijack_autoencoder.py b/extensions-builtin/LDSR/sd_hijack_autoencoder.py
index 81c5101b7..27a86e139 100644
--- a/extensions-builtin/LDSR/sd_hijack_autoencoder.py
+++ b/extensions-builtin/LDSR/sd_hijack_autoencoder.py
@@ -10,7 +10,7 @@ from contextlib import contextmanager
from torch.optim.lr_scheduler import LambdaLR
from ldm.modules.ema import LitEma
-from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer
+from vqvae_quantize import VectorQuantizer2 as VectorQuantizer
from ldm.modules.diffusionmodules.model import Encoder, Decoder
from ldm.util import instantiate_from_config
diff --git a/extensions-builtin/LDSR/vqvae_quantize.py b/extensions-builtin/LDSR/vqvae_quantize.py
new file mode 100644
index 000000000..dd14b8fda
--- /dev/null
+++ b/extensions-builtin/LDSR/vqvae_quantize.py
@@ -0,0 +1,147 @@
+# Vendored from https://raw.githubusercontent.com/CompVis/taming-transformers/24268930bf1dce879235a7fddd0b2355b84d7ea6/taming/modules/vqvae/quantize.py,
+# where the license is as follows:
+#
+# Copyright (c) 2020 Patrick Esser and Robin Rombach and Björn Ommer
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
+# OR OTHER DEALINGS IN THE SOFTWARE./
+
+import torch
+import torch.nn as nn
+import numpy as np
+from einops import rearrange
+
+
+class VectorQuantizer2(nn.Module):
+ """
+ Improved version over VectorQuantizer, can be used as a drop-in replacement. Mostly
+ avoids costly matrix multiplications and allows for post-hoc remapping of indices.
+ """
+
+ # NOTE: due to a bug the beta term was applied to the wrong term. for
+ # backwards compatibility we use the buggy version by default, but you can
+ # specify legacy=False to fix it.
+ def __init__(self, n_e, e_dim, beta, remap=None, unknown_index="random",
+ sane_index_shape=False, legacy=True):
+ super().__init__()
+ self.n_e = n_e
+ self.e_dim = e_dim
+ self.beta = beta
+ self.legacy = legacy
+
+ self.embedding = nn.Embedding(self.n_e, self.e_dim)
+ self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)
+
+ self.remap = remap
+ if self.remap is not None:
+ self.register_buffer("used", torch.tensor(np.load(self.remap)))
+ self.re_embed = self.used.shape[0]
+ self.unknown_index = unknown_index # "random" or "extra" or integer
+ if self.unknown_index == "extra":
+ self.unknown_index = self.re_embed
+ self.re_embed = self.re_embed + 1
+ print(f"Remapping {self.n_e} indices to {self.re_embed} indices. "
+ f"Using {self.unknown_index} for unknown indices.")
+ else:
+ self.re_embed = n_e
+
+ self.sane_index_shape = sane_index_shape
+
+ def remap_to_used(self, inds):
+ ishape = inds.shape
+ assert len(ishape) > 1
+ inds = inds.reshape(ishape[0], -1)
+ used = self.used.to(inds)
+ match = (inds[:, :, None] == used[None, None, ...]).long()
+ new = match.argmax(-1)
+ unknown = match.sum(2) < 1
+ if self.unknown_index == "random":
+ new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
+ else:
+ new[unknown] = self.unknown_index
+ return new.reshape(ishape)
+
+ def unmap_to_all(self, inds):
+ ishape = inds.shape
+ assert len(ishape) > 1
+ inds = inds.reshape(ishape[0], -1)
+ used = self.used.to(inds)
+ if self.re_embed > self.used.shape[0]: # extra token
+ inds[inds >= self.used.shape[0]] = 0 # simply set to zero
+ back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
+ return back.reshape(ishape)
+
+ def forward(self, z, temp=None, rescale_logits=False, return_logits=False):
+ assert temp is None or temp == 1.0, "Only for interface compatible with Gumbel"
+ assert rescale_logits is False, "Only for interface compatible with Gumbel"
+ assert return_logits is False, "Only for interface compatible with Gumbel"
+ # reshape z -> (batch, height, width, channel) and flatten
+ z = rearrange(z, 'b c h w -> b h w c').contiguous()
+ z_flattened = z.view(-1, self.e_dim)
+ # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
+
+ d = torch.sum(z_flattened ** 2, dim=1, keepdim=True) + \
+ torch.sum(self.embedding.weight ** 2, dim=1) - 2 * \
+ torch.einsum('bd,dn->bn', z_flattened, rearrange(self.embedding.weight, 'n d -> d n'))
+
+ min_encoding_indices = torch.argmin(d, dim=1)
+ z_q = self.embedding(min_encoding_indices).view(z.shape)
+ perplexity = None
+ min_encodings = None
+
+ # compute loss for embedding
+ if not self.legacy:
+ loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + \
+ torch.mean((z_q - z.detach()) ** 2)
+ else:
+ loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * \
+ torch.mean((z_q - z.detach()) ** 2)
+
+ # preserve gradients
+ z_q = z + (z_q - z).detach()
+
+ # reshape back to match original input shape
+ z_q = rearrange(z_q, 'b h w c -> b c h w').contiguous()
+
+ if self.remap is not None:
+ min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1) # add batch axis
+ min_encoding_indices = self.remap_to_used(min_encoding_indices)
+ min_encoding_indices = min_encoding_indices.reshape(-1, 1) # flatten
+
+ if self.sane_index_shape:
+ min_encoding_indices = min_encoding_indices.reshape(
+ z_q.shape[0], z_q.shape[2], z_q.shape[3])
+
+ return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
+
+ def get_codebook_entry(self, indices, shape):
+ # shape specifying (batch, height, width, channel)
+ if self.remap is not None:
+ indices = indices.reshape(shape[0], -1) # add batch axis
+ indices = self.unmap_to_all(indices)
+ indices = indices.reshape(-1) # flatten again
+
+ # get quantized latent vectors
+ z_q = self.embedding(indices)
+
+ if shape is not None:
+ z_q = z_q.view(shape)
+ # reshape back to match original input shape
+ z_q = z_q.permute(0, 3, 1, 2).contiguous()
+
+ return z_q
diff --git a/modules/launch_utils.py b/modules/launch_utils.py
index 35a52310b..ca089674f 100644
--- a/modules/launch_utils.py
+++ b/modules/launch_utils.py
@@ -229,13 +229,11 @@ def prepare_environment():
openclip_package = os.environ.get('OPENCLIP_PACKAGE', "https://github.com/mlfoundations/open_clip/archive/bb6e834e9c70d9c27d0dc3ecedeebeaeb1ffad6b.zip")
stable_diffusion_repo = os.environ.get('STABLE_DIFFUSION_REPO', "https://github.com/Stability-AI/stablediffusion.git")
- taming_transformers_repo = os.environ.get('TAMING_TRANSFORMERS_REPO', "https://github.com/CompVis/taming-transformers.git")
k_diffusion_repo = os.environ.get('K_DIFFUSION_REPO', 'https://github.com/crowsonkb/k-diffusion.git')
codeformer_repo = os.environ.get('CODEFORMER_REPO', 'https://github.com/sczhou/CodeFormer.git')
blip_repo = os.environ.get('BLIP_REPO', 'https://github.com/salesforce/BLIP.git')
stable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "cf1d67a6fd5ea1aa600c4df58e5b47da45f6bdbf")
- taming_transformers_commit_hash = os.environ.get('TAMING_TRANSFORMERS_COMMIT_HASH', "24268930bf1dce879235a7fddd0b2355b84d7ea6")
k_diffusion_commit_hash = os.environ.get('K_DIFFUSION_COMMIT_HASH', "c9fe758757e022f05ca5a53fa8fac28889e4f1cf")
codeformer_commit_hash = os.environ.get('CODEFORMER_COMMIT_HASH', "c5b4593074ba6214284d6acd5f1719b6c5d739af")
blip_commit_hash = os.environ.get('BLIP_COMMIT_HASH', "48211a1594f1321b00f14c9f7a5b4813144b2fb9")
@@ -286,7 +284,6 @@ def prepare_environment():
os.makedirs(os.path.join(script_path, dir_repos), exist_ok=True)
git_clone(stable_diffusion_repo, repo_dir('stable-diffusion-stability-ai'), "Stable Diffusion", stable_diffusion_commit_hash)
- git_clone(taming_transformers_repo, repo_dir('taming-transformers'), "Taming Transformers", taming_transformers_commit_hash)
git_clone(k_diffusion_repo, repo_dir('k-diffusion'), "K-diffusion", k_diffusion_commit_hash)
git_clone(codeformer_repo, repo_dir('CodeFormer'), "CodeFormer", codeformer_commit_hash)
git_clone(blip_repo, repo_dir('BLIP'), "BLIP", blip_commit_hash)
diff --git a/modules/paths.py b/modules/paths.py
index 5f6474c03..5171df4f8 100644
--- a/modules/paths.py
+++ b/modules/paths.py
@@ -20,7 +20,6 @@ assert sd_path is not None, f"Couldn't find Stable Diffusion in any of: {possibl
path_dirs = [
(sd_path, 'ldm', 'Stable Diffusion', []),
- (os.path.join(sd_path, '../taming-transformers'), 'taming', 'Taming Transformers', []),
(os.path.join(sd_path, '../CodeFormer'), 'inference_codeformer.py', 'CodeFormer', []),
(os.path.join(sd_path, '../BLIP'), 'models/blip.py', 'BLIP', []),
(os.path.join(sd_path, '../k-diffusion'), 'k_diffusion/sampling.py', 'k_diffusion', ["atstart"]),
diff --git a/webui-user.sh b/webui-user.sh
index 49a426ff9..70306c60d 100644
--- a/webui-user.sh
+++ b/webui-user.sh
@@ -36,7 +36,6 @@
# Fixed git commits
#export STABLE_DIFFUSION_COMMIT_HASH=""
-#export TAMING_TRANSFORMERS_COMMIT_HASH=""
#export CODEFORMER_COMMIT_HASH=""
#export BLIP_COMMIT_HASH=""
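A small smoke-test sketch of the vendored quantizer, assuming `vqvae_quantize.py` is importable (the LDSR extension directory is on `sys.path` when the webui loads it; outside the webui you would add it yourself). Parameter values are illustrative.

```python
import torch
from vqvae_quantize import VectorQuantizer2

# codebook of 512 entries with 64-dimensional embeddings; beta weights the commitment loss
quantizer = VectorQuantizer2(n_e=512, e_dim=64, beta=0.25)

z = torch.randn(1, 64, 8, 8)  # (batch, channels == e_dim, height, width)
z_q, loss, (perplexity, min_encodings, indices) = quantizer(z)

print(z_q.shape)      # torch.Size([1, 64, 8, 8]) -- quantized latents, same shape as input
print(loss.item())    # scalar codebook/commitment loss
print(indices.shape)  # flat indices of the chosen codebook entries
```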
From c928c228af428b2743ac4442ceff3118fa1dca48 Mon Sep 17 00:00:00 2001
From: Danil Boldyrev
Date: Tue, 30 May 2023 16:35:52 +0300
Subject: [PATCH 088/168] a small fix for very wide images: the scroll bar
 was causing the wrong zoom
---
extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
index 4bbec34f7..f555960d2 100644
--- a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
+++ b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
@@ -261,10 +261,13 @@ onUiLoaded(async() => {
//Reset Zoom
targetElement.style.transform = `translate(${0}px, ${0}px) scale(${1})`;
+ // Get scrollbar width to right-align the image
+ const scrollbarWidth = window.innerWidth - document.documentElement.clientWidth;
+
// Get element and screen dimensions
const elementWidth = targetElement.offsetWidth;
const elementHeight = targetElement.offsetHeight;
- const screenWidth = window.innerWidth;
+ const screenWidth = window.innerWidth - scrollbarWidth;
const screenHeight = window.innerHeight;
// Get element's coordinates relative to the page
From f81931c591d4513420a0998bbf1591e35a92d14e Mon Sep 17 00:00:00 2001
From: Aarni Koskela
Date: Tue, 30 May 2023 17:50:09 +0300
Subject: [PATCH 089/168] Frontend: only look at top-level tabs, not nested
tabs
Refs https://github.com/adieyal/sd-dynamic-prompts/issues/459#issuecomment-1568543926
---
script.js | 10 ++++++++--
1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/script.js b/script.js
index de9d7e22a..34cca7651 100644
--- a/script.js
+++ b/script.js
@@ -10,12 +10,18 @@ function gradioApp() {
return elem.shadowRoot ? elem.shadowRoot : elem;
}
+/**
+ * Get the currently selected top-level UI tab button (e.g. the button that says "Extras").
+ */
function get_uiCurrentTab() {
- return gradioApp().querySelector('#tabs button.selected');
+ return gradioApp().querySelector('#tabs > .tab-nav > button.selected');
}
+/**
+ * Get the first currently visible top-level UI tab content (e.g. the div hosting the "txt2img" UI).
+ */
function get_uiCurrentTabContent() {
- return gradioApp().querySelector('.tabitem[id^=tab_]:not([style*="display: none"])');
+ return gradioApp().querySelector('#tabs > .tabitem[id^=tab_]:not([style*="display: none"])');
}
var uiUpdateCallbacks = [];
From fb1cb6d3642036609e124744d91f58f2deb9b570 Mon Sep 17 00:00:00 2001
From: Will Frey
Date: Tue, 30 May 2023 22:05:12 -0400
Subject: [PATCH 090/168] Fix typo in `--update-check` help message
Change `chck` to `check`
---
modules/cmd_args.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/cmd_args.py b/modules/cmd_args.py
index 3eeb84d53..0974056d7 100644
--- a/modules/cmd_args.py
+++ b/modules/cmd_args.py
@@ -11,7 +11,7 @@ parser.add_argument("--skip-python-version-check", action='store_true', help="la
parser.add_argument("--skip-torch-cuda-test", action='store_true', help="launch.py argument: do not check if CUDA is able to work properly")
parser.add_argument("--reinstall-xformers", action='store_true', help="launch.py argument: install the appropriate version of xformers even if you have some version already installed")
parser.add_argument("--reinstall-torch", action='store_true', help="launch.py argument: install the appropriate version of torch even if you have some version already installed")
-parser.add_argument("--update-check", action='store_true', help="launch.py argument: chck for updates at startup")
+parser.add_argument("--update-check", action='store_true', help="launch.py argument: check for updates at startup")
parser.add_argument("--test-server", action='store_true', help="launch.py argument: configure server for testing")
parser.add_argument("--skip-prepare-environment", action='store_true', help="launch.py argument: skip all environment preparation")
parser.add_argument("--skip-install", action='store_true', help="launch.py argument: skip installation of packages")
From 05933840f0676dd1a90a7e2ad3f2a0672624b2cd Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Wed, 31 May 2023 19:56:37 +0300
Subject: [PATCH 091/168] rename print_error to report, use it together
 with the package name
---
extensions-builtin/LDSR/scripts/ldsr_model.py | 5 ++--
.../ScuNET/scripts/scunet_model.py | 5 ++--
modules/api/api.py | 5 ++--
modules/call_queue.py | 5 ++--
modules/codeformer_model.py | 7 +++---
modules/config_states.py | 9 ++++----
modules/errors.py | 8 ++-----
modules/extensions.py | 7 +++---
modules/gfpgan_model.py | 5 ++--
modules/hypernetworks/hypernetwork.py | 7 +++---
modules/images.py | 5 ++--
modules/interrogate.py | 3 +--
modules/launch_utils.py | 7 +++---
modules/localization.py | 4 ++--
modules/realesrgan_model.py | 10 ++++----
modules/safe.py | 7 +++---
modules/script_callbacks.py | 4 ++--
modules/script_loading.py | 4 ++--
modules/scripts.py | 23 +++++++++----------
modules/sd_hijack_optimizations.py | 3 +--
.../textual_inversion/textual_inversion.py | 7 +++---
modules/ui.py | 7 +++---
modules/ui_extensions.py | 7 +++---
scripts/prompts_from_file.py | 5 ++--
24 files changed, 69 insertions(+), 90 deletions(-)
diff --git a/extensions-builtin/LDSR/scripts/ldsr_model.py b/extensions-builtin/LDSR/scripts/ldsr_model.py
index 95f1669d1..dbd6d331d 100644
--- a/extensions-builtin/LDSR/scripts/ldsr_model.py
+++ b/extensions-builtin/LDSR/scripts/ldsr_model.py
@@ -2,10 +2,9 @@ import os
from basicsr.utils.download_util import load_file_from_url
-from modules.errors import print_error
from modules.upscaler import Upscaler, UpscalerData
from ldsr_model_arch import LDSR
-from modules import shared, script_callbacks
+from modules import shared, script_callbacks, errors
import sd_hijack_autoencoder # noqa: F401
import sd_hijack_ddpm_v1 # noqa: F401
@@ -51,7 +50,7 @@ class UpscalerLDSR(Upscaler):
try:
return LDSR(model, yaml)
except Exception:
- print_error("Error importing LDSR", exc_info=True)
+ errors.report("Error importing LDSR", exc_info=True)
return None
def do_upscale(self, img, path):
diff --git a/extensions-builtin/ScuNET/scripts/scunet_model.py b/extensions-builtin/ScuNET/scripts/scunet_model.py
index dd1b822ed..85b4505f6 100644
--- a/extensions-builtin/ScuNET/scripts/scunet_model.py
+++ b/extensions-builtin/ScuNET/scripts/scunet_model.py
@@ -9,10 +9,9 @@ from tqdm import tqdm
from basicsr.utils.download_util import load_file_from_url
import modules.upscaler
-from modules import devices, modelloader, script_callbacks
+from modules import devices, modelloader, script_callbacks, errors
from scunet_model_arch import SCUNet as net
-from modules.errors import print_error
from modules.shared import opts
@@ -39,7 +38,7 @@ class UpscalerScuNET(modules.upscaler.Upscaler):
scaler_data = modules.upscaler.UpscalerData(name, file, self, 4)
scalers.append(scaler_data)
except Exception:
- print_error(f"Error loading ScuNET model: {file}", exc_info=True)
+ errors.report(f"Error loading ScuNET model: {file}", exc_info=True)
if add_model2:
scaler_data2 = modules.upscaler.UpscalerData(self.model_name2, self.model_url2, self)
scalers.append(scaler_data2)
diff --git a/modules/api/api.py b/modules/api/api.py
index fbd616a3d..d34ab422c 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -14,9 +14,8 @@ from fastapi.encoders import jsonable_encoder
from secrets import compare_digest
import modules.shared as shared
-from modules import sd_samplers, deepbooru, sd_hijack, images, scripts, ui, postprocessing
+from modules import sd_samplers, deepbooru, sd_hijack, images, scripts, ui, postprocessing, errors
from modules.api import models
-from modules.errors import print_error
from modules.shared import opts
from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images
from modules.textual_inversion.textual_inversion import create_embedding, train_embedding
@@ -145,7 +144,7 @@ def api_middleware(app: FastAPI):
print(message)
console.print_exception(show_locals=True, max_frames=2, extra_lines=1, suppress=[anyio, starlette], word_wrap=False, width=min([console.width, 200]))
else:
- print_error(message, exc_info=True)
+ errors.report(message, exc_info=True)
return JSONResponse(status_code=vars(e).get('status_code', 500), content=jsonable_encoder(err))
@app.middleware("http")
diff --git a/modules/call_queue.py b/modules/call_queue.py
index dba2a9b4d..53af6d703 100644
--- a/modules/call_queue.py
+++ b/modules/call_queue.py
@@ -2,8 +2,7 @@ import html
import threading
import time
-from modules import shared, progress
-from modules.errors import print_error
+from modules import shared, progress, errors
queue_lock = threading.Lock()
@@ -62,7 +61,7 @@ def wrap_gradio_call(func, extra_outputs=None, add_stats=False):
arg_str = f"Arguments: {args} {kwargs}"[:max_debug_str_len]
if len(arg_str) > max_debug_str_len:
arg_str += f" (Argument list truncated at {max_debug_str_len}/{len(arg_str)} characters)"
- print_error(f"{message}\n{arg_str}", exc_info=True)
+ errors.report(f"{message}\n{arg_str}", exc_info=True)
shared.state.job = ""
shared.state.job_count = 0
diff --git a/modules/codeformer_model.py b/modules/codeformer_model.py
index 76143e9f2..4260b016c 100644
--- a/modules/codeformer_model.py
+++ b/modules/codeformer_model.py
@@ -5,8 +5,7 @@ import torch
import modules.face_restoration
import modules.shared
-from modules import shared, devices, modelloader
-from modules.errors import print_error
+from modules import shared, devices, modelloader, errors
from modules.paths import models_path
# codeformer people made a choice to include modified basicsr library to their project which makes
@@ -105,7 +104,7 @@ def setup_model(dirname):
del output
torch.cuda.empty_cache()
except Exception:
- print_error('Failed inference for CodeFormer', exc_info=True)
+ errors.report('Failed inference for CodeFormer', exc_info=True)
restored_face = tensor2img(cropped_face_t, rgb2bgr=True, min_max=(-1, 1))
restored_face = restored_face.astype('uint8')
@@ -134,6 +133,6 @@ def setup_model(dirname):
shared.face_restorers.append(codeformer)
except Exception:
- print_error("Error setting up CodeFormer", exc_info=True)
+ errors.report("Error setting up CodeFormer", exc_info=True)
# sys.path = stored_sys_path
diff --git a/modules/config_states.py b/modules/config_states.py
index faeaf28bd..6f1ab53fc 100644
--- a/modules/config_states.py
+++ b/modules/config_states.py
@@ -11,8 +11,7 @@ from datetime import datetime
from collections import OrderedDict
import git
-from modules import shared, extensions
-from modules.errors import print_error
+from modules import shared, extensions, errors
from modules.paths_internal import script_path, config_states_dir
@@ -52,7 +51,7 @@ def get_webui_config():
if os.path.exists(os.path.join(script_path, ".git")):
webui_repo = git.Repo(script_path)
except Exception:
- print_error(f"Error reading webui git info from {script_path}", exc_info=True)
+ errors.report(f"Error reading webui git info from {script_path}", exc_info=True)
webui_remote = None
webui_commit_hash = None
@@ -132,7 +131,7 @@ def restore_webui_config(config):
if os.path.exists(os.path.join(script_path, ".git")):
webui_repo = git.Repo(script_path)
except Exception:
- print_error(f"Error reading webui git info from {script_path}", exc_info=True)
+ errors.report(f"Error reading webui git info from {script_path}", exc_info=True)
return
try:
@@ -140,7 +139,7 @@ def restore_webui_config(config):
webui_repo.git.reset(webui_commit_hash, hard=True)
print(f"* Restored webui to commit {webui_commit_hash}.")
except Exception:
- print_error(f"Error restoring webui to commit{webui_commit_hash}")
+ errors.report(f"Error restoring webui to commit{webui_commit_hash}")
def restore_extension_config(config):
diff --git a/modules/errors.py b/modules/errors.py
index 41d8dc933..e408f5008 100644
--- a/modules/errors.py
+++ b/modules/errors.py
@@ -3,11 +3,7 @@ import textwrap
import traceback
-def print_error(
- message: str,
- *,
- exc_info: bool = False,
-) -> None:
+def report(message: str, *, exc_info: bool = False) -> None:
"""
Print an error message to stderr, with optional traceback.
"""
@@ -15,7 +11,7 @@ def print_error(
print("***", line, file=sys.stderr)
if exc_info:
print(textwrap.indent(traceback.format_exc(), " "), file=sys.stderr)
- print("---")
+ print("---", file=sys.stderr)
def print_error_explanation(message):
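A standalone sketch of the reporting behaviour after this change (the loop over message lines is reconstructed from the hunk context): every message line is prefixed with `***`, the traceback is indented, and the trailing `---` separator now goes to stderr along with the rest of the report instead of stdout.

```python
import sys
import textwrap
import traceback


def report(message: str, *, exc_info: bool = False) -> None:
    for line in message.splitlines():
        print("***", line, file=sys.stderr)          # prefix each message line
    if exc_info:
        print(textwrap.indent(traceback.format_exc(), "    "), file=sys.stderr)
    print("---", file=sys.stderr)                    # previously went to stdout


try:
    1 / 0
except ZeroDivisionError:
    report("Error dividing by zero", exc_info=True)
```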
diff --git a/modules/extensions.py b/modules/extensions.py
index 92f93ad99..8608584b1 100644
--- a/modules/extensions.py
+++ b/modules/extensions.py
@@ -1,8 +1,7 @@
import os
import threading
-from modules import shared
-from modules.errors import print_error
+from modules import shared, errors
from modules.gitpython_hack import Repo
from modules.paths_internal import extensions_dir, extensions_builtin_dir, script_path # noqa: F401
@@ -54,7 +53,7 @@ class Extension:
if os.path.exists(os.path.join(self.path, ".git")):
repo = Repo(self.path)
except Exception:
- print_error(f"Error reading github repository info from {self.path}", exc_info=True)
+ errors.report(f"Error reading github repository info from {self.path}", exc_info=True)
if repo is None or repo.bare:
self.remote = None
@@ -70,7 +69,7 @@ class Extension:
self.version = self.commit_hash[:8]
except Exception:
- print_error(f"Failed reading extension data from Git repository ({self.name})", exc_info=True)
+ errors.report(f"Failed reading extension data from Git repository ({self.name})", exc_info=True)
self.remote = None
self.have_info_from_repo = True
diff --git a/modules/gfpgan_model.py b/modules/gfpgan_model.py
index d2f647fe3..e239a09d6 100644
--- a/modules/gfpgan_model.py
+++ b/modules/gfpgan_model.py
@@ -4,8 +4,7 @@ import facexlib
import gfpgan
import modules.face_restoration
-from modules import paths, shared, devices, modelloader
-from modules.errors import print_error
+from modules import paths, shared, devices, modelloader, errors
model_dir = "GFPGAN"
user_path = None
@@ -111,4 +110,4 @@ def setup_model(dirname):
shared.face_restorers.append(FaceRestorerGFPGAN())
except Exception:
- print_error("Error setting up GFPGAN", exc_info=True)
+ errors.report("Error setting up GFPGAN", exc_info=True)
diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index fcc1ef209..5d12b4490 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -9,8 +9,7 @@ import torch
import tqdm
from einops import rearrange, repeat
from ldm.util import default
-from modules import devices, processing, sd_models, shared, sd_samplers, hashes, sd_hijack_checkpoint
-from modules.errors import print_error
+from modules import devices, processing, sd_models, shared, sd_samplers, hashes, sd_hijack_checkpoint, errors
from modules.textual_inversion import textual_inversion, logging
from modules.textual_inversion.learn_schedule import LearnRateScheduler
from torch import einsum
@@ -329,7 +328,7 @@ def load_hypernetwork(name):
hypernetwork.load(path)
return hypernetwork
except Exception:
- print_error(f"Error loading hypernetwork {path}", exc_info=True)
+ errors.report(f"Error loading hypernetwork {path}", exc_info=True)
return None
@@ -766,7 +765,7 @@ Last saved image: {html.escape(last_saved_image)}
"""
except Exception:
- print_error("Exception in training hypernetwork", exc_info=True)
+ errors.report("Exception in training hypernetwork", exc_info=True)
finally:
pbar.leave = False
pbar.close()
diff --git a/modules/images.py b/modules/images.py
index 09f728df7..30e9ffc5a 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -16,7 +16,6 @@ import json
import hashlib
from modules import sd_samplers, shared, script_callbacks, errors
-from modules.errors import print_error
from modules.paths_internal import roboto_ttf_file
from modules.shared import opts
@@ -463,7 +462,7 @@ class FilenameGenerator:
replacement = fun(self, *pattern_args)
except Exception:
replacement = None
- print_error(f"Error adding [{pattern}] to filename", exc_info=True)
+ errors.report(f"Error adding [{pattern}] to filename", exc_info=True)
if replacement == NOTHING_AND_SKIP_PREVIOUS_TEXT:
continue
@@ -698,7 +697,7 @@ def read_info_from_image(image):
Negative prompt: {json_info["uc"]}
Steps: {json_info["steps"]}, Sampler: {sampler}, CFG scale: {json_info["scale"]}, Seed: {json_info["seed"]}, Size: {image.width}x{image.height}, Clip skip: 2, ENSD: 31337"""
except Exception:
- print_error("Error parsing NovelAI image generation parameters", exc_info=True)
+ errors.report("Error parsing NovelAI image generation parameters", exc_info=True)
return geninfo, items
diff --git a/modules/interrogate.py b/modules/interrogate.py
index d36e1a5ab..9b2c5b60e 100644
--- a/modules/interrogate.py
+++ b/modules/interrogate.py
@@ -11,7 +11,6 @@ from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from modules import devices, paths, shared, lowvram, modelloader, errors
-from modules.errors import print_error
blip_image_eval_size = 384
clip_model_name = 'ViT-L/14'
@@ -216,7 +215,7 @@ class InterrogateModels:
res += f", {match}"
except Exception:
- print_error("Error interrogating", exc_info=True)
+ errors.report("Error interrogating", exc_info=True)
res += ""
self.unload()
diff --git a/modules/launch_utils.py b/modules/launch_utils.py
index 0bf4cb7eb..6e9bb770a 100644
--- a/modules/launch_utils.py
+++ b/modules/launch_utils.py
@@ -7,8 +7,7 @@ import platform
import json
from functools import lru_cache
-from modules import cmd_args
-from modules.errors import print_error
+from modules import cmd_args, errors
from modules.paths_internal import script_path, extensions_dir
args, _ = cmd_args.parser.parse_known_args()
@@ -189,7 +188,7 @@ def run_extension_installer(extension_dir):
print(run(f'"{python}" "{path_installer}"', errdesc=f"Error running install.py for extension {extension_dir}", custom_env=env))
except Exception as e:
- print_error(str(e))
+ errors.report(str(e))
def list_extensions(settings_file):
@@ -200,7 +199,7 @@ def list_extensions(settings_file):
with open(settings_file, "r", encoding="utf8") as file:
settings = json.load(file)
except Exception:
- print_error("Could not load settings", exc_info=True)
+ errors.report("Could not load settings", exc_info=True)
disabled_extensions = set(settings.get('disabled_extensions', []))
disable_all_extensions = settings.get('disable_all_extensions', 'none')
diff --git a/modules/localization.py b/modules/localization.py
index 9a1df343b..e8f585dab 100644
--- a/modules/localization.py
+++ b/modules/localization.py
@@ -1,7 +1,7 @@
import json
import os
-from modules.errors import print_error
+from modules import errors
localizations = {}
@@ -30,6 +30,6 @@ def localization_js(current_localization_name: str) -> str:
with open(fn, "r", encoding="utf8") as file:
data = json.load(file)
except Exception:
- print_error(f"Error loading localization from {fn}", exc_info=True)
+ errors.report(f"Error loading localization from {fn}", exc_info=True)
return f"window.localization = {json.dumps(data)}"
diff --git a/modules/realesrgan_model.py b/modules/realesrgan_model.py
index c8d0c64f7..2d27b321c 100644
--- a/modules/realesrgan_model.py
+++ b/modules/realesrgan_model.py
@@ -5,10 +5,10 @@ from PIL import Image
from basicsr.utils.download_util import load_file_from_url
from realesrgan import RealESRGANer
-from modules.errors import print_error
from modules.upscaler import Upscaler, UpscalerData
from modules.shared import cmd_opts, opts
-from modules import modelloader
+from modules import modelloader, errors
+
class UpscalerRealESRGAN(Upscaler):
def __init__(self, path):
@@ -35,7 +35,7 @@ class UpscalerRealESRGAN(Upscaler):
self.scalers.append(scaler)
except Exception:
- print_error("Error importing Real-ESRGAN", exc_info=True)
+ errors.report("Error importing Real-ESRGAN", exc_info=True)
self.enable = False
self.scalers = []
@@ -75,7 +75,7 @@ class UpscalerRealESRGAN(Upscaler):
return info
except Exception:
- print_error("Error making Real-ESRGAN models list", exc_info=True)
+ errors.report("Error making Real-ESRGAN models list", exc_info=True)
return None
def load_models(self, _):
@@ -132,4 +132,4 @@ def get_realesrgan_models(scaler):
]
return models
except Exception:
- print_error("Error making Real-ESRGAN models list", exc_info=True)
+ errors.report("Error making Real-ESRGAN models list", exc_info=True)
diff --git a/modules/safe.py b/modules/safe.py
index b596f5658..b1d08a792 100644
--- a/modules/safe.py
+++ b/modules/safe.py
@@ -9,9 +9,10 @@ import _codecs
import zipfile
import re
-from modules.errors import print_error
# PyTorch 1.13 and later have _TypedStorage renamed to TypedStorage
+from modules import errors
+
TypedStorage = torch.storage.TypedStorage if hasattr(torch.storage, 'TypedStorage') else torch.storage._TypedStorage
def encode(*args):
@@ -136,7 +137,7 @@ def load_with_extra(filename, extra_handler=None, *args, **kwargs):
check_pt(filename, extra_handler)
except pickle.UnpicklingError:
- print_error(
+ errors.report(
f"Error verifying pickled file from {filename}\n"
"-----> !!!! The file is most likely corrupted !!!! <-----\n"
"You can skip this check with --disable-safe-unpickle commandline argument, but that is not going to help you.\n\n",
@@ -144,7 +145,7 @@ def load_with_extra(filename, extra_handler=None, *args, **kwargs):
)
return None
except Exception:
- print_error(
+ errors.report(
f"Error verifying pickled file from {filename}\n"
f"The file may be malicious, so the program is not going to read it.\n"
f"You can skip this check with --disable-safe-unpickle commandline argument.\n\n",
diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py
index 6aa9c3b63..ec1469d0d 100644
--- a/modules/script_callbacks.py
+++ b/modules/script_callbacks.py
@@ -5,11 +5,11 @@ from typing import Optional, Dict, Any
from fastapi import FastAPI
from gradio import Blocks
-from modules.errors import print_error
+from modules import errors
def report_exception(c, job):
- print_error(f"Error executing callback {job} for {c.script}", exc_info=True)
+ errors.report(f"Error executing callback {job} for {c.script}", exc_info=True)
class ImageSaveParams:
diff --git a/modules/script_loading.py b/modules/script_loading.py
index 26efffcb3..306a1f35f 100644
--- a/modules/script_loading.py
+++ b/modules/script_loading.py
@@ -1,7 +1,7 @@
import os
import importlib.util
-from modules.errors import print_error
+from modules import errors
def load_module(path):
@@ -27,4 +27,4 @@ def preload_extensions(extensions_dir, parser):
module.preload(parser)
except Exception:
- print_error(f"Error running preload() for {preload_script}", exc_info=True)
+ errors.report(f"Error running preload() for {preload_script}", exc_info=True)
diff --git a/modules/scripts.py b/modules/scripts.py
index a7168fd12..0970f38e0 100644
--- a/modules/scripts.py
+++ b/modules/scripts.py
@@ -5,8 +5,7 @@ from collections import namedtuple
import gradio as gr
-from modules import shared, paths, script_callbacks, extensions, script_loading, scripts_postprocessing
-from modules.errors import print_error
+from modules import shared, paths, script_callbacks, extensions, script_loading, scripts_postprocessing, errors
AlwaysVisible = object()
@@ -264,7 +263,7 @@ def load_scripts():
register_scripts_from_module(script_module)
except Exception:
- print_error(f"Error loading script: {scriptfile.filename}", exc_info=True)
+ errors.report(f"Error loading script: {scriptfile.filename}", exc_info=True)
finally:
sys.path = syspath
@@ -281,7 +280,7 @@ def wrap_call(func, filename, funcname, *args, default=None, **kwargs):
try:
return func(*args, **kwargs)
except Exception:
- print_error(f"Error calling: {filename}/{funcname}", exc_info=True)
+ errors.report(f"Error calling: {filename}/{funcname}", exc_info=True)
return default
@@ -447,7 +446,7 @@ class ScriptRunner:
script_args = p.script_args[script.args_from:script.args_to]
script.process(p, *script_args)
except Exception:
- print_error(f"Error running process: {script.filename}", exc_info=True)
+ errors.report(f"Error running process: {script.filename}", exc_info=True)
def before_process_batch(self, p, **kwargs):
for script in self.alwayson_scripts:
@@ -455,7 +454,7 @@ class ScriptRunner:
script_args = p.script_args[script.args_from:script.args_to]
script.before_process_batch(p, *script_args, **kwargs)
except Exception:
- print_error(f"Error running before_process_batch: {script.filename}", exc_info=True)
+ errors.report(f"Error running before_process_batch: {script.filename}", exc_info=True)
def process_batch(self, p, **kwargs):
for script in self.alwayson_scripts:
@@ -463,7 +462,7 @@ class ScriptRunner:
script_args = p.script_args[script.args_from:script.args_to]
script.process_batch(p, *script_args, **kwargs)
except Exception:
- print_error(f"Error running process_batch: {script.filename}", exc_info=True)
+ errors.report(f"Error running process_batch: {script.filename}", exc_info=True)
def postprocess(self, p, processed):
for script in self.alwayson_scripts:
@@ -471,7 +470,7 @@ class ScriptRunner:
script_args = p.script_args[script.args_from:script.args_to]
script.postprocess(p, processed, *script_args)
except Exception:
- print_error(f"Error running postprocess: {script.filename}", exc_info=True)
+ errors.report(f"Error running postprocess: {script.filename}", exc_info=True)
def postprocess_batch(self, p, images, **kwargs):
for script in self.alwayson_scripts:
@@ -479,7 +478,7 @@ class ScriptRunner:
script_args = p.script_args[script.args_from:script.args_to]
script.postprocess_batch(p, *script_args, images=images, **kwargs)
except Exception:
- print_error(f"Error running postprocess_batch: {script.filename}", exc_info=True)
+ errors.report(f"Error running postprocess_batch: {script.filename}", exc_info=True)
def postprocess_image(self, p, pp: PostprocessImageArgs):
for script in self.alwayson_scripts:
@@ -487,21 +486,21 @@ class ScriptRunner:
script_args = p.script_args[script.args_from:script.args_to]
script.postprocess_image(p, pp, *script_args)
except Exception:
- print_error(f"Error running postprocess_image: {script.filename}", exc_info=True)
+ errors.report(f"Error running postprocess_image: {script.filename}", exc_info=True)
def before_component(self, component, **kwargs):
for script in self.scripts:
try:
script.before_component(component, **kwargs)
except Exception:
- print_error(f"Error running before_component: {script.filename}", exc_info=True)
+ errors.report(f"Error running before_component: {script.filename}", exc_info=True)
def after_component(self, component, **kwargs):
for script in self.scripts:
try:
script.after_component(component, **kwargs)
except Exception:
- print_error(f"Error running after_component: {script.filename}", exc_info=True)
+ errors.report(f"Error running after_component: {script.filename}", exc_info=True)
def reload_sources(self, cache):
for si, script in list(enumerate(self.scripts)):
diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py
index fd186fa26..5f0ff5132 100644
--- a/modules/sd_hijack_optimizations.py
+++ b/modules/sd_hijack_optimizations.py
@@ -9,7 +9,6 @@ from ldm.util import default
from einops import rearrange
from modules import shared, errors, devices, sub_quadratic_attention
-from modules.errors import print_error
from modules.hypernetworks import hypernetwork
import ldm.modules.attention
@@ -139,7 +138,7 @@ if shared.cmd_opts.xformers or shared.cmd_opts.force_enable_xformers:
import xformers.ops
shared.xformers_available = True
except Exception:
- print_error("Cannot import xformers", exc_info=True)
+ errors.report("Cannot import xformers", exc_info=True)
def get_available_vram():
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index b3dcb1406..8da050cab 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -12,9 +12,8 @@ import numpy as np
from PIL import Image, PngImagePlugin
from torch.utils.tensorboard import SummaryWriter
-from modules import shared, devices, sd_hijack, processing, sd_models, images, sd_samplers, sd_hijack_checkpoint
+from modules import shared, devices, sd_hijack, processing, sd_models, images, sd_samplers, sd_hijack_checkpoint, errors
import modules.textual_inversion.dataset
-from modules.errors import print_error
from modules.textual_inversion.learn_schedule import LearnRateScheduler
from modules.textual_inversion.image_embedding import embedding_to_b64, embedding_from_b64, insert_image_data_embed, extract_image_data_embed, caption_image_overlay
@@ -219,7 +218,7 @@ class EmbeddingDatabase:
self.load_from_file(fullfn, fn)
except Exception:
- print_error(f"Error loading embedding {fn}", exc_info=True)
+ errors.report(f"Error loading embedding {fn}", exc_info=True)
continue
def load_textual_inversion_embeddings(self, force_reload=False):
@@ -643,7 +642,7 @@ Last saved image: {html.escape(last_saved_image)}
filename = os.path.join(shared.cmd_opts.embeddings_dir, f'{embedding_name}.pt')
save_embedding(embedding, optimizer, checkpoint, embedding_name, filename, remove_cached_checksum=True)
except Exception:
- print_error("Error training embedding", exc_info=True)
+ errors.report("Error training embedding", exc_info=True)
finally:
pbar.leave = False
pbar.close()
diff --git a/modules/ui.py b/modules/ui.py
index fb6b2498d..f361264ce 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -12,8 +12,7 @@ import numpy as np
from PIL import Image, PngImagePlugin # noqa: F401
from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call
-from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave
-from modules.errors import print_error
+from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors
from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML
from modules.paths import script_path, data_path
@@ -232,7 +231,7 @@ def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info:
except json.decoder.JSONDecodeError:
if gen_info_string:
- print_error(f"Error parsing JSON generation info: {gen_info_string}")
+ errors.report(f"Error parsing JSON generation info: {gen_info_string}")
return [res, gr_show(False)]
@@ -1752,7 +1751,7 @@ def create_ui():
try:
results = modules.extras.run_modelmerger(*args)
except Exception as e:
- print_error("Error loading/saving model file", exc_info=True)
+ errors.report("Error loading/saving model file", exc_info=True)
modules.sd_models.list_models() # to remove the potentially missing models from the list
return [*[gr.Dropdown.update(choices=modules.sd_models.checkpoint_tiles()) for _ in range(4)], f"Error merging checkpoints: {e}"]
return results
diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py
index e2ee9d72b..3140ed649 100644
--- a/modules/ui_extensions.py
+++ b/modules/ui_extensions.py
@@ -11,8 +11,7 @@ import html
import shutil
import errno
-from modules import extensions, shared, paths, config_states
-from modules.errors import print_error
+from modules import extensions, shared, paths, config_states, errors
from modules.paths_internal import config_states_dir
from modules.call_queue import wrap_gradio_gpu_call
@@ -45,7 +44,7 @@ def apply_and_restart(disable_list, update_list, disable_all):
try:
ext.fetch_and_reset_hard()
except Exception:
- print_error(f"Error getting updates for {ext.name}", exc_info=True)
+ errors.report(f"Error getting updates for {ext.name}", exc_info=True)
shared.opts.disabled_extensions = disabled
shared.opts.disable_all_extensions = disable_all
@@ -111,7 +110,7 @@ def check_updates(id_task, disable_list):
if 'FETCH_HEAD' not in str(e):
raise
except Exception:
- print_error(f"Error checking updates for {ext.name}", exc_info=True)
+ errors.report(f"Error checking updates for {ext.name}", exc_info=True)
shared.state.nextjob()
diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py
index 4dc24615a..83a2f2204 100644
--- a/scripts/prompts_from_file.py
+++ b/scripts/prompts_from_file.py
@@ -5,8 +5,7 @@ import shlex
import modules.scripts as scripts
import gradio as gr
-from modules import sd_samplers
-from modules.errors import print_error
+from modules import sd_samplers, errors
from modules.processing import Processed, process_images
from modules.shared import state
@@ -135,7 +134,7 @@ class Script(scripts.Script):
try:
args = cmdargs(line)
except Exception:
- print_error(f"Error parsing line {line} as commandline", exc_info=True)
+ errors.report(f"Error parsing line {line} as commandline", exc_info=True)
args = {"prompt": line}
else:
args = {"prompt": line}
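The hunks above are a mechanical refactor: each module now imports the errors module itself rather than the print_error helper, and calls errors.report(message, exc_info=True) at the old call sites. A minimal sketch of the new convention, assuming only the call pattern visible in these hunks (load_config_file and its body are illustrative, not part of the patch):

    from modules import errors

    def load_config_file(path):
        try:
            with open(path, "r", encoding="utf8") as file:
                return file.read()
        except Exception:
            # message first, traceback attached via exc_info, matching the replacements above
            errors.report(f"Error loading config from {path}", exc_info=True)
            return None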
From 583fb9f066e1f9aa95a404a9d6446c138d6f6167 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Wed, 31 May 2023 20:31:17 +0300
Subject: [PATCH 092/168] change UI reorder setting to multiselect
---
modules/shared.py | 19 +++++--------------
modules/shared_items.py | 17 +++++++++++++++++
modules/ui.py | 6 +++---
3 files changed, 25 insertions(+), 17 deletions(-)
diff --git a/modules/shared.py b/modules/shared.py
index acec7f185..3c7ae6541 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -43,19 +43,6 @@ restricted_opts = {
"outdir_init_images"
}
-ui_reorder_categories = [
- "inpaint",
- "sampler",
- "checkboxes",
- "hires_fix",
- "dimensions",
- "cfg",
- "seed",
- "batch",
- "override_settings",
- "scripts",
-]
-
# https://huggingface.co/datasets/freddyaboulton/gradio-theme-subdomains/resolve/main/subdomains.json
gradio_hf_hub_themes = [
"gradio/glass",
@@ -487,7 +474,7 @@ options_templates.update(options_section(('ui', "User interface"), {
"quicksettings_list": OptionInfo(["sd_model_checkpoint"], "Quicksettings list", ui_components.DropdownMulti, lambda: {"choices": list(opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that appear at the top of page rather than in settings tab").needs_restart(),
"ui_tab_order": OptionInfo([], "UI tab order", ui_components.DropdownMulti, lambda: {"choices": list(tab_names)}).needs_restart(),
"hidden_tabs": OptionInfo([], "Hidden UI tabs", ui_components.DropdownMulti, lambda: {"choices": list(tab_names)}).needs_restart(),
- "ui_reorder": OptionInfo(", ".join(ui_reorder_categories), "txt2img/img2img UI item order").needs_restart(),
+ "ui_reorder": OptionInfo([], "txt2img/img2img UI item order", ui_components.DropdownMulti, lambda: {"choices": list(shared_items.ui_reorder_categories())}).info("selected items appear first").needs_restart(),
"hires_fix_show_sampler": OptionInfo(False, "Hires fix: show hires sampler selection").needs_restart(),
"hires_fix_show_prompts": OptionInfo(False, "Hires fix: show hires prompt and negative prompt").needs_restart(),
"disable_token_counters": OptionInfo(False, "Disable prompt token counters").needs_restart(),
@@ -638,6 +625,10 @@ class Options:
if self.data.get('quicksettings') is not None and self.data.get('quicksettings_list') is None:
self.data['quicksettings_list'] = [i.strip() for i in self.data.get('quicksettings').split(',')]
+ # 1.4.0 ui_reorder
+ if isinstance(self.data.get('ui_reorder'), str):
+ self.data['ui_reorder'] = [i.strip() for i in self.data.get('ui_reorder').split(',')]
+
bad_settings = 0
for k, v in self.data.items():
info = self.data_labels.get(k, None)
diff --git a/modules/shared_items.py b/modules/shared_items.py
index 7f306a06f..27bceb181 100644
--- a/modules/shared_items.py
+++ b/modules/shared_items.py
@@ -40,3 +40,20 @@ def refresh_unet_list():
modules.sd_unet.list_unets()
+
+ui_reorder_categories_builtin_items = [
+ "inpaint",
+ "sampler",
+ "checkboxes",
+ "hires_fix",
+ "dimensions",
+ "cfg",
+ "seed",
+ "batch",
+ "override_settings",
+]
+
+
+def ui_reorder_categories():
+ yield from ui_reorder_categories_builtin_items
+ yield "scripts"
diff --git a/modules/ui.py b/modules/ui.py
index f361264ce..35563669d 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -12,7 +12,7 @@ import numpy as np
from PIL import Image, PngImagePlugin # noqa: F401
from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call
-from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors
+from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors, shared_items
from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML
from modules.paths import script_path, data_path
@@ -403,9 +403,9 @@ def create_sampler_and_steps_selection(choices, tabname):
def ordered_ui_categories():
- user_order = {x.strip(): i * 2 + 1 for i, x in enumerate(shared.opts.ui_reorder.split(","))}
+ user_order = {x.strip(): i * 2 + 1 for i, x in enumerate(shared.opts.ui_reorder)}
- for _, category in sorted(enumerate(shared.ui_reorder_categories), key=lambda x: user_order.get(x[1], x[0] * 2 + 0)):
+ for _, category in sorted(enumerate(shared_items.ui_reorder_categories()), key=lambda x: user_order.get(x[1], x[0] * 2 + 0)):
yield category
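Patch 092 turns ui_reorder from a comma-separated string into a DropdownMulti list: shared_items.ui_reorder_categories() now supplies the choices, the Options hunk migrates pre-1.4.0 configs on load, and ordered_ui_categories() iterates the list directly. A small sketch of that migration with an illustrative stored value:

    # pre-1.4.0 configs stored ui_reorder as a string; 1.4.0 expects a list
    old_value = "inpaint, sampler, seed"
    if isinstance(old_value, str):
        new_value = [x.strip() for x in old_value.split(',')]
    # new_value == ['inpaint', 'sampler', 'seed']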
From df02498d03e4296b7d7581aff69571a49be1d27a Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Wed, 31 May 2023 22:40:09 +0300
Subject: [PATCH 093/168] add an option to show selected setting in main
txt2img/img2img UI; split some code from ui.py into ui_settings.py and
ui_gradio_extensions.py; add before_process callback for scripts; add ability
for alwayson scripts to specify a section and let the user reorder those sections
---
.../scripts/extra_options_section.py | 48 +++
modules/processing.py | 6 +-
modules/scripts.py | 144 ++++---
modules/shared_items.py | 10 +
modules/ui.py | 351 ++----------------
modules/ui_common.py | 23 ++
modules/ui_gradio_extensions.py | 69 ++++
modules/ui_settings.py | 263 +++++++++++++
8 files changed, 526 insertions(+), 388 deletions(-)
create mode 100644 extensions-builtin/extra-options-section/scripts/extra_options_section.py
create mode 100644 modules/ui_gradio_extensions.py
create mode 100644 modules/ui_settings.py
diff --git a/extensions-builtin/extra-options-section/scripts/extra_options_section.py b/extensions-builtin/extra-options-section/scripts/extra_options_section.py
new file mode 100644
index 000000000..17f841844
--- /dev/null
+++ b/extensions-builtin/extra-options-section/scripts/extra_options_section.py
@@ -0,0 +1,48 @@
+import gradio as gr
+from modules import scripts, shared, ui_components, ui_settings
+from modules.ui_components import FormColumn
+
+
+class ExtraOptionsSection(scripts.Script):
+ section = "extra_options"
+
+ def __init__(self):
+ self.comps = None
+ self.setting_names = None
+
+ def title(self):
+ return "Extra options"
+
+ def show(self, is_img2img):
+ return scripts.AlwaysVisible
+
+ def ui(self, is_img2img):
+ self.comps = []
+ self.setting_names = []
+
+ with gr.Blocks() as interface:
+ with gr.Accordion("Options", open=False) if shared.opts.extra_options_accordion and len(shared.opts.extra_options) > 0 else gr.Group(), gr.Row():
+ for setting_name in shared.opts.extra_options:
+ with FormColumn():
+ comp = ui_settings.create_setting_component(setting_name)
+
+ self.comps.append(comp)
+ self.setting_names.append(setting_name)
+
+ def get_settings_values():
+ return [ui_settings.get_value_for_setting(key) for key in self.setting_names]
+
+ interface.load(fn=get_settings_values, inputs=[], outputs=self.comps, queue=False, show_progress=False)
+
+ return self.comps
+
+ def before_process(self, p, *args):
+ for name, value in zip(self.setting_names, args):
+ if name not in p.override_settings:
+ p.override_settings[name] = value
+
+
+shared.options_templates.update(shared.options_section(('ui', "User interface"), {
+ "extra_options": shared.OptionInfo([], "Options in main UI", ui_components.DropdownMulti, lambda: {"choices": list(shared.opts.data_labels.keys())}).js("info", "settingsHintsShowQuicksettings").info("setting entries that also appear in txt2img/img2img interfaces").needs_restart(),
+ "extra_options_accordion": shared.OptionInfo(False, "Place options in main UI into an accordion")
+}))
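The built-in extension above surfaces user-chosen settings in the txt2img/img2img tabs and, in before_process, copies their current UI values into p.override_settings so they apply to that one job only. A standalone sketch of that hand-off (the setting name, value, and plain dict stand in for the real shared.opts entry and processing object):

    setting_names = ["illustrative_setting"]   # what the user picked in "Options in main UI"
    ui_values = [7]                            # values returned by the per-setting components
    override_settings = {}                     # stands in for p.override_settings

    for name, value in zip(setting_names, ui_values):
        if name not in override_settings:      # explicit overrides already on p take priority
            override_settings[name] = value
    # override_settings == {'illustrative_setting': 7}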
diff --git a/modules/processing.py b/modules/processing.py
index f628d88bd..baa9b2782 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -588,11 +588,15 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter
def process_images(p: StableDiffusionProcessing) -> Processed:
+ if p.scripts is not None:
+ p.scripts.before_process(p)
+
stored_opts = {k: opts.data[k] for k in p.override_settings.keys()}
try:
# if no checkpoint override or the override checkpoint can't be found, remove override entry and load opts checkpoint
- if sd_models.checkpoint_alisases.get(p.override_settings.get('sd_model_checkpoint')) is None:
+ override_checkpoint = p.override_settings.get('sd_model_checkpoint')
+ if override_checkpoint is not None and sd_models.checkpoint_alisases.get(override_checkpoint) is None:
p.override_settings.pop('sd_model_checkpoint', None)
sd_models.reload_model_weights()
diff --git a/modules/scripts.py b/modules/scripts.py
index 0970f38e0..b901862dc 100644
--- a/modules/scripts.py
+++ b/modules/scripts.py
@@ -19,6 +19,9 @@ class Script:
name = None
"""script's internal name derived from title"""
+ section = None
+ """name of UI section that the script's controls will be placed into"""
+
filename = None
args_from = None
args_to = None
@@ -81,6 +84,15 @@ class Script:
pass
+ def before_process(self, p, *args):
+ """
+ This function is called very early before processing begins for AlwaysVisible scripts.
+ You can modify the processing object (p) here, inject hooks, etc.
+ args contains all values returned by components from ui()
+ """
+
+ pass
+
def process(self, p, *args):
"""
This function is called before processing begins for AlwaysVisible scripts.
@@ -293,6 +305,7 @@ class ScriptRunner:
self.titles = []
self.infotext_fields = []
self.paste_field_names = []
+ self.inputs = [None]
def initialize_scripts(self, is_img2img):
from modules import scripts_auto_postprocessing
@@ -320,69 +333,73 @@ class ScriptRunner:
self.scripts.append(script)
self.selectable_scripts.append(script)
- def setup_ui(self):
+ def create_script_ui(self, script):
import modules.api.models as api_models
+ script.args_from = len(self.inputs)
+ script.args_to = len(self.inputs)
+
+ controls = wrap_call(script.ui, script.filename, "ui", script.is_img2img)
+
+ if controls is None:
+ return
+
+ script.name = wrap_call(script.title, script.filename, "title", default=script.filename).lower()
+ api_args = []
+
+ for control in controls:
+ control.custom_script_source = os.path.basename(script.filename)
+
+ arg_info = api_models.ScriptArg(label=control.label or "")
+
+ for field in ("value", "minimum", "maximum", "step", "choices"):
+ v = getattr(control, field, None)
+ if v is not None:
+ setattr(arg_info, field, v)
+
+ api_args.append(arg_info)
+
+ script.api_info = api_models.ScriptInfo(
+ name=script.name,
+ is_img2img=script.is_img2img,
+ is_alwayson=script.alwayson,
+ args=api_args,
+ )
+
+ if script.infotext_fields is not None:
+ self.infotext_fields += script.infotext_fields
+
+ if script.paste_field_names is not None:
+ self.paste_field_names += script.paste_field_names
+
+ self.inputs += controls
+ script.args_to = len(self.inputs)
+
+ def setup_ui_for_section(self, section, scriptlist=None):
+ if scriptlist is None:
+ scriptlist = self.alwayson_scripts
+
+ for script in scriptlist:
+ if script.alwayson and script.section != section:
+ continue
+
+ with gr.Group(visible=script.alwayson) as group:
+ self.create_script_ui(script)
+
+ script.group = group
+
+ def prepare_ui(self):
+ self.inputs = [None]
+
+ def setup_ui(self):
self.titles = [wrap_call(script.title, script.filename, "title") or f"{script.filename} [error]" for script in self.selectable_scripts]
- inputs = [None]
- inputs_alwayson = [True]
-
- def create_script_ui(script, inputs, inputs_alwayson):
- script.args_from = len(inputs)
- script.args_to = len(inputs)
-
- controls = wrap_call(script.ui, script.filename, "ui", script.is_img2img)
-
- if controls is None:
- return
-
- script.name = wrap_call(script.title, script.filename, "title", default=script.filename).lower()
- api_args = []
-
- for control in controls:
- control.custom_script_source = os.path.basename(script.filename)
-
- arg_info = api_models.ScriptArg(label=control.label or "")
-
- for field in ("value", "minimum", "maximum", "step", "choices"):
- v = getattr(control, field, None)
- if v is not None:
- setattr(arg_info, field, v)
-
- api_args.append(arg_info)
-
- script.api_info = api_models.ScriptInfo(
- name=script.name,
- is_img2img=script.is_img2img,
- is_alwayson=script.alwayson,
- args=api_args,
- )
-
- if script.infotext_fields is not None:
- self.infotext_fields += script.infotext_fields
-
- if script.paste_field_names is not None:
- self.paste_field_names += script.paste_field_names
-
- inputs += controls
- inputs_alwayson += [script.alwayson for _ in controls]
- script.args_to = len(inputs)
-
- for script in self.alwayson_scripts:
- with gr.Group() as group:
- create_script_ui(script, inputs, inputs_alwayson)
-
- script.group = group
+ self.setup_ui_for_section(None)
dropdown = gr.Dropdown(label="Script", elem_id="script_list", choices=["None"] + self.titles, value="None", type="index")
- inputs[0] = dropdown
+ self.inputs[0] = dropdown
- for script in self.selectable_scripts:
- with gr.Group(visible=False) as group:
- create_script_ui(script, inputs, inputs_alwayson)
-
- script.group = group
+ self.setup_ui_for_section(None, self.selectable_scripts)
def select_script(script_index):
selected_script = self.selectable_scripts[script_index - 1] if script_index>0 else None
@@ -407,6 +424,7 @@ class ScriptRunner:
)
self.script_load_ctr = 0
+
def onload_script_visibility(params):
title = params.get('Script', None)
if title:
@@ -417,10 +435,10 @@ class ScriptRunner:
else:
return gr.update(visible=False)
- self.infotext_fields.append( (dropdown, lambda x: gr.update(value=x.get('Script', 'None'))) )
- self.infotext_fields.extend( [(script.group, onload_script_visibility) for script in self.selectable_scripts] )
+ self.infotext_fields.append((dropdown, lambda x: gr.update(value=x.get('Script', 'None'))))
+ self.infotext_fields.extend([(script.group, onload_script_visibility) for script in self.selectable_scripts])
- return inputs
+ return self.inputs
def run(self, p, *args):
script_index = args[0]
@@ -440,6 +458,14 @@ class ScriptRunner:
return processed
+ def before_process(self, p):
+ for script in self.alwayson_scripts:
+ try:
+ script_args = p.script_args[script.args_from:script.args_to]
+ script.before_process(p, *script_args)
+ except Exception:
+ errors.report(f"Error running before_process: {script.filename}", exc_info=True)
+
def process(self, p):
for script in self.alwayson_scripts:
try:
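With the Script.section attribute and before_process callback added in the scripts.py hunks above, an alwayson script can place its controls into a named, reorderable UI section and adjust the processing object before any other processing starts. A hypothetical extension sketch, using only the scripts API shown in this patch plus the existing title/show/ui methods (the slider, "Example strength", and the p.extra_generation_params attribute are assumed for illustration):

    import gradio as gr
    from modules import scripts

    class ExampleAlwaysonScript(scripts.Script):
        section = "cfg"   # controls render inside an existing reorderable section

        def title(self):
            return "Example alwayson script"

        def show(self, is_img2img):
            return scripts.AlwaysVisible

        def ui(self, is_img2img):
            strength = gr.Slider(minimum=0.0, maximum=1.0, value=0.5, label="Example strength")
            return [strength]

        def before_process(self, p, strength):
            # runs before process(); a safe place to tweak p or add override_settings entries
            p.extra_generation_params["Example strength"] = strength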
diff --git a/modules/shared_items.py b/modules/shared_items.py
index 27bceb181..89792e88a 100644
--- a/modules/shared_items.py
+++ b/modules/shared_items.py
@@ -55,5 +55,15 @@ ui_reorder_categories_builtin_items = [
def ui_reorder_categories():
+ from modules import scripts
+
yield from ui_reorder_categories_builtin_items
+
+ sections = {}
+ for script in scripts.scripts_txt2img.scripts + scripts.scripts_img2img.scripts:
+ if isinstance(script.section, str):
+ sections[script.section] = 1
+
+ yield from sections
+
yield "scripts"
diff --git a/modules/ui.py b/modules/ui.py
index 35563669d..4e0cf7763 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -6,15 +6,17 @@ from functools import reduce
import warnings
import gradio as gr
-import gradio.routes
import gradio.utils
import numpy as np
from PIL import Image, PngImagePlugin # noqa: F401
from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call
-from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors, shared_items
+from modules import sd_hijack, sd_models, script_callbacks, ui_extensions, deepbooru, sd_vae, extra_networks, ui_common, ui_postprocessing, progress, ui_loadsave, errors, shared_items, ui_settings
from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML
-from modules.paths import script_path, data_path
+from modules.paths import script_path
+from modules.ui_common import create_refresh_button
+from modules.ui_gradio_extensions import reload_javascript
+
from modules.shared import opts, cmd_opts
@@ -34,6 +36,8 @@ import modules.hypernetworks.ui
from modules.generation_parameters_copypaste import image_from_url_text
import modules.extras
+create_setting_component = ui_settings.create_setting_component
+
warnings.filterwarnings("default" if opts.show_warnings else "ignore", category=UserWarning)
# this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the browser will not show any UI
@@ -366,25 +370,6 @@ def apply_setting(key, value):
return getattr(opts, key)
-def create_refresh_button(refresh_component, refresh_method, refreshed_args, elem_id):
- def refresh():
- refresh_method()
- args = refreshed_args() if callable(refreshed_args) else refreshed_args
-
- for k, v in args.items():
- setattr(refresh_component, k, v)
-
- return gr.update(**(args or {}))
-
- refresh_button = ToolButton(value=refresh_symbol, elem_id=elem_id)
- refresh_button.click(
- fn=refresh,
- inputs=[],
- outputs=[refresh_component]
- )
- return refresh_button
-
-
def create_output_panel(tabname, outdir):
return ui_common.create_output_panel(tabname, outdir)
@@ -409,16 +394,6 @@ def ordered_ui_categories():
yield category
-def get_value_for_setting(key):
- value = getattr(opts, key)
-
- info = opts.data_labels[key]
- args = info.component_args() if callable(info.component_args) else info.component_args or {}
- args = {k: v for k, v in args.items() if k not in {'precision'}}
-
- return gr.update(value=value, **args)
-
-
def create_override_settings_dropdown(tabname, row):
dropdown = gr.Dropdown([], label="Override settings", visible=False, elem_id=f"{tabname}_override_settings", multiselect=True)
@@ -454,6 +429,8 @@ def create_ui():
with gr.Row().style(equal_height=False):
with gr.Column(variant='compact', elem_id="txt2img_settings"):
+ modules.scripts.scripts_txt2img.prepare_ui()
+
for category in ordered_ui_categories():
if category == "sampler":
steps, sampler_index = create_sampler_and_steps_selection(samplers, "txt2img")
@@ -522,6 +499,9 @@ def create_ui():
with FormGroup(elem_id="txt2img_script_container"):
custom_inputs = modules.scripts.scripts_txt2img.setup_ui()
+ else:
+ modules.scripts.scripts_txt2img.setup_ui_for_section(category)
+
hr_resolution_preview_inputs = [enable_hr, width, height, hr_scale, hr_resize_x, hr_resize_y]
for component in hr_resolution_preview_inputs:
@@ -778,6 +758,8 @@ def create_ui():
with FormRow():
resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize")
+ modules.scripts.scripts_img2img.prepare_ui()
+
for category in ordered_ui_categories():
if category == "sampler":
steps, sampler_index = create_sampler_and_steps_selection(samplers_for_img2img, "img2img")
@@ -887,6 +869,8 @@ def create_ui():
inputs=[],
outputs=[inpaint_controls, mask_alpha],
)
+ else:
+ modules.scripts.scripts_img2img.setup_ui_for_section(category)
img2img_gallery, generation_info, html_info, html_log = create_output_panel("img2img", opts.outdir_img2img_samples)
@@ -1460,195 +1444,10 @@ def create_ui():
outputs=[],
)
- def create_setting_component(key, is_quicksettings=False):
- def fun():
- return opts.data[key] if key in opts.data else opts.data_labels[key].default
-
- info = opts.data_labels[key]
- t = type(info.default)
-
- args = info.component_args() if callable(info.component_args) else info.component_args
-
- if info.component is not None:
- comp = info.component
- elif t == str:
- comp = gr.Textbox
- elif t == int:
- comp = gr.Number
- elif t == bool:
- comp = gr.Checkbox
- else:
- raise Exception(f'bad options item type: {t} for key {key}')
-
- elem_id = f"setting_{key}"
-
- if info.refresh is not None:
- if is_quicksettings:
- res = comp(label=info.label, value=fun(), elem_id=elem_id, **(args or {}))
- create_refresh_button(res, info.refresh, info.component_args, f"refresh_{key}")
- else:
- with FormRow():
- res = comp(label=info.label, value=fun(), elem_id=elem_id, **(args or {}))
- create_refresh_button(res, info.refresh, info.component_args, f"refresh_{key}")
- else:
- res = comp(label=info.label, value=fun(), elem_id=elem_id, **(args or {}))
-
- return res
-
loadsave = ui_loadsave.UiLoadsave(cmd_opts.ui_config_file)
- components = []
- component_dict = {}
- shared.settings_components = component_dict
-
- script_callbacks.ui_settings_callback()
- opts.reorder()
-
- def run_settings(*args):
- changed = []
-
- for key, value, comp in zip(opts.data_labels.keys(), args, components):
- assert comp == dummy_component or opts.same_type(value, opts.data_labels[key].default), f"Bad value for setting {key}: {value}; expecting {type(opts.data_labels[key].default).__name__}"
-
- for key, value, comp in zip(opts.data_labels.keys(), args, components):
- if comp == dummy_component:
- continue
-
- if opts.set(key, value):
- changed.append(key)
-
- try:
- opts.save(shared.config_filename)
- except RuntimeError:
- return opts.dumpjson(), f'{len(changed)} settings changed without save: {", ".join(changed)}.'
- return opts.dumpjson(), f'{len(changed)} settings changed{": " if len(changed) > 0 else ""}{", ".join(changed)}.'
-
- def run_settings_single(value, key):
- if not opts.same_type(value, opts.data_labels[key].default):
- return gr.update(visible=True), opts.dumpjson()
-
- if not opts.set(key, value):
- return gr.update(value=getattr(opts, key)), opts.dumpjson()
-
- opts.save(shared.config_filename)
-
- return get_value_for_setting(key), opts.dumpjson()
-
- with gr.Blocks(analytics_enabled=False) as settings_interface:
- with gr.Row():
- with gr.Column(scale=6):
- settings_submit = gr.Button(value="Apply settings", variant='primary', elem_id="settings_submit")
- with gr.Column():
- restart_gradio = gr.Button(value='Reload UI', variant='primary', elem_id="settings_restart_gradio")
-
- result = gr.HTML(elem_id="settings_result")
-
- quicksettings_names = opts.quicksettings_list
- quicksettings_names = {x: i for i, x in enumerate(quicksettings_names) if x != 'quicksettings'}
-
- quicksettings_list = []
-
- previous_section = None
- current_tab = None
- current_row = None
- with gr.Tabs(elem_id="settings"):
- for i, (k, item) in enumerate(opts.data_labels.items()):
- section_must_be_skipped = item.section[0] is None
-
- if previous_section != item.section and not section_must_be_skipped:
- elem_id, text = item.section
-
- if current_tab is not None:
- current_row.__exit__()
- current_tab.__exit__()
-
- gr.Group()
- current_tab = gr.TabItem(elem_id=f"settings_{elem_id}", label=text)
- current_tab.__enter__()
- current_row = gr.Column(variant='compact')
- current_row.__enter__()
-
- previous_section = item.section
-
- if k in quicksettings_names and not shared.cmd_opts.freeze_settings:
- quicksettings_list.append((i, k, item))
- components.append(dummy_component)
- elif section_must_be_skipped:
- components.append(dummy_component)
- else:
- component = create_setting_component(k)
- component_dict[k] = component
- components.append(component)
-
- if current_tab is not None:
- current_row.__exit__()
- current_tab.__exit__()
-
- with gr.TabItem("Defaults", id="defaults", elem_id="settings_tab_defaults"):
- loadsave.create_ui()
-
- with gr.TabItem("Actions", id="actions", elem_id="settings_tab_actions"):
- request_notifications = gr.Button(value='Request browser notifications', elem_id="request_notifications")
- download_localization = gr.Button(value='Download localization template', elem_id="download_localization")
- reload_script_bodies = gr.Button(value='Reload custom script bodies (No ui updates, No restart)', variant='secondary', elem_id="settings_reload_script_bodies")
- with gr.Row():
- unload_sd_model = gr.Button(value='Unload SD checkpoint to free VRAM', elem_id="sett_unload_sd_model")
- reload_sd_model = gr.Button(value='Reload the last SD checkpoint back into VRAM', elem_id="sett_reload_sd_model")
-
- with gr.TabItem("Licenses", id="licenses", elem_id="settings_tab_licenses"):
- gr.HTML(shared.html("licenses.html"), elem_id="licenses")
-
- gr.Button(value="Show all pages", elem_id="settings_show_all_pages")
-
-
- def unload_sd_weights():
- modules.sd_models.unload_model_weights()
-
- def reload_sd_weights():
- modules.sd_models.reload_model_weights()
-
- unload_sd_model.click(
- fn=unload_sd_weights,
- inputs=[],
- outputs=[]
- )
-
- reload_sd_model.click(
- fn=reload_sd_weights,
- inputs=[],
- outputs=[]
- )
-
- request_notifications.click(
- fn=lambda: None,
- inputs=[],
- outputs=[],
- _js='function(){}'
- )
-
- download_localization.click(
- fn=lambda: None,
- inputs=[],
- outputs=[],
- _js='download_localization'
- )
-
- def reload_scripts():
- modules.scripts.reload_script_body_only()
- reload_javascript() # need to refresh the html page
-
- reload_script_bodies.click(
- fn=reload_scripts,
- inputs=[],
- outputs=[]
- )
-
- restart_gradio.click(
- fn=shared.state.request_restart,
- _js='restart_reload',
- inputs=[],
- outputs=[],
- )
+ settings = ui_settings.UiSettings()
+ settings.create_ui(loadsave, dummy_component)
interfaces = [
(txt2img_interface, "txt2img", "txt2img"),
@@ -1660,7 +1459,7 @@ def create_ui():
]
interfaces += script_callbacks.ui_tabs_callback()
- interfaces += [(settings_interface, "Settings", "settings")]
+ interfaces += [(settings.interface, "Settings", "settings")]
extensions_interface = ui_extensions.create_ui()
interfaces += [(extensions_interface, "Extensions", "extensions")]
@@ -1670,10 +1469,7 @@ def create_ui():
shared.tab_names.append(label)
with gr.Blocks(theme=shared.gradio_theme, analytics_enabled=False, title="Stable Diffusion") as demo:
- with gr.Row(elem_id="quicksettings", variant="compact"):
- for _i, k, _item in sorted(quicksettings_list, key=lambda x: quicksettings_names.get(x[1], x[0])):
- component = create_setting_component(k, is_quicksettings=True)
- component_dict[k] = component
+ settings.add_quicksettings()
parameters_copypaste.connect_paste_params_buttons()
@@ -1704,49 +1500,12 @@ def create_ui():
footer = footer.format(versions=versions_html())
gr.HTML(footer, elem_id="footer")
- text_settings = gr.Textbox(elem_id="settings_json", value=lambda: opts.dumpjson(), visible=False)
- settings_submit.click(
- fn=wrap_gradio_call(run_settings, extra_outputs=[gr.update()]),
- inputs=components,
- outputs=[text_settings, result],
- )
-
- for _i, k, _item in quicksettings_list:
- component = component_dict[k]
- info = opts.data_labels[k]
-
- change_handler = component.release if hasattr(component, 'release') else component.change
- change_handler(
- fn=lambda value, k=k: run_settings_single(value, key=k),
- inputs=[component],
- outputs=[component, text_settings],
- show_progress=info.refresh is not None,
- )
+ settings.add_functionality(demo)
update_image_cfg_scale_visibility = lambda: gr.update(visible=shared.sd_model and shared.sd_model.cond_stage_key == "edit")
- text_settings.change(fn=update_image_cfg_scale_visibility, inputs=[], outputs=[image_cfg_scale])
+ settings.text_settings.change(fn=update_image_cfg_scale_visibility, inputs=[], outputs=[image_cfg_scale])
demo.load(fn=update_image_cfg_scale_visibility, inputs=[], outputs=[image_cfg_scale])
- button_set_checkpoint = gr.Button('Change checkpoint', elem_id='change_checkpoint', visible=False)
- button_set_checkpoint.click(
- fn=lambda value, _: run_settings_single(value, key='sd_model_checkpoint'),
- _js="function(v){ var res = desiredCheckpointName; desiredCheckpointName = ''; return [res || v, null]; }",
- inputs=[component_dict['sd_model_checkpoint'], dummy_component],
- outputs=[component_dict['sd_model_checkpoint'], text_settings],
- )
-
- component_keys = [k for k in opts.data_labels.keys() if k in component_dict]
-
- def get_settings_values():
- return [get_value_for_setting(key) for key in component_keys]
-
- demo.load(
- fn=get_settings_values,
- inputs=[],
- outputs=[component_dict[k] for k in component_keys],
- queue=False,
- )
-
def modelmerger(*args):
try:
results = modules.extras.run_modelmerger(*args)
@@ -1779,7 +1538,7 @@ def create_ui():
primary_model_name,
secondary_model_name,
tertiary_model_name,
- component_dict['sd_model_checkpoint'],
+ settings.component_dict['sd_model_checkpoint'],
modelmerger_result,
]
)
@@ -1793,70 +1552,6 @@ def create_ui():
return demo
-def webpath(fn):
- if fn.startswith(script_path):
- web_path = os.path.relpath(fn, script_path).replace('\\', '/')
- else:
- web_path = os.path.abspath(fn)
-
- return f'file={web_path}?{os.path.getmtime(fn)}'
-
-
-def javascript_html():
- # Ensure localization is in `window` before scripts
- head = f'<script type="text/javascript">{localization.localization_js(shared.opts.localization)}</script>\n'
-
- script_js = os.path.join(script_path, "script.js")
- head += f'<script type="text/javascript" src="{webpath(script_js)}"></script>\n'
-
- for script in modules.scripts.list_scripts("javascript", ".js"):
- head += f'<script type="text/javascript" src="{webpath(script.path)}"></script>\n'
-
- for script in modules.scripts.list_scripts("javascript", ".mjs"):
- head += f'<script type="module" src="{webpath(script.path)}"></script>\n'
-
- if cmd_opts.theme:
- head += f'<script type="text/javascript">set_theme(\"{cmd_opts.theme}\");</script>\n'
-
- return head
-
-
-def css_html():
- head = ""
-
- def stylesheet(fn):
- return f'<link rel="stylesheet" property="stylesheet" href="{webpath(fn)}">'
-
- for cssfile in modules.scripts.list_files_with_name("style.css"):
- if not os.path.isfile(cssfile):
- continue
-
- head += stylesheet(cssfile)
-
- if os.path.exists(os.path.join(data_path, "user.css")):
- head += stylesheet(os.path.join(data_path, "user.css"))
-
- return head
-
-
-def reload_javascript():
- js = javascript_html()
- css = css_html()
-
- def template_response(*args, **kwargs):
- res = shared.GradioTemplateResponseOriginal(*args, **kwargs)
- res.body = res.body.replace(b'</head>', f'{js}</head>'.encode("utf8"))
- res.body = res.body.replace(b'