From bdc90837987ed8919dd611fd01553b0c170ded5c Mon Sep 17 00:00:00 2001 From: Roy Shilkrot Date: Thu, 27 Oct 2022 15:20:15 -0400 Subject: [PATCH 01/14] Add a barebones interrogate API --- launch.py | 2 +- modules/api/api.py | 25 ++++++++++++++++++++++++- modules/api/models.py | 13 ++++++++++++- webui.py | 12 ++++++++---- 4 files changed, 45 insertions(+), 7 deletions(-) diff --git a/launch.py b/launch.py index 8affd4109..ae79b4a42 100644 --- a/launch.py +++ b/launch.py @@ -198,7 +198,7 @@ def prepare_enviroment(): def start_webui(): print(f"Launching Web UI with arguments: {' '.join(sys.argv[1:])}") import webui - webui.webui() + webui.webui_or_api() if __name__ == "__main__": diff --git a/modules/api/api.py b/modules/api/api.py index 6e9d6097b..eabdb7b8a 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -1,4 +1,4 @@ -from modules.api.models import StableDiffusionTxt2ImgProcessingAPI, StableDiffusionImg2ImgProcessingAPI +from modules.api.models import StableDiffusionTxt2ImgProcessingAPI, StableDiffusionImg2ImgProcessingAPI, InterrogateAPI from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images from modules.sd_samplers import all_samplers from modules.extras import run_pnginfo @@ -25,6 +25,11 @@ class ImageToImageResponse(BaseModel): parameters: Json info: Json +class InterrogateResponse(BaseModel): + caption: str = Field(default=None, title="Caption", description="The generated caption for the image.") + parameters: Json + info: Json + class Api: def __init__(self, app, queue_lock): @@ -33,6 +38,7 @@ class Api: self.queue_lock = queue_lock self.app.add_api_route("/sdapi/v1/txt2img", self.text2imgapi, methods=["POST"]) self.app.add_api_route("/sdapi/v1/img2img", self.img2imgapi, methods=["POST"]) + self.app.add_api_route("/sdapi/v1/interrogate", self.interrogateapi, methods=["POST"]) def __base64_to_image(self, base64_string): # if has a comma, deal with prefix @@ -118,6 +124,23 @@ class Api: return ImageToImageResponse(images=b64images, parameters=json.dumps(vars(img2imgreq)), info=processed.js()) + def interrogateapi(self, interrogatereq: InterrogateAPI): + image_b64 = interrogatereq.image + if image_b64 is None: + raise HTTPException(status_code=404, detail="Image not found") + + populate = interrogatereq.copy(update={ # Override __init__ params + } + ) + + img = self.__base64_to_image(image_b64) + + # Override object param + with self.queue_lock: + processed = shared.interrogator.interrogate(img) + + return InterrogateResponse(caption=processed, parameters=json.dumps(vars(interrogatereq)), info=None) + def extrasapi(self): raise NotImplementedError diff --git a/modules/api/models.py b/modules/api/models.py index 079e33d9b..8be64749a 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -63,7 +63,12 @@ class PydanticModelGenerator: self._model_name = model_name - self._class_data = merge_class_params(class_instance) + + if class_instance is not None: + self._class_data = merge_class_params(class_instance) + else: + self._class_data = {} + self._model_def = [ ModelDef( field=underscore(k), @@ -105,4 +110,10 @@ StableDiffusionImg2ImgProcessingAPI = PydanticModelGenerator( "StableDiffusionProcessingImg2Img", StableDiffusionProcessingImg2Img, [{"key": "sampler_index", "type": str, "default": "Euler"}, {"key": "init_images", "type": list, "default": None}, {"key": "denoising_strength", "type": float, "default": 0.75}, {"key": "mask", "type": str, "default": None}, {"key": "include_init_images", "type": bool, 
"default": False, "exclude" : True}] +).generate_model() + +InterrogateAPI = PydanticModelGenerator( + "Interrogate", + None, + [{"key": "image", "type": str, "default": None}] ).generate_model() \ No newline at end of file diff --git a/webui.py b/webui.py index ade7334bf..7a4bb2a23 100644 --- a/webui.py +++ b/webui.py @@ -146,7 +146,9 @@ def webui(): app.add_middleware(GZipMiddleware, minimum_size=1000) if (launch_api): - create_api(app) + print('launching API') + api = create_api(app) + api.launch(server_name="0.0.0.0" if cmd_opts.listen else "127.0.0.1", port=cmd_opts.port if cmd_opts.port else 7861) wait_on_server(demo) @@ -161,10 +163,12 @@ def webui(): print('Restarting Gradio') - -task = [] -if __name__ == "__main__": +def webui_or_api(): if cmd_opts.nowebui: api_only() else: webui() + +task = [] +if __name__ == "__main__": + webui_or_api() \ No newline at end of file From df6a7ebfe8cc4da23861e3e2583693bb7808d573 Mon Sep 17 00:00:00 2001 From: Roy Shilkrot Date: Mon, 31 Oct 2022 11:50:33 -0400 Subject: [PATCH 02/14] revert things to master --- launch.py | 2 +- modules/api/api.py | 2 -- modules/api/models.py | 6 +----- 3 files changed, 2 insertions(+), 8 deletions(-) diff --git a/launch.py b/launch.py index fe9cef3cc..958336f21 100644 --- a/launch.py +++ b/launch.py @@ -220,7 +220,7 @@ def tests(argv): def start_webui(): print(f"Launching Web UI with arguments: {' '.join(sys.argv[1:])}") import webui - webui.webui_or_api() + webui.webui() if __name__ == "__main__": diff --git a/modules/api/api.py b/modules/api/api.py index c510a8334..6a903e4c0 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -117,8 +117,6 @@ class Api: return ImageToImageResponse(images=b64images, parameters=vars(img2imgreq), info=processed.js()) - def extrasapi(self): - raise NotImplementedError def extras_single_image_api(self, req: ExtrasSingleImageRequest): reqDict = setUpscalers(req) diff --git a/modules/api/models.py b/modules/api/models.py index 035a7179c..82ab29b84 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -64,11 +64,7 @@ class PydanticModelGenerator: self._model_name = model_name - - if class_instance is not None: - self._class_data = merge_class_params(class_instance) - else: - self._class_data = {} + self._class_data = merge_class_params(class_instance) self._model_def = [ ModelDef( From 3f3d14afd5abd07d3843370dc1c28be299dbdbab Mon Sep 17 00:00:00 2001 From: Roy Shilkrot Date: Mon, 31 Oct 2022 11:51:21 -0400 Subject: [PATCH 03/14] nix unused thing --- modules/api/api.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/modules/api/api.py b/modules/api/api.py index 6a903e4c0..536e3f161 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -182,10 +182,6 @@ class Api: if image_b64 is None: raise HTTPException(status_code=404, detail="Image not found") - populate = interrogatereq.copy(update={ # Override __init__ params - } - ) - img = self.__base64_to_image(image_b64) # Override object param From 6603f63b7b8af39ab815091460c5c2a12d3f253e Mon Sep 17 00:00:00 2001 From: Han Lin Date: Sun, 6 Nov 2022 11:08:20 +0800 Subject: [PATCH 04/14] Fixes LDSR upscaler producing black bars --- modules/ldsr_model_arch.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/modules/ldsr_model_arch.py b/modules/ldsr_model_arch.py index 14db50766..90e0a2f06 100644 --- a/modules/ldsr_model_arch.py +++ b/modules/ldsr_model_arch.py @@ -101,8 +101,8 @@ class LDSR: down_sample_rate = target_scale / 4 wd = width_og * down_sample_rate hd = height_og * 
down_sample_rate - width_downsampled_pre = int(wd) - height_downsampled_pre = int(hd) + width_downsampled_pre = int(np.ceil(wd)) + height_downsampled_pre = int(np.ceil(hd)) if down_sample_rate != 1: print( @@ -110,7 +110,12 @@ class LDSR: im_og = im_og.resize((width_downsampled_pre, height_downsampled_pre), Image.LANCZOS) else: print(f"Down sample rate is 1 from {target_scale} / 4 (Not downsampling)") - logs = self.run(model["model"], im_og, diffusion_steps, eta) + + # pad width and height to multiples of 64, pads with the edge values of image to avoid artifacts + pad_w, pad_h = np.max(((2, 2), np.ceil(np.array(im_og.size) / 64).astype(int)), axis=0) * 64 - im_og.size + im_padded = Image.fromarray(np.pad(np.array(im_og), ((0, pad_h), (0, pad_w), (0, 0)), mode='edge')) + + logs = self.run(model["model"], im_padded, diffusion_steps, eta) sample = logs["sample"] sample = sample.detach().cpu() @@ -120,6 +125,9 @@ class LDSR: sample = np.transpose(sample, (0, 2, 3, 1)) a = Image.fromarray(sample[0]) + # remove padding + a = a.crop((0, 0) + tuple(np.array(im_og.size) * 4)) + del model gc.collect() torch.cuda.empty_cache() From a2a1a2f7270a865175f64475229838a8d64509ea Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 6 Nov 2022 09:02:25 +0300 Subject: [PATCH 05/14] add ability to create extensions that add localizations --- javascript/ui.js | 2 ++ modules/localization.py | 6 ++++++ modules/scripts.py | 1 - modules/shared.py | 2 -- modules/ui.py | 3 +-- webui.py | 9 +++++---- 6 files changed, 14 insertions(+), 9 deletions(-) diff --git a/javascript/ui.js b/javascript/ui.js index 7e1164658..95cfd106f 100644 --- a/javascript/ui.js +++ b/javascript/ui.js @@ -208,4 +208,6 @@ function update_token_counter(button_id) { function restart_reload(){ document.body.innerHTML='
Reloading...
'; setTimeout(function(){location.reload()},2000) + + return [] } diff --git a/modules/localization.py b/modules/localization.py index b1810cda2..f6a6f2fbd 100644 --- a/modules/localization.py +++ b/modules/localization.py @@ -3,6 +3,7 @@ import os import sys import traceback + localizations = {} @@ -16,6 +17,11 @@ def list_localizations(dirname): localizations[fn] = os.path.join(dirname, file) + from modules import scripts + for file in scripts.list_scripts("localizations", ".json"): + fn, ext = os.path.splitext(file.filename) + localizations[fn] = file.path + def localization_js(current_localization_name): fn = localizations.get(current_localization_name, None) diff --git a/modules/scripts.py b/modules/scripts.py index 366c90d7c..637b23292 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -3,7 +3,6 @@ import sys import traceback from collections import namedtuple -import modules.ui as ui import gradio as gr from modules.processing import StableDiffusionProcessing diff --git a/modules/shared.py b/modules/shared.py index 70b998ff3..e8bacd3c9 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -221,8 +221,6 @@ interrogator = modules.interrogate.InterrogateModels("interrogate") face_restorers = [] -localization.list_localizations(cmd_opts.localizations_dir) - def realesrgan_models_names(): import modules.realesrgan_model diff --git a/modules/ui.py b/modules/ui.py index 76ca9b071..23643c22d 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1563,11 +1563,10 @@ def create_ui(wrap_gradio_gpu_call): shared.state.need_restart = True restart_gradio.click( - fn=request_restart, + _js='restart_reload', inputs=[], outputs=[], - _js='restart_reload' ) if column is not None: diff --git a/webui.py b/webui.py index a5a520f0c..4342a962c 100644 --- a/webui.py +++ b/webui.py @@ -10,7 +10,7 @@ from fastapi.middleware.gzip import GZipMiddleware from modules.paths import script_path -from modules import devices, sd_samplers, upscaler, extensions +from modules import devices, sd_samplers, upscaler, extensions, localization import modules.codeformer_model as codeformer import modules.extras import modules.face_restoration @@ -28,9 +28,7 @@ import modules.txt2img import modules.script_callbacks import modules.ui -from modules import devices from modules import modelloader -from modules.paths import script_path from modules.shared import cmd_opts import modules.hypernetworks.hypernetwork @@ -64,6 +62,7 @@ def wrap_gradio_gpu_call(func, extra_outputs=None): def initialize(): extensions.list_extensions() + localization.list_localizations(cmd_opts.localizations_dir) if cmd_opts.ui_debug_mode: shared.sd_upscalers = upscaler.UpscalerLanczos().scalers @@ -99,7 +98,6 @@ def initialize(): else: print("Running with TLS") - # make the program just exit at ctrl+c without waiting for anything def sigint_handler(sig, frame): print(f'Interrupted with signal {sig} in {frame}') @@ -185,6 +183,9 @@ def webui(): print('Reloading extensions') extensions.list_extensions() + + localization.list_localizations(cmd_opts.localizations_dir) + print('Reloading custom scripts') modules.scripts.reload_scripts() print('Reloading modules: modules.ui') From e5b4e3f820cd09e751f1d168ab05d606d078a0d9 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 6 Nov 2022 10:12:53 +0300 Subject: [PATCH 06/14] add tags to extensions, and ability to filter out tags list changed Settings keys in UI do not print VRAM/etc stats everywhere but in calls that use GPU --- modules/ui.py | 25 ++++++++++-------- 
modules/ui_extensions.py | 55 ++++++++++++++++++++++++++++++++-------- style.css | 5 ++++ webui.py | 2 +- 4 files changed, 64 insertions(+), 23 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index 23643c22d..c946ad592 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -174,9 +174,9 @@ def save_pil_to_file(pil_image, dir=None): gr.processing_utils.save_pil_to_file = save_pil_to_file -def wrap_gradio_call(func, extra_outputs=None): +def wrap_gradio_call(func, extra_outputs=None, add_stats=False): def f(*args, extra_outputs_array=extra_outputs, **kwargs): - run_memmon = opts.memmon_poll_rate > 0 and not shared.mem_mon.disabled + run_memmon = opts.memmon_poll_rate > 0 and not shared.mem_mon.disabled and add_stats if run_memmon: shared.mem_mon.monitor() t = time.perf_counter() @@ -203,11 +203,18 @@ def wrap_gradio_call(func, extra_outputs=None): res = extra_outputs_array + [f"
{plaintext_to_html(type(e).__name__+': '+str(e))}
"] + shared.state.skipped = False + shared.state.interrupted = False + shared.state.job_count = 0 + + if not add_stats: + return tuple(res) + elapsed = time.perf_counter() - t elapsed_m = int(elapsed // 60) elapsed_s = elapsed % 60 elapsed_text = f"{elapsed_s:.2f}s" - if (elapsed_m > 0): + if elapsed_m > 0: elapsed_text = f"{elapsed_m}m "+elapsed_text if run_memmon: @@ -225,10 +232,6 @@ def wrap_gradio_call(func, extra_outputs=None): # last item is always HTML res[-1] += f"
Time taken: {elapsed_text}
{vram_html}
" - shared.state.skipped = False - shared.state.interrupted = False - shared.state.job_count = 0 - return tuple(res) return f @@ -1436,7 +1439,7 @@ def create_ui(wrap_gradio_gpu_call): opts.reorder() def run_settings(*args): - changed = 0 + changed = [] for key, value, comp in zip(opts.data_labels.keys(), args, components): assert comp == dummy_component or opts.same_type(value, opts.data_labels[key].default), f"Bad value for setting {key}: {value}; expecting {type(opts.data_labels[key].default).__name__}" @@ -1454,12 +1457,12 @@ def create_ui(wrap_gradio_gpu_call): if opts.data_labels[key].onchange is not None: opts.data_labels[key].onchange() - changed += 1 + changed.append(key) try: opts.save(shared.config_filename) except RuntimeError: - return opts.dumpjson(), f'{changed} settings changed without save.' - return opts.dumpjson(), f'{changed} settings changed.' + return opts.dumpjson(), f'{len(changed)} settings changed without save: {", ".join(changed)}.' + return opts.dumpjson(), f'{len(changed)} settings changed: {", ".join(changed)}.' def run_settings_single(value, key): if not opts.same_type(value, opts.data_labels[key].default): diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index 8e0d41d57..02ab9643d 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -140,13 +140,15 @@ def install_extension_from_url(dirname, url): shutil.rmtree(tmpdir, True) -def install_extension_from_index(url): +def install_extension_from_index(url, hide_tags): ext_table, message = install_extension_from_url(None, url) - return refresh_available_extensions_from_data(), ext_table, message + code, _ = refresh_available_extensions_from_data(hide_tags) + + return code, ext_table, message -def refresh_available_extensions(url): +def refresh_available_extensions(url, hide_tags): global available_extensions import urllib.request @@ -155,13 +157,25 @@ def refresh_available_extensions(url): available_extensions = json.loads(text) - return url, refresh_available_extensions_from_data(), '' + code, tags = refresh_available_extensions_from_data(hide_tags) + + return url, code, gr.CheckboxGroup.update(choices=tags), '' -def refresh_available_extensions_from_data(): +def refresh_available_extensions_for_tags(hide_tags): + code, _ = refresh_available_extensions_from_data(hide_tags) + + return code, '' + + +def refresh_available_extensions_from_data(hide_tags): extlist = available_extensions["extensions"] installed_extension_urls = {normalize_git_url(extension.remote): extension.name for extension in extensions.extensions} + tags = available_extensions.get("tags", {}) + tags_to_hide = set(hide_tags) + hidden = 0 + code = f""" @@ -178,17 +192,24 @@ def refresh_available_extensions_from_data(): name = ext.get("name", "noname") url = ext.get("url", None) description = ext.get("description", "") + extension_tags = ext.get("tags", []) if url is None: continue + if len([x for x in extension_tags if x in tags_to_hide]) > 0: + hidden += 1 + continue + existing = installed_extension_urls.get(normalize_git_url(url), None) install_code = f"""""" + tags_text = ", ".join([f"{x}" for x in extension_tags]) + code += f""" - + @@ -199,7 +220,10 @@ def refresh_available_extensions_from_data():
{html.escape(name)}{html.escape(name)}
{tags_text}
{html.escape(description)} {install_code}
""" - return code + if hidden > 0: + code += f"
Extension hidden: {hidden}
" + + return code, list(tags) def create_ui(): @@ -238,21 +262,30 @@ def create_ui(): extension_to_install = gr.Text(elem_id="extension_to_install", visible=False) install_extension_button = gr.Button(elem_id="install_extension_button", visible=False) + with gr.Row(): + hide_tags = gr.CheckboxGroup(value=["ads", "localization"], label="Hide extensions with tags", choices=["script", "ads", "localization"]) + install_result = gr.HTML() available_extensions_table = gr.HTML() refresh_available_extensions_button.click( - fn=modules.ui.wrap_gradio_call(refresh_available_extensions, extra_outputs=[gr.update(), gr.update()]), - inputs=[available_extensions_index], - outputs=[available_extensions_index, available_extensions_table, install_result], + fn=modules.ui.wrap_gradio_call(refresh_available_extensions, extra_outputs=[gr.update(), gr.update(), gr.update()]), + inputs=[available_extensions_index, hide_tags], + outputs=[available_extensions_index, available_extensions_table, hide_tags, install_result], ) install_extension_button.click( fn=modules.ui.wrap_gradio_call(install_extension_from_index, extra_outputs=[gr.update(), gr.update()]), - inputs=[extension_to_install], + inputs=[extension_to_install, hide_tags], outputs=[available_extensions_table, extensions_table, install_result], ) + hide_tags.change( + fn=modules.ui.wrap_gradio_call(refresh_available_extensions_for_tags, extra_outputs=[gr.update()]), + inputs=[hide_tags], + outputs=[available_extensions_table, install_result] + ) + with gr.TabItem("Install from URL"): install_url = gr.Text(label="URL for extension's git repository") install_dirname = gr.Text(label="Local directory name", placeholder="Leave empty for auto") diff --git a/style.css b/style.css index a0382a8cc..e2b71f253 100644 --- a/style.css +++ b/style.css @@ -563,6 +563,11 @@ img2maskimg, #img2maskimg > .h-60, #img2maskimg > .h-60 > div, #img2maskimg > .h opacity: 0.5; } +.extension-tag{ + font-weight: bold; + font-size: 95%; +} + /* The following handles localization for right-to-left (RTL) languages like Arabic. The rtl media type will only be activated by the logic in javascript/localization.js. 
If you change anything above, you need to make sure it is RTL compliant by just running diff --git a/webui.py b/webui.py index 4342a962c..f4f1d74d1 100644 --- a/webui.py +++ b/webui.py @@ -57,7 +57,7 @@ def wrap_gradio_gpu_call(func, extra_outputs=None): return res - return modules.ui.wrap_gradio_call(f, extra_outputs=extra_outputs) + return modules.ui.wrap_gradio_call(f, extra_outputs=extra_outputs, add_stats=True) def initialize(): From 9cd1a66648b4c19136687100f9705d442f31e7f9 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 6 Nov 2022 10:37:08 +0300 Subject: [PATCH 07/14] remove localization people from CODEOWNERS add a note --- CODEOWNERS | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/CODEOWNERS b/CODEOWNERS index a48d80121..7438c9bc6 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1,13 +1,12 @@ * @AUTOMATIC1111 -/localizations/ar_AR.json @xmodar @blackneoo -/localizations/de_DE.json @LunixWasTaken -/localizations/es_ES.json @innovaciones -/localizations/fr_FR.json @tumbly -/localizations/it_IT.json @EugenioBuffo -/localizations/ja_JP.json @yuuki76 -/localizations/ko_KR.json @36DB -/localizations/pt_BR.json @M-art-ucci -/localizations/ru_RU.json @kabachuha -/localizations/tr_TR.json @camenduru -/localizations/zh_CN.json @dtlnor @bgluminous -/localizations/zh_TW.json @benlisquare + +# if you were managing a localization and were removed from this file, this is because +# the intended way to do localizations now is via extensions. See: +# https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Developing-extensions +# Make a repo with your localization and since you are still listed as a collaborator +# you can add it to the wiki page yourself. This change is because some people complained +# the git commit log is cluttered with things unrelated to almost everyone and +# because I believe this is the best overall for the project to handle localizations almost +# entirely without my oversight. + + From 6e4de5b4422dfc0d45063b2c8c78b19f00321615 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 6 Nov 2022 11:20:23 +0300 Subject: [PATCH 08/14] add load_with_extra function for modules to load checkpoints with extended whitelist --- modules/safe.py | 40 +++++++++++++++++++++++++++++++++++++--- 1 file changed, 37 insertions(+), 3 deletions(-) diff --git a/modules/safe.py b/modules/safe.py index 348a24fcd..a9209e385 100644 --- a/modules/safe.py +++ b/modules/safe.py @@ -23,11 +23,18 @@ def encode(*args): class RestrictedUnpickler(pickle.Unpickler): + extra_handler = None + def persistent_load(self, saved_id): assert saved_id[0] == 'storage' return TypedStorage() def find_class(self, module, name): + if self.extra_handler is not None: + res = self.extra_handler(module, name) + if res is not None: + return res + if module == 'collections' and name == 'OrderedDict': return getattr(collections, name) if module == 'torch._utils' and name in ['_rebuild_tensor_v2', '_rebuild_parameter']: @@ -52,7 +59,7 @@ class RestrictedUnpickler(pickle.Unpickler): return set # Forbid everything else. 
- raise pickle.UnpicklingError(f"global '{module}/{name}' is forbidden") + raise Exception(f"global '{module}/{name}' is forbidden") allowed_zip_names = ["archive/data.pkl", "archive/version"] @@ -69,7 +76,7 @@ def check_zip_filenames(filename, names): raise Exception(f"bad file inside {filename}: {name}") -def check_pt(filename): +def check_pt(filename, extra_handler): try: # new pytorch format is a zip file @@ -78,6 +85,7 @@ def check_pt(filename): with z.open('archive/data.pkl') as file: unpickler = RestrictedUnpickler(file) + unpickler.extra_handler = extra_handler unpickler.load() except zipfile.BadZipfile: @@ -85,16 +93,42 @@ def check_pt(filename): # if it's not a zip file, it's an olf pytorch format, with five objects written to pickle with open(filename, "rb") as file: unpickler = RestrictedUnpickler(file) + unpickler.extra_handler = extra_handler for i in range(5): unpickler.load() def load(filename, *args, **kwargs): + return load_with_extra(filename, *args, **kwargs) + + +def load_with_extra(filename, extra_handler=None, *args, **kwargs): + """ + this functon is intended to be used by extensions that want to load models with + some extra classes in them that the usual unpickler would find suspicious. + + Use the extra_handler argument to specify a function that takes module and field name as text, + and returns that field's value: + + ```python + def extra(module, name): + if module == 'collections' and name == 'OrderedDict': + return collections.OrderedDict + + return None + + safe.load_with_extra('model.pt', extra_handler=extra) + ``` + + The alternative to this is just to use safe.unsafe_torch_load('model.pt'), which as the name implies is + definitely unsafe. + """ + from modules import shared try: if not shared.cmd_opts.disable_safe_unpickle: - check_pt(filename) + check_pt(filename, extra_handler) except pickle.UnpicklingError: print(f"Error verifying pickled file from {filename}:", file=sys.stderr) From 32c0eab89538ba3900bf499291720f80ae4b43e5 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 6 Nov 2022 14:39:41 +0300 Subject: [PATCH 09/14] load all settings in one call instead of one by one when the page loads --- modules/ui.py | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index c946ad592..34c31ef1f 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1141,7 +1141,7 @@ def create_ui(wrap_gradio_gpu_call): outputs=[html, generation_info, html2], ) - with gr.Blocks() as modelmerger_interface: + with gr.Blocks(analytics_enabled=False) as modelmerger_interface: with gr.Row().style(equal_height=False): with gr.Column(variant='panel'): gr.HTML(value="
A merger of the two checkpoints will be generated in your checkpoint directory.
") @@ -1161,7 +1161,7 @@ def create_ui(wrap_gradio_gpu_call): sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings() - with gr.Blocks() as train_interface: + with gr.Blocks(analytics_enabled=False) as train_interface: with gr.Row().style(equal_height=False): gr.HTML(value="
See wiki for detailed explanation.
") @@ -1420,15 +1420,14 @@ def create_ui(wrap_gradio_gpu_call): if info.refresh is not None: if is_quicksettings: - res = comp(label=info.label, value=fun, elem_id=elem_id, **(args or {})) + res = comp(label=info.label, value=fun(), elem_id=elem_id, **(args or {})) create_refresh_button(res, info.refresh, info.component_args, "refresh_" + key) else: with gr.Row(variant="compact"): - res = comp(label=info.label, value=fun, elem_id=elem_id, **(args or {})) + res = comp(label=info.label, value=fun(), elem_id=elem_id, **(args or {})) create_refresh_button(res, info.refresh, info.component_args, "refresh_" + key) else: - res = comp(label=info.label, value=fun, elem_id=elem_id, **(args or {})) - + res = comp(label=info.label, value=fun(), elem_id=elem_id, **(args or {})) return res @@ -1639,6 +1638,17 @@ def create_ui(wrap_gradio_gpu_call): outputs=[component, text_settings], ) + component_keys = [k for k in opts.data_labels.keys() if k in component_dict] + + def get_settings_values(): + return [getattr(opts, key) for key in component_keys] + + demo.load( + fn=get_settings_values, + inputs=[], + outputs=[component_dict[k] for k in component_keys], + ) + def modelmerger(*args): try: results = modules.extras.run_modelmerger(*args) From 804d9fb83d0c63ca3acd36378707ce47b8f12599 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 6 Nov 2022 23:22:36 +0300 Subject: [PATCH 10/14] bump gradio to 3.9 --- requirements.txt | 2 +- requirements_versions.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index 79e8b7c69..0fba0b233 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,7 +4,7 @@ fairscale==0.4.4 fonts font-roboto gfpgan -gradio==3.8 +gradio==3.9 invisible-watermark numpy omegaconf diff --git a/requirements_versions.txt b/requirements_versions.txt index 7bd16712c..f7059f205 100644 --- a/requirements_versions.txt +++ b/requirements_versions.txt @@ -2,7 +2,7 @@ transformers==4.19.2 diffusers==0.3.0 basicsr==1.4.2 gfpgan==1.3.8 -gradio==3.8 +gradio==3.9 numpy==1.23.3 Pillow==9.2.0 realesrgan==0.3.0 From c5334fc56b3d44976425da2e6d0a303ae96836a1 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 8 Nov 2022 08:35:01 +0300 Subject: [PATCH 11/14] fix javascript duplication bug after pressing the restart UI button --- modules/ui.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index 34c31ef1f..67cf1d6a3 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1752,7 +1752,7 @@ def create_ui(wrap_gradio_gpu_call): return demo -def load_javascript(raw_response): +def reload_javascript(): with open(os.path.join(script_path, "script.js"), "r", encoding="utf8") as jsfile: javascript = f'' @@ -1768,7 +1768,7 @@ def load_javascript(raw_response): javascript += f"\n" def template_response(*args, **kwargs): - res = raw_response(*args, **kwargs) + res = shared.GradioTemplateResponseOriginal(*args, **kwargs) res.body = res.body.replace( b'', f'{javascript}'.encode("utf8")) res.init_headers() @@ -1777,4 +1777,5 @@ def load_javascript(raw_response): gradio.routes.templates.TemplateResponse = template_response -reload_javascript = partial(load_javascript, gradio.routes.templates.TemplateResponse) +if not hasattr(shared, 'GradioTemplateResponseOriginal'): + shared.GradioTemplateResponseOriginal = gradio.routes.templates.TemplateResponse From 8011be33c36eb7aa9e9498fc714614034e07f67a Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 8 Nov 
2022 08:37:05 +0300 Subject: [PATCH 12/14] move functions out of main body for image preprocessing for easier hijacking --- modules/textual_inversion/preprocess.py | 162 ++++++++++++++---------- 1 file changed, 93 insertions(+), 69 deletions(-) diff --git a/modules/textual_inversion/preprocess.py b/modules/textual_inversion/preprocess.py index e13b18945..488aa5b5f 100644 --- a/modules/textual_inversion/preprocess.py +++ b/modules/textual_inversion/preprocess.py @@ -35,6 +35,84 @@ def preprocess(process_src, process_dst, process_width, process_height, preproce deepbooru.release_process() +def listfiles(dirname): + return os.listdir(dirname) + + +class PreprocessParams: + src = None + dstdir = None + subindex = 0 + flip = False + process_caption = False + process_caption_deepbooru = False + preprocess_txt_action = None + + +def save_pic_with_caption(image, index, params: PreprocessParams, existing_caption=None): + caption = "" + + if params.process_caption: + caption += shared.interrogator.generate_caption(image) + + if params.process_caption_deepbooru: + if len(caption) > 0: + caption += ", " + caption += deepbooru.get_tags_from_process(image) + + filename_part = params.src + filename_part = os.path.splitext(filename_part)[0] + filename_part = os.path.basename(filename_part) + + basename = f"{index:05}-{params.subindex}-{filename_part}" + image.save(os.path.join(params.dstdir, f"{basename}.png")) + + if params.preprocess_txt_action == 'prepend' and existing_caption: + caption = existing_caption + ' ' + caption + elif params.preprocess_txt_action == 'append' and existing_caption: + caption = caption + ' ' + existing_caption + elif params.preprocess_txt_action == 'copy' and existing_caption: + caption = existing_caption + + caption = caption.strip() + + if len(caption) > 0: + with open(os.path.join(params.dstdir, f"{basename}.txt"), "w", encoding="utf8") as file: + file.write(caption) + + params.subindex += 1 + + +def save_pic(image, index, params, existing_caption=None): + save_pic_with_caption(image, index, params, existing_caption=existing_caption) + + if params.flip: + save_pic_with_caption(ImageOps.mirror(image), index, params, existing_caption=existing_caption) + + +def split_pic(image, inverse_xy, width, height, overlap_ratio): + if inverse_xy: + from_w, from_h = image.height, image.width + to_w, to_h = height, width + else: + from_w, from_h = image.width, image.height + to_w, to_h = width, height + h = from_h * to_w // from_w + if inverse_xy: + image = image.resize((h, to_w)) + else: + image = image.resize((to_w, h)) + + split_count = math.ceil((h - to_h * overlap_ratio) / (to_h * (1.0 - overlap_ratio))) + y_step = (h - to_h) / (split_count - 1) + for i in range(split_count): + y = int(y_step * i) + if inverse_xy: + splitted = image.crop((y, 0, y + to_h, to_w)) + else: + splitted = image.crop((0, y, to_w, y + to_h)) + yield splitted + def preprocess_work(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_flip, process_split, process_caption, process_caption_deepbooru=False, split_threshold=0.5, overlap_ratio=0.2, process_focal_crop=False, process_focal_crop_face_weight=0.9, process_focal_crop_entropy_weight=0.3, process_focal_crop_edges_weight=0.5, process_focal_crop_debug=False): width = process_width @@ -48,82 +126,28 @@ def preprocess_work(process_src, process_dst, process_width, process_height, pre os.makedirs(dst, exist_ok=True) - files = os.listdir(src) + files = listfiles(src) shared.state.textinfo = "Preprocessing..." 
shared.state.job_count = len(files) - def save_pic_with_caption(image, index, existing_caption=None): - caption = "" - - if process_caption: - caption += shared.interrogator.generate_caption(image) - - if process_caption_deepbooru: - if len(caption) > 0: - caption += ", " - caption += deepbooru.get_tags_from_process(image) - - filename_part = filename - filename_part = os.path.splitext(filename_part)[0] - filename_part = os.path.basename(filename_part) - - basename = f"{index:05}-{subindex[0]}-{filename_part}" - image.save(os.path.join(dst, f"{basename}.png")) - - if preprocess_txt_action == 'prepend' and existing_caption: - caption = existing_caption + ' ' + caption - elif preprocess_txt_action == 'append' and existing_caption: - caption = caption + ' ' + existing_caption - elif preprocess_txt_action == 'copy' and existing_caption: - caption = existing_caption - - caption = caption.strip() - - if len(caption) > 0: - with open(os.path.join(dst, f"{basename}.txt"), "w", encoding="utf8") as file: - file.write(caption) - - subindex[0] += 1 - - def save_pic(image, index, existing_caption=None): - save_pic_with_caption(image, index, existing_caption=existing_caption) - - if process_flip: - save_pic_with_caption(ImageOps.mirror(image), index, existing_caption=existing_caption) - - def split_pic(image, inverse_xy): - if inverse_xy: - from_w, from_h = image.height, image.width - to_w, to_h = height, width - else: - from_w, from_h = image.width, image.height - to_w, to_h = width, height - h = from_h * to_w // from_w - if inverse_xy: - image = image.resize((h, to_w)) - else: - image = image.resize((to_w, h)) - - split_count = math.ceil((h - to_h * overlap_ratio) / (to_h * (1.0 - overlap_ratio))) - y_step = (h - to_h) / (split_count - 1) - for i in range(split_count): - y = int(y_step * i) - if inverse_xy: - splitted = image.crop((y, 0, y + to_h, to_w)) - else: - splitted = image.crop((0, y, to_w, y + to_h)) - yield splitted - + params = PreprocessParams() + params.dstdir = dst + params.flip = process_flip + params.process_caption = process_caption + params.process_caption_deepbooru = process_caption_deepbooru + params.preprocess_txt_action = preprocess_txt_action for index, imagefile in enumerate(tqdm.tqdm(files)): - subindex = [0] + params.subindex = 0 filename = os.path.join(src, imagefile) try: img = Image.open(filename).convert("RGB") except Exception: continue + params.src = filename + existing_caption = None existing_caption_filename = os.path.splitext(filename)[0] + '.txt' if os.path.exists(existing_caption_filename): @@ -143,8 +167,8 @@ def preprocess_work(process_src, process_dst, process_width, process_height, pre process_default_resize = True if process_split and ratio < 1.0 and ratio <= split_threshold: - for splitted in split_pic(img, inverse_xy): - save_pic(splitted, index, existing_caption=existing_caption) + for splitted in split_pic(img, inverse_xy, width, height, overlap_ratio): + save_pic(splitted, index, params, existing_caption=existing_caption) process_default_resize = False if process_focal_crop and img.height != img.width: @@ -165,11 +189,11 @@ def preprocess_work(process_src, process_dst, process_width, process_height, pre dnn_model_path = dnn_model_path, ) for focal in autocrop.crop_image(img, autocrop_settings): - save_pic(focal, index, existing_caption=existing_caption) + save_pic(focal, index, params, existing_caption=existing_caption) process_default_resize = False if process_default_resize: img = images.resize_image(1, img, width, height) - save_pic(img, index, 
existing_caption=existing_caption) + save_pic(img, index, params, existing_caption=existing_caption) - shared.state.nextjob() \ No newline at end of file + shared.state.nextjob() From 1610b3258458025025e9c4faae57d290e4519745 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 8 Nov 2022 08:38:10 +0300 Subject: [PATCH 13/14] add callback for creating a tab in train UI --- modules/script_callbacks.py | 27 +++++++++++++++++++++++++-- modules/ui.py | 4 ++++ 2 files changed, 29 insertions(+), 2 deletions(-) diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py index 74dfb8802..f19e164cf 100644 --- a/modules/script_callbacks.py +++ b/modules/script_callbacks.py @@ -7,6 +7,7 @@ from typing import Optional from fastapi import FastAPI from gradio import Blocks + def report_exception(c, job): print(f"Error executing callback {job} for {c.script}", file=sys.stderr) print(traceback.format_exc(), file=sys.stderr) @@ -45,15 +46,21 @@ class CFGDenoiserParams: """Total number of sampling steps planned""" +class UiTrainTabParams: + def __init__(self, txt2img_preview_params): + self.txt2img_preview_params = txt2img_preview_params + + ScriptCallback = namedtuple("ScriptCallback", ["script", "callback"]) callback_map = dict( callbacks_app_started=[], callbacks_model_loaded=[], callbacks_ui_tabs=[], + callbacks_ui_train_tabs=[], callbacks_ui_settings=[], callbacks_before_image_saved=[], callbacks_image_saved=[], - callbacks_cfg_denoiser=[] + callbacks_cfg_denoiser=[], ) @@ -61,6 +68,7 @@ def clear_callbacks(): for callback_list in callback_map.values(): callback_list.clear() + def app_started_callback(demo: Optional[Blocks], app: FastAPI): for c in callback_map['callbacks_app_started']: try: @@ -79,7 +87,7 @@ def model_loaded_callback(sd_model): def ui_tabs_callback(): res = [] - + for c in callback_map['callbacks_ui_tabs']: try: res += c.callback() or [] @@ -89,6 +97,14 @@ def ui_tabs_callback(): return res +def ui_train_tabs_callback(params: UiTrainTabParams): + for c in callback_map['callbacks_ui_train_tabs']: + try: + c.callback(params) + except Exception: + report_exception(c, 'callbacks_ui_train_tabs') + + def ui_settings_callback(): for c in callback_map['callbacks_ui_settings']: try: @@ -169,6 +185,13 @@ def on_ui_tabs(callback): add_callback(callback_map['callbacks_ui_tabs'], callback) +def on_ui_train_tabs(callback): + """register a function to be called when the UI is creating new tabs for the train tab. + Create your new tabs with gr.Tab. 
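+
+    The callback receives a UiTrainTabParams object; its txt2img_preview_params
+    field holds the txt2img components that are used to make preview images
+    during training.
+
+    A minimal sketch of such a callback (the function name, tab label and tab
+    contents below are illustrative, not part of this API):
+
+    ```python
+    import gradio as gr
+
+    from modules import script_callbacks
+
+    def make_extension_train_tab(params: script_callbacks.UiTrainTabParams):
+        # the tab is created where the Train UI builds its own tabs
+        with gr.Tab(label="My extension"):
+            gr.Markdown("Put the extension's training controls here.")
+
+    script_callbacks.on_ui_train_tabs(make_extension_train_tab)
+    ```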
+ """ + add_callback(callback_map['callbacks_ui_train_tabs'], callback) + + def on_ui_settings(callback): """register a function to be called before UI settings are populated; add your settings by using shared.opts.add_option(shared.OptionInfo(...)) """ diff --git a/modules/ui.py b/modules/ui.py index 67cf1d6a3..7ea1177f6 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1270,6 +1270,10 @@ def create_ui(wrap_gradio_gpu_call): train_hypernetwork = gr.Button(value="Train Hypernetwork", variant='primary') train_embedding = gr.Button(value="Train Embedding", variant='primary') + params = script_callbacks.UiTrainTabParams(txt2img_preview_params) + + script_callbacks.ui_train_tabs_callback(params) + with gr.Column(): progressbar = gr.HTML(elem_id="ti_progressbar") ti_output = gr.Text(elem_id="ti_output", value="", show_label=False) From ac085628540d0ec6a988fad93f5b8f2154209571 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 8 Nov 2022 10:01:06 +0300 Subject: [PATCH 14/14] Remove old localizations from the main repo. Where are they now? Here: https://github.com/AUTOMATIC1111/stable-diffusion-webui-old-localizations --- localizations/ar_AR.json | 518 ------------- localizations/de_DE.json | 458 ----------- localizations/es_ES.json | 692 ----------------- localizations/fr_FR.json | 415 ---------- localizations/it_IT.json | 1565 -------------------------------------- localizations/ja_JP.json | 482 ------------ localizations/ko_KR.json | 592 -------------- localizations/pt_BR.json | 485 ------------ localizations/ru_RU.json | 475 ------------ localizations/tr_TR.json | 423 ----------- localizations/zh_CN.json | 624 --------------- localizations/zh_TW.json | 610 --------------- 12 files changed, 7339 deletions(-) delete mode 100644 localizations/ar_AR.json delete mode 100644 localizations/de_DE.json delete mode 100644 localizations/es_ES.json delete mode 100644 localizations/fr_FR.json delete mode 100644 localizations/it_IT.json delete mode 100644 localizations/ja_JP.json delete mode 100644 localizations/ko_KR.json delete mode 100644 localizations/pt_BR.json delete mode 100644 localizations/ru_RU.json delete mode 100644 localizations/tr_TR.json delete mode 100644 localizations/zh_CN.json delete mode 100644 localizations/zh_TW.json diff --git a/localizations/ar_AR.json b/localizations/ar_AR.json deleted file mode 100644 index abbbcff40..000000000 --- a/localizations/ar_AR.json +++ /dev/null @@ -1,518 +0,0 @@ -{ - "rtl": true, - "Loading...": "لحظة...", - "view": "اعرض ", - "api": "واجهة البرمجة", - "built with gradio": "مبني باستخدام gradio", - "Stable Diffusion checkpoint": "أوزان نموذج الإنتشار المسقر", - "txt2img": "نص إلى صورة", - "Prompt": "الطلب", - "Prompt (press Ctrl+Enter or Alt+Enter to generate)": "الطلب (لبدء الإنتاج Ctrl+Enter أو Alt+Enter اضغط)", - "Negative prompt": "عكس الطلب", - "Negative prompt (press Ctrl+Enter or Alt+Enter to generate)": "عكس الطلب (لبدء الإنتاج Ctrl+Enter أو Alt+Enter اضغط)", - "Add a random artist to the prompt.": "أضف فنان عشوائي للطلب", - "Read generation parameters from prompt or last generation if prompt is empty into user interface.": "اقرأ عوامل الإنتاج من الطلب أو من الإنتاج السابق إذا كان الطلب فارغا", - "Save style": "احتفظ بالطلب وعكسه كإضافة", - "Apply selected styles to current prompt": "ألحق الإضافات المحددة إلى الطلب وعكسه", - "Generate": "أنتج", - "Skip": "تخطى", - "Stop processing current image and continue processing.": "لا تكمل خطوات هذة الحزمة وانتقل إلى الحزمة التالية", - "Interrupt": "توقف", - "Stop processing images 
and return any results accumulated so far.": "توقف عن الإنتاج واعرض ما تم إلى الآن", - "Style 1": "الإضافة 1", - "Style to apply; styles have components for both positive and negative prompts and apply to both": "الإضافات (styles) عبارة عن كلمات تتكرر كثيرا يتم إلحاقها بالطلب وعكسه عند الرغبة", - "Style 2": "الإضافة 2", - "Do not do anything special": "لا يغير شيئا", - "Sampling Steps": "عدد الخطوات", - "Sampling method": "أسلوب الخطو", - "Which algorithm to use to produce the image": "Sampler: اسم نظام تحديد طريقة تغيير المسافات بين الخطوات", - "Euler a": "Euler a", - "Euler Ancestral - very creative, each can get a completely different picture depending on step count, setting steps to higher than 30-40 does not help": "Euler Ancestral: طريقة مبدعة يمكن أن تنتج صور مختلفة على حسب عدد الخطوات، لا تتغير بعد 30-40 خطوة", - "Euler": "Euler", - "LMS": "LMS", - "Heun": "Heun", - "DPM2": "DPM2", - "DPM2 a": "DPM2 a", - "DPM fast": "DPM fast", - "DPM adaptive": "DPM adaptive", - "LMS Karras": "LMS Karras", - "DPM2 Karras": "DPM2 Karras", - "DPM2 a Karras": "DPM2 a Karras", - "DDIM": "DDIM", - "Denoising Diffusion Implicit Models - best at inpainting": "Denoising Diffusion Implicit Models: الأفضل في الإنتاج الجزئي", - "PLMS": "PLMS", - "Width": "العرض", - "Height": "الإرتفاع", - "Restore faces": "تحسين الوجوه", - "Tiling": "ترصيف", - "Produce an image that can be tiled.": "أنتج صور يمكن ترصيفها بجانب بعضها كالبلاط", - "Highres. fix": "إصلاح الدقة العالية", - "Use a two step process to partially create an image at smaller resolution, upscale, and then improve details in it without changing composition": "أنتج صورة بدقة منخفضة ثم قم برفع الدقة فيما بعد لمنع التشوهات التي تحصل عندما تكون الدقة المطلوبة كبيرة", - "Firstpass width": "العرض الأولي", - "Firstpass height": "الإرتفاع الأولي", - "Denoising strength": "المدى", - "Determines how little respect the algorithm should have for image's content. At 0, nothing will change, and at 1 you'll get an unrelated image. With values below 1.0, processing will take less steps than the Sampling Steps slider specifies.": "Denoising strength: حدد مدى الإبتعاد عن الصورة (عدد الخطوات الفعلي = عدد الخطوات * المدى)", - "Batch count": "عدد الحزم", - "How many batches of images to create": "يتم إنتاج الصور على دفعات، كل دفعة فيها حزمة من الصور", - "Batch size": "حجم الحزمة", - "How many image to create in a single batch": "Batch size: إنتاج حزمة صور أسرع من إنتاجهم فرادى، حدد عدد الصور في كل حزمة", - "CFG Scale": "التركيز", - "Classifier Free Guidance Scale - how strongly the image should conform to prompt - lower values produce more creative results": "CFG scale: يحدد مقدار التركيز على تلبية الطلب وتجنب عكسه، كلما زاد قل الإبداع", - "Seed": "البذرة", - "A value that determines the output of random number generator - if you create an image with same parameters and seed as another image, you'll get the same result": "Seed: رقم طبيعي عشوائي يسمح بإعادة إنتاج نفس الصورة إذا توافقت قيم العوامل الأخرى", - "Set seed to -1, which will cause a new random number to be used every time": "استخدم بذرة جديدة في كل مرة (نرمز لهذا الخيار بجعل قيمة البذرة 1-)", - "Reuse seed from last generation, mostly useful if it was randomed": "أعد استخدام البذرة من الإنتاج السابق", - "Extra": "مزج", - "Variation seed": "بذرة الممزوج", - "Seed of a different picture to be mixed into the generation.": "Variation seed: بذرة صورة أخرى ليتم مزجها مع الصورة الحالية", - "Variation strength": "أثر الممزوج", - "How strong of a variation to produce. At 0, there will be no effect. 
At 1, you will get the complete picture with variation seed (except for ancestral samplers, where you will just get something).": "Variation seed strength: مقدار أثر الصورة المدمجة على النتيجة النهائية (0: لا أثر، 1: أثر كامل ما عدا عند استخدام أسلوب خطو سلفي Ancestral)", - "Resize seed from width": "عرض الممزوج", - "Resize seed from height": "إرتفاع الممزوج", - "Make an attempt to produce a picture similar to what would have been produced with same seed at specified resolution": "Seed resize from: حدد دقة صورة الممزوج (0: نفس دقة الإنتاج)", - "Open for Clip Aesthetic!": "تضمين تجميلي", - "▼": "▼", - "Aesthetic weight": "أثر التضمين", - "Aesthetic steps": "عدد الخطوات", - "Aesthetic learning rate": "معدل التعلم", - "Slerp interpolation": "امزج بطريقة كروية", - "Aesthetic imgs embedding": "التضمين", - "None": "بدون", - "Aesthetic text for imgs": "الطلب (اختياري)", - "This text is used to rotate the feature space of the imgs embs": "لإعادة توجيه التضمين التجميلي", - "Slerp angle": "أثر الطلب", - "Is negative text": "الطلب عكسي", - "Script": "أدوات خاصة", - "Prompt matrix": "مصفوفة طلبات", - "Put variable parts at start of prompt": "الجزء المتغير في بداية الطلب", - "Prompts from file or textbox": " قائمة طلبات", - "Iterate seed every line": "غير البذرة مع كل طلب", - "List of prompt inputs": "قائمة الطلبات", - "Upload prompt inputs": "اجلب الطلبات من ملف", - "Drop File Here": "اسقط ملف هنا", - "-": "-", - "or": "أو", - "Click to Upload": "انقر للرفع", - "X/Y plot": "مصفوفة عوامل", - "X type": "العامل الأول", - "Nothing": "لا شيء", - "Var. seed": "بذرة الممزوج", - "Var. strength": "أثر الممزوج", - "Steps": "عدد الخطوات", - "Prompt S/R": "كلمات بديلة", - "Prompt order": "ترتيب الكلمات", - "Sampler": "أسلوب الخطو", - "Checkpoint name": "ملف الأوزان", - "Hypernetwork": "الشبكة الفائقة", - "Hypernet str.": "قوة الشبكة الفائقة", - "Inpainting conditioning mask strength": "قوة قناع الإنتاج الجزئي", - "Only applies to inpainting models. Determines how strongly to mask off the original image for inpainting and img2img. 1.0 means fully masked, which is the default behaviour. 0.0 means a fully unmasked conditioning. Lower values will help preserve the overall composition of the image, but will struggle with large changes.": "حدد مدى صرامة قناع الإنتاج، يصبح القناع شفاف إذا قوته 0 (لا يعمل إلا مع ملفات أوزان الإنتاج الجزئي: inpainting)", - "Sigma Churn": "العشوائية (Schurn)", - "Sigma min": "أدنى تشويش (Stmin)", - "Sigma max": "أقصى تشويش (Stmax)", - "Sigma noise": "التشويش (Snoise)", - "Eta": "العامل Eta η", - "Clip skip": "تخطي آخر طبقات CLIP", - "Denoising": "المدى", - "Cond. 
Image Mask Weight": "قوة قناع الإنتاج الجزئي", - "X values": "قيم العامل الأول", - "Separate values for X axis using commas.": "افصل القيم بفواصل (,) من اليسار إلى اليمين", - "Y type": "العامل الثاني", - "Y values": "قيم العامل الثاني", - "Separate values for Y axis using commas.": "افصل القيم بفواصل (,) من الأعلى إلى الأسفل", - "Draw legend": "أضف مفتاح التوضيح", - "Include Separate Images": "أضف الصور منفصلة", - "Keep -1 for seeds": "استخدم بذور عشوائية", - "Save": "احفظ", - "Write image to a directory (default - log/images) and generation parameters into csv file.": "احفظ الصور مع ملف العوامل بصيغة CSV", - "Send to img2img": "أرسل لصورة إلى صورة", - "Send to inpaint": "أرسل للإنتاج الجزئي", - "Send to extras": "أرسل للمعالجة", - "Open images output directory": "افتح مجلد الصور المخرجة", - "Make Zip when Save?": "ضع النتائج في ملف مضغوط عند الحفظ", - "img2img": "صورة إلى صورة", - "Interrogate\nCLIP": "استجواب\nCLIP", - "Drop Image Here": "اسقط صورة هنا", - "Just resize": "تغيير الحجم فقط", - "Resize image to target resolution. Unless height and width match, you will get incorrect aspect ratio.": "غير حجم الصورة بدون مراعات اتزان الأبعاد", - "Crop and resize": "تغيير الحجم وقص الأطراف", - "Resize the image so that entirety of target resolution is filled with the image. Crop parts that stick out.": "غير حجم الصورة واقتص الأطراف الزائدة", - "Resize and fill": "تغيير الحجم وتبطين الأطراف", - "Resize the image so that entirety of image is inside target resolution. Fill empty space with image's colors.": "غير حجم الصورة واملأ الأطراف الزائدة بألوان من الصورة", - "img2img alternative test": "استجواب الصورة (تجريبي)", - "should be 2 or lower.": "يفترض أن يكون 2 أو أقل", - "Override `Sampling method` to Euler?(this method is built for it)": "استخدم أسلوب خطو Euler (مستحسن)", - "Override `prompt` to the same value as `original prompt`?(and `negative prompt`)": "استبدل الطلب وعكسه في الأعلى بالطلب الأصلي وعكسه التاليين", - "Original prompt": "الطلب الأصلي", - "Original negative prompt": "عكس الطلب الأصلي", - "Override `Sampling Steps` to the same value as `Decode steps`?": "استبدل عدد الخطوات بعدد الخطوات الأصلية", - "Decode steps": "عدد الخطوات الأصلية", - "Override `Denoising strength` to 1?": "اجعل المدى 1", - "Decode CFG scale": "التركيز", - "Randomness": "العشوائية", - "Sigma adjustment for finding noise for image": "لا تسمح بتثبيت قيمة التباين", - "Loopback": "اجترار وتكرار", - "Loops": "عدد المرات", - "How many times to repeat processing an image and using it as input for the next iteration": "كم مرة يتم أخذ مخرجات الإنتاج كمدخلات وإعادة الإنتاج مرة أخرى", - "Denoising strength change factor": "معدل تغيير المدى", - "In loopback mode, on each loop the denoising strength is multiplied by this value. <1 means decreasing variety so your sequence will converge on a fixed picture. 
>1 means increasing variety so your sequence will become more and more chaotic.": "يتم ضرب المدى بهذا الرقم في كل مرة، إذا استخدمت رقم أصغر من 1 يمكن الرسو على نتيجة، وإذا استخدمت رقم أكبر من 1 تصبح النتيجة عشوائية", - "Outpainting mk2": "توسيع الصورة (mk2)", - "Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8": "يفضل استخدام: 80-100 خطوة، أسلوب Euler a، المدى 0.8", - "Pixels to expand": "عدد البيكسلات", - "Mask blur": "تنعيم القناع", - "How much to blur the mask before processing, in pixels.": "مقدرا تنعيم القناع قبل استخدامه (يقاس بالبيكسل)", - "Outpainting direction": "اتجاه توسيع الصورة", - "left": "يسار", - "right": "يمين", - "up": "فوق", - "down": "تحت", - "Fall-off exponent (lower=higher detail)": "قوة السقوط (كلما قلت زادت التفاصيل)", - "Color variation": "تنوع الألوان", - "Poor man's outpainting": "توسيع الصورة (بدائي)", - "Masked content": "محتويات القناع", - "What to put inside the masked area before processing it with Stable Diffusion.": "ما يوضع مكان الفراغ في الصورة الذي نريد إنتاج محتوياته", - "fill": "ألوان", - "fill it with colors of the image": "املأ باستخدام ألوان مأخوذة من باقي الصورة", - "original": "بدون تغيير", - "keep whatever was there originally": "أبق محتويات ما تحت القناع كما هي", - "latent noise": "تشويش كامن", - "fill it with latent space noise": "املأه باستخدام تشويش من الفضاء الكامن", - "latent nothing": "تصفير كامن", - "fill it with latent space zeroes": "استبدل مكان القناع في الفضاء الكامن بأصفار", - "SD upscale": "مضاعفة الدقة بنموذج الإنتشار المستقر", - "Will upscale the image to twice the dimensions; use width and height sliders to set tile size": "سيتم تكبير حجم الصورة إلى الضعف، استخدم الطول والإرتفاع في الأعلى لتحديد حجم نافذة المكبر", - "Tile overlap": "تداخل النافذة", - "For SD upscale, how much overlap in pixels should there be between tiles. 
Tiles overlap so that when they are merged back into one picture, there is no clearly visible seam.": "المكبر ينظر إلى أجزاء الصورة من خلال نافذة لتكبير المحتوى ثم ينتقل إلى الجزء المجاور، يفضل أن يكون هناك تداخل بين كل رقعة لكي لا يكون هناك اختلاف واضح بينهم", - "Upscaler": "طريقة التكبير", - "Lanczos": "Lanczos", - "LDSR": "LDSR", - "ScuNET GAN": "ScuNET GAN", - "ScuNET PSNR": "ScuNET PSNR", - "ESRGAN_4x": "ESRGAN_4x", - "SwinIR 4x": "SwinIR 4x", - "Inpaint": "إنتاج جزئي", - "Draw mask": "ارسم القناع", - "Upload mask": "ارفع القناع", - "Inpaint masked": "أنتج ما بداخل القناع", - "Inpaint not masked": "أنتج ما حول القناع", - "Inpaint at full resolution": "إنتاج بالدقة الكاملة", - "Upscale masked region to target resolution, do inpainting, downscale back and paste into original image": "كبر ما يراد إعادة إنتاجه ثم صغر النتيجة وألصقها في مكانها", - "Inpaint at full resolution padding, pixels": "عدد بيكسلات التبطين", - "Batch img2img": "صور إلى صور", - "Process images in a directory on the same machine where the server is running.": "حدد مسار مجلد صور موجود في جهاز الخادم", - "Use an empty output directory to save pictures normally instead of writing to the output directory.": "يمكنك أيضا تحديد مجلد حفظ النتائج (غير الإفتراضي)", - "Input directory": "مجلد المدخلات", - "Output directory": "مجلد المخرجات", - "Extras": "معالجة", - "Single Image": "صورة واحدة", - "Source": "المصدر", - "Scale by": "مضاعفة الدقة", - "Resize": "تغيير الحجم", - "Upscaler 1": "المكبر الأول", - "Upscaler 2": "المكبر الثاني", - "Upscaler 2 visibility": "أثر المكبر الثاني", - "GFPGAN visibility": "أثر GFPGAN (محسن وجوه)", - "CodeFormer visibility": "أثر CodeFormer (محسن وجوه)", - "CodeFormer weight (0 = maximum effect, 1 = minimum effect)": "وزن CodeFormer (يزيد التفاصيل على حساب الجودة)", - "Upscale Before Restoring Faces": "كبر قبل تحسين الوجوه", - "Scale to": "دقة محددة", - "Crop to fit": "قص الأطراف الزائدة إذا لم تتناسب الأبعاد", - "Batch Process": "حزمة صور", - "Batch from Directory": "حزمة من مجلد", - "A directory on the same machine where the server is running.": "مسار مجلد صور موجود في جهاز الخادم", - "Leave blank to save images to the default path.": "اتركه فارغا لاستخدام المسار الإفتراضي", - "Show result images": "اعرض الصور الناتجة", - "PNG Info": "عوامل الصورة", - "Send to txt2img": "أرسل لنص إلى صورة", - "Checkpoint Merger": "مزج الأوزان", - "A merger of the two checkpoints will be generated in your": "سيتم مزج الأوزان التالية وحفظ الأوزان المدجمة مع ", - "checkpoint": "الأوزان", - "directory.": " مجلد.", - "Primary model (A)": "الأوزان الأولى (A)", - "Secondary model (B)": "الأوزان الثانية (B)", - "Tertiary model (C)": "الأوزان الثالثة (C)", - "Custom Name (Optional)": "الاسم الجديد (اختياري)", - "Multiplier (M) - set to 0 to get model A": "العامل M: مسافة الإبتعاد عن الأوزان الأولى A", - "Interpolation Method": "طريقة المزج", - "Weighted sum": "خطية", - "Result = A * (1 - M) + B * M": "النتيجة = A * (1 - M) + B * M", - "Add difference": "جمع الفرق", - "Result = A + (B - C) * M": "النتيجة = A + (B - C) * M", - "Save as float16": "احفظ بدقة float16", - "Run": "تشغيل", - "Train": "تدريب", - "See": "اقرأ", - "wiki": " الـwiki ", - "for detailed explanation.": "لمعرفة المزيد", - "Create embedding": "إنشاء تضمين", - "Name": "الاسم", - "Initialization text": "النص المبدأي", - "Number of vectors per token": "عدد المتجهات لكل وحدة لغوية", - "Overwrite Old Embedding": "استبدل التضمين القديم", - "Create hypernetwork": "إنشاء شبكة فائقة", - "Modules": "الأجزاء", - "Enter hypernetwork layer structure": "ترتيب مضاعفات 
عرض الطبقات", - "1st and last digit must be 1. ex:'1, 2, 1'": "المضاعفين الأول والأخير يجب أن يكونا 1، مثال: 1, 2, 1", - "Select activation function of hypernetwork": "دالة التنشيط", - "linear": "linear", - "relu": "relu", - "leakyrelu": "leakyrelu", - "elu": "elu", - "swish": "swish", - "tanh": "tanh", - "sigmoid": "sigmoid", - "celu": "celu", - "gelu": "gelu", - "glu": "glu", - "hardshrink": "hardshrink", - "hardsigmoid": "hardsigmoid", - "hardtanh": "hardtanh", - "logsigmoid": "logsigmoid", - "logsoftmax": "logsoftmax", - "mish": "mish", - "prelu": "prelu", - "rrelu": "rrelu", - "relu6": "relu6", - "selu": "selu", - "silu": "silu", - "softmax": "softmax", - "softmax2d": "softmax2d", - "softmin": "softmin", - "softplus": "softplus", - "softshrink": "softshrink", - "softsign": "softsign", - "tanhshrink": "tanhshrink", - "threshold": "threshold", - "Select Layer weights initialization. relu-like - Kaiming, sigmoid-like - Xavier is recommended": "تهيئة الأوزان (استخدم Kaiming مع relu وأمثالها وXavier مع sigmoid وأمثالها)", - "Normal": "Normal", - "KaimingUniform": "KaimingUniform", - "KaimingNormal": "KaimingNormal", - "XavierUniform": "XavierUniform", - "XavierNormal": "XavierNormal", - "Add layer normalization": "أضف تسوية الطبقات (LayerNorm)", - "Use dropout": "استخدم الإسقاط (Dropout)", - "Overwrite Old Hypernetwork": "استبدل الشبكة الفائقة القديمة", - "Preprocess images": "معالجة مسبقة للصور", - "Source directory": "مجلد المدخلات", - "Destination directory": "مجلد المخرجات", - "Existing Caption txt Action": "اذا كانت الصورة لديها توصيف (طلب)", - "ignore": "تجاهل", - "copy": "انسخ", - "prepend": "أسبق", - "append": "ألحق", - "Create flipped copies": "انشئ نسخ معكوسة للصور", - "Split oversized images": "قسّم الصور الكبيرة", - "Split image threshold": "حد تقسيم الصور الكبيرة", - "Split image overlap ratio": "نسبة تداخل اقسام الصور الكبيرة", - "Auto focal point crop": "اقتصاص تلقائي", - "Focal point face weight": "تمركز الوجوه", - "Focal point entropy weight": "تمركز التنوع", - "Focal point edges weight": "تمركز الحواف", - "Create debug image": "احفظ نتائج التحليل أيضا", - "Use BLIP for caption": "استخدم BLIP لتوصيف الصور", - "Use deepbooru for caption": "استخدم deepbooru لتوصيف الصور", - "Preprocess": "معالجة مسبقة", - "Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images": "درب التضمين أو الشبكة الفائقة: يجب تحديد مجلد يحتوي صور مربعة فقط ", - "[wiki]": "[wiki]", - "Embedding": "التضمين", - "Embedding Learning rate": "معدل تعلم التضمين", - "Hypernetwork Learning rate": "معدل تعلم الشبكة الفائقة", - "Dataset directory": "مجلد مجموعة البيانات", - "Path to directory with input images": "مسار مجلد الصور المدخلة", - "Log directory": "مجلد السجل", - "Path to directory where to write outputs": "مسار مجلد الصور المخرجة", - "Prompt template file": "ملف صيغ الطلبات", - "Max steps": "أقصى عدد لخطوات التدريب", - "Save an image to log directory every N steps, 0 to disable": "احفظ صورة في السجل بعد كل كم خطوة تدريب (إذا 0 لا تحفظ)", - "Save a copy of embedding to log directory every N steps, 0 to disable": "احفظ التضمين في السجل بعد كل كم خطوة تدريب (إذا 0 لا تحفظ)", - "Save images with embedding in PNG chunks": "احفظ التضمين بداخل ملف الصورة كعامل يمكن استخراجه من عوامل الصورة (صيغة PNG)", - "Read parameters (prompt, etc...) 
from txt2img tab when making previews": "استخدم قيم العوامل الموجودة في تبويب نص إلى صورة لعرض نتائجهم أثناء التدريب", - "Train Hypernetwork": "درّب الشبكة الفائقة", - "Train Embedding": "درّب التضمين", - "Create aesthetic embedding": "تضمين تجميلي", - "Create an aesthetic embedding out of any number of images": "انشئ تضمين تجميلي يعبر عن مجموعة من الصور", - "Create images embedding": "انشئ التضمين التجميلي", - "Image Browser": "معرض الصور", - "Load": "حمّل", - "Images directory": "مجلد الصور", - "First Page": "الصفحة الأولى", - "Prev Page": "الصفحة السابقة", - "Page Index": "رقم الصفحة", - "Next Page": "الصفحة التالية", - "End Page": "الصفحة الأخيرة", - "number of images to delete consecutively next": "عدد الصور المتتالية للحذف", - "Delete": "احذف", - "Generate Info": "معلومات عامة", - "File Name": "اسم الملف", - "Collect": "اجمع", - "extras": "معالجة", - "favorites": "المفضلة", - "custom fold": "مجلد آخر", - "Input images directory": "مجلد الصور المدخلة", - "Settings": "إعدادات", - "Apply settings": "طبق الإعدادت", - "Saving images/grids": "حفظ الصور وجداولها", - "Always save all generated images": "احفظ كل الصور المنتجة", - "File format for images": "صيغة ملفات الصور", - "Images filename pattern": "نمط تسمية الصور", - "Use following tags to define how filenames for images are chosen: [steps], [cfg], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [prompt_words], [date], [datetime], [datetime], [datetime