From 887a5122083d27fd819bfeb54524dbdc791961cc Mon Sep 17 00:00:00 2001
From: w-e-w <40751091+w-e-w@users.noreply.github.com>
Date: Fri, 15 Mar 2024 21:06:54 +0900
Subject: [PATCH] fix issue with Styles when Hires prompt is used

---
 modules/infotext_utils.py    | 31 ++++++++++++++++++++-----------
 modules/infotext_versions.py |  1 +
 modules/processing.py        | 15 ++++++++-------
 3 files changed, 29 insertions(+), 18 deletions(-)

diff --git a/modules/infotext_utils.py b/modules/infotext_utils.py
index a1cbfb17d..723cb1f82 100644
--- a/modules/infotext_utils.py
+++ b/modules/infotext_utils.py
@@ -265,17 +265,6 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model
         else:
             prompt += ("" if prompt == "" else "\n") + line
 
-    if shared.opts.infotext_styles != "Ignore":
-        found_styles, prompt, negative_prompt = shared.prompt_styles.extract_styles_from_prompt(prompt, negative_prompt)
-
-        if shared.opts.infotext_styles == "Apply":
-            res["Styles array"] = found_styles
-        elif shared.opts.infotext_styles == "Apply if any" and found_styles:
-            res["Styles array"] = found_styles
-
-    res["Prompt"] = prompt
-    res["Negative prompt"] = negative_prompt
-
     for k, v in re_param.findall(lastline):
         try:
             if v[0] == '"' and v[-1] == '"':
@@ -290,6 +279,26 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model
         except Exception:
             print(f"Error parsing \"{k}: {v}\"")
 
+    # Extract styles from prompt
+    if shared.opts.infotext_styles != "Ignore":
+        found_styles, prompt_no_styles, negative_prompt_no_styles = shared.prompt_styles.extract_styles_from_prompt(prompt, negative_prompt)
+
+        same_hr_styles = True
+        if ("Hires prompt" in res or "Hires negative prompt" in res) and (infotext_ver > infotext_versions.v180_hr_styles if (infotext_ver := infotext_versions.parse_version(res.get("Version"))) else True):
+            hr_prompt, hr_negative_prompt = res.get("Hires prompt", prompt), res.get("Hires negative prompt", negative_prompt)
+            hr_found_styles, hr_prompt_no_styles, hr_negative_prompt_no_styles = shared.prompt_styles.extract_styles_from_prompt(hr_prompt, hr_negative_prompt)
+            if same_hr_styles := found_styles == hr_found_styles:
+                res["Hires prompt"] = '' if hr_prompt_no_styles == prompt_no_styles else hr_prompt_no_styles
+                res['Hires negative prompt'] = '' if hr_negative_prompt_no_styles == negative_prompt_no_styles else hr_negative_prompt_no_styles
+
+        if same_hr_styles:
+            prompt, negative_prompt = prompt_no_styles, negative_prompt_no_styles
+            if (shared.opts.infotext_styles == "Apply if any" and found_styles) or shared.opts.infotext_styles == "Apply":
+                res['Styles array'] = found_styles
+
+    res["Prompt"] = prompt
+    res["Negative prompt"] = negative_prompt
+
     # Missing CLIP skip means it was set to 1 (the default)
     if "Clip skip" not in res:
         res["Clip skip"] = "1"
diff --git a/modules/infotext_versions.py b/modules/infotext_versions.py
index b5552a312..0d2d6282a 100644
--- a/modules/infotext_versions.py
+++ b/modules/infotext_versions.py
@@ -6,6 +6,7 @@ import re
 v160 = version.parse("1.6.0")
 v170_tsnr = version.parse("v1.7.0-225")
 v180 = version.parse("1.8.0")
+v180_hr_styles = version.parse("1.8.0-136")  # todo: change to the actual version number after merge
 
 
 def parse_version(text):
diff --git a/modules/processing.py b/modules/processing.py
index 86194b057..d6873a510 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -702,7 +702,7 @@ def program_version():
     return res
 
 
-def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iteration=0, position_in_batch=0, use_main_prompt=False, index=None, all_negative_prompts=None):
+def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iteration=0, position_in_batch=0, use_main_prompt=False, index=None, all_negative_prompts=None, all_hr_prompts=None, all_hr_negative_prompts=None):
     if index is None:
         index = position_in_batch + iteration * p.batch_size
 
@@ -745,11 +745,18 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter
         "RNG": opts.randn_source if opts.randn_source != "GPU" else None,
         "NGMS": None if p.s_min_uncond == 0 else p.s_min_uncond,
         "Tiling": "True" if p.tiling else None,
+        "Hires prompt": None,  # This is set later, insert here to keep order
+        "Hires negative prompt": None,  # This is set later, insert here to keep order
         **p.extra_generation_params,
         "Version": program_version() if opts.add_version_to_infotext else None,
         "User": p.user if opts.add_user_name_to_info else None,
     }
 
+    if all_hr_prompts := all_hr_prompts or getattr(p, 'all_hr_prompts', None):
+        generation_params['Hires prompt'] = all_hr_prompts[index] if all_hr_prompts[index] != all_prompts[index] else None
+    if all_hr_negative_prompts := all_hr_negative_prompts or getattr(p, 'all_hr_negative_prompts', None):
+        generation_params['Hires negative prompt'] = all_hr_negative_prompts[index] if all_hr_negative_prompts[index] != all_negative_prompts[index] else None
+
     generation_params_text = ", ".join([k if k == v else f'{k}: {infotext_utils.quote(v)}' for k, v in generation_params.items() if v is not None])
 
     prompt_text = p.main_prompt if use_main_prompt else all_prompts[index]
@@ -1194,12 +1201,6 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
         if self.hr_sampler_name is not None and self.hr_sampler_name != self.sampler_name:
             self.extra_generation_params["Hires sampler"] = self.hr_sampler_name
 
-        if tuple(self.hr_prompt) != tuple(self.prompt):
-            self.extra_generation_params["Hires prompt"] = self.hr_prompt
-
-        if tuple(self.hr_negative_prompt) != tuple(self.negative_prompt):
-            self.extra_generation_params["Hires negative prompt"] = self.hr_negative_prompt
-
         self.latent_scale_mode = shared.latent_upscale_modes.get(self.hr_upscaler, None) if self.hr_upscaler is not None else shared.latent_upscale_modes.get(shared.latent_upscale_default_mode, "nearest")
         if self.enable_hr and self.latent_scale_mode is None:
             if not any(x.name == self.hr_upscaler for x in shared.sd_upscalers):
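
Note (illustrative sketch, not part of the patch): the new block in parse_generation_parameters only detaches styles when the main and hires prompts resolve to the same style set; otherwise it leaves both prompts untouched so no text is lost on paste. A minimal standalone approximation of that gating, using a toy extract() with a made-up "cinematic" style in place of the real shared.prompt_styles.extract_styles_from_prompt:

    def extract(prompt, styles=(("cinematic", ", dramatic lighting, film grain"),)):
        # Toy stand-in: strip any known style suffix and report which ones matched.
        found = []
        for name, text in styles:
            if prompt.endswith(text):
                prompt = prompt[:-len(text)]
                found.append(name)
        return found, prompt

    prompt = "a castle, dramatic lighting, film grain"
    hr_prompt = "a castle, dramatic lighting, film grain"

    found_styles, prompt_no_styles = extract(prompt)
    hr_found_styles, hr_prompt_no_styles = extract(hr_prompt)

    # Same gating as the patch: strip styles only when both prompts agree on them.
    if same_hr_styles := found_styles == hr_found_styles:
        prompt = prompt_no_styles
        # '' mirrors res["Hires prompt"] = '' above, meaning "same as the main prompt"
        hr_prompt = '' if hr_prompt_no_styles == prompt_no_styles else hr_prompt_no_styles

    print(found_styles, repr(prompt), repr(hr_prompt))
    # ['cinematic'] 'a castle' ''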