From f98f4f73aa4898c754681f411608df5f248619f6 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sun, 4 Jun 2023 10:56:48 +0300
Subject: [PATCH] infer styles from prompts, and an option to control the
behavior
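
When generation parameters are read from text into the UI (PNG info or pasted
infotext), the prompts are now scanned for text matching the saved styles:
matched style text is removed from the prompt and the corresponding styles are
selected in the styles dropdown instead. The new "infotext_styles" setting
(Ignore / Apply / Discard / Apply if any) controls whether and how this
inference is applied.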
---
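Not part of the commit: a quick illustration of how the new
StyleDatabase.extract_styles_from_prompt is expected to behave, assuming the
webui root is on the import path and styles.csv defines a hypothetical style
named "cinematic" whose prompt is "{prompt}, cinematic lighting, 35mm" and
whose negative prompt is empty:

    from modules.styles import StyleDatabase

    # Load the styles defined in styles.csv (hypothetical example file).
    db = StyleDatabase("styles.csv")

    found, prompt, negative_prompt = db.extract_styles_from_prompt(
        "a castle on a hill, cinematic lighting, 35mm", "")

    print(found)            # ['cinematic']
    print(prompt)           # 'a castle on a hill'
    print(negative_prompt)  # ''

Matching strips a style's text off the end of the prompt and repeats until no
saved style matches anymore; the collected names are reversed so they come out
in the order the styles would have been applied.
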
modules/generation_parameters_copypaste.py | 8 +++
modules/shared.py | 13 ++++-
modules/styles.py | 67 +++++++++++++++++++++-
modules/ui.py | 2 +
4 files changed, 87 insertions(+), 3 deletions(-)
diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py
index 071bd9ead..4c420e5f1 100644
--- a/modules/generation_parameters_copypaste.py
+++ b/modules/generation_parameters_copypaste.py
@@ -265,6 +265,14 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model
         else:
             prompt += ("" if prompt == "" else "\n") + line
 
+    if shared.opts.infotext_styles != "Ignore":
+        found_styles, prompt, negative_prompt = shared.prompt_styles.extract_styles_from_prompt(prompt, negative_prompt)
+
+        if shared.opts.infotext_styles == "Apply":
+            res["Styles array"] = found_styles
+        elif shared.opts.infotext_styles == "Apply if any" and found_styles:
+            res["Styles array"] = found_styles
+
     res["Prompt"] = prompt
     res["Negative prompt"] = negative_prompt
diff --git a/modules/shared.py b/modules/shared.py
index 7025a7547..53e3d5daa 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -260,6 +260,10 @@ class OptionInfo:
         self.comment_after += f"({info})"
         return self
 
+    def html(self, html):
+        self.comment_after += html
+        return self
+
     def needs_restart(self):
         self.comment_after += " (requires restart)"
         return self
@@ -488,7 +492,14 @@ options_templates.update(options_section(('infotext', "Infotext"), {
"add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"),
"add_model_name_to_info": OptionInfo(True, "Add model name to generation information"),
"add_version_to_infotext": OptionInfo(True, "Add program version to generation information"),
- "disable_weights_auto_swap": OptionInfo(True, "When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint."),
+ "disable_weights_auto_swap": OptionInfo(True, "Disregard checkpoint information from pasted infotext").info("when reading generation parameters from text into UI"),
+ "infotext_styles": OptionInfo("Apply if any", "Infer styles from prompts of pasted infotext", gr.Radio, {"choices": ["Ignore", "Apply", "Discard", "Apply if any"]}).info("when reading generation parameters from text into UI)").html("""
+- Ignore: keep prompt and styles dropdown as it is.
+- Apply: remove style text from prompt, always replace styles dropdown value with found styles (even if none are found).
+- Discard: remove style text from prompt, keep styles dropdown as it is.
+- Apply if any: remove style text from prompt; if any styles are found in prompt, put them into styles dropdown, otherwise keep it as it is.
+
"""),
+
 }))
 
 options_templates.update(options_section(('ui', "Live previews"), {
diff --git a/modules/styles.py b/modules/styles.py
index 34e1b5e15..ec0e1bc51 100644
--- a/modules/styles.py
+++ b/modules/styles.py
@@ -1,6 +1,7 @@
 import csv
 import os
 import os.path
+import re
 import typing
 import shutil
@@ -28,6 +29,44 @@ def apply_styles_to_prompt(prompt, styles):
     return prompt
 
 
+re_spaces = re.compile(" +")
+
+
+def extract_style_text_from_prompt(style_text, prompt):
+    stripped_prompt = re.sub(re_spaces, " ", prompt.strip())
+    stripped_style_text = re.sub(re_spaces, " ", style_text.strip())
+    if "{prompt}" in stripped_style_text:
+        left, right = stripped_style_text.split("{prompt}", 1)
+        if stripped_prompt.startswith(left) and stripped_prompt.endswith(right):
+            prompt = stripped_prompt[len(left):len(stripped_prompt)-len(right)]
+            return True, prompt
+    else:
+        if stripped_prompt.endswith(stripped_style_text):
+            prompt = stripped_prompt[:len(stripped_prompt)-len(stripped_style_text)]
+
+            if prompt.endswith(', '):
+                prompt = prompt[:-2]
+
+            return True, prompt
+
+    return False, prompt
+
+
+def extract_style_from_prompts(style: PromptStyle, prompt, negative_prompt):
+    if not style.prompt and not style.negative_prompt:
+        return False, prompt, negative_prompt
+
+    match_positive, extracted_positive = extract_style_text_from_prompt(style.prompt, prompt)
+    if not match_positive:
+        return False, prompt, negative_prompt
+
+    match_negative, extracted_negative = extract_style_text_from_prompt(style.negative_prompt, negative_prompt)
+    if not match_negative:
+        return False, prompt, negative_prompt
+
+    return True, extracted_positive, extracted_negative
+
+
 class StyleDatabase:
     def __init__(self, path: str):
         self.no_style = PromptStyle("None", "", "")
@@ -67,10 +106,34 @@ class StyleDatabase:
         if os.path.exists(path):
             shutil.copy(path, f"{path}.bak")
 
-        fd = os.open(path, os.O_RDWR|os.O_CREAT)
+        fd = os.open(path, os.O_RDWR | os.O_CREAT)
         with os.fdopen(fd, "w", encoding="utf-8-sig", newline='') as file:
             # _fields is actually part of the public API: typing.NamedTuple is a replacement for collections.NamedTuple,
             # and collections.NamedTuple has explicit documentation for accessing _fields. Same goes for _asdict()
             writer = csv.DictWriter(file, fieldnames=PromptStyle._fields)
             writer.writeheader()
-            writer.writerows(style._asdict() for k, style in self.styles.items())
\ No newline at end of file
+            writer.writerows(style._asdict() for k, style in self.styles.items())
+
+    def extract_styles_from_prompt(self, prompt, negative_prompt):
+        extracted = []
+
+        applicable_styles = list(self.styles.values())
+
+        while True:
+            found_style = None
+
+            for style in applicable_styles:
+                is_match, new_prompt, new_neg_prompt = extract_style_from_prompts(style, prompt, negative_prompt)
+                if is_match:
+                    found_style = style
+                    prompt = new_prompt
+                    negative_prompt = new_neg_prompt
+                    break
+
+            if not found_style:
+                break
+
+            applicable_styles.remove(found_style)
+            extracted.append(found_style.name)
+
+        return list(reversed(extracted)), prompt, negative_prompt
diff --git a/modules/ui.py b/modules/ui.py
index 988b2003c..7ae33ab12 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -621,6 +621,7 @@ def create_ui():
             (subseed_strength, "Variation seed strength"),
             (seed_resize_from_w, "Seed resize from-1"),
             (seed_resize_from_h, "Seed resize from-2"),
+            (txt2img_prompt_styles, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()),
             (denoising_strength, "Denoising strength"),
             (enable_hr, lambda d: "Denoising strength" in d),
             (hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d)),
@@ -1036,6 +1037,7 @@ def create_ui():
             (subseed_strength, "Variation seed strength"),
             (seed_resize_from_w, "Seed resize from-1"),
             (seed_resize_from_h, "Seed resize from-2"),
+            (img2img_prompt_styles, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()),
             (denoising_strength, "Denoising strength"),
             (mask_blur, "Mask blur"),
             *modules.scripts.scripts_img2img.infotext_fields