Make the upscale button update the gallery with the new image rather than replace it.
parent a06dab8d7a
commit 233c66b36e
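The commit rewrites txt2img_upscale so that, instead of handing back only the new image, it returns a gallery of the same length in which just the selected slot holds the upscaled result. A minimal sketch of that gallery-rebuild idea, with plain strings standing in for the real Gradio gallery entries and PIL images (illustration only, not the webui code):

# Sketch: swap one slot of the gallery, keep everything else as-is.
def rebuild_gallery(gallery, index, upscaled):
    # Same length as before; only the clicked slot changes.
    return [upscaled if i == index else item for i, item in enumerate(gallery)]

gallery = ["grid.png", "img_0.png", "img_1.png"]
print(rebuild_gallery(gallery, 2, "img_1_upscaled.png"))
# ['grid.png', 'img_0.png', 'img_1_upscaled.png']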
@@ -732,7 +732,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter
         "Variation seed": (None if p.subseed_strength == 0 else (p.all_subseeds[0] if use_main_prompt else all_subseeds[index])),
         "Variation seed strength": (None if p.subseed_strength == 0 else p.subseed_strength),
         "Seed resize from": (None if p.seed_resize_from_w <= 0 or p.seed_resize_from_h <= 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}"),
-        "Denoising strength": getattr(p, 'denoising_strength', None),
+        "Denoising strength": p.extra_generation_params.get("Denoising strength"),
         "Conditional mask weight": getattr(p, "inpainting_mask_weight", shared.opts.inpainting_mask_weight) if p.is_using_inpainting_conditioning else None,
         "Clip skip": None if clip_skip <= 1 else clip_skip,
         "ENSD": opts.eta_noise_seed_delta if uses_ensd else None,
@@ -1198,6 +1198,8 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
 
     def init(self, all_prompts, all_seeds, all_subseeds):
         if self.enable_hr:
+            self.extra_generation_params["Denoising strength"] = self.denoising_strength
+
             if self.hr_checkpoint_name and self.hr_checkpoint_name != 'Use same checkpoint':
                 self.hr_checkpoint_info = sd_models.get_closet_checkpoint_match(self.hr_checkpoint_name)
 
@@ -1516,6 +1518,8 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
         self.mask_blur_y = value
 
     def init(self, all_prompts, all_seeds, all_subseeds):
+        self.extra_generation_params["Denoising strength"] = self.denoising_strength
+
         self.image_cfg_scale: float = self.image_cfg_scale if shared.sd_model.cond_stage_key == "edit" else None
 
         self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model)
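The three hunks above change how "Denoising strength" reaches the infotext: instead of reading p.denoising_strength unconditionally, create_infotext now reads extra_generation_params, which the init() methods fill in only when the value is actually used (hires fix in txt2img, or img2img). A rough stand-in illustration of the difference, using a fake class rather than the real StableDiffusionProcessing:

# Stand-in class to illustrate the reporting change; not the webui code.
class FakeProcessing:
    def __init__(self, denoising_strength, enable_hr):
        self.denoising_strength = denoising_strength
        self.enable_hr = enable_hr
        self.extra_generation_params = {}

    def init(self):
        # New behaviour: record the value only when a hires pass will run.
        if self.enable_hr:
            self.extra_generation_params["Denoising strength"] = self.denoising_strength

p = FakeProcessing(denoising_strength=0.7, enable_hr=False)
p.init()
print(getattr(p, 'denoising_strength', None))                # 0.7  (old lookup: reported even though unused)
print(p.extra_generation_params.get("Denoising strength"))   # None (new lookup: omitted from the infotext)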
@@ -7,36 +7,15 @@ from modules.infotext_utils import create_override_settings_dict
 from modules.shared import opts
 import modules.shared as shared
 from modules.ui import plaintext_to_html
+from PIL import Image
 import gradio as gr
 
 
-def txt2img_upscale(id_task: str, request: gr.Request, gallery, gallery_index, generation_info, *args):
-    assert len(gallery) > 0, 'No image to upscale'
-    assert 0 <= gallery_index < len(gallery), f'Bad image index: {gallery_index}'
-
-    geninfo = json.loads(generation_info)
-    all_seeds = geninfo["all_seeds"]
-
-    image_info = gallery[gallery_index] if 0 <= gallery_index < len(gallery) else gallery[0]
-    image = infotext_utils.image_from_url_text(image_info)
-
-    gallery_index_from_end = len(gallery) - gallery_index
-    image.seed = all_seeds[-gallery_index_from_end if gallery_index_from_end < len(all_seeds) + 1 else 0]
-
-    return txt2img(id_task, request, *args, firstpass_image=image)
-
-
-def txt2img(id_task: str, request: gr.Request, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_name: str, n_iter: int, batch_size: int, cfg_scale: float, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, hr_checkpoint_name: str, hr_sampler_name: str, hr_prompt: str, hr_negative_prompt, override_settings_texts, *args, firstpass_image=None):
+def txt2img_create_processing(id_task: str, request: gr.Request, prompt: str, negative_prompt: str, prompt_styles, steps: int, sampler_name: str, n_iter: int, batch_size: int, cfg_scale: float, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, hr_checkpoint_name: str, hr_sampler_name: str, hr_prompt: str, hr_negative_prompt, override_settings_texts, *args, force_enable_hr=False):
     override_settings = create_override_settings_dict(override_settings_texts)
 
-    if firstpass_image is not None:
-        seed = getattr(firstpass_image, 'seed', None)
-        if seed:
-            args = modules.scripts.scripts_txt2img.set_named_arg(args, 'ScriptSeed', 'seed', seed)
-
+    if force_enable_hr:
         enable_hr = True
-        batch_size = 1
-        n_iter = 1
 
     p = processing.StableDiffusionProcessingTxt2Img(
         sd_model=shared.sd_model,
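With this hunk, the former txt2img body becomes txt2img_create_processing: it only builds and returns the processing object, and the callers (txt2img and txt2img_upscale, later in the diff) decide how to run or tweak it. A schematic of that builder/runner split with hypothetical stand-in functions (the real ones take the full txt2img argument list):

# Schematic builder/runner split; names and settings are illustrative only.
def create_job(settings, force_enable_hr=False):
    # Build the job without running anything.
    job = dict(settings)
    if force_enable_hr:
        job["enable_hr"] = True
    return job

def run_job(job):
    return f"rendered with {job}"

def plain_entry(settings):
    # Normal generation: build, then run as-is.
    return run_job(create_job(settings))

def upscale_entry(settings):
    # Upscale button: build, then tweak (single image, hires pass) before running.
    job = create_job(settings, force_enable_hr=True)
    job["batch_size"] = 1
    job["n_iter"] = 1
    return run_job(job)

print(upscale_entry({"prompt": "a cat", "enable_hr": False, "batch_size": 4, "n_iter": 2}))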
@@ -53,7 +32,7 @@ def txt2img(id_task: str, request: gr.Request, prompt: str, negative_prompt: str
         width=width,
         height=height,
         enable_hr=enable_hr,
-        denoising_strength=denoising_strength if enable_hr else None,
+        denoising_strength=denoising_strength,
         hr_scale=hr_scale,
         hr_upscaler=hr_upscaler,
         hr_second_pass_steps=hr_second_pass_steps,
@@ -64,7 +43,6 @@ def txt2img(id_task: str, request: gr.Request, prompt: str, negative_prompt: str
         hr_prompt=hr_prompt,
         hr_negative_prompt=hr_negative_prompt,
         override_settings=override_settings,
-        firstpass_image=firstpass_image,
     )
 
     p.scripts = modules.scripts.scripts_txt2img
@@ -75,8 +53,61 @@ def txt2img(id_task: str, request: gr.Request, prompt: str, negative_prompt: str
     if shared.opts.enable_console_prompts:
         print(f"\ntxt2img: {prompt}", file=shared.progress_print_out)
 
+    return p
+
+
+def txt2img_upscale(id_task: str, request: gr.Request, gallery, gallery_index, generation_info, *args):
+    assert len(gallery) > 0, 'No image to upscale'
+    assert 0 <= gallery_index < len(gallery), f'Bad image index: {gallery_index}'
+
+    p = txt2img_create_processing(id_task, request, *args)
+    p.enable_hr = True
+    p.batch_size = 1
+    p.n_iter = 1
+
+    geninfo = json.loads(generation_info)
+    all_seeds = geninfo["all_seeds"]
+
+    image_info = gallery[gallery_index] if 0 <= gallery_index < len(gallery) else gallery[0]
+    p.firstpass_image = infotext_utils.image_from_url_text(image_info)
+
+    gallery_index_from_end = len(gallery) - gallery_index
+    seed = all_seeds[-gallery_index_from_end if gallery_index_from_end < len(all_seeds) + 1 else 0]
+    p.script_args = modules.scripts.scripts_txt2img.set_named_arg(p.script_args, 'ScriptSeed', 'seed', seed)
+
     with closing(p):
-        processed = modules.scripts.scripts_txt2img.run(p, *args)
+        processed = modules.scripts.scripts_txt2img.run(p, *p.script_args)
+
+        if processed is None:
+            processed = processing.process_images(p)
+
+    shared.total_tqdm.clear()
+
+    new_gallery = []
+    for i, image in enumerate(gallery):
+        fake_image = Image.new(mode="RGB", size=(1, 1))
+
+        if i == gallery_index:
+            already_saved_as = getattr(processed.images[0], 'already_saved_as', None)
+            if already_saved_as is not None:
+                fake_image.already_saved_as = already_saved_as
+            else:
+                fake_image = processed.images[0]
+        else:
+            fake_image.already_saved_as = image["name"]
+
+        new_gallery.append(fake_image)
+
+    geninfo["infotexts"][gallery_index] = processed.info
+
+    return new_gallery, json.dumps(geninfo), plaintext_to_html(processed.info), plaintext_to_html(processed.comments, classname="comments")
+
+
+def txt2img(id_task: str, request: gr.Request, *args):
+    p = txt2img_create_processing(id_task, request, *args)
+
+    with closing(p):
+        processed = modules.scripts.scripts_txt2img.run(p, *p.script_args)
 
         if processed is None:
             processed = processing.process_images(p)
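The seed reapplied to the upscaled image is looked up in the infotext's all_seeds list by counting from the end of the gallery, so the lookup still lines up when the gallery has an extra leading entry without a seed of its own (for example the results grid); out-of-range indices fall back to the first seed. A small worked example of that arithmetic, with toy values rather than real webui data:

# Worked example of the seed lookup used in txt2img_upscale (toy values).
gallery = ["grid", "img_a", "img_b", "img_c"]   # grid first, then 3 images
all_seeds = [111, 222, 333]                     # seeds exist only for the 3 images

gallery_index = 2                                        # user selected "img_b"
gallery_index_from_end = len(gallery) - gallery_index    # 2
seed = all_seeds[-gallery_index_from_end if gallery_index_from_end < len(all_seeds) + 1 else 0]
print(seed)                                              # 222, the seed that produced "img_b"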