Image processing changes

InvincibleDude 2023-03-01 21:30:20 +03:00
parent c3bd113a0b
commit 51f81efb02
1 changed file with 38 additions and 38 deletions

@@ -528,7 +528,9 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
         p.all_negative_prompts = p.batch_size * p.n_iter * [shared.prompt_styles.apply_negative_styles_to_prompt(p.negative_prompt, p.styles)]
 
     if type(p) == StableDiffusionProcessingTxt2Img:
-        if p.enable_hr and p.hr_prompt != '':
+        if p.enable_hr and p.hr_prompt == '':
+            p.all_hr_prompts, p.all_hr_negative_prompts = p.all_prompts, p.all_negative_prompts
+        elif p.enable_hr and p.hr_prompt != '':
             if type(p.prompt) == list:
                 p.all_hr_prompts = [shared.prompt_styles.apply_styles_to_prompt(x, p.styles) for x in p.hr_prompt]
             else:
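
The effect of this hunk: when hires fix is enabled but the dedicated hires prompt is left empty, the hires pass now falls back to the already-styled base prompt lists instead of leaving p.all_hr_prompts unset. A minimal, self-contained sketch of that fallback (the helper name and signature are illustrative and not part of the patch; style application is omitted):

# Hypothetical helper, illustration only: mirrors how the hires-pass prompt
# list is chosen after this change.
def pick_hr_prompts(base_prompts: list, hr_prompt: str, batch_size: int, n_iter: int) -> list:
    if hr_prompt == '':
        # empty hires prompt: the hires pass reuses the base prompts as-is
        return base_prompts
    # otherwise the dedicated hires prompt is expanded to one entry per image
    return batch_size * n_iter * [hr_prompt]

assert pick_hr_prompts(['a cat'], '', 1, 1) == ['a cat']
assert pick_hr_prompts(['a cat'], 'a detailed cat', 1, 1) == ['a detailed cat']
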
@@ -555,14 +557,6 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
     if os.path.exists(cmd_opts.embeddings_dir) and not p.do_not_reload_embeddings:
         model_hijack.embedding_db.load_textual_inversion_embeddings()
 
-    _, extra_network_data = extra_networks.parse_prompts(p.all_prompts[0:1])
-
-    if type(p) == StableDiffusionProcessingTxt2Img:
-        if p.enable_hr and p.hr_prompt != '':
-            _, hr_extra_network_data = extra_networks.parse_prompts(p.all_hr_prompts[0:1])
-            if p.all_hr_prompts != p.all_prompts:
-                extra_network_data.update(hr_extra_network_data)
-
     if p.scripts is not None:
         p.scripts.process(p)
@@ -600,13 +594,6 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
         if shared.opts.live_previews_enable and opts.show_progress_type == "Approx NN":
             sd_vae_approx.model()
 
-        if not p.disable_extra_networks:
-            extra_networks.activate(p, extra_network_data)
-
-        with open(os.path.join(paths.data_path, "params.txt"), "w", encoding="utf8") as file:
-            processed = Processed(p, [], p.seed, "")
-            file.write(processed.infotext(p, 0))
-
         if state.job_count == -1:
             state.job_count = p.n_iter
@@ -623,7 +610,10 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
             negative_prompts = p.all_negative_prompts[n * p.batch_size:(n + 1) * p.batch_size]
 
             if type(p) == StableDiffusionProcessingTxt2Img:
-                if p.enable_hr and p.hr_prompt != '':
-                    hr_prompts = p.all_hr_prompts[n * p.batch_size:(n + 1) * p.batch_size]
-                    hr_negative_prompts = p.all_negative_prompts[n * p.batch_size:(n + 1) * p.batch_size]
+                if p.enable_hr:
+                    if p.hr_prompt == '':
+                        hr_prompts, hr_negative_prompts = prompts, negative_prompts
+                    else:
+                        hr_prompts = p.all_hr_prompts[n * p.batch_size:(n + 1) * p.batch_size]
+                        hr_negative_prompts = p.all_negative_prompts[n * p.batch_size:(n + 1) * p.batch_size]
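
Inside the batch loop the same fallback applies per batch: with an empty hires prompt, the batch's hires prompts are simply the slices already taken for the base pass; otherwise they are sliced from the dedicated hires lists. A small sketch under the same caveat (hypothetical helper, slicing logic copied from the hunk above):

# Hypothetical helper, illustration only.
def batch_hr_prompts(all_hr_prompts, base_batch_prompts, hr_prompt, n, batch_size):
    if hr_prompt == '':
        # reuse the slice already taken for the base pass
        return base_batch_prompts
    return all_hr_prompts[n * batch_size:(n + 1) * batch_size]

assert batch_hr_prompts(['hr a', 'hr b'], ['a', 'b'], '', 0, 2) == ['a', 'b']
assert batch_hr_prompts(['hr a', 'hr b'], ['a', 'b'], 'extra detail', 0, 2) == ['hr a', 'hr b']
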
@@ -633,19 +623,40 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
             if len(prompts) == 0:
                 break
 
-            prompts, _ = extra_networks.parse_prompts(prompts)
+            prompts, extra_network_data = extra_networks.parse_prompts(prompts)
+
+            if type(p) == StableDiffusionProcessingTxt2Img:
+                if p.enable_hr and hr_prompts != prompts:
+                    _, hr_extra_network_data = extra_networks.parse_prompts(hr_prompts)
+                    extra_network_data.update(hr_extra_network_data)
+
+            if not p.disable_extra_networks:
+                with devices.autocast():
+                    extra_networks.activate(p, extra_network_data)
 
             if p.scripts is not None:
                 p.scripts.process_batch(p, batch_number=n, prompts=prompts, seeds=seeds, subseeds=subseeds)
 
+            # params.txt should be saved after scripts.process_batch, since the
+            # infotext could be modified by that callback
+            # Example: a wildcard processed by process_batch sets an extra model
+            # strength, which is saved as "Model Strength: 1.0" in the infotext
+            if n == 0:
+                with open(os.path.join(paths.data_path, "params.txt"), "w", encoding="utf8") as file:
+                    processed = Processed(p, [], p.seed, "")
+                    file.write(processed.infotext(p, 0))
+
             uc = get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, p.steps, cached_uc)
             c = get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, p.steps, cached_c)
 
             if type(p) == StableDiffusionProcessingTxt2Img:
-                if p.enable_hr and p.hr_prompt != '':
-                    hr_uc = get_conds_with_caching(prompt_parser.get_learned_conditioning, hr_negative_prompts, p.steps,
-                                                   cached_uc)
-                    hr_c = get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, hr_prompts, p.steps,
-                                                  cached_c)
+                if p.enable_hr:
+                    if prompts != hr_prompts:
+                        hr_uc = get_conds_with_caching(prompt_parser.get_learned_conditioning, hr_negative_prompts, p.steps, cached_uc)
+                        hr_c = get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, hr_prompts, p.steps, cached_c)
+                    else:
+                        hr_uc, hr_c = uc, c
 
             if len(model_hijack.comments) > 0:
                 for comment in model_hijack.comments:
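
This hunk pulls the extra-network parsing/activation and the params.txt write (both removed from the setup code in the earlier hunks) into the per-batch loop, and computes the hires conditionings only when the hires prompts actually differ from the base prompts; when they match, the already-computed c/uc are reused rather than recomputed. A self-contained sketch of that reuse decision (encode() is a stand-in for get_conds_with_caching, not a function in the file):

# Illustration only: decide whether the hires conditionings need their own
# text-encoder pass or can alias the base conditionings.
def hr_conds(prompts, hr_prompts, hr_negative_prompts, c, uc, encode):
    if prompts != hr_prompts:
        return encode(hr_prompts), encode(hr_negative_prompts)
    # identical prompts: reuse the conditionings computed for the base pass
    return c, uc

fake_encode = lambda texts: ('cond', tuple(texts))  # toy encoder for the check
assert hr_conds(['a'], ['a'], ['n'], 'C', 'U', fake_encode) == ('C', 'U')
assert hr_conds(['a'], ['b'], ['n'], 'C', 'U', fake_encode) == (('cond', ('b',)), ('cond', ('n',)))
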
@@ -658,20 +669,9 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
             with devices.without_autocast() if devices.unet_needs_upcast else devices.autocast():
                 if type(p) == StableDiffusionProcessingTxt2Img:
                     if p.enable_hr:
-                        if p.hr_prompt != '':
-                            samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, hr_conditioning=hr_c, hr_unconditional_conditioning=hr_uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, prompts=prompts)
-                        else:
-                            samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, hr_conditioning=c,
-                                                    hr_unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds,
-                                                    subseed_strength=p.subseed_strength, prompts=prompts)
-                    else:
-                        samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds,
-                                                subseed_strength=p.subseed_strength, prompts=prompts)
-                else:
-                    samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds,
-                                            subseeds=subseeds,
-                                            subseed_strength=p.subseed_strength, prompts=prompts)
+                        samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, hr_conditioning=hr_c, hr_unconditional_conditioning=hr_uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, prompts=prompts)
+                    else:
+                        samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, prompts=prompts)
 
             x_samples_ddim = [decode_first_stage(p.sd_model, samples_ddim[i:i+1].to(dtype=devices.dtype_vae))[0].cpu() for i in range(samples_ddim.size(0))]
             for x in x_samples_ddim:
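
With hr_c/hr_uc now always defined whenever the hires pass is enabled, the sampling call site collapses: the hires txt2img branch always passes the extra hr_conditioning/hr_unconditional_conditioning keywords, while the plain call stays as before. A rough sketch of that dispatch (sample_fn stands in for p.sample; the keyword names come from the hunk above, everything else is illustrative):

# Illustration only: one call shape for the hires txt2img path, one for the rest.
def run_sample(sample_fn, is_txt2img, enable_hr, c, uc, hr_c=None, hr_uc=None, **kwargs):
    if is_txt2img and enable_hr:
        return sample_fn(conditioning=c, unconditional_conditioning=uc,
                         hr_conditioning=hr_c, hr_unconditional_conditioning=hr_uc, **kwargs)
    return sample_fn(conditioning=c, unconditional_conditioning=uc, **kwargs)

# toy check: record which keywords each path receives
calls = []
run_sample(lambda **kw: calls.append(sorted(kw)), True, True, 'c', 'uc', 'hr_c', 'hr_uc', seeds=[1])
run_sample(lambda **kw: calls.append(sorted(kw)), False, False, 'c', 'uc', seeds=[1])
assert 'hr_conditioning' in calls[0] and 'hr_conditioning' not in calls[1]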