Use closing() with processing classes everywhere

Follows up on #11569
Aarni Koskela 2023-07-10 20:08:23 +03:00
parent bcb6ad5fab
commit 44c27ebc73
4 changed files with 24 additions and 19 deletions
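
For context, contextlib.closing from the standard library turns any object with a close() method into a context manager: when the with block exits, close() is called, whether the block finished normally or raised. A minimal sketch of that behaviour, using an illustrative Resource class that is not part of this repository:

    from contextlib import closing

    class Resource:
        # Stand-in for a processing object whose close() frees resources.
        def close(self):
            print("close() called")

    # close() runs even though the body raises.
    try:
        with closing(Resource()):
            raise RuntimeError("boom")
    except RuntimeError:
        pass  # close() has already been called by this point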

@@ -3,6 +3,7 @@ import glob
 import html
 import os
 import inspect
+from contextlib import closing
 
 import modules.textual_inversion.dataset
 import torch
@@ -711,8 +712,9 @@ def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradi
 
                 preview_text = p.prompt
 
-                processed = processing.process_images(p)
-                image = processed.images[0] if len(processed.images) > 0 else None
+                with closing(p):
+                    processed = processing.process_images(p)
+                    image = processed.images[0] if len(processed.images) > 0 else None
 
                 if unload:
                     shared.sd_model.cond_stage_model.to(devices.cpu)

@@ -1,4 +1,5 @@
 import os
+from contextlib import closing
 from pathlib import Path
 
 import numpy as np
@@ -217,18 +218,17 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s
     if mask:
         p.extra_generation_params["Mask blur"] = mask_blur
 
-    if is_batch:
-        assert not shared.cmd_opts.hide_ui_dir_config, "Launched with --hide-ui-dir-config, batch img2img disabled"
+    with closing(p):
+        if is_batch:
+            assert not shared.cmd_opts.hide_ui_dir_config, "Launched with --hide-ui-dir-config, batch img2img disabled"
 
-        process_batch(p, img2img_batch_input_dir, img2img_batch_output_dir, img2img_batch_inpaint_mask_dir, args, to_scale=selected_scale_tab == 1, scale_by=scale_by, use_png_info=img2img_batch_use_png_info, png_info_props=img2img_batch_png_info_props, png_info_dir=img2img_batch_png_info_dir)
+            process_batch(p, img2img_batch_input_dir, img2img_batch_output_dir, img2img_batch_inpaint_mask_dir, args, to_scale=selected_scale_tab == 1, scale_by=scale_by, use_png_info=img2img_batch_use_png_info, png_info_props=img2img_batch_png_info_props, png_info_dir=img2img_batch_png_info_dir)
 
-        processed = Processed(p, [], p.seed, "")
-    else:
-        processed = modules.scripts.scripts_img2img.run(p, *args)
-        if processed is None:
-            processed = process_images(p)
-
-    p.close()
+            processed = Processed(p, [], p.seed, "")
+        else:
+            processed = modules.scripts.scripts_img2img.run(p, *args)
+            if processed is None:
+                processed = process_images(p)
 
     shared.total_tqdm.clear()

@@ -1,5 +1,6 @@
 import os
 from collections import namedtuple
+from contextlib import closing
 
 import torch
 import tqdm
@@ -584,8 +585,9 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st
 
                 preview_text = p.prompt
 
-                processed = processing.process_images(p)
-                image = processed.images[0] if len(processed.images) > 0 else None
+                with closing(p):
+                    processed = processing.process_images(p)
+                    image = processed.images[0] if len(processed.images) > 0 else None
 
                 if unload:
                     shared.sd_model.first_stage_model.to(devices.cpu)

@@ -1,3 +1,5 @@
+from contextlib import closing
+
 import modules.scripts
 from modules import sd_samplers, processing
 from modules.generation_parameters_copypaste import create_override_settings_dict
@@ -53,12 +55,11 @@ def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, step
     if cmd_opts.enable_console_prompts:
         print(f"\ntxt2img: {prompt}", file=shared.progress_print_out)
 
-    processed = modules.scripts.scripts_txt2img.run(p, *args)
+    with closing(p):
+        processed = modules.scripts.scripts_txt2img.run(p, *args)
 
-    if processed is None:
-        processed = processing.process_images(p)
-
-    p.close()
+        if processed is None:
+            processed = processing.process_images(p)
 
     shared.total_tqdm.clear()
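
Every hunk applies the same pattern. In txt2img() and img2img(), the trailing p.close() call, which only ran when generation returned normally, is replaced by a with closing(p): block around the processing calls; in train_hypernetwork() and train_embedding(), the same block is added around the preview generation, where no close() call existed before. Either way, close() now also runs when processing raises. A rough before/after sketch, with P and process_images as hypothetical stand-ins for the webui's processing object and pipeline call:

    from contextlib import closing

    class P:  # hypothetical stand-in for the webui processing object
        def close(self):
            print("p.close() called")

    def process_images(p):  # stand-in for the real pipeline call, which may raise
        return "images"

    # Old shape: p.close() only ran if process_images() returned normally.
    p = P()
    processed = process_images(p)
    p.close()

    # New shape: behaves like a try/finally around the call, so close() always runs.
    with closing(P()) as p:
        processed = process_images(p)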