Use closing() with processing classes everywhere

Follows up on #11569
This commit is contained in:
Aarni Koskela 2023-07-10 20:08:23 +03:00
parent bcb6ad5fab
commit 44c27ebc73
4 changed files with 24 additions and 19 deletions

View File

@@ -3,6 +3,7 @@ import glob
 import html
 import os
 import inspect
+from contextlib import closing
 import modules.textual_inversion.dataset
 import torch
@@ -711,6 +712,7 @@ def train_hypernetwork(id_task, hypernetwork_name, learn_rate, batch_size, gradi
             preview_text = p.prompt

+            with closing(p):
                 processed = processing.process_images(p)
             image = processed.images[0] if len(processed.images) > 0 else None

View File

@@ -1,4 +1,5 @@
 import os
+from contextlib import closing
 from pathlib import Path

 import numpy as np
@@ -217,6 +218,7 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s
     if mask:
         p.extra_generation_params["Mask blur"] = mask_blur

+    with closing(p):
         if is_batch:
             assert not shared.cmd_opts.hide_ui_dir_config, "Launched with --hide-ui-dir-config, batch img2img disabled"
@@ -228,8 +230,6 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s
         if processed is None:
             processed = process_images(p)

-    p.close()
-
     shared.total_tqdm.clear()

     generation_info_js = processed.js()

View File

@@ -1,5 +1,6 @@
 import os
 from collections import namedtuple
+from contextlib import closing

 import torch
 import tqdm
@@ -584,6 +585,7 @@ def train_embedding(id_task, embedding_name, learn_rate, batch_size, gradient_st
             preview_text = p.prompt

+            with closing(p):
                 processed = processing.process_images(p)
             image = processed.images[0] if len(processed.images) > 0 else None

View File

@@ -1,3 +1,5 @@
+from contextlib import closing
+
 import modules.scripts
 from modules import sd_samplers, processing
 from modules.generation_parameters_copypaste import create_override_settings_dict
@@ -53,13 +55,12 @@ def txt2img(id_task: str, prompt: str, negative_prompt: str, prompt_styles, step
     if cmd_opts.enable_console_prompts:
         print(f"\ntxt2img: {prompt}", file=shared.progress_print_out)

-    processed = modules.scripts.scripts_txt2img.run(p, *args)
-
-    if processed is None:
-        processed = processing.process_images(p)
+    with closing(p):
+        processed = modules.scripts.scripts_txt2img.run(p, *args)

-    p.close()
+        if processed is None:
+            processed = processing.process_images(p)

     shared.total_tqdm.clear()

     generation_info_js = processed.js()