A big rework, just what you were secretly hoping for!
SD upscale moved to scripts
Batch processing script removed
Batch processing added to main img2img and now works with scripts
img2img page UI reworked to use tabs
parent e235d4e691
commit 91bfc71261
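For orientation before the diff: the active img2img tab now reaches the backend as an integer mode (0 = img2img, 1 = Inpaint, 2 = Batch img2img), and the batch path reuses the normal processing pipeline. The following is a toy sketch of that dispatch; the helper names and bodies are stand-ins, not the actual webui functions shown in the diff below.

# Toy sketch of the tab-driven dispatch; helpers are illustrative stand-ins.
def process_batch_stub(input_dir, output_dir):
    return f"would process every file in {input_dir} into {output_dir}"

def run_single_stub(image):
    return f"would run img2img/inpaint on {image}"

def img2img_dispatch(mode, image=None, batch_input_dir=None, batch_output_dir=None):
    # mode comes from the selected tab: 0 = img2img, 1 = Inpaint, 2 = Batch img2img
    if mode == 2:
        return process_batch_stub(batch_input_dir, batch_output_dir)
    return run_single_stub(image)

print(img2img_dispatch(2, batch_input_dir="in", batch_output_dir="out"))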
@@ -35,13 +35,36 @@ function extract_image_from_gallery_extras(gallery){
    return extract_image_from_gallery(gallery);
}

function submit(){
    // this calls a function from progressbar.js
    requestProgress()
function get_tab_index(tabId){
    var res = 0

    gradioApp().getElementById(tabId).querySelector('div').querySelectorAll('button').forEach(function(button, i){
        if(button.className.indexOf('bg-white') != -1)
            res = i
    })

    return res
}

function create_tab_index_args(tabId, args){
    var res = []
    for(var i=0; i<args.length; i++){
        res.push(args[i])
    }

    res[0] = get_tab_index(tabId)

    return res
}

function get_extras_tab_index(){
    return create_tab_index_args('mode_extras', arguments)
}

function create_submit_args(args){
    res = []
    for(var i=0;i<arguments.length;i++){
        res.push(arguments[i])
    for(var i=0;i<args.length;i++){
        res.push(args[i])
    }

    // As it is currently, txt2img and img2img send back the previous output args (txt2img_gallery, generation_info, html_info) whenever you generate a new image.

@@ -55,11 +78,30 @@ function submit(){
    return res
}

function submit(){
    requestProgress()

    return create_submit_args(arguments)
}

function submit_img2img(){
    requestProgress()

    res = create_submit_args(arguments)

    res[0] = get_tab_index('mode_img2img')

    return res
}


function ask_for_style_name(_, prompt_text, negative_prompt_text) {
    name_ = prompt('Style name:')
    return name_ === null ? [null, null, null]: [name_, prompt_text, negative_prompt_text]
}


opts = {}
function apply_settings(jsdata){
    console.log(jsdata)
@@ -15,30 +15,22 @@ import piexif.helper
cached_images = {}


def run_extras(image, image_folder, gfpgan_visibility, codeformer_visibility, codeformer_weight, upscaling_resize, extras_upscaler_1, extras_upscaler_2, extras_upscaler_2_visibility):
def run_extras(extras_mode, image, image_folder, gfpgan_visibility, codeformer_visibility, codeformer_weight, upscaling_resize, extras_upscaler_1, extras_upscaler_2, extras_upscaler_2_visibility):
    devices.torch_gc()

    imageArr = []
    # Also keep track of original file names
    imageNameArr = []

    if image_folder is not None:
        if image is not None:
            print("Batch detected and single image detected, please only use one of the two. Aborting.")
            return None
    if extras_mode == 1:
        #convert file to pillow image
        for img in image_folder:
            image = Image.fromarray(np.array(Image.open(img)))
            imageArr.append(image)
            imageNameArr.append(os.path.splitext(img.orig_name)[0])

    elif image is not None:
        if image_folder is not None:
            print("Batch detected and single image detected, please only use one of the two. Aborting.")
            return None
        else:
            imageArr.append(image)
            imageNameArr.append(None)
    else:
        imageArr.append(image)
        imageNameArr.append(None)

    outpath = opts.outdir_samples or opts.outdir_extras_samples
@@ -1,4 +1,8 @@
import math
import os
import sys
import traceback

import numpy as np
from PIL import Image, ImageOps, ImageChops

@@ -11,9 +15,45 @@ from modules.ui import plaintext_to_html
import modules.images as images
import modules.scripts

def img2img(prompt: str, negative_prompt: str, prompt_style: str, prompt_style2: str, init_img, init_img_with_mask, init_mask, mask_mode, steps: int, sampler_index: int, mask_blur: int, inpainting_fill: int, restore_faces: bool, tiling: bool, mode: int, n_iter: int, batch_size: int, cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, resize_mode: int, upscaler_index: str, upscale_overlap: int, inpaint_full_res: bool, inpainting_mask_invert: int, *args):

def process_batch(p, input_dir, output_dir, args):
    processing.fix_seed(p)

    images = [file for file in [os.path.join(input_dir, x) for x in os.listdir(input_dir)] if os.path.isfile(file)]

    print(f"Will process {len(images)} images, creating {p.n_iter * p.batch_size} new images for each.")

    p.do_not_save_grid = True
    p.do_not_save_samples = True

    state.job_count = len(images) * p.n_iter

    for i, image in enumerate(images):
        state.job = f"{i+1} out of {len(images)}"

        if state.interrupted:
            break

        img = Image.open(image)
        p.init_images = [img] * p.batch_size

        proc = modules.scripts.scripts_img2img.run(p, *args)
        if proc is None:
            proc = process_images(p)

        for n, processed_image in enumerate(proc.images):
            filename = os.path.basename(image)

            if n > 0:
                left, right = os.path.splitext(filename)
                filename = f"{left}-{n}{right}"

            processed_image.save(os.path.join(output_dir, filename))


def img2img(mode: int, prompt: str, negative_prompt: str, prompt_style: str, prompt_style2: str, init_img, init_img_with_mask, init_img_inpaint, init_mask_inpaint, mask_mode, steps: int, sampler_index: int, mask_blur: int, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, *args):
    is_inpaint = mode == 1
    is_upscale = mode == 2
    is_batch = mode == 2

    if is_inpaint:
        if mask_mode == 0:

@@ -23,8 +63,8 @@ def img2img(prompt: str, negative_prompt: str, prompt_style: str, prompt_style2:
            mask = ImageChops.lighter(alpha_mask, mask.convert('L')).convert('L')
            image = image.convert('RGB')
        else:
            image = init_img
            mask = init_mask
            image = init_img_inpaint
            mask = init_mask_inpaint
    else:
        image = init_img
        mask = None

@@ -60,79 +100,19 @@ def img2img(prompt: str, negative_prompt: str, prompt_style: str, prompt_style2:
        resize_mode=resize_mode,
        denoising_strength=denoising_strength,
        inpaint_full_res=inpaint_full_res,
        inpaint_full_res_padding=inpaint_full_res_padding,
        inpainting_mask_invert=inpainting_mask_invert,
    )
    print(f"\nimg2img: {prompt}", file=shared.progress_print_out)

    p.extra_generation_params["Mask blur"] = mask_blur

    if is_upscale:
        initial_info = None

        processing.fix_seed(p)
        seed = p.seed

        upscaler = shared.sd_upscalers[upscaler_index]
        img = upscaler.upscale(init_img, init_img.width * 2, init_img.height * 2)

        devices.torch_gc()

        grid = images.split_grid(img, tile_w=width, tile_h=height, overlap=upscale_overlap)

        batch_size = p.batch_size
        upscale_count = p.n_iter
        p.n_iter = 1
        p.do_not_save_grid = True
        p.do_not_save_samples = True

        work = []

        for y, h, row in grid.tiles:
            for tiledata in row:
                work.append(tiledata[2])

        batch_count = math.ceil(len(work) / batch_size)
        state.job_count = batch_count * upscale_count

        print(f"SD upscaling will process a total of {len(work)} images tiled as {len(grid.tiles[0][2])}x{len(grid.tiles)} per upscale in a total of {state.job_count} batches.")

        result_images = []
        for n in range(upscale_count):
            start_seed = seed + n
            p.seed = start_seed

            work_results = []
            for i in range(batch_count):
                p.batch_size = batch_size
                p.init_images = work[i*batch_size:(i+1)*batch_size]

                state.job = f"Batch {i + 1 + n * batch_count} out of {state.job_count}"
                processed = process_images(p)

                if initial_info is None:
                    initial_info = processed.info

                p.seed = processed.seed + 1
                work_results += processed.images

            image_index = 0
            for y, h, row in grid.tiles:
                for tiledata in row:
                    tiledata[2] = work_results[image_index] if image_index < len(work_results) else Image.new("RGB", (p.width, p.height))
                    image_index += 1

            combined_image = images.combine_grid(grid)
            result_images.append(combined_image)

            if opts.samples_save:
                images.save_image(combined_image, p.outpath_samples, "", start_seed, prompt, opts.samples_format, info=initial_info, p=p)

        processed = Processed(p, result_images, seed, initial_info)
    if is_batch:
        process_batch(p, img2img_batch_input_dir, img2img_batch_output_dir, args)

        processed = Processed(p, [], p.seed, "")
    else:

        processed = modules.scripts.scripts_img2img.run(p, *args)

        if processed is None:
            processed = process_images(p)
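A small, self-contained illustration of the batch output naming used by process_batch above; the helper name below is hypothetical, only the suffixing rule is taken from the diff.

import os

def batch_output_name(input_path, n):
    # The first result keeps the input's file name; later results get -1, -2, ...
    # suffixes, mirroring the n > 0 branch in process_batch.
    filename = os.path.basename(input_path)
    if n > 0:
        left, right = os.path.splitext(filename)
        filename = f"{left}-{n}{right}"
    return filename

print(batch_output_name("inputs/cat.png", 0))  # cat.png
print(batch_output_name("inputs/cat.png", 2))  # cat-2.png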
@@ -491,7 +491,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
    sampler = None

    def __init__(self, init_images=None, resize_mode=0, denoising_strength=0.75, mask=None, mask_blur=4, inpainting_fill=0, inpaint_full_res=True, inpainting_mask_invert=0, **kwargs):
    def __init__(self, init_images=None, resize_mode=0, denoising_strength=0.75, mask=None, mask_blur=4, inpainting_fill=0, inpaint_full_res=True, inpaint_full_res_padding=0, inpainting_mask_invert=0, **kwargs):
        super().__init__(**kwargs)

        self.init_images = init_images

@@ -505,6 +505,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
        self.mask_blur = mask_blur
        self.inpainting_fill = inpainting_fill
        self.inpaint_full_res = inpaint_full_res
        self.inpaint_full_res_padding = inpaint_full_res_padding
        self.inpainting_mask_invert = inpainting_mask_invert
        self.mask = None
        self.nmask = None

@@ -527,7 +528,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
        if self.inpaint_full_res:
            self.mask_for_overlay = self.image_mask
            mask = self.image_mask.convert('L')
            crop_region = masking.get_crop_region(np.array(mask), opts.upscale_at_full_resolution_padding)
            crop_region = masking.get_crop_region(np.array(mask), self.inpaint_full_res_padding)
            crop_region = masking.expand_crop_region(crop_region, self.width, self.height, mask.width, mask.height)
            x1, y1, x2, y2 = crop_region
@@ -143,7 +143,6 @@ class ScriptRunner:

        return inputs


    def run(self, p: StableDiffusionProcessing, *args):
        script_index = args[0]
@@ -147,14 +147,13 @@ class Options:
    "save_txt": OptionInfo(False, "Create a text file next to every image with generation parameters."),
    "ESRGAN_tile": OptionInfo(192, "Tile size for ESRGAN upscalers. 0 = no tiling.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}),
    "ESRGAN_tile_overlap": OptionInfo(8, "Tile overlap, in pixels for ESRGAN upscalers. Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}),
    "realesrgan_enabled_models": OptionInfo(["Real-ESRGAN 4x plus", "Real-ESRGAN 4x plus anime 6B"],"Select which RealESRGAN models to show in the web UI. (Requires restart)", gr.CheckboxGroup, lambda: {"choices": realesrgan_models_names()}),
    "realesrgan_enabled_models": OptionInfo(["Real-ESRGAN 4x plus", "Real-ESRGAN 4x plus anime 6B"], "Select which RealESRGAN models to show in the web UI. (Requires restart)", gr.CheckboxGroup, lambda: {"choices": realesrgan_models_names()}),
    "SWIN_tile": OptionInfo(192, "Tile size for all SwinIR.", gr.Slider, {"minimum": 16, "maximum": 512, "step": 16}),
    "SWIN_tile_overlap": OptionInfo(8, "Tile overlap, in pixels for SwinIR. Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}),
    "ldsr_steps": OptionInfo(100, "LDSR processing steps. Lower = faster", gr.Slider, {"minimum": 1, "maximum": 200, "step": 1}),
    "ldsr_pre_down":OptionInfo(1, "LDSR Pre-process downssample scale. 1 = no down-sampling, 4 = 1/4 scale.", gr.Slider, {"minimum": 1, "maximum": 4, "step": 1}),
    "ldsr_post_down":OptionInfo(1, "LDSR Post-process down-sample scale. 1 = no down-sampling, 4 = 1/4 scale.", gr.Slider, {"minimum": 1, "maximum": 4, "step": 1}),
    "random_artist_categories": OptionInfo([], "Allowed categories for random artists selection when using the Roll button", gr.CheckboxGroup, {"choices": artist_db.categories()}),
    "upscale_at_full_resolution_padding": OptionInfo(16, "Inpainting at full resolution: padding, in pixels, for the masked region.", gr.Slider, {"minimum": 0, "maximum": 128, "step": 4}),
    "upscaler_for_hires_fix": OptionInfo(None, "Upscaler for highres. fix", gr.Radio, lambda: {"choices": [x.name for x in sd_upscalers]}),
    "show_progressbar": OptionInfo(True, "Show progressbar"),
    "show_progress_every_n_steps": OptionInfo(0, "Show show image creation progress every N sampling steps. Set 0 to disable.", gr.Slider, {"minimum": 0, "maximum": 32, "step": 1}),
136 modules/ui.py
@@ -527,36 +527,47 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
        progressbar = gr.HTML(elem_id="progressbar")
        img2img_preview = gr.Image(elem_id='img2img_preview', visible=False)
        setup_progressbar(progressbar, img2img_preview)

        with gr.Row().style(equal_height=False):
            with gr.Column(variant='panel'):
                with gr.Group():
                    switch_mode = gr.Radio(label='Mode', elem_id="img2img_mode", choices=['Redraw whole image', 'Inpaint a part of image', 'SD upscale'], value='Redraw whole image', type="index", show_label=False)
                    init_img = gr.Image(label="Image for img2img", source="upload", interactive=True, type="pil")
                    init_img_with_mask = gr.Image(label="Image for inpainting with mask", elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", visible=False, image_mode="RGBA")
                    init_mask = gr.Image(label="Mask", source="upload", interactive=True, type="pil", visible=False)
                    init_img_with_mask_comment = gr.HTML(elem_id="mask_bug_info", value="<small>if the editor shows ERROR, switch to another tab and back, then to another img2img mode above and back</small>", visible=False)

                with gr.Row():
                    resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", show_label=False, choices=["Just resize", "Crop and resize", "Resize and fill"], type="index", value="Just resize")
                    mask_mode = gr.Radio(label="Mask mode", show_label=False, choices=["Draw mask", "Upload mask"], type="index", value="Draw mask")
                with gr.Tabs(elem_id="mode_img2img") as tabs_img2img_mode:
                    with gr.TabItem('img2img'):
                        init_img = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil")

                    with gr.TabItem('Inpaint'):
                        init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA")
                        init_img_with_mask_comment = gr.HTML(elem_id="mask_bug_info", value="<small>if the editor shows ERROR, switch to another tab and back, then to \"Upload mask\" mode above and back</small>")

                        init_img_inpaint = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil", visible=False)
                        init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", visible=False)

                        mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4)

                        with gr.Row():
                            mask_mode = gr.Radio(label="Mask mode", show_label=False, choices=["Draw mask", "Upload mask"], type="index", value="Draw mask")
                            inpainting_mask_invert = gr.Radio(label='Masking mode', show_label=False, choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index")

                        inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='fill', type="index")

                        with gr.Row():
                            inpaint_full_res = gr.Checkbox(label='Inpaint at full resolution', value=False)
                            inpaint_full_res_padding = gr.Slider(label='Inpaint at full resolution padding, pixels', minimum=0, maximum=256, step=4, value=32)

                    with gr.TabItem('Batch img2img'):
                        gr.HTML("<p class=\"text-gray-500\">Process images in a directory on the same machine where the server is running.</p>")
                        img2img_batch_input_dir = gr.Textbox(label="Input directory")
                        img2img_batch_output_dir = gr.Textbox(label="Output directory")

                with gr.Row():
                    resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", show_label=False, choices=["Just resize", "Crop and resize", "Resize and fill"], type="index", value="Just resize")

                steps = gr.Slider(minimum=1, maximum=150, step=1, label="Sampling Steps", value=20)
                sampler_index = gr.Radio(label='Sampling method', choices=[x.name for x in samplers_for_img2img], value=samplers_for_img2img[0].name, type="index")
                mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, visible=False)
                inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='fill', type="index", visible=False)

                with gr.Row():
                    inpaint_full_res = gr.Checkbox(label='Inpaint at full resolution', value=False, visible=False)
                    inpainting_mask_invert = gr.Radio(label='Masking mode', choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index", visible=False)

                with gr.Row():
                    restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1)
                    tiling = gr.Checkbox(label='Tiling', value=False)
                    sd_upscale_overlap = gr.Slider(minimum=0, maximum=256, step=16, label='Tile overlap', value=64, visible=False)

                with gr.Row():
                    sd_upscale_upscaler_name = gr.Radio(label='Upscaler', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index", visible=False)

                with gr.Row():
                    batch_count = gr.Slider(minimum=1, maximum=cmd_opts.max_batch_count, step=1, label='Batch count', value=1)

@@ -589,7 +600,6 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
                img2img_send_to_extras = gr.Button('Send to extras')
                img2img_save_style = gr.Button('Save prompt as style')


            with gr.Group():
                html_info = gr.HTML()
                generation_info = gr.Textbox(visible=False)
@@ -597,70 +607,36 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
        connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
        connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)

        def apply_mode(mode, uploadmask):
            is_classic = mode == 0
            is_inpaint = mode == 1
            is_upscale = mode == 2

            return {
                init_img: gr_show(not is_inpaint or (is_inpaint and uploadmask == 1)),
                init_img_with_mask: gr_show(is_inpaint and uploadmask == 0),
                init_img_with_mask_comment: gr_show(is_inpaint and uploadmask == 0),
                init_mask: gr_show(is_inpaint and uploadmask == 1),
                mask_mode: gr_show(is_inpaint),
                mask_blur: gr_show(is_inpaint),
                inpainting_fill: gr_show(is_inpaint),
                sd_upscale_upscaler_name: gr_show(is_upscale),
                sd_upscale_overlap: gr_show(is_upscale),
                inpaint_full_res: gr_show(is_inpaint),
                inpainting_mask_invert: gr_show(is_inpaint),
                img2img_interrogate: gr_show(not is_inpaint),
            }

        switch_mode.change(
            apply_mode,
            inputs=[switch_mode, mask_mode],
        mask_mode.change(
            lambda mode, img: {
                #init_img_with_mask: gr.Image.update(visible=mode == 0, value=img["image"]),
                init_img_with_mask: gr_show(mode == 0),
                init_img_with_mask_comment: gr_show(mode == 0),
                init_img_inpaint: gr_show(mode == 1),
                init_mask_inpaint: gr_show(mode == 1),
            },
            inputs=[mask_mode, init_img_with_mask],
            outputs=[
                init_img,
                init_img_with_mask,
                init_img_with_mask_comment,
                init_mask,
                mask_mode,
                mask_blur,
                inpainting_fill,
                sd_upscale_upscaler_name,
                sd_upscale_overlap,
                inpaint_full_res,
                inpainting_mask_invert,
                img2img_interrogate,
            ]
        )

        mask_mode.change(
            lambda mode: {
                init_img: gr_show(mode == 1),
                init_img_with_mask: gr_show(mode == 0),
                init_mask: gr_show(mode == 1),
            },
            inputs=[mask_mode],
            outputs=[
                init_img,
                init_img_with_mask,
                init_mask,
                init_img_inpaint,
                init_mask_inpaint,
            ],
        )

        img2img_args = dict(
            fn=img2img,
            _js="submit",
            _js="submit_img2img",
            inputs=[
                dummy_component,
                img2img_prompt,
                img2img_negative_prompt,
                img2img_prompt_style,
                img2img_prompt_style2,
                init_img,
                init_img_with_mask,
                init_mask,
                init_img_inpaint,
                init_mask_inpaint,
                mask_mode,
                steps,
                sampler_index,
@@ -668,7 +644,6 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
                inpainting_fill,
                restore_faces,
                tiling,
                switch_mode,
                batch_count,
                batch_size,
                cfg_scale,

@@ -678,10 +653,11 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
                height,
                width,
                resize_mode,
                sd_upscale_upscaler_name,
                sd_upscale_overlap,
                inpaint_full_res,
                inpaint_full_res_padding,
                inpainting_mask_invert,
                img2img_batch_input_dir,
                img2img_batch_output_dir,
            ] + custom_inputs,
            outputs=[
                img2img_gallery,

@@ -748,7 +724,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
    with gr.Blocks(analytics_enabled=False) as extras_interface:
        with gr.Row().style(equal_height=False):
            with gr.Column(variant='panel'):
                with gr.Tabs():
                with gr.Tabs(elem_id="mode_extras"):
                    with gr.TabItem('Single Image'):
                        image = gr.Image(label="Source", source="upload", interactive=True, type="pil")

@@ -778,9 +754,11 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
            html_info_x = gr.HTML()
            html_info = gr.HTML()

        extras_args = dict(
        submit.click(
            fn=run_extras,
            _js="get_extras_tab_index",
            inputs=[
                dummy_component,
                image,
                image_batch,
                gfpgan_visibility,

@@ -798,8 +776,6 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
            ]
        )

        submit.click(**extras_args)

    pnginfo_interface = gr.Interface(
        wrap_gradio_call(run_pnginfo),
        inputs=[

@@ -929,6 +905,12 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
        outputs=[init_img_with_mask],
    )

    tabs_img2img_mode.change(
        fn=lambda x: x,
        inputs=[init_img_with_mask],
        outputs=[init_img_with_mask],
    )

    send_to_img2img.click(
        fn=lambda x: image_from_url_text(x),
        _js="extract_image_from_gallery_img2img",
@@ -1,59 +0,0 @@
import math
import os
import sys
import traceback

import modules.scripts as scripts
import gradio as gr

from modules.processing import Processed, process_images
from PIL import Image
from modules.shared import opts, cmd_opts, state


class Script(scripts.Script):
    def title(self):
        return "Batch processing"

    def show(self, is_img2img):
        return is_img2img

    def ui(self, is_img2img):
        input_dir = gr.Textbox(label="Input directory", lines=1)
        output_dir = gr.Textbox(label="Output directory", lines=1)

        return [input_dir, output_dir]

    def run(self, p, input_dir, output_dir):
        images = [file for file in [os.path.join(input_dir, x) for x in os.listdir(input_dir)] if os.path.isfile(file)]

        batch_count = math.ceil(len(images) / p.batch_size)
        print(f"Will process {len(images)} images in {batch_count} batches.")

        p.batch_count = 1
        p.do_not_save_grid = True
        p.do_not_save_samples = True

        state.job_count = batch_count

        for batch_no in range(batch_count):
            batch_images = []
            for path in images[batch_no*p.batch_size:(batch_no+1)*p.batch_size]:
                try:
                    img = Image.open(path)
                    batch_images.append((img, path))
                except:
                    print(f"Error processing {path}:", file=sys.stderr)
                    print(traceback.format_exc(), file=sys.stderr)

            if len(batch_images) == 0:
                continue

            state.job = f"{batch_no} out of {batch_count}: {batch_images[0][1]}"
            p.init_images = [x[0] for x in batch_images]
            proc = process_images(p)
            for image, (_, path) in zip(proc.images, batch_images):
                filename = os.path.basename(path)
                image.save(os.path.join(output_dir, filename))

        return Processed(p, [], p.seed, "")
@@ -0,0 +1,93 @@
import math

import modules.scripts as scripts
import gradio as gr
from PIL import Image

from modules import processing, shared, sd_samplers, images, devices
from modules.processing import Processed
from modules.shared import opts, cmd_opts, state


class Script(scripts.Script):
    def title(self):
        return "SD upscale"

    def show(self, is_img2img):
        return is_img2img

    def ui(self, is_img2img):
        info = gr.HTML("<p style=\"margin-bottom:0.75em\">Will upscale the image to twice the dimensions; use width and height sliders to set tile size</p>")
        overlap = gr.Slider(minimum=0, maximum=256, step=16, label='Tile overlap', value=64, visible=False)
        upscaler_index = gr.Radio(label='Upscaler', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index", visible=False)

        return [info, overlap, upscaler_index]

    def run(self, p, _, overlap, upscaler_index):
        processing.fix_seed(p)
        upscaler = shared.sd_upscalers[upscaler_index]

        p.extra_generation_params["SD upscale overlap"] = overlap
        p.extra_generation_params["SD upscale upscaler"] = upscaler.name

        initial_info = None
        seed = p.seed

        init_img = p.init_images[0]
        img = upscaler.upscale(init_img, init_img.width * 2, init_img.height * 2)

        devices.torch_gc()

        grid = images.split_grid(img, tile_w=p.width, tile_h=p.height, overlap=overlap)

        batch_size = p.batch_size
        upscale_count = p.n_iter
        p.n_iter = 1
        p.do_not_save_grid = True
        p.do_not_save_samples = True

        work = []

        for y, h, row in grid.tiles:
            for tiledata in row:
                work.append(tiledata[2])

        batch_count = math.ceil(len(work) / batch_size)
        state.job_count = batch_count * upscale_count

        print(f"SD upscaling will process a total of {len(work)} images tiled as {len(grid.tiles[0][2])}x{len(grid.tiles)} per upscale in a total of {state.job_count} batches.")

        result_images = []
        for n in range(upscale_count):
            start_seed = seed + n
            p.seed = start_seed

            work_results = []
            for i in range(batch_count):
                p.batch_size = batch_size
                p.init_images = work[i*batch_size:(i+1)*batch_size]

                state.job = f"Batch {i + 1 + n * batch_count} out of {state.job_count}"
                processed = processing.process_images(p)

                if initial_info is None:
                    initial_info = processed.info

                p.seed = processed.seed + 1
                work_results += processed.images

            image_index = 0
            for y, h, row in grid.tiles:
                for tiledata in row:
                    tiledata[2] = work_results[image_index] if image_index < len(work_results) else Image.new("RGB", (p.width, p.height))
                    image_index += 1

            combined_image = images.combine_grid(grid)
            result_images.append(combined_image)

            if opts.samples_save:
                images.save_image(combined_image, p.outpath_samples, "", start_seed, p.prompt, opts.samples_format, info=initial_info, p=p)

        processed = Processed(p, result_images, seed, initial_info)

        return processed
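A worked example of the job accounting in Script.run() above, with made-up tile numbers; an actual tile count depends on images.split_grid() and the chosen overlap.

import math

num_tiles = 16     # illustrative number of tiles from images.split_grid()
batch_size = 4     # p.batch_size
upscale_count = 2  # p.n_iter

batch_count = math.ceil(num_tiles / batch_size)
job_count = batch_count * upscale_count
print(batch_count, job_count)  # 4 8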
@@ -97,6 +97,11 @@
    background: transparent;
}

.my-4{
    margin-top: 0;
    margin-bottom: 0;
}

#toprow div{
    border: none;
    gap: 0;

@@ -198,7 +203,8 @@ input[type="range"]{
#mask_bug_info {
    text-align: center;
    display: block;
    margin-bottom: 0.5em;
    margin-top: -0.75em;
    margin-bottom: -0.75em;
}

#txt2img_negative_prompt, #img2img_negative_prompt{