Add auto-sized cropping UI
parent c361b89026
commit 4688bfff55
modules/textual_inversion/preprocess.py

@@ -12,7 +12,7 @@ from modules.shared import opts, cmd_opts
 from modules.textual_inversion import autocrop
 
 
-def preprocess(id_task, process_src, process_dst, process_width, process_height, preprocess_txt_action, process_flip, process_split, process_caption, process_caption_deepbooru=False, split_threshold=0.5, overlap_ratio=0.2, process_focal_crop=False, process_focal_crop_face_weight=0.9, process_focal_crop_entropy_weight=0.3, process_focal_crop_edges_weight=0.5, process_focal_crop_debug=False):
+def preprocess(id_task, process_src, process_dst, process_width, process_height, preprocess_txt_action, process_flip, process_split, process_caption, process_caption_deepbooru=False, split_threshold=0.5, overlap_ratio=0.2, process_focal_crop=False, process_focal_crop_face_weight=0.9, process_focal_crop_entropy_weight=0.3, process_focal_crop_edges_weight=0.5, process_focal_crop_debug=False, process_multicrop=None, process_multicrop_mindim=None, process_multicrop_maxdim=None, process_multicrop_minarea=None, process_multicrop_maxarea=None, process_multicrop_objective=None, process_multicrop_threshold=None):
     try:
         if process_caption:
             shared.interrogator.load()
@@ -20,7 +20,7 @@ def preprocess(id_task, process_src, process_dst, process_width, process_height,
         if process_caption_deepbooru:
             deepbooru.model.start()
 
-        preprocess_work(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_flip, process_split, process_caption, process_caption_deepbooru, split_threshold, overlap_ratio, process_focal_crop, process_focal_crop_face_weight, process_focal_crop_entropy_weight, process_focal_crop_edges_weight, process_focal_crop_debug)
+        preprocess_work(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_flip, process_split, process_caption, process_caption_deepbooru, split_threshold, overlap_ratio, process_focal_crop, process_focal_crop_face_weight, process_focal_crop_entropy_weight, process_focal_crop_edges_weight, process_focal_crop_debug, process_multicrop, process_multicrop_mindim, process_multicrop_maxdim, process_multicrop_minarea, process_multicrop_maxarea, process_multicrop_objective, process_multicrop_threshold)
 
     finally:
 
@@ -109,8 +109,32 @@ def split_pic(image, inverse_xy, width, height, overlap_ratio):
         splitted = image.crop((0, y, to_w, y + to_h))
         yield splitted
 
 
-def preprocess_work(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_flip, process_split, process_caption, process_caption_deepbooru=False, split_threshold=0.5, overlap_ratio=0.2, process_focal_crop=False, process_focal_crop_face_weight=0.9, process_focal_crop_entropy_weight=0.3, process_focal_crop_edges_weight=0.5, process_focal_crop_debug=False):
+# not using torchvision.transforms.CenterCrop because it doesn't allow float regions
+def center_crop(image: Image, w: int, h: int):
+    iw, ih = image.size
+    if ih / h < iw / w:
+        sw = w * ih / h
+        box = (iw - sw) / 2, 0, iw - (iw - sw) / 2, ih
+    else:
+        sh = h * iw / w
+        box = 0, (ih - sh) / 2, iw, ih - (ih - sh) / 2
+    return image.resize((w, h), Image.Resampling.LANCZOS, box)
+
+def multicrop_pic(image: Image, mindim, maxdim, minarea, maxarea, objective, threshold):
+    iw, ih = image.size
+    err = lambda w, h: 1 - (lambda x: x if x < 1 else 1/x)(iw/ih/(w/h))
+    try:
+        w, h = max(((w, h) for w in range(mindim, maxdim+1, 64) for h in range(mindim, maxdim+1, 64)
+                    if minarea <= w * h <= maxarea and err(w, h) <= threshold),
+                   key=lambda wh: ((objective == 'Maximize area') * wh[0] * wh[1], -err(*wh))
+                   )
+    except ValueError:
+        return
+    return center_crop(image, w, h)
+
+
+def preprocess_work(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_flip, process_split, process_caption, process_caption_deepbooru=False, split_threshold=0.5, overlap_ratio=0.2, process_focal_crop=False, process_focal_crop_face_weight=0.9, process_focal_crop_entropy_weight=0.3, process_focal_crop_edges_weight=0.5, process_focal_crop_debug=False, process_multicrop=None, process_multicrop_mindim=None, process_multicrop_maxdim=None, process_multicrop_minarea=None, process_multicrop_maxarea=None, process_multicrop_objective=None, process_multicrop_threshold=None):
     width = process_width
     height = process_height
     src = os.path.abspath(process_src)
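For illustration: center_crop above does the crop and the resize in a single Image.resize call by passing a (possibly fractional) source box, which torchvision's CenterCrop does not allow. Below is a standalone sketch of the same computation, assuming Pillow >= 9.1 for Image.Resampling; the 1200x800 source and 640x640 target are made-up example values, not part of the commit.

```python
from PIL import Image

# Standalone sketch of the center_crop math (assumes Pillow >= 9.1 for Image.Resampling).
# The 1200x800 source and 640x640 target are made-up example values.
src = Image.new("RGB", (1200, 800), "gray")
w, h = 640, 640

iw, ih = src.size
if ih / h < iw / w:
    # source is wider than the target aspect ratio: crop the sides
    sw = w * ih / h                                   # source-space width matching w:h -> 800.0
    box = ((iw - sw) / 2, 0, iw - (iw - sw) / 2, ih)  # (200.0, 0, 1000.0, 800); floats are accepted
else:
    # source is taller than the target aspect ratio: crop the top and bottom
    sh = h * iw / w
    box = (0, (ih - sh) / 2, iw, ih - (ih - sh) / 2)

out = src.resize((w, h), Image.Resampling.LANCZOS, box=box)
print(out.size)  # (640, 640)
```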
@@ -194,6 +218,14 @@ def preprocess_work(process_src, process_dst, process_width, process_height, pre
                 save_pic(focal, index, params, existing_caption=existing_caption)
             process_default_resize = False
 
+        if process_multicrop:
+            cropped = multicrop_pic(img, process_multicrop_mindim, process_multicrop_maxdim, process_multicrop_minarea, process_multicrop_maxarea, process_multicrop_objective, process_multicrop_threshold)
+            if cropped is not None:
+                save_pic(cropped, index, params, existing_caption=existing_caption)
+            else:
+                print(f"skipped {img.width}x{img.height} image {filename} (can't find suitable size within error threshold)")
+            process_default_resize = False
+
         if process_default_resize:
             img = images.resize_image(1, img, width, height)
             save_pic(img, index, params, existing_caption=existing_caption)
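The "skipped" message above fires when no 64-pixel-aligned size satisfies all three constraints (dimension bounds, area bounds, aspect-ratio error threshold). A standalone sketch of that search follows; pick_size is a hypothetical stand-in for the selection inside multicrop_pic, and the default bounds mirror the UI defaults.

```python
# Hypothetical helper mirroring the size search inside multicrop_pic; bounds are the UI defaults.
def pick_size(iw, ih, mindim=384, maxdim=768, minarea=64*64, maxarea=640*640,
              objective='Maximize area', threshold=0.1):
    def err(w, h):
        # relative mismatch between the source aspect ratio and the candidate aspect ratio
        r = (iw / ih) / (w / h)
        return 1 - (r if r < 1 else 1 / r)

    candidates = [(w, h)
                  for w in range(mindim, maxdim + 1, 64)
                  for h in range(mindim, maxdim + 1, 64)
                  if minarea <= w * h <= maxarea and err(w, h) <= threshold]
    if not candidates:
        return None  # corresponds to multicrop_pic returning None and the image being skipped

    # 'Maximize area' ranks by area first, then by lower error; otherwise only the error counts
    return max(candidates, key=lambda wh: ((objective == 'Maximize area') * wh[0] * wh[1], -err(*wh)))


print(pick_size(1200, 800))   # -> (768, 512): same 3:2 aspect, largest area within the bounds
print(pick_size(2000, 500))   # -> None: a 4:1 panorama cannot meet the 0.1 error threshold
```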
modules/ui.py

@@ -1226,6 +1226,7 @@ def create_ui():
                     process_flip = gr.Checkbox(label='Create flipped copies', elem_id="train_process_flip")
                     process_split = gr.Checkbox(label='Split oversized images', elem_id="train_process_split")
                     process_focal_crop = gr.Checkbox(label='Auto focal point crop', elem_id="train_process_focal_crop")
+                    process_multicrop = gr.Checkbox(label='Auto-sized crop', elem_id="train_process_multicrop")
                     process_caption = gr.Checkbox(label='Use BLIP for caption', elem_id="train_process_caption")
                     process_caption_deepbooru = gr.Checkbox(label='Use deepbooru for caption', visible=True, elem_id="train_process_caption_deepbooru")
 
@@ -1239,6 +1240,18 @@ def create_ui():
                     process_focal_crop_edges_weight = gr.Slider(label='Focal point edges weight', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_edges_weight")
                     process_focal_crop_debug = gr.Checkbox(label='Create debug image', elem_id="train_process_focal_crop_debug")
 
+                with gr.Column(visible=False) as process_multicrop_col:
+                    gr.Markdown('Each image is center-cropped with an automatically chosen width and height.')
+                    with gr.Row():
+                        process_multicrop_mindim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension lower bound", value=384, elem_id="train_process_multicrop_mindim")
+                        process_multicrop_maxdim = gr.Slider(minimum=64, maximum=2048, step=8, label="Dimension upper bound", value=768, elem_id="train_process_multicrop_maxdim")
+                    with gr.Row():
+                        process_multicrop_minarea = gr.Slider(minimum=64*64, maximum=2048*2048, step=1, label="Area lower bound", value=64*64, elem_id="train_process_multicrop_minarea")
+                        process_multicrop_maxarea = gr.Slider(minimum=64*64, maximum=2048*2048, step=1, label="Area upper bound", value=640*640, elem_id="train_process_multicrop_maxarea")
+                    with gr.Row():
+                        process_multicrop_objective = gr.Radio(["Maximize area", "Minimize error"], value="Maximize area", label="Resizing objective", elem_id="train_process_multicrop_objective")
+                        process_multicrop_threshold = gr.Slider(minimum=0, maximum=1, step=0.01, label="Error threshold", value=0.1, elem_id="train_process_multicrop_threshold")
+
                 with gr.Row():
                     with gr.Column(scale=3):
                         gr.HTML(value="")
@@ -1260,6 +1273,12 @@ def create_ui():
             outputs=[process_focal_crop_row],
         )
 
+        process_multicrop.change(
+            fn=lambda show: gr_show(show),
+            inputs=[process_multicrop],
+            outputs=[process_multicrop_col],
+        )
+
         def get_textual_inversion_template_names():
             return sorted([x for x in textual_inversion.textual_inversion_templates])
 
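The process_multicrop.change wiring mirrors the existing process_focal_crop toggle: checking the box reveals the hidden settings column. Below is a standalone sketch of the same pattern, assuming a Gradio 3.x environment like the webui's and using gr.update(visible=...) in place of the webui's gr_show helper.

```python
import gradio as gr

# Minimal standalone sketch of the checkbox-toggles-column pattern;
# gr.update(visible=...) stands in for the webui's gr_show helper.
with gr.Blocks() as demo:
    process_multicrop = gr.Checkbox(label="Auto-sized crop")

    with gr.Column(visible=False) as process_multicrop_col:
        gr.Markdown("Each image is center-cropped with an automatically chosen width and height.")
        process_multicrop_mindim = gr.Slider(minimum=64, maximum=2048, step=8,
                                             label="Dimension lower bound", value=384)

    process_multicrop.change(
        fn=lambda show: gr.update(visible=show),
        inputs=[process_multicrop],
        outputs=[process_multicrop_col],
    )

if __name__ == "__main__":
    demo.launch()
```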
@@ -1379,6 +1398,13 @@ def create_ui():
                 process_focal_crop_entropy_weight,
                 process_focal_crop_edges_weight,
                 process_focal_crop_debug,
+                process_multicrop,
+                process_multicrop_mindim,
+                process_multicrop_maxdim,
+                process_multicrop_minarea,
+                process_multicrop_maxarea,
+                process_multicrop_objective,
+                process_multicrop_threshold,
             ],
             outputs=[
                 ti_output,