Organized the settings and UI of soft inpainting to allow for toggling the feature, and centralized default values to reduce the amount of copy-pasta.
Parent: 552f8bc832
Commit: aaacf48232
@@ -15,6 +15,7 @@ import modules.shared as shared
 import modules.processing as processing
 from modules.ui import plaintext_to_html
 import modules.scripts
+import modules.soft_inpainting as si


 def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=False, scale_by=1.0, use_png_info=False, png_info_props=None, png_info_dir=None):

@@ -162,6 +163,7 @@ def img2img(id_task: str,
         sampler_name: str,
         mask_blur: int,
         mask_alpha: float,
+        mask_blend_enabled: bool,
         mask_blend_power: float,
         mask_blend_scale: float,
         inpaint_detail_preservation: float,

@@ -227,6 +229,9 @@ def img2img(id_task: str,

     assert 0. <= denoising_strength <= 1., 'can only work with strength in [0.0, 1.0]'

+    soft_inpainting = si.SoftInpaintingSettings(mask_blend_power, mask_blend_scale, inpaint_detail_preservation) \
+        if mask_blend_enabled else None
+
     p = StableDiffusionProcessingImg2Img(
         sd_model=shared.sd_model,
         outpath_samples=opts.outdir_samples or opts.outdir_img2img_samples,

@@ -244,9 +249,7 @@ def img2img(id_task: str,
         init_images=[image],
         mask=mask,
         mask_blur=mask_blur,
-        mask_blend_power=mask_blend_power,
-        mask_blend_scale=mask_blend_scale,
-        inpaint_detail_preservation=inpaint_detail_preservation,
+        soft_inpainting=soft_inpainting,
         inpainting_fill=inpainting_fill,
         resize_mode=resize_mode,
         denoising_strength=denoising_strength,

@@ -267,9 +270,8 @@ def img2img(id_task: str,

     if mask:
         p.extra_generation_params["Mask blur"] = mask_blur
-        p.extra_generation_params["Mask blending bias"] = mask_blend_power
-        p.extra_generation_params["Mask blending preservation"] = mask_blend_scale
-        p.extra_generation_params["Mask blending contrast boost"] = inpaint_detail_preservation
+        if soft_inpainting is not None:
+            soft_inpainting.add_generation_params(p.extra_generation_params)

     with closing(p):
         if is_batch:

@@ -30,6 +30,7 @@ import modules.sd_models as sd_models
 import modules.sd_vae as sd_vae
 from ldm.data.util import AddMiDaS
 from ldm.models.diffusion.ddpm import LatentDepth2ImageDiffusion
+import modules.soft_inpainting as si

 from einops import repeat, rearrange
 from blendmodes.blend import blendLayers, BlendType

@@ -1425,9 +1426,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
     mask_blur_x: int = 4
     mask_blur_y: int = 4
     mask_blur: int = None
-    mask_blend_power: float = 1
-    mask_blend_scale: float = 0.5
-    inpaint_detail_preservation: float = 4
+    soft_inpainting: si.SoftInpaintingSettings = si.default
     inpainting_fill: int = 0
     inpaint_full_res: bool = True
     inpaint_full_res_padding: int = 0

@@ -6,6 +6,7 @@ import modules.shared as shared
 from modules.script_callbacks import CFGDenoiserParams, cfg_denoiser_callback
 from modules.script_callbacks import CFGDenoisedParams, cfg_denoised_callback
 from modules.script_callbacks import AfterCFGCallbackParams, cfg_after_cfg_callback
+import modules.soft_inpainting as si


 def catenate_conds(conds):

@@ -43,9 +44,7 @@ class CFGDenoiser(torch.nn.Module):
         self.model_wrap = None
         self.mask = None
         self.nmask = None
-        self.mask_blend_power = 1
-        self.mask_blend_scale = 0.5
-        self.inpaint_detail_preservation = 4
+        self.soft_inpainting: si.SoftInpaintingSettings = None
         self.init_latent = None
         self.steps = None
         """number of steps as specified by user in UI"""

@@ -95,7 +94,8 @@ class CFGDenoiser(torch.nn.Module):
         self.sampler.sampler_extra_args['uncond'] = uc

     def forward(self, x, sigma, uncond, cond, cond_scale, s_min_uncond, image_cond):
-        def latent_blend(a, b, t):
+        def latent_blend(a, b, t, one_minus_t=None):
+
            """
            Interpolates two latent image representations according to the parameter t,
            where the interpolated vectors' magnitudes are also interpolated separately.

@@ -104,8 +104,12 @@ class CFGDenoiser(torch.nn.Module):
            """
            # NOTE: We use inplace operations wherever possible.

+           if one_minus_t is None:
+               one_minus_t = 1 - t
+
+           if self.soft_inpainting is None:
+               return a * one_minus_t + b * t
+
            # Linearly interpolate the image vectors.
            a_scaled = a * one_minus_t
            b_scaled = b * t

@@ -119,10 +123,10 @@ class CFGDenoiser(torch.nn.Module):
            current_magnitude = torch.norm(image_interp, p=2, dim=1).to(torch.float64).add_(0.00001)

            # Interpolate the powered magnitudes, then un-power them (bring them back to a power of 1).
-           a_magnitude = torch.norm(a, p=2, dim=1).to(torch.float64).pow_(self.inpaint_detail_preservation) * one_minus_t
-           b_magnitude = torch.norm(b, p=2, dim=1).to(torch.float64).pow_(self.inpaint_detail_preservation) * t
+           a_magnitude = torch.norm(a, p=2, dim=1).to(torch.float64).pow_(self.soft_inpainting.inpaint_detail_preservation) * one_minus_t
+           b_magnitude = torch.norm(b, p=2, dim=1).to(torch.float64).pow_(self.soft_inpainting.inpaint_detail_preservation) * t
            desired_magnitude = a_magnitude
-           desired_magnitude.add_(b_magnitude).pow_(1 / self.inpaint_detail_preservation)
+           desired_magnitude.add_(b_magnitude).pow_(1 / self.soft_inpainting.inpaint_detail_preservation)
            del a_magnitude, b_magnitude, one_minus_t

            # Change the linearly interpolated image vectors' magnitudes to the value we want.

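For reference, here is a self-contained sketch of the magnitude-preserving blend that latent_blend implements above. It is not the webui code itself; the tensor shapes and the helper name soft_latent_blend are assumptions for illustration.

    import torch

    def soft_latent_blend(a, b, t, detail_preservation=4.0):
        # a, b: latent batches of shape [B, C, H, W]; t: per-pixel blend weight in [0, 1],
        # broadcastable to [B, 1, H, W] (t=0 keeps `a`, t=1 keeps `b`).
        one_minus_t = 1 - t

        # Plain linear interpolation of the latent vectors.
        image_interp = a * one_minus_t + b * t
        current_magnitude = torch.norm(image_interp, p=2, dim=1, keepdim=True).double().add_(1e-5)

        # Interpolate the per-pixel magnitudes in a "powered" domain, then un-power the result.
        # Higher detail_preservation pulls the blended magnitude toward the larger of the two,
        # which counteracts the contrast loss of a purely linear blend.
        a_mag = torch.norm(a, p=2, dim=1, keepdim=True).double().pow(detail_preservation) * one_minus_t
        b_mag = torch.norm(b, p=2, dim=1, keepdim=True).double().pow(detail_preservation) * t
        desired_magnitude = (a_mag + b_mag).pow(1 / detail_preservation)

        # Rescale the linear blend so its magnitude matches the desired magnitude.
        return image_interp * (desired_magnitude / current_magnitude).to(image_interp.dtype)

    # Example: a 50/50 blend of two random latents keeps more of their energy than a linear mix.
    a, b = torch.randn(1, 4, 64, 64), torch.randn(1, 4, 64, 64)
    blended = soft_latent_blend(a, b, t=torch.full((1, 1, 64, 64), 0.5))
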
@@ -156,7 +160,10 @@ class CFGDenoiser(torch.nn.Module):

            NOTE: "mask" is not used
            """
-           return torch.pow(nmask, (_sigma ** self.mask_blend_power) * self.mask_blend_scale)
+           if self.soft_inpainting is None:
+               return nmask
+
+           return torch.pow(nmask, (_sigma ** self.soft_inpainting.mask_blend_power) * self.soft_inpainting.mask_blend_scale)

        if state.interrupted or state.skipped:
            raise sd_samplers_common.InterruptedException

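A rough numeric illustration of the schedule above (the nmask, scale, power and sigma values below are made up, not taken from the diff): raising nmask to the exponent sigma ** mask_blend_power * mask_blend_scale produces a smaller blend weight at high sigma, so more of the original latent survives the early steps, and larger powers push that preservation further toward the beginning of sampling.

    nmask, scale = 0.7, 0.5
    for power in (0.5, 1.0, 2.0):
        for sigma in (10.0, 1.0, 0.1):
            t = nmask ** ((sigma ** power) * scale)
            print(f"power={power} sigma={sigma:>4}: blend weight toward denoised = {t:.3f}")
    # Smaller weights keep more of the original latent; this matches the "schedule bias"
    # description in the help text further below.
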
@@ -176,6 +183,9 @@ class CFGDenoiser(torch.nn.Module):

        # Blend in the original latents (before)
        if self.mask_before_denoising and self.mask is not None:
+           if self.soft_inpainting is None:
+               x = latent_blend(self.init_latent, x, self.nmask, self.mask)
+           else:
                x = latent_blend(self.init_latent, x, get_modified_nmask(self.nmask, sigma))

        batch_size = len(conds_list)

@@ -279,6 +289,9 @@ class CFGDenoiser(torch.nn.Module):

        # Blend in the original latents (after)
        if not self.mask_before_denoising and self.mask is not None:
+           if self.soft_inpainting is None:
+               denoised = latent_blend(self.init_latent, denoised, self.nmask, self.mask)
+           else:
                denoised = latent_blend(self.init_latent, denoised, get_modified_nmask(self.nmask, sigma))

        self.sampler.last_latent = self.get_pred_x0(torch.cat([x_in[i:i + 1] for i in denoised_image_indexes]), torch.cat([x_out[i:i + 1] for i in denoised_image_indexes]), sigma)

@@ -277,9 +277,7 @@ class Sampler:
        self.model_wrap_cfg.p = p
        self.model_wrap_cfg.mask = p.mask if hasattr(p, 'mask') else None
        self.model_wrap_cfg.nmask = p.nmask if hasattr(p, 'nmask') else None
-       self.model_wrap_cfg.mask_blend_power = p.mask_blend_power if hasattr(p, 'mask_blend_power') else None
-       self.model_wrap_cfg.mask_blend_scale = p.mask_blend_scale if hasattr(p, 'mask_blend_scale') else None
-       self.model_wrap_cfg.inpaint_detail_preservation = p.inpaint_detail_preservation if hasattr(p, 'inpaint_detail_preservation') else None
+       self.model_wrap_cfg.soft_inpainting = p.soft_inpainting if hasattr(p, 'soft_inpainting') else None
        self.model_wrap_cfg.step = 0
        self.model_wrap_cfg.image_cfg_scale = getattr(p, 'image_cfg_scale', None)
        self.eta = p.eta if p.eta is not None else getattr(opts, self.eta_option_field, 0.0)

@@ -0,0 +1,133 @@
+class SoftInpaintingSettings:
+    def __init__(self, mask_blend_power, mask_blend_scale, inpaint_detail_preservation):
+        self.mask_blend_power = mask_blend_power
+        self.mask_blend_scale = mask_blend_scale
+        self.inpaint_detail_preservation = inpaint_detail_preservation
+
+    def get_paste_fields(self):
+        return [
+            (self.mask_blend_power, gen_param_labels.mask_blend_power),
+            (self.mask_blend_scale, gen_param_labels.mask_blend_scale),
+            (self.inpaint_detail_preservation, gen_param_labels.inpaint_detail_preservation),
+        ]
+
+    def add_generation_params(self, dest):
+        dest[enabled_gen_param_label] = True
+        dest[gen_param_labels.mask_blend_power] = self.mask_blend_power
+        dest[gen_param_labels.mask_blend_scale] = self.mask_blend_scale
+        dest[gen_param_labels.inpaint_detail_preservation] = self.inpaint_detail_preservation
+
+
+enabled_ui_label = "Soft inpainting"
+enabled_gen_param_label = "Soft inpainting enabled"
+enabled_el_id = "soft_inpainting_enabled"
+
+default = SoftInpaintingSettings(1, 0.5, 4)
+ui_labels = SoftInpaintingSettings("Schedule bias", "Preservation strength", "Transition contrast boost")
+
+ui_info = SoftInpaintingSettings(
+    mask_blend_power="Shifts when preservation of original content occurs during denoising.",
+    # "Below 1: Stronger preservation near the end (with low sigma)\n"
+    # "1: Balanced (proportional to sigma)\n"
+    # "Above 1: Stronger preservation in the beginning (with high sigma)",
+    mask_blend_scale="How strongly partially masked content should be preserved.",
+    # "Low values: Favors generated content.\n"
+    # "High values: Favors original content.",
+    inpaint_detail_preservation="Amplifies the contrast that may be lost in partially masked regions.")
+
+gen_param_labels = SoftInpaintingSettings("Soft inpainting schedule bias", "Soft inpainting preservation strength", "Soft inpainting transition contrast boost")
+el_ids = SoftInpaintingSettings("mask_blend_power", "mask_blend_scale", "inpaint_detail_preservation")

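For orientation, a minimal usage sketch of the settings object defined above (the numeric values are made up; only the names defined in the new module are real):

    import modules.soft_inpainting as si

    # Build a settings object from UI values, or fall back to the shared defaults.
    settings = si.SoftInpaintingSettings(mask_blend_power=1.5,
                                         mask_blend_scale=si.default.mask_blend_scale,
                                         inpaint_detail_preservation=si.default.inpaint_detail_preservation)

    # Record the values in the dictionary used for the image's generation parameters (infotext).
    params = {}
    settings.add_generation_params(params)
    # params now holds "Soft inpainting enabled", "Soft inpainting schedule bias",
    # "Soft inpainting preservation strength" and "Soft inpainting transition contrast boost".
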
+def gradio_ui():
+    import gradio as gr
+    from modules.ui_components import InputAccordion
+    """
+    with InputAccordion(False, label="Refiner", elem_id=self.elem_id("enable")) as enable_refiner:
+        with gr.Row():
+            refiner_checkpoint = gr.Dropdown(label='Checkpoint', elem_id=self.elem_id("checkpoint"), choices=sd_models.checkpoint_tiles(), value='', tooltip="switch to another model in the middle of generation")
+            create_refresh_button(refiner_checkpoint, sd_models.list_models, lambda: {"choices": sd_models.checkpoint_tiles()}, self.elem_id("checkpoint_refresh"))
+
+            refiner_switch_at = gr.Slider(value=0.8, label="Switch at", minimum=0.01, maximum=1.0, step=0.01, elem_id=self.elem_id("switch_at"), tooltip="fraction of sampling steps when the switch to refiner model should happen; 1=never, 0.5=switch in the middle of generation")
+    """
+    with InputAccordion(False, label=enabled_ui_label, elem_id=enabled_el_id) as soft_inpainting_enabled:
+        with gr.Group():
+            gr.Markdown(
+                """
+                Soft inpainting allows you to **seamlessly blend original content with inpainted content** according to the mask opacity.
+                **High _Mask blur_** values are recommended!
+                """)
+
+            result = SoftInpaintingSettings(
+                gr.Slider(label=ui_labels.mask_blend_power,
+                          info=ui_info.mask_blend_power,
+                          minimum=0,
+                          maximum=8,
+                          step=0.1,
+                          value=default.mask_blend_power,
+                          elem_id=el_ids.mask_blend_power),
+                gr.Slider(label=ui_labels.mask_blend_scale,
+                          info=ui_info.mask_blend_scale,
+                          minimum=0,
+                          maximum=8,
+                          step=0.05,
+                          value=default.mask_blend_scale,
+                          elem_id=el_ids.mask_blend_scale),
+                gr.Slider(label=ui_labels.inpaint_detail_preservation,
+                          info=ui_info.inpaint_detail_preservation,
+                          minimum=1,
+                          maximum=32,
+                          step=0.5,
+                          value=default.inpaint_detail_preservation,
+                          elem_id=el_ids.inpaint_detail_preservation))
+
+            with gr.Accordion("Help", open=False):
+                gr.Markdown(
+                    f"""
+                    ### {ui_labels.mask_blend_power}
+
+                    The blending strength of original content is scaled proportionally with the decreasing noise level values at each step (sigmas).
+                    This ensures that the influence of the denoiser and original content preservation is roughly balanced at each step.
+                    This balance can be shifted using this parameter, controlling whether earlier or later steps have stronger preservation.
+
+                    - **Below 1**: Stronger preservation near the end (with low sigma)
+                    - **1**: Balanced (proportional to sigma)
+                    - **Above 1**: Stronger preservation in the beginning (with high sigma)
+                    """)
+                gr.Markdown(
+                    f"""
+                    ### {ui_labels.mask_blend_scale}
+
+                    Skews whether partially masked image regions should be more likely to preserve the original content or favor inpainted content.
+                    This may need to be adjusted depending on the {ui_labels.mask_blend_power}, CFG Scale, prompt and Denoising strength.
+
+                    - **Low values**: Favors generated content.
+                    - **High values**: Favors original content.
+                    """)
+                gr.Markdown(
+                    f"""
+                    ### {ui_labels.inpaint_detail_preservation}
+
+                    This parameter controls how the original latent vectors and denoised latent vectors are interpolated.
+                    With higher values, the magnitude of the resulting blended vector will be closer to the maximum of the two interpolated vectors.
+                    This can prevent the loss of contrast that occurs with linear interpolation.
+
+                    - **Low values**: Softer blending, details may fade.
+                    - **High values**: Stronger contrast, may over-saturate colors.
+                    """)
+
+    return (
+        [
+            soft_inpainting_enabled,
+            result.mask_blend_power,
+            result.mask_blend_scale,
+            result.inpaint_detail_preservation
+        ],
+        [
+            (soft_inpainting_enabled, enabled_gen_param_label),
+            (result.mask_blend_power, gen_param_labels.mask_blend_power),
+            (result.mask_blend_scale, gen_param_labels.mask_blend_scale),
+            (result.inpaint_detail_preservation, gen_param_labels.inpaint_detail_preservation)
+        ]
+    )

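A quick, mocked illustration of the two lists gradio_ui() returns and how the ui.py changes below splat them into the submit inputs and the infotext paste fields. Plain strings stand in for gradio components so the snippet runs on its own; the variable names are placeholders.

    # Shape of the value returned by si.gradio_ui(), mocked with plain strings.
    soft_inpainting = (
        ["enabled", "power", "scale", "detail"],                   # soft_inpainting[0]: input components
        [("enabled", "Soft inpainting enabled"),
         ("power", "Soft inpainting schedule bias"),
         ("scale", "Soft inpainting preservation strength"),
         ("detail", "Soft inpainting transition contrast boost")]  # soft_inpainting[1]: (component, infotext label) pairs
    )

    img2img_inputs = ["mask_blur", "mask_alpha", *(soft_inpainting[0]), "inpainting_fill"]
    img2img_paste_fields = [("mask_blur", "Mask blur"), *(soft_inpainting[1])]
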
@@ -29,6 +29,7 @@ import modules.shared as shared
 from modules import prompt_parser
 from modules.sd_hijack import model_hijack
 from modules.generation_parameters_copypaste import image_from_url_text
+import modules.soft_inpainting as si

 create_setting_component = ui_settings.create_setting_component


@@ -678,9 +679,16 @@ def create_ui():
        with FormRow():
            mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, elem_id="img2img_mask_blur")
            mask_alpha = gr.Slider(label="Mask transparency", visible=False, elem_id="img2img_mask_alpha")
+
+       with FormRow():
+           soft_inpainting = si.gradio_ui()
+
+
+       """
            mask_blend_power = gr.Slider(label='Blending bias', minimum=0, maximum=8, step=0.1, value=1, elem_id="img2img_mask_blend_power")
            mask_blend_scale = gr.Slider(label='Blending preservation', minimum=0, maximum=8, step=0.05, value=0.5, elem_id="img2img_mask_blend_scale")
            inpaint_detail_preservation = gr.Slider(label='Blending contrast boost', minimum=1, maximum=32, step=0.5, value=4, elem_id="img2img_mask_blend_offset")
+       """

        with FormRow():
            inpainting_mask_invert = gr.Radio(label='Mask mode', choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index", elem_id="img2img_mask_mode")

@@ -736,9 +744,7 @@ def create_ui():
            sampler_name,
            mask_blur,
            mask_alpha,
-           mask_blend_power,
-           mask_blend_scale,
-           inpaint_detail_preservation,
+           *(soft_inpainting[0]),
            inpainting_fill,
            batch_count,
            batch_size,

@@ -837,11 +843,10 @@ def create_ui():
            (toprow.ui_styles.dropdown, lambda d: d["Styles array"] if isinstance(d.get("Styles array"), list) else gr.update()),
            (denoising_strength, "Denoising strength"),
            (mask_blur, "Mask blur"),
-           (mask_blend_power, "Mask blending bias"),
-           (mask_blend_scale, "Mask blending preservation"),
-           (inpaint_detail_preservation, "Mask blending contrast boost"),
+           *(soft_inpainting[1]),
            *scripts.scripts_img2img.infotext_fields
        ]

        parameters_copypaste.add_paste_fields("img2img", init_img, img2img_paste_fields, override_settings)
        parameters_copypaste.add_paste_fields("inpaint", init_img_with_mask, img2img_paste_fields, override_settings)
        parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding(

@@ -10,6 +10,7 @@ from PIL import Image, ImageDraw
 from modules import images
 from modules.processing import Processed, process_images
 from modules.shared import opts, state
+import modules.soft_inpainting as si


 # this function is taken from https://github.com/parlance-zz/g-diffuser-bot

@@ -133,16 +134,14 @@ class Script(scripts.Script):

        pixels = gr.Slider(label="Pixels to expand", minimum=8, maximum=256, step=8, value=128, elem_id=self.elem_id("pixels"))
        mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=8, elem_id=self.elem_id("mask_blur"))
-       mask_blend_power = gr.Slider(label='Blending bias', minimum=0, maximum=8, step=0.1, value=1, elem_id=self.elem_id("mask_blend_power"))
-       mask_blend_scale = gr.Slider(label='Blending preservation', minimum=0, maximum=8, step=0.05, value=0.5, elem_id=self.elem_id("mask_blend_scale"))
-       inpaint_detail_preservation = gr.Slider(label='Blending contrast boost', minimum=1, maximum=32, step=0.5, value=4, elem_id=self.elem_id("inpaint_detail_preservation"))
+       soft_inpainting = si.gradio_ui()[0]
        direction = gr.CheckboxGroup(label="Outpainting direction", choices=['left', 'right', 'up', 'down'], value=['left', 'right', 'up', 'down'], elem_id=self.elem_id("direction"))
        noise_q = gr.Slider(label="Fall-off exponent (lower=higher detail)", minimum=0.0, maximum=4.0, step=0.01, value=1.0, elem_id=self.elem_id("noise_q"))
        color_variation = gr.Slider(label="Color variation", minimum=0.0, maximum=1.0, step=0.01, value=0.05, elem_id=self.elem_id("color_variation"))

-       return [info, pixels, mask_blur, mask_blend_power, mask_blend_scale, inpaint_detail_preservation, direction, noise_q, color_variation]
+       return [info, pixels, mask_blur, *soft_inpainting, direction, noise_q, color_variation]

-   def run(self, p, _, pixels, mask_blur, mask_blend_power, mask_blend_scale, inpaint_detail_preservation, direction, noise_q, color_variation):
+   def run(self, p, _, pixels, mask_blur, mask_blend_enabled, mask_blend_power, mask_blend_scale, inpaint_detail_preservation, direction, noise_q, color_variation):
        initial_seed_and_info = [None, None]

        process_width = p.width

@@ -170,9 +169,9 @@ class Script(scripts.Script):

        p.mask_blur_x = mask_blur_x*4
        p.mask_blur_y = mask_blur_y*4
-       p.mask_blend_power = mask_blend_power
-       p.mask_blend_scale = mask_blend_scale
-       p.inpaint_detail_preservation = inpaint_detail_preservation
+
+       p.soft_inpainting = si.SoftInpaintingSettings(mask_blend_power, mask_blend_scale, inpaint_detail_preservation) \
+           if mask_blend_enabled else None

        init_img = p.init_images[0]
        target_w = math.ceil((init_img.width + left + right) / 64) * 64

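The scripts above and below rely on an ordering contract: the component list from si.gradio_ui()[0] is spread into the ui() return value, and gradio flattens it back into positional arguments of run(). A mocked sketch (strings instead of gradio components, simplified signatures) of why the spread must line up with the four new parameters:

    soft_inpainting = ["enabled", "power", "scale", "detail"]  # stands in for si.gradio_ui()[0]

    def ui():
        return ["pixels", "mask_blur", *soft_inpainting, "direction"]

    def run(pixels, mask_blur, mask_blend_enabled, mask_blend_power, mask_blend_scale,
            inpaint_detail_preservation, direction):
        # The four spread components arrive as four separate positional arguments.
        return mask_blend_enabled, mask_blend_power, mask_blend_scale, inpaint_detail_preservation

    print(run(*ui()))
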
@@ -7,6 +7,7 @@ from PIL import Image, ImageDraw
 from modules import images, devices
 from modules.processing import Processed, process_images
 from modules.shared import opts, state
+import modules.soft_inpainting as si


 class Script(scripts.Script):

@@ -22,23 +23,19 @@ class Script(scripts.Script):

        pixels = gr.Slider(label="Pixels to expand", minimum=8, maximum=256, step=8, value=128, elem_id=self.elem_id("pixels"))
        mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, elem_id=self.elem_id("mask_blur"))
-       mask_blend_power = gr.Slider(label='Blending bias', minimum=0, maximum=8, step=0.1, value=1, elem_id=self.elem_id("mask_blend_power"))
-       mask_blend_scale = gr.Slider(label='Blending preservation', minimum=0, maximum=8, step=0.05, value=0.5, elem_id=self.elem_id("mask_blend_scale"))
-       inpaint_detail_preservation = gr.Slider(label='Blending contrast boost', minimum=1, maximum=32, step=0.5, value=4, elem_id=self.elem_id("inpaint_detail_preservation"))
+       soft_inpainting = si.gradio_ui()[0]
        inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='fill', type="index", elem_id=self.elem_id("inpainting_fill"))
        direction = gr.CheckboxGroup(label="Outpainting direction", choices=['left', 'right', 'up', 'down'], value=['left', 'right', 'up', 'down'], elem_id=self.elem_id("direction"))

-       return [pixels, mask_blur, mask_blend_power, mask_blend_scale, inpaint_detail_preservation, inpainting_fill, direction]
+       return [pixels, mask_blur, *soft_inpainting, inpainting_fill, direction]

-   def run(self, p, pixels, mask_blur, mask_blend_power, mask_blend_scale, inpaint_detail_preservation, inpainting_fill, direction):
+   def run(self, p, pixels, mask_blur, mask_blend_enabled, mask_blend_power, mask_blend_scale, inpaint_detail_preservation, inpainting_fill, direction):
        initial_seed = None
        initial_info = None

        p.mask_blur = mask_blur * 2
-       p.mask_blend_power = mask_blend_power
-       p.mask_blend_scale = mask_blend_scale
-       p.inpaint_detail_preservation = inpaint_detail_preservation
+
+       p.soft_inpainting = si.SoftInpaintingSettings(mask_blend_power, mask_blend_scale, inpaint_detail_preservation) \
+           if mask_blend_enabled else None
        p.inpainting_fill = inpainting_fill
        p.inpaint_full_res = False

@@ -1,6 +1,7 @@

 import pytest
 import requests
+import modules.soft_inpainting as si


 @pytest.fixture()

@@ -24,9 +25,10 @@ def simple_img2img_request(img2img_basic_image_base64):
        "inpainting_mask_invert": False,
        "mask": None,
        "mask_blur": 4,
-       "mask_blend_power": 1,
-       "mask_blend_scale": 0.5,
-       "inpaint_detail_preservation": 4,
+       "mask_blend_enabled": True,
+       "mask_blend_power": si.default.mask_blend_power,
+       "mask_blend_scale": si.default.mask_blend_scale,
+       "inpaint_detail_preservation": si.default.inpaint_detail_preservation,
        "n_iter": 1,
        "negative_prompt": "",
        "override_settings": {},