Merge pull request #15806 from huchenlei/inpaint_fix
[Performance 4/6] Precompute is_sdxl_inpaint flag
commit 6450d24afe
@@ -115,10 +115,7 @@ def txt2img_image_conditioning(sd_model, x, width, height):
         return x.new_zeros(x.shape[0], 2*sd_model.noise_augmentor.time_embed.dim, dtype=x.dtype, device=x.device)
 
     else:
-        sd = sd_model.model.state_dict()
-        diffusion_model_input = sd.get('diffusion_model.input_blocks.0.0.weight', None)
-        if diffusion_model_input is not None:
-            if diffusion_model_input.shape[1] == 9:
-                # The "masked-image" in this case will just be all 0.5 since the entire image is masked.
-                image_conditioning = torch.ones(x.shape[0], 3, height, width, device=x.device) * 0.5
-                image_conditioning = images_tensor_to_samples(image_conditioning,
+        if getattr(sd_model.model, "is_sdxl_inpaint", False):
+            # The "masked-image" in this case will just be all 0.5 since the entire image is masked.
+            image_conditioning = torch.ones(x.shape[0], 3, height, width, device=x.device) * 0.5
+            image_conditioning = images_tensor_to_samples(image_conditioning,
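The shape of the change in this hunk: a per-call inspection of the model weights is replaced by a check of a flag computed once at checkpoint load. A minimal, self-contained sketch of that pattern follows; the helper name and the dummy-conditioning shape are illustrative, not taken from this diff:

import torch

def txt2img_conditioning_sketch(model, x, width, height):
    # Branch on a flag assumed to have been set once at load time,
    # instead of pulling model weights out on every call.
    if getattr(model, "is_sdxl_inpaint", False):
        # Fully-masked "masked image": every pixel is 0.5, as in the hunk above.
        return torch.ones(x.shape[0], 3, height, width, device=x.device) * 0.5
    # Otherwise fall back to a dummy zero conditioning (shape chosen arbitrarily here).
    return x.new_zeros(x.shape[0], 5, 1, 1, dtype=x.dtype, device=x.device)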
@@ -392,10 +389,7 @@ class StableDiffusionProcessing:
         if self.sampler.conditioning_key == "crossattn-adm":
             return self.unclip_image_conditioning(source_image)
 
-        sd = self.sampler.model_wrap.inner_model.model.state_dict()
-        diffusion_model_input = sd.get('diffusion_model.input_blocks.0.0.weight', None)
-        if diffusion_model_input is not None:
-            if diffusion_model_input.shape[1] == 9:
-                return self.inpainting_image_conditioning(source_image, latent_image, image_mask=image_mask)
+        if getattr(self.sampler.model_wrap.inner_model.model, "is_sdxl_inpaint", False):
+            return self.inpainting_image_conditioning(source_image, latent_image, image_mask=image_mask)
 
         # Dummy zero conditioning if we're not using inpainting or depth model.
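Both call sites read the flag with getattr(..., "is_sdxl_inpaint", False) rather than accessing the attribute directly, so a model object loaded through a code path that never sets the flag is treated as a non-inpaint model instead of raising AttributeError. A tiny illustration with a made-up class:

class FakeModel:
    pass  # stand-in for a model object that never had the flag set

m = FakeModel()
# Missing attribute degrades to False: the non-inpaint code path is taken.
assert getattr(m, "is_sdxl_inpaint", False) is False

m.is_sdxl_inpaint = True
assert getattr(m, "is_sdxl_inpaint", False) is True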
@@ -380,6 +380,13 @@ def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer
     model.is_sd2 = not model.is_sdxl and hasattr(model.cond_stage_model, 'model')
     model.is_sd1 = not model.is_sdxl and not model.is_sd2
     model.is_ssd = model.is_sdxl and 'model.diffusion_model.middle_block.1.transformer_blocks.0.attn1.to_q.weight' not in state_dict.keys()
+    # Set is_sdxl_inpaint flag.
+    diffusion_model_input = state_dict.get('diffusion_model.input_blocks.0.0.weight', None)
+    model.is_sdxl_inpaint = (
+        model.is_sdxl and
+        diffusion_model_input is not None and
+        diffusion_model_input.shape[1] == 9
+    )
     if model.is_sdxl:
         sd_models_xl.extend_sdxl(model)
 
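The flag itself is derived from the first UNet convolution: an SD-style inpainting UNet takes 9 input channels (commonly 4 noisy-latent + 4 masked-image-latent + 1 mask) instead of 4, so the second dimension of the input_blocks.0.0 weight is 9. A standalone sketch of that check with a fabricated state dict (the 320 output channels are just a plausible placeholder):

import torch

def detect_sdxl_inpaint(state_dict, is_sdxl):
    # Same predicate as the added lines above, packaged as a function.
    diffusion_model_input = state_dict.get('diffusion_model.input_blocks.0.0.weight', None)
    return (
        is_sdxl
        and diffusion_model_input is not None
        and diffusion_model_input.shape[1] == 9
    )

# Fabricated example weights: a 9-channel first conv marks an inpaint UNet.
inpaint_sd = {'diffusion_model.input_blocks.0.0.weight': torch.zeros(320, 9, 3, 3)}
regular_sd = {'diffusion_model.input_blocks.0.0.weight': torch.zeros(320, 4, 3, 3)}
assert detect_sdxl_inpaint(inpaint_sd, is_sdxl=True)
assert not detect_sdxl_inpaint(regular_sd, is_sdxl=True)
assert not detect_sdxl_inpaint(inpaint_sd, is_sdxl=False)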
@@ -35,10 +35,9 @@ def get_learned_conditioning(self: sgm.models.diffusion.DiffusionEngine, batch:
 
 
 def apply_model(self: sgm.models.diffusion.DiffusionEngine, x, t, cond):
-    sd = self.model.state_dict()
-    diffusion_model_input = sd.get('diffusion_model.input_blocks.0.0.weight', None)
-    if diffusion_model_input is not None:
-        if diffusion_model_input.shape[1] == 9:
-            x = torch.cat([x] + cond['c_concat'], dim=1)
+    """WARNING: This function is called once per denoising iteration. DO NOT add
+    expensive function calls such as `model.state_dict`."""
+    if self.is_sdxl_inpaint:
+        x = torch.cat([x] + cond['c_concat'], dim=1)
     return self.model(x, t, cond)
 
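The docstring warning exists because apply_model sits on the denoising hot path: state_dict() rebuilds an ordered mapping over every parameter and buffer in the UNet on each call, which is pure overhead when the answer never changes between steps. A rough timing sketch with a toy model (not the real UNet; absolute numbers will vary):

import time
import torch.nn as nn

# Toy stand-in for a large UNet: enough parameters that state_dict() has
# a real per-call cost.
model = nn.Sequential(*[nn.Linear(256, 256) for _ in range(200)])

def lookup_via_state_dict():
    sd = model.state_dict()
    w = sd.get('0.weight', None)
    return w is not None and w.shape[1] == 256

is_inpaint_like = lookup_via_state_dict()  # computed once, like is_sdxl_inpaint

start = time.perf_counter()
for _ in range(1000):              # stands in for ~1000 denoising iterations
    lookup_via_state_dict()
print('state_dict per step:', time.perf_counter() - start)

start = time.perf_counter()
for _ in range(1000):
    _ = is_inpaint_like            # plain flag read instead
print('precomputed flag:   ', time.perf_counter() - start)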