import os

import torch

from modules import shared, paths, sd_disable_initialization, devices

sd_configs_path = shared.sd_configs_path
sd_repo_configs_path = os.path.join(paths.paths['Stable Diffusion'], "configs", "stable-diffusion")
sd_xl_repo_configs_path = os.path.join(paths.paths['Stable Diffusion XL'], "configs", "inference")


# Inference .yaml configs for every model family the webui knows how to load.
config_default = shared.sd_default_config
config_sd2 = os.path.join(sd_repo_configs_path, "v2-inference.yaml")
config_sd2v = os.path.join(sd_repo_configs_path, "v2-inference-v.yaml")
config_sd2_inpainting = os.path.join(sd_repo_configs_path, "v2-inpainting-inference.yaml")
config_sdxl = os.path.join(sd_xl_repo_configs_path, "sd_xl_base.yaml")
config_sdxl_refiner = os.path.join(sd_xl_repo_configs_path, "sd_xl_refiner.yaml")
config_sdxl_inpainting = os.path.join(sd_configs_path, "sd_xl_inpaint.yaml")
config_depth_model = os.path.join(sd_repo_configs_path, "v2-midas-inference.yaml")
config_unclip = os.path.join(sd_repo_configs_path, "v2-1-stable-unclip-l-inference.yaml")
config_unopenclip = os.path.join(sd_repo_configs_path, "v2-1-stable-unclip-h-inference.yaml")
config_inpainting = os.path.join(sd_configs_path, "v1-inpainting-inference.yaml")
config_instruct_pix2pix = os.path.join(sd_configs_path, "instruct-pix2pix.yaml")
config_alt_diffusion = os.path.join(sd_configs_path, "alt-diffusion-inference.yaml")
config_alt_diffusion_m18 = os.path.join(sd_configs_path, "alt-diffusion-m18-inference.yaml")
config_sd3 = os.path.join(sd_configs_path, "sd3-inference.yaml")


def is_using_v_parameterization_for_sd2(state_dict):
    """
    Detects whether the UNet in state_dict is using v-parameterization. Returns True if it is. You're welcome.
    """

    import ldm.modules.diffusionmodules.openaimodel

    device = devices.device

    # Build an SD2 UNet skeleton without running weight initialization; its
    # parameters are about to be overwritten by the checkpoint's anyway.
    with sd_disable_initialization.DisableInitialization():
        unet = ldm.modules.diffusionmodules.openaimodel.UNetModel(
            use_checkpoint=False,
            use_fp16=False,
            image_size=32,
            in_channels=4,
            out_channels=4,
            model_channels=320,
            attention_resolutions=[4, 2, 1],
            num_res_blocks=2,
            channel_mult=[1, 2, 4, 4],
            num_head_channels=64,
            use_spatial_transformer=True,
            use_linear_in_transformer=True,
            transformer_depth=1,
            context_dim=1024,
            legacy=False
        )
        unet.eval()

    with torch.no_grad():
        # Load just the diffusion model's weights out of the full checkpoint.
        unet_sd = {k.replace("model.diffusion_model.", ""): v for k, v in state_dict.items() if "model.diffusion_model." in k}
        unet.load_state_dict(unet_sd, strict=True)
        unet.to(device=device, dtype=devices.dtype_unet)

        # Run one dummy denoising step on a constant input at a late timestep
        # and measure how far the output moves away from the input.
        test_cond = torch.ones((1, 2, 1024), device=device) * 0.5
        x_test = torch.ones((1, 4, 8, 8), device=device) * 0.5

        with devices.autocast():
            out = (unet(x_test, torch.asarray([999], device=device), context=test_cond) - x_test).mean().cpu().item()

    return out < -1
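
# Rough intuition for the -1 threshold: an epsilon-parameterized SD2 UNet
# predicts noise, so on the constant inputs above its output stays near x_test
# and the mean difference lands around zero, while a v-parameterized UNet
# predicts v = alpha_t * eps - sigma_t * x0, which at t=999 (where sigma_t is
# close to 1) is dominated by the negative term and pushes the mean below -1.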


def guess_model_config_from_state_dict(sd, filename):
    """Picks the inference .yaml for a checkpoint by probing tell-tale keys and tensor shapes in its state dict."""
    sd2_cond_proj_weight = sd.get('cond_stage_model.model.transformer.resblocks.0.attn.in_proj_weight', None)
    diffusion_model_input = sd.get('model.diffusion_model.input_blocks.0.0.weight', None)
    sd2_variations_weight = sd.get('embedder.model.ln_final.weight', None)

    if "model.diffusion_model.x_embedder.proj.weight" in sd:
        return config_sd3

    # SDXL base carries a second (OpenCLIP) text encoder at conditioner.embedders.1;
    # 9 UNet input channels marks an inpainting variant.
    if sd.get('conditioner.embedders.1.model.ln_final.weight', None) is not None:
        if diffusion_model_input.shape[1] == 9:
            return config_sdxl_inpainting
        else:
            return config_sdxl

    if sd.get('conditioner.embedders.0.model.ln_final.weight', None) is not None:
        return config_sdxl_refiner
    elif sd.get('depth_model.model.pretrained.act_postprocess3.0.project.0.bias', None) is not None:
        return config_depth_model
    elif sd2_variations_weight is not None and sd2_variations_weight.shape[0] == 768:
        return config_unclip
    elif sd2_variations_weight is not None and sd2_variations_weight.shape[0] == 1024:
        return config_unopenclip

    # A 1024-wide text projection marks SD2; inpainting checkpoints have 9 input
    # channels, and v-prediction checkpoints need the -v config.
    if sd2_cond_proj_weight is not None and sd2_cond_proj_weight.shape[1] == 1024:
        if diffusion_model_input.shape[1] == 9:
            return config_sd2_inpainting
        elif is_using_v_parameterization_for_sd2(sd):
            return config_sd2v
        else:
            return config_sd2

    if diffusion_model_input is not None:
        if diffusion_model_input.shape[1] == 9:
            return config_inpainting
        if diffusion_model_input.shape[1] == 8:
            return config_instruct_pix2pix

    if sd.get('cond_stage_model.roberta.embeddings.word_embeddings.weight', None) is not None:
        if sd.get('cond_stage_model.transformation.weight').size()[0] == 1024:
            return config_alt_diffusion_m18
        return config_alt_diffusion

    return config_default
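
# A quick way to see which tell-tale keys a checkpoint actually carries when
# the guess comes out wrong (a sketch; `sd` is an already-loaded state dict):
#
#     probes = ['model.diffusion_model.x_embedder.proj.weight',
#               'conditioner.embedders.1.model.ln_final.weight',
#               'cond_stage_model.model.transformer.resblocks.0.attn.in_proj_weight']
#     print({k: sd[k].shape for k in probes if k in sd})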


def find_checkpoint_config(state_dict, info):
    if info is None:
        return guess_model_config_from_state_dict(state_dict, "")

    # A .yaml placed next to the checkpoint file takes precedence over guessing.
    config = find_checkpoint_config_near_filename(info)
    if config is not None:
        return config

    return guess_model_config_from_state_dict(state_dict, info.filename)
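
# Minimal usage sketch (hypothetical path; assumes safetensors is installed):
#
#     from safetensors.torch import load_file
#     state_dict = load_file("/path/to/checkpoint.safetensors")
#     config = find_checkpoint_config(state_dict, None)  # no info: guessed from the state dict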


def find_checkpoint_config_near_filename(info):
    if info is None:
        return None

    config = f"{os.path.splitext(info.filename)[0]}.yaml"
    if os.path.exists(config):
        return config

    return None
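
# The convention checked above, with a made-up path for illustration:
#   /models/Stable-diffusion/foo.safetensors  ->  /models/Stable-diffusion/foo.yaml
# If that .yaml exists alongside the checkpoint, it is used as-is.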