2022-09-03 03:08:45 -06:00
|
|
|
import torch
|
2022-10-22 05:04:14 -06:00
|
|
|
from modules import devices
|
2022-09-03 03:08:45 -06:00
|
|
|
|
|
|
|
# The single module (at most one) currently resident on the GPU; updated by
# send_me_to_gpu (inside setup_for_low_vram) and send_everything_to_cpu.
module_in_gpu = None
# Target device used when evicting a module from the GPU.
cpu = torch.device("cpu")
|
|
|
|
|
2022-09-12 02:55:27 -06:00
|
|
|
|
|
|
|
def send_everything_to_cpu():
    """Evict the currently tracked module (if any) back to CPU and clear the tracker."""
    global module_in_gpu

    resident = module_in_gpu
    if resident is not None:
        resident.to(cpu)
    module_in_gpu = None
|
|
|
|
|
|
|
|
|
2022-09-03 03:08:45 -06:00
|
|
|
def setup_for_low_vram(sd_model, use_medvram):
    """Patch sd_model so that at most one large sub-module occupies the GPU at a time.

    Registers forward-pre hooks on the big components (cond stage, VAE,
    optional depth model / embedder, and the diffusion model) so each one is
    moved to devices.device right before it runs, while the previously
    resident one is pushed back to CPU.  With use_medvram the diffusion model
    (sd_model.model) is swapped as a whole; otherwise its sub-blocks are
    swapped individually so they fit in very small VRAM.

    sd_model is mutated in place; callers can detect the patching via
    is_enabled(sd_model).
    """
    sd_model.lowvram = True

    # maps a hooked child module -> the wrapper module that should actually be
    # moved between devices (used for the cond stage model, see below)
    parents = {}

    def send_me_to_gpu(module, _):
        """send this module to GPU; send whatever tracked module was previous in GPU to CPU;
        we add this as forward_pre_hook to a lot of modules and this way all but one of them will
        be in CPU
        """
        global module_in_gpu

        # if the hooked module has a registered parent, move the parent instead
        module = parents.get(module, module)

        if module_in_gpu == module:
            return

        if module_in_gpu is not None:
            module_in_gpu.to(cpu)

        module.to(devices.device)
        module_in_gpu = module

    # see below for register_forward_pre_hook;
    # first_stage_model does not use forward(), it uses encode/decode, so register_forward_pre_hook is
    # useless here, and we just replace those methods

    first_stage_model = sd_model.first_stage_model
    first_stage_model_encode = sd_model.first_stage_model.encode
    first_stage_model_decode = sd_model.first_stage_model.decode

    def first_stage_model_encode_wrap(x):
        # pull the VAE onto the GPU before delegating to the original encode
        send_me_to_gpu(first_stage_model, None)
        return first_stage_model_encode(x)

    def first_stage_model_decode_wrap(z):
        # pull the VAE onto the GPU before delegating to the original decode
        send_me_to_gpu(first_stage_model, None)
        return first_stage_model_decode(z)

    # for SD1, cond_stage_model is CLIP and its NN is in the transformer field, but for SD2, it's open clip, and it's in model field
    if hasattr(sd_model.cond_stage_model, 'model'):
        sd_model.cond_stage_model.transformer = sd_model.cond_stage_model.model

    # remove several big modules: cond, first_stage, depth/embedder (if applicable), and unet from the model and then
    # send the model to GPU. Then put modules back. the modules will be in CPU.
    stored = sd_model.cond_stage_model.transformer, sd_model.first_stage_model, getattr(sd_model, 'depth_model', None), getattr(sd_model, 'embedder', None), sd_model.model
    sd_model.cond_stage_model.transformer, sd_model.first_stage_model, sd_model.depth_model, sd_model.embedder, sd_model.model = None, None, None, None, None
    sd_model.to(devices.device)
    sd_model.cond_stage_model.transformer, sd_model.first_stage_model, sd_model.depth_model, sd_model.embedder, sd_model.model = stored

    # register hooks for the first three models (cond stage, VAE, optional depth/embedder)
    sd_model.cond_stage_model.transformer.register_forward_pre_hook(send_me_to_gpu)
    sd_model.first_stage_model.register_forward_pre_hook(send_me_to_gpu)
    sd_model.first_stage_model.encode = first_stage_model_encode_wrap
    sd_model.first_stage_model.decode = first_stage_model_decode_wrap
    if sd_model.depth_model:
        sd_model.depth_model.register_forward_pre_hook(send_me_to_gpu)
    if sd_model.embedder:
        sd_model.embedder.register_forward_pre_hook(send_me_to_gpu)
    # the hook fires on the transformer, but the whole cond_stage_model must move
    parents[sd_model.cond_stage_model.transformer] = sd_model.cond_stage_model

    # undo the SD2 aliasing set up above so the model keeps its original attribute layout
    if hasattr(sd_model.cond_stage_model, 'model'):
        sd_model.cond_stage_model.model = sd_model.cond_stage_model.transformer
        del sd_model.cond_stage_model.transformer

    if use_medvram:
        # medvram: the whole diffusion model is small enough to swap as one unit
        sd_model.model.register_forward_pre_hook(send_me_to_gpu)
    else:
        diff_model = sd_model.model.diffusion_model

        # the third remaining model is still too big for 4 GB, so we also do the same for its submodules
        # so that only one of them is in GPU at a time
        stored = diff_model.input_blocks, diff_model.middle_block, diff_model.output_blocks, diff_model.time_embed
        diff_model.input_blocks, diff_model.middle_block, diff_model.output_blocks, diff_model.time_embed = None, None, None, None
        sd_model.model.to(devices.device)
        diff_model.input_blocks, diff_model.middle_block, diff_model.output_blocks, diff_model.time_embed = stored

        # install hooks for bits of third model
        diff_model.time_embed.register_forward_pre_hook(send_me_to_gpu)
        for block in diff_model.input_blocks:
            block.register_forward_pre_hook(send_me_to_gpu)
        diff_model.middle_block.register_forward_pre_hook(send_me_to_gpu)
        for block in diff_model.output_blocks:
            block.register_forward_pre_hook(send_me_to_gpu)
|
2023-06-04 04:07:22 -06:00
|
|
|
|
|
|
|
|
|
|
|
def is_enabled(sd_model):
    """Report whether low-VRAM mode was activated on this model by setup_for_low_vram."""
    try:
        return sd_model.lowvram
    except AttributeError:
        return False
|