Merge pull request #3818 from jwatzman/master
Reduce peak memory usage when changing models
commit 9553a7e071
@@ -173,7 +173,9 @@ def load_model_weights(model, checkpoint_info):
             print(f"Global Step: {pl_sd['global_step']}")
 
         sd = get_state_dict_from_checkpoint(pl_sd)
-        missing, extra = model.load_state_dict(sd, strict=False)
+        del pl_sd
+        model.load_state_dict(sd, strict=False)
+        del sd
 
         if shared.cmd_opts.opt_channelslast:
            model.to(memory_format=torch.channels_last)
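The first hunk releases each intermediate reference as soon as it is no longer needed: pl_sd is dropped once the state dict has been extracted, and sd is dropped once its tensors have been copied into the model, which lowers how much of the checkpoint has to stay resident while the model is being populated. A minimal sketch of the same pattern, assuming a plain PyTorch module and a checkpoint saved as a dict; load_weights_low_peak and checkpoint_path are hypothetical names, and the real code uses get_state_dict_from_checkpoint and shared.weight_load_location instead of the literal "state_dict" lookup and "cpu" used here:

    import torch


    def load_weights_low_peak(model: torch.nn.Module, checkpoint_path: str) -> None:
        # One full copy of the weights: the raw checkpoint as saved on disk.
        pl_sd = torch.load(checkpoint_path, map_location="cpu")

        # Checkpoint files often wrap the weights under a "state_dict" key.
        sd = pl_sd.get("state_dict", pl_sd)

        # Drop the outer checkpoint dict as soon as the state dict is extracted,
        # so the wrapper (and any extra entries it carries) can be reclaimed.
        del pl_sd

        # load_state_dict copies tensor data into the model's own parameters.
        model.load_state_dict(sd, strict=False)

        # The model now holds its own copy of the weights; release the loaded
        # state dict immediately instead of keeping it alive until return.
        del sd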
@@ -197,6 +199,7 @@ def load_model_weights(model, checkpoint_info):
 
         model.first_stage_model.to(devices.dtype_vae)
 
         if shared.opts.sd_checkpoint_cache > 0:
+            checkpoints_loaded[checkpoint_info] = model.state_dict().copy()
             while len(checkpoints_loaded) > shared.opts.sd_checkpoint_cache:
                 checkpoints_loaded.popitem(last=False)  # LRU
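The second hunk copies the model's state dict into the cache only when caching is enabled (shared.opts.sd_checkpoint_cache > 0), so a disabled cache no longer costs an extra copy of the weights, while the existing popitem(last=False) call keeps evicting the oldest entry once the limit is exceeded. A minimal sketch of that OrderedDict-as-LRU pattern, with hypothetical names (cache, cache_size, remember, fetch) standing in for checkpoints_loaded and the webui option:

    from collections import OrderedDict

    cache = OrderedDict()  # insertion order doubles as recency order
    cache_size = 2         # assumed limit; the webui reads shared.opts.sd_checkpoint_cache


    def remember(key, state_dict):
        # Only pay for the extra copy of the weights when caching is enabled;
        # with cache_size == 0 nothing is ever stored.
        if cache_size > 0:
            cache[key] = state_dict.copy()
            while len(cache) > cache_size:
                cache.popitem(last=False)  # evict the oldest entry (LRU)


    def fetch(key):
        state_dict = cache.get(key)
        if state_dict is not None:
            cache.move_to_end(key)  # refresh recency on a cache hit
        return state_dict

With cache_size set to 0, remember() stores nothing at all, which mirrors the guard the added line relies on.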