From 2c11e9009ea18bab4ce2963d44db0c6fd3227370 Mon Sep 17 00:00:00 2001
From: AUTOMATIC1111 <16777216c@gmail.com>
Date: Mon, 24 Jul 2023 11:57:59 +0300
Subject: [PATCH] repair --medvram for SD2.x too after SDXL update

---
 modules/lowvram.py             | 7 ++++---
 modules/sd_hijack_open_clip.py | 2 +-
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/modules/lowvram.py b/modules/lowvram.py
index 6bbc11eb7..3f8306643 100644
--- a/modules/lowvram.py
+++ b/modules/lowvram.py
@@ -90,8 +90,12 @@ def setup_for_low_vram(sd_model, use_medvram):
         sd_model.conditioner.register_forward_pre_hook(send_me_to_gpu)
     elif is_sd2:
         sd_model.cond_stage_model.model.register_forward_pre_hook(send_me_to_gpu)
+        sd_model.cond_stage_model.model.token_embedding.register_forward_pre_hook(send_me_to_gpu)
+        parents[sd_model.cond_stage_model.model] = sd_model.cond_stage_model
+        parents[sd_model.cond_stage_model.model.token_embedding] = sd_model.cond_stage_model
     else:
         sd_model.cond_stage_model.transformer.register_forward_pre_hook(send_me_to_gpu)
+        parents[sd_model.cond_stage_model.transformer] = sd_model.cond_stage_model
 
     sd_model.first_stage_model.register_forward_pre_hook(send_me_to_gpu)
     sd_model.first_stage_model.encode = first_stage_model_encode_wrap
@@ -101,9 +105,6 @@ def setup_for_low_vram(sd_model, use_medvram):
     if sd_model.embedder:
         sd_model.embedder.register_forward_pre_hook(send_me_to_gpu)
 
-    if hasattr(sd_model, 'cond_stage_model'):
-        parents[sd_model.cond_stage_model.transformer] = sd_model.cond_stage_model
-
     if use_medvram:
         sd_model.model.register_forward_pre_hook(send_me_to_gpu)
     else:
diff --git a/modules/sd_hijack_open_clip.py b/modules/sd_hijack_open_clip.py
index bb0b96c72..25c5e9831 100644
--- a/modules/sd_hijack_open_clip.py
+++ b/modules/sd_hijack_open_clip.py
@@ -32,7 +32,7 @@ class FrozenOpenCLIPEmbedderWithCustomWords(sd_hijack_clip.FrozenCLIPEmbedderWit
     def encode_embedding_init_text(self, init_text, nvpt):
         ids = tokenizer.encode(init_text)
         ids = torch.asarray([ids], device=devices.device, dtype=torch.int)
-        embedded = self.wrapped.model.token_embedding.wrapped(ids.to(self.wrapped.model.token_embedding.wrapped.weight.device)).squeeze(0)
+        embedded = self.wrapped.model.token_embedding.wrapped(ids).squeeze(0)
         return embedded