fix 'RuntimeError: Expected all tensors to be on the same device' error preventing models from loading on lowvram/medvram.

This commit is contained in:
AUTOMATIC 2023-01-01 02:41:15 +03:00
parent 29a3a7eb13
commit 210449b374
1 changed file with 1 addition and 1 deletion

View File

@ -298,6 +298,6 @@ class FrozenCLIPEmbedderWithCustomWords(FrozenCLIPEmbedderWithCustomWordsBase):
def encode_embedding_init_text(self, init_text, nvpt):
    """Tokenize ``init_text`` and return its token embeddings.

    Used to initialize a textual-inversion embedding from a seed phrase.

    Parameters
    ----------
    init_text : str
        Text whose token embeddings seed the new embedding vectors.
    nvpt : int
        Maximum number of tokens to take (tokenizer ``max_length``).

    Returns
    -------
    Tensor of shape (num_tokens, embedding_dim) — the batch dimension
    produced by the tokenizer is squeezed away.
    """
    embedding_layer = self.wrapped.transformer.text_model.embeddings
    ids = self.wrapped.tokenizer(init_text, max_length=nvpt, return_tensors="pt", add_special_tokens=False)["input_ids"]
    # Move the ids to the device of the embedding weight itself rather than the
    # global default device: under lowvram/medvram the model's modules live on
    # different devices, and using the global device raised
    # "RuntimeError: Expected all tensors to be on the same device".
    embedded = embedding_layer.token_embedding.wrapped(ids.to(embedding_layer.token_embedding.wrapped.weight.device)).squeeze(0)
    return embedded