diff --git a/ldm/modules/encoders/modules.py b/ldm/modules/encoders/modules.py
index 2041738..66f0e57 100644
--- a/ldm/modules/encoders/modules.py
+++ b/ldm/modules/encoders/modules.py
@@ -154,7 +154,7 @@ class FrozenCLIPEmbedder(AbstractEncoder):
         batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
                                         return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
         tokens = batch_encoding["input_ids"].to(self.device)
-        outputs = self.transformer(input_ids=tokens)
+        outputs = self.transformer(input_ids=tokens, output_hidden_states=True)
         if self.penultimate:
             z = outputs.hidden_states[-2]  # simple enough
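
Context for the change: with Hugging Face CLIP text models, `outputs.hidden_states` is only populated when `output_hidden_states=True` is passed (or enabled in the model config); without it the attribute is None and indexing `hidden_states[-2]` for the penultimate layer would fail. Below is a minimal standalone sketch of the same idea, outside the repo; the checkpoint name and prompt are illustrative, not taken from the patch.

    # Sketch: request hidden states from a CLIP text model and pick the
    # penultimate layer, mirroring what the patched forward() does.
    import torch
    from transformers import CLIPTokenizer, CLIPTextModel

    name = "openai/clip-vit-large-patch14"  # illustrative checkpoint
    tokenizer = CLIPTokenizer.from_pretrained(name)
    model = CLIPTextModel.from_pretrained(name).eval()

    batch = tokenizer(["a photo of a cat"], truncation=True, max_length=77,
                      padding="max_length", return_tensors="pt")

    with torch.no_grad():
        out = model(input_ids=batch["input_ids"], output_hidden_states=True)

    z_last = out.last_hidden_state          # final-layer embedding (previous behaviour)
    z_penult = out.hidden_states[-2]        # second-to-last layer, as used when self.penultimate is set
    print(z_last.shape, z_penult.shape)     # both (1, 77, 768) for ViT-L/14

One design note: the penultimate hidden state has not passed through the model's final_layer_norm (that norm is only applied to produce last_hidden_state), so whether to re-normalize it afterwards is a separate choice the patch does not address.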