add kwargs to cog patch for compatibility with newer diffusers versions

Victor Hall 2024-06-18 18:23:31 -04:00
parent 938fe5016d
commit dcbd9d45a9
1 changed file with 1 addition and 1 deletion


@@ -7,7 +7,7 @@ from transformers.modeling_utils import PreTrainedModel
# CogVLM stores inv_freq in the state dictionary but it is not in models._parameters so it cannot be quantized
# was patched in transformers for other models here: https://github.com/huggingface/transformers/pull/28837/files but cog is not part of transformers
def _patched_check_quantized_param(
-    self, model: "PreTrainedModel", param_value: "torch.Tensor", param_name: str, state_dict: Dict[str, Any]
+    self, model: "PreTrainedModel", param_value: "torch.Tensor", param_name: str, state_dict: Dict[str, Any], **kwargs
) -> bool:
# if "inv_freq" in param_name: # detect failure case
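
For context, a minimal sketch of how a patch like this can be installed at runtime. It assumes the target is transformers' bitsandbytes 4-bit quantizer and that the patch skips quantization for inv_freq (as the comments in the patched file suggest); the exact class and logic patched in this repo are outside this hunk, so treat the names below as illustrative:

from typing import Any, Dict

import torch
from transformers.modeling_utils import PreTrainedModel
from transformers.quantizers.quantizer_bnb_4bit import Bnb4BitHfQuantizer

# Keep a reference to the original method so the patch can delegate to it.
_original_check_quantized_param = Bnb4BitHfQuantizer.check_quantized_param

def _patched_check_quantized_param(
    self, model: "PreTrainedModel", param_value: "torch.Tensor", param_name: str, state_dict: Dict[str, Any], **kwargs
) -> bool:
    # Buffers like inv_freq appear in the state dict but not in
    # model._parameters, so they must be loaded as-is, not quantized.
    if "inv_freq" in param_name:
        return False
    # **kwargs absorbs any extra keyword arguments that newer library
    # versions pass, which is the point of this commit.
    return _original_check_quantized_param(self, model, param_value, param_name, state_dict, **kwargs)

# Install the patch before loading the model with a quantization_config.
Bnb4BitHfQuantizer.check_quantized_param = _patched_check_quantized_param

Without the **kwargs, any new keyword argument added to check_quantized_param upstream would raise a TypeError when the patched function is called; accepting and forwarding **kwargs keeps the patch compatible across library versions.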