Fix LoRA bias error

huchenlei 2024-05-16 14:45:00 -04:00
parent 51b13a8c54
commit b2ae4490b9
1 changed file with 5 additions and 3 deletions

@@ -379,15 +379,17 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn
     bias_backup = getattr(self, "network_bias_backup", None)
     if bias_backup is None and wanted_names != ():
-        if current_names != ():
-            raise RuntimeError("no backup bias found and current bias are not unchanged")
-
         if isinstance(self, torch.nn.MultiheadAttention) and self.out_proj.bias is not None:
             bias_backup = self.out_proj.bias.to(devices.cpu, copy=True)
         elif getattr(self, 'bias', None) is not None:
             bias_backup = self.bias.to(devices.cpu, copy=True)
         else:
             bias_backup = None
+
+        # Unlike weight which always has value, some modules don't have bias.
+        # Only report if bias is not None and current bias are not unchanged.
+        if bias_backup is not None and current_names != ():
+            raise RuntimeError("no backup bias found and current bias are not unchanged")
         self.network_bias_backup = bias_backup
     if current_names != wanted_names:
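
The change moves the consistency check after the bias backup has been resolved, so modules that have no bias (where bias_backup stays None) no longer trigger a spurious RuntimeError. For context, below is a minimal standalone sketch of the fixed logic. It is not the webui module itself: the backup_bias helper name is hypothetical, the name-tuple bookkeeping is simplified, and webui's devices.cpu is replaced with torch.device("cpu").

import torch

def backup_bias(module, current_names, wanted_names):
    # Hypothetical standalone version of the logic in network_apply_weights;
    # webui's devices.cpu is swapped for torch.device("cpu") here.
    bias_backup = getattr(module, "network_bias_backup", None)
    if bias_backup is None and wanted_names != ():
        if isinstance(module, torch.nn.MultiheadAttention) and module.out_proj.bias is not None:
            bias_backup = module.out_proj.bias.to(torch.device("cpu"), copy=True)
        elif getattr(module, "bias", None) is not None:
            bias_backup = module.bias.to(torch.device("cpu"), copy=True)
        else:
            # Some modules have no bias at all (e.g. Conv2d(bias=False));
            # the pre-fix code raised before reaching this branch whenever
            # current_names was non-empty, even with nothing to back up.
            bias_backup = None

        # Only a non-None bias combined with already-applied networks is an
        # inconsistent state worth reporting.
        if bias_backup is not None and current_names != ():
            raise RuntimeError("no backup bias found and current bias are not unchanged")
        module.network_bias_backup = bias_backup

# A bias-free layer with a network already applied no longer raises:
conv = torch.nn.Conv2d(3, 3, kernel_size=1, bias=False)
backup_bias(conv, current_names=("lora_a",), wanted_names=("lora_b",))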