[Bug Fix] Update torch import reference in bnb quantization (#1902)

# What does this PR do?

Fixes an `ImportError` caused by a mismatch in usage between
`torch.nn.Module` and `nn.Module`.
This commit is contained in:
Dhruv Srikanth 2024-05-15 20:08:32 +01:00 committed by GitHub
parent a69ef52cf6
commit 6c715f8183
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
1 changed file with 1 addition and 1 deletion

View File

@@ -70,7 +70,7 @@ class Linear8bitLt(torch.nn.Module):
         return out

-class Linear4bit(nn.Module):
+class Linear4bit(torch.nn.Module):
     def __init__(self, weight, bias, quant_type):
         super().__init__()
         self.weight = Params4bit(