[Bug Fix] Update torch import reference in bnb quantization (#1902)
# What does this PR do?

Fixes an `Import Error` caused by a mismatch between the use of `torch.nn.Module` and the bare `nn.Module` reference in the bnb quantization module.
parent a69ef52cf6
commit 6c715f8183
```diff
@@ -70,7 +70,7 @@ class Linear8bitLt(torch.nn.Module):
         return out
 
 
-class Linear4bit(nn.Module):
+class Linear4bit(torch.nn.Module):
     def __init__(self, weight, bias, quant_type):
         super().__init__()
         self.weight = Params4bit(
```
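For context, a minimal sketch of the failure mode (illustrative class names, not the actual PEFT file), assuming the module only does `import torch` and never `from torch import nn`: referencing the bare name `nn` fails as soon as the class statement executes, which surfaces as an error when the module is imported, while the fully qualified `torch.nn.Module` base, matching `Linear8bitLt` above, works.

```python
# Minimal sketch, assuming a module with `import torch` but no `from torch import nn`.
# BrokenLinear4bit / FixedLinear4bit are hypothetical names for illustration only.
import torch

try:
    # The bare name `nn` is undefined here, so evaluating the base class raises
    # a NameError at class-definition (i.e. module-import) time.
    class BrokenLinear4bit(nn.Module):  # noqa: F821
        pass
except NameError as err:
    print(f"class definition failed: {err}")


# The fix: use the fully qualified base class, as the diff above does.
class FixedLinear4bit(torch.nn.Module):
    def __init__(self):
        super().__init__()


print(FixedLinear4bit())  # instantiates normally
```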