fix: prefer hidden_activation over hidden_act in gemma2 (#2381)
This commit is contained in:
parent 2ca5980634
commit f852190060
@@ -265,7 +265,7 @@ class FlashGemma2Attention(torch.nn.Module):
 class Gemma2MLP(nn.Module):
     def __init__(self, prefix, config, weights):
         super().__init__()
-        act = config.hidden_act
+        act = config.hidden_activation
         self.act = (
             ACT2FN[act]
             if "gelu" not in act
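For context, below is a minimal sketch, not the repository's implementation, of how the activation name could be resolved defensively. The commit itself reads config.hidden_activation directly; the resolve_activation helper and its getattr fallback to the legacy hidden_act attribute are illustrative assumptions for configs that only define the older name.

```python
from transformers.activations import ACT2FN


def resolve_activation(config):
    # Prefer the newer Gemma2 attribute name, then fall back to the
    # legacy one; "gelu_pytorch_tanh" is used here as an assumed default.
    act = getattr(config, "hidden_activation", None) or getattr(
        config, "hidden_act", "gelu_pytorch_tanh"
    )
    # ACT2FN maps activation names to callables (e.g. the tanh-approximated GELU).
    return ACT2FN[act]
```

With a stock Gemma2 config this would typically resolve to the tanh-approximated GELU, since "gelu_pytorch_tanh" is the usual value of hidden_activation.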