diff --git a/server/text_generation_server/models/custom_modeling/flash_gemma2_modeling.py b/server/text_generation_server/models/custom_modeling/flash_gemma2_modeling.py
index de86f514..54d212e6 100644
--- a/server/text_generation_server/models/custom_modeling/flash_gemma2_modeling.py
+++ b/server/text_generation_server/models/custom_modeling/flash_gemma2_modeling.py
@@ -265,7 +265,7 @@ class FlashGemma2Attention(torch.nn.Module):
 class Gemma2MLP(nn.Module):
     def __init__(self, prefix, config, weights):
         super().__init__()
-        act = config.hidden_act
+        act = config.hidden_activation
         self.act = (
             ACT2FN[act]
             if "gelu" not in act
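
A hedged note for context, not part of the PR itself: the rename tracks Gemma2 configs, which typically expose the activation under `hidden_activation` (usually "gelu_pytorch_tanh") rather than the `hidden_act` field found on many other model configs. If a loader had to tolerate both spellings, a fallback along the lines of the hypothetical helper below would work; the helper name and the final default value are assumptions for illustration, not something this diff establishes.

    def resolve_activation_name(config) -> str:
        # Prefer Gemma2's `hidden_activation`; fall back to the older
        # `hidden_act` if a config predates the rename. The last-resort
        # default ("gelu_pytorch_tanh") is an assumption, matching what
        # Gemma2 configs typically ship with.
        return getattr(config, "hidden_activation", None) or getattr(
            config, "hidden_act", "gelu_pytorch_tanh"
        )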