From 8094ecfc9ef22c838fa7d49db4af8301539619e3 Mon Sep 17 00:00:00 2001
From: almersawi <43927639+almersawi@users.noreply.github.com>
Date: Thu, 8 Aug 2024 03:45:23 +0400
Subject: [PATCH] fix: fix num_ln_in_parallel_attn attribute name typo in
 RWConfig (#2350)

Co-authored-by: Islam Almersawi
---
 .../models/custom_modeling/flash_rw_modeling.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/server/text_generation_server/models/custom_modeling/flash_rw_modeling.py b/server/text_generation_server/models/custom_modeling/flash_rw_modeling.py
index fc002082..10f995a3 100644
--- a/server/text_generation_server/models/custom_modeling/flash_rw_modeling.py
+++ b/server/text_generation_server/models/custom_modeling/flash_rw_modeling.py
@@ -94,7 +94,7 @@ class RWConfig(PretrainedConfig):
             else kwargs.pop("n_head", 8)
         )
         self.layer_norm_epsilon = layer_norm_epsilon
-        self.num_ln_in_parallel_attention = num_ln_in_prallel_attention
+        self.num_ln_in_parallel_attn = num_ln_in_prallel_attention
         self.initializer_range = initializer_range
         self.use_cache = use_cache
         self.hidden_dropout = hidden_dropout
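Note: a minimal sketch of why the one-word rename matters, with a simplified
config and a hypothetical consumer function standing in for the model code
(names and the fallback value 2 are assumptions for illustration, not taken
from the patch). The misspelled *parameter* name num_ln_in_prallel_attention
is verbatim repository code and is deliberately left untouched; only the
attribute it is stored under changes.

    # Python sketch: attribute-name mismatch between config and consumer.
    class RWConfig:
        def __init__(self, num_ln_in_prallel_attention=None):
            # Before the patch this assigned self.num_ln_in_parallel_attention,
            # so any lookup of num_ln_in_parallel_attn missed the stored value.
            self.num_ln_in_parallel_attn = num_ln_in_prallel_attention

    def num_layer_norms(config):
        # Hypothetical consumer mirroring the attribute the model code reads,
        # falling back to an assumed default of 2 when the attribute is unset.
        value = getattr(config, "num_ln_in_parallel_attn", None)
        return value if value is not None else 2

    # With the fix, the user-supplied value reaches the consumer; before the
    # fix the lookup silently fell through to the fallback.
    assert num_layer_norms(RWConfig(num_ln_in_prallel_attention=1)) == 1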