From 4f55f15840ec8049aa8881d135038fb63b61953b Mon Sep 17 00:00:00 2001
From: Nicolas Patry
Date: Mon, 1 Jul 2024 23:25:54 +0200
Subject: [PATCH] Fixing baichuan override. (#2158)

---
 .../models/custom_modeling/flash_llama_modeling.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py b/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py
index 6b82aeca..0ea9f623 100644
--- a/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py
+++ b/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py
@@ -117,6 +117,11 @@ class FlashLlamaAttention(torch.nn.Module):
         self.hidden_size = config.hidden_size
         self.head_size = self.hidden_size // self.num_heads
 
+        # Setting defaults for baichuan custom config which doesn't apply them.
+        config.rope_theta = getattr(config, "rope_theta", 10000)
+        config.num_key_value_heads = getattr(
+            config, "num_key_value_heads", config.num_attention_heads
+        )
         self.rotary_emb = PositionRotaryEmbedding.static(
             config=config,
             dim=self.head_size,
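
For context, a minimal sketch (not part of the patch) of the getattr-defaulting pattern the change relies on. The SimpleNamespace stand-in for a Baichuan-style config is an assumption for illustration; the real config class lives in the model repository and is assumed here to omit rope_theta and num_key_value_heads.

    # Sketch: fall back to defaults when a custom config omits attributes.
    from types import SimpleNamespace

    # Hypothetical Baichuan-like config that only defines num_attention_heads.
    config = SimpleNamespace(num_attention_heads=32)

    # Mirrors the patched lines: keep the attribute if present, else default it.
    config.rope_theta = getattr(config, "rope_theta", 10000)
    config.num_key_value_heads = getattr(
        config, "num_key_value_heads", config.num_attention_heads
    )

    assert config.rope_theta == 10000
    assert config.num_key_value_heads == 32

Because getattr returns the existing value when the attribute is set, configs that do define these fields are left untouched; only configs missing them (like Baichuan's) pick up the LLaMA defaults.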