From 7947c347b7b489bcf03719253d0a21a4a346d1bc Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Danie=CC=88l=20de=20Kok?=
Date: Wed, 26 Jun 2024 10:38:08 +0200
Subject: [PATCH] exl2 phi does not use packed QKV/gate-up projections

---
 .../models/custom_modeling/flash_llama_modeling.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py b/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py
index c48ed268..ee2ada0d 100644
--- a/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py
+++ b/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py
@@ -60,7 +60,7 @@ def load_attention(config, prefix, weights, layer_id):
     sizes = None
     prefixes = None
 
-    if config.model_type == "phi3":
+    if config.model_type == "phi3" and config.quantize != "exl2":
         prefix = f"{prefix}.qkv_proj"
         base_layer = TensorParallelColumnLinear.load_qkv(
             config,
@@ -246,7 +246,7 @@ class LlamaMLP(nn.Module):
         # Fuse gate and up proj
         bias = getattr(config, "mlp_bias", False)
 
-        if config.model_type == "phi3":
+        if config.model_type == "phi3" and config.quantize != "exl2":
             gate_up_proj = TensorParallelColumnLinear.load_gate_up(
                 config,
                 prefix=f"{prefix}.gate_up_proj",
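
Note (not part of the patch): a minimal illustrative sketch of the tensor-prefix selection that
the added `config.quantize != "exl2"` check controls. The function `select_qkv_prefixes` and its
signature are hypothetical, not TGI's actual API; the only assumption taken from the subject line
is that exl2-quantized phi3 checkpoints ship separate Q/K/V (and gate/up) tensors rather than the
packed qkv_proj / gate_up_proj tensors of regular phi3 checkpoints.

    from typing import List, Optional

    def select_qkv_prefixes(model_type: str, quantize: Optional[str], prefix: str) -> List[str]:
        """Pick which checkpoint tensor prefixes to load for the attention projection."""
        if model_type == "phi3" and quantize != "exl2":
            # Regular phi3 checkpoints pack Q, K and V into a single qkv_proj tensor.
            return [f"{prefix}.qkv_proj"]
        # exl2-quantized phi3 checkpoints keep separate Q/K/V tensors, so they take
        # the same multi-tensor path as other Llama-style models.
        return [f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"]

    # Example:
    #   select_qkv_prefixes("phi3", None, "model.layers.0.self_attn")
    #     -> ["model.layers.0.self_attn.qkv_proj"]
    #   select_qkv_prefixes("phi3", "exl2", "model.layers.0.self_attn")
    #     -> ["model.layers.0.self_attn.q_proj",
    #         "model.layers.0.self_attn.k_proj",
    #         "model.layers.0.self_attn.v_proj"]

The gate/up projection in LlamaMLP follows the same pattern: packed gate_up_proj for regular phi3,
separate gate_proj/up_proj tensors when the checkpoint is exl2-quantized.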