exl2 phi does not use packed QKV/gate-up projections

Daniël de Kok 2024-06-26 10:38:08 +02:00
parent be2d38032a
commit 7947c347b7
1 changed file with 2 additions and 2 deletions

@@ -60,7 +60,7 @@ def load_attention(config, prefix, weights, layer_id):
     sizes = None
     prefixes = None
-    if config.model_type == "phi3":
+    if config.model_type == "phi3" and config.quantize != "exl2":
        prefix = f"{prefix}.qkv_proj"
        base_layer = TensorParallelColumnLinear.load_qkv(
            config,
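
With the added quantize guard, exl2-quantized phi3 checkpoints skip the packed qkv_proj path and instead fall through to loading the three projections separately. A minimal sketch of that unpacked path, assuming a fallback via TensorParallelColumnLinear.load_multi over the usual Llama-style q_proj/k_proj/v_proj tensor names (the helper name is hypothetical; the actual else branch lies outside this diff's context lines):

# Hypothetical sketch, not the exact TGI fallback: load q/k/v as three
# separate column-parallel projections instead of one packed qkv_proj.
# exl2 checkpoints store these tensors separately, so there is no fused
# tensor to split along the output dimension.
from text_generation_server.layers import TensorParallelColumnLinear

def load_attention_unpacked(config, prefix, weights):
    return TensorParallelColumnLinear.load_multi(
        config,
        prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"],
        dim=0,
        weights=weights,
        bias=False,
    )
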
@@ -246,7 +246,7 @@ class LlamaMLP(nn.Module):
         # Fuse gate and up proj
         bias = getattr(config, "mlp_bias", False)
-        if config.model_type == "phi3":
+        if config.model_type == "phi3" and config.quantize != "exl2":
            gate_up_proj = TensorParallelColumnLinear.load_gate_up(
                config,
                prefix=f"{prefix}.gate_up_proj",
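
The same guard applies to the MLP: under exl2 the fused gate_up_proj load is skipped in favor of the separate gate_proj and up_proj tensors. A minimal sketch under the same assumption (a load_multi fallback; the helper name is hypothetical):

# Hypothetical sketch, not the exact TGI fallback: load gate and up as
# two separate column-parallel projections rather than one fused
# gate_up_proj tensor, matching how exl2 checkpoints store them.
from text_generation_server.layers import TensorParallelColumnLinear

def load_gate_up_unpacked(config, prefix, weights, bias):
    return TensorParallelColumnLinear.load_multi(
        config,
        prefixes=[f"{prefix}.gate_proj", f"{prefix}.up_proj"],
        dim=0,
        weights=weights,
        bias=bias,
    )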