From 799a193b109662743bed1b18a09af1fdcd508c8b Mon Sep 17 00:00:00 2001
From: Nicolas Patry
Date: Sat, 1 Jun 2024 08:47:00 +0000
Subject: [PATCH] Fixing Phi3.

---
 .../models/custom_modeling/flash_llama_modeling.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py b/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py
index c0fa09fd..cef712f0 100644
--- a/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py
+++ b/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py
@@ -52,7 +52,8 @@ if SYSTEM == "rocm":
 
 
 def load_attention(config, prefix, weights):
-    bias = config.attention_bias
+    # Only defined in granite.
+    bias = getattr(config, "attention_bias", False)
 
     # if specific model type, load the correct attention
     if config.model_type == "phi3":
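
A minimal sketch of the failure mode this one-line change addresses, assuming a
transformers-style config object whose architecture options are plain attributes
(per the in-diff comment, attention_bias is only defined for granite; the config
classes below are hypothetical stand-ins, not the real ones):

    # Hypothetical minimal configs; only attribute access matters here.
    class GraniteLikeConfig:
        model_type = "granite"
        attention_bias = True  # granite-style configs define this field

    class Phi3LikeConfig:
        model_type = "phi3"    # no attention_bias attribute at all

    for config in (GraniteLikeConfig(), Phi3LikeConfig()):
        # Before the patch: config.attention_bias raised AttributeError on phi3.
        # After the patch: a missing field falls back to bias-free attention.
        bias = getattr(config, "attention_bias", False)
        print(config.model_type, "->", bias)

    # Output:
    #   granite -> True
    #   phi3 -> False

Using getattr with an explicit False default keeps load_attention shared across
all LLaMA-family model types without requiring every config class to declare the
field.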