From cf04a43fb1f9aa08f8095e99b9ef04cd768af302 Mon Sep 17 00:00:00 2001
From: Nicolas Patry
Date: Tue, 15 Oct 2024 12:43:49 +0200
Subject: [PATCH] Fixing linters. (#2650)

---
 docs/openapi.json                                   |  2 +-
 server/text_generation_server/models/causal_lm.py   | 11 ++---------
 server/text_generation_server/models/seq2seq_lm.py  |  6 +-----
 3 files changed, 4 insertions(+), 15 deletions(-)

diff --git a/docs/openapi.json b/docs/openapi.json
index 957fe246..d1b60f4d 100644
--- a/docs/openapi.json
+++ b/docs/openapi.json
@@ -2186,4 +2186,4 @@
       "description": "Hugging Face Text Generation Inference API"
     }
   ]
-}
\ No newline at end of file
+}
diff --git a/server/text_generation_server/models/causal_lm.py b/server/text_generation_server/models/causal_lm.py
index de2c0651..ef46cb8c 100644
--- a/server/text_generation_server/models/causal_lm.py
+++ b/server/text_generation_server/models/causal_lm.py
@@ -619,18 +619,11 @@ class CausalLM(Model):
             model_id,
             revision=revision,
             torch_dtype=dtype,
-            device_map=(
-                "auto"
-                if device_count > 1
-                else None
-            ),
+            device_map=("auto" if device_count > 1 else None),
             load_in_8bit=quantize == "bitsandbytes",
             trust_remote_code=trust_remote_code,
         )
-        if (
-            device_count == 1
-            and quantize != "bitsandbytes"
-        ):
+        if device_count == 1 and quantize != "bitsandbytes":
             model = model.to(device)
 
         if tokenizer.pad_token_id is None:
diff --git a/server/text_generation_server/models/seq2seq_lm.py b/server/text_generation_server/models/seq2seq_lm.py
index 94f87d02..91c99c50 100644
--- a/server/text_generation_server/models/seq2seq_lm.py
+++ b/server/text_generation_server/models/seq2seq_lm.py
@@ -649,11 +649,7 @@ class Seq2SeqLM(Model):
             model_id,
             revision=revision,
             torch_dtype=dtype,
-            device_map=(
-                "auto"
-                if device_count > 1
-                else None
-            ),
+            device_map=("auto" if device_count > 1 else None),
             load_in_8bit=quantize == "bitsandbytes",
             trust_remote_code=trust_remote_code,
         )