From 87f43814e3a026b6df603efdc309357543c52632 Mon Sep 17 00:00:00 2001
From: Nicolas Patry
Date: Thu, 5 Oct 2023 10:11:27 +0200
Subject: [PATCH] Fixing GPTQ exllama kernel usage. (#1101)

# What does this PR do?

Fixes #1098

The `use_exllama` flag in the weight tuple returned for GPTQ weights was
hard-coded to `False`, so the exllama kernels were never used. It is now
derived from the quantization parameters: exllama is enabled only for 4-bit
GPTQ weights, and only when `HAS_EXLLAMA` reports that the kernels are
available.

## Before submitting
- [ ] This PR fixes a typo or improves the docs (you can dismiss the other
  checks if that's the case).
- [ ] Did you read the [contributor guideline](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md#start-contributing-pull-requests),
  Pull Request section?
- [ ] Was this discussed/approved via a GitHub issue or the
  [forum](https://discuss.huggingface.co/)? Please add a link to it if that's
  the case.
- [ ] Did you make sure to update the documentation with your changes? Here
  are the [documentation guidelines](https://github.com/huggingface/transformers/tree/main/docs),
  and [here are tips on formatting docstrings](https://github.com/huggingface/transformers/tree/main/docs#writing-source-documentation).
- [ ] Did you write any new necessary tests?

## Who can review?

Anyone in the community is free to review the PR once the tests have passed.
Feel free to tag members/contributors who may be interested in your PR.
---
 server/text_generation_server/utils/weights.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/server/text_generation_server/utils/weights.py b/server/text_generation_server/utils/weights.py
index 4bae8cc0..2f330d9c 100644
--- a/server/text_generation_server/utils/weights.py
+++ b/server/text_generation_server/utils/weights.py
@@ -212,7 +212,9 @@ class Weights:
                 g_idx = None
 
             bits, groupsize = self._get_gptq_params()
-            weight = (qweight, qzeros, scales, g_idx, bits, groupsize, False)
+            from text_generation_server.utils.layers import HAS_EXLLAMA
+            use_exllama = bits == 4 and HAS_EXLLAMA and quantize == "gptq"
+            weight = (qweight, qzeros, scales, g_idx, bits, groupsize, use_exllama)
         else:
             w = [self.get_sharded(f"{p}.weight", dim=0) for p in prefixes]
             weight = torch.cat(w, dim=dim)
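For reference, here is a minimal, self-contained sketch of the gating logic
this patch introduces. The `exllama_kernels` import probe below is an
assumption standing in for the real `HAS_EXLLAMA` flag exported by
`text_generation_server.utils.layers`, and `should_use_exllama` is a
hypothetical helper, not part of the server code:

```python
# Hypothetical stand-in for text_generation_server.utils.layers.HAS_EXLLAMA:
# probe for the compiled exllama kernels directly.
try:
    import exllama_kernels  # noqa: F401

    HAS_EXLLAMA = True
except ImportError:
    HAS_EXLLAMA = False


def should_use_exllama(bits: int, quantize: str) -> bool:
    """Mirror the patched condition: exllama only handles 4-bit GPTQ weights."""
    return bits == 4 and HAS_EXLLAMA and quantize == "gptq"


# 4-bit GPTQ weights take the exllama path when the kernels are installed;
# 8-bit GPTQ and AWQ weights keep the default path (the flag stays False).
print(should_use_exllama(4, "gptq"))  # True iff the kernels are importable
print(should_use_exllama(8, "gptq"))  # False
print(should_use_exllama(4, "awq"))   # False
```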