# Fixing GPTQ imports. (#1994)
# What does this PR do?

Moves the `GPTQWeight` and `Exl2Weight` imports out of module scope and into the functions that actually use them: `get_linear`, `_load_gqa`, `_load_multi_mqa_gptq`, and the `Weights` column loaders (`get_weights_col`, `get_multi_weights_col`). With the imports deferred, the GPTQ and exl2 layers are only loaded on the code paths that need them.

## Who can review?

@OlivierDehaene OR @Narsil
This commit is contained in: parent `799a193b10` · commit `9add5d0af5`
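For context, the whole diff is one mechanical change: the standard deferred-import pattern. A minimal sketch of the before/after shape, using names from this diff (the function body is abridged and illustrative, not the full implementation):

```python
# Before: a hard module-level dependency, paid by every importer of this file.
# from text_generation_server.layers.gptq import GPTQWeight


def get_linear(weight, bias, quantize):
    if quantize == "gptq":
        # After: the import runs only on the GPTQ code path; Python caches
        # modules in sys.modules, so repeated calls cost a dict lookup.
        from text_generation_server.layers.gptq import GPTQWeight

        if not isinstance(weight, GPTQWeight):
            raise NotImplementedError(
                "The passed weight is not `gptq` compatible, loader needs to be updated."
            )
        # ... construct and return the GPTQ linear layer ...
    # ... other quantize branches elided ...
```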
```diff
@@ -2,8 +2,6 @@ from typing import Optional
 import torch
 from torch.nn import functional as F
 from text_generation_server.utils.import_utils import SYSTEM
-from text_generation_server.layers.exl2 import Exl2Weight
-from text_generation_server.layers.gptq import GPTQWeight
 
 if SYSTEM == "rocm":
     try:
@@ -155,6 +153,8 @@ def get_linear(weight, bias, quantize):
             quant_type="nf4",
         )
     elif quantize == "exl2":
+        from text_generation_server.layers.exl2 import Exl2Weight
+
         if not isinstance(weight, Exl2Weight):
             raise NotImplementedError(
                 f"The passed weight is not `exl2` compatible, loader needs to be updated."
@@ -165,6 +165,8 @@ def get_linear(weight, bias, quantize):
         linear = ExllamaQuantLinear(weight, bias)
 
     elif quantize == "gptq":
+        from text_generation_server.layers.gptq import GPTQWeight
+
         if not isinstance(weight, GPTQWeight):
             raise NotImplementedError(
                 f"The passed weight is not `gptq` compatible, loader needs to be updated."
```
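Worth noting for reviewers (a general Python fact, demonstrated below with the standard-library `json` module rather than the TGI modules): a function-local `import` binds the same module object as a top-level one, because modules are cached in `sys.modules` after the first import. The `isinstance(weight, GPTQWeight)` and `isinstance(weight, Exl2Weight)` checks therefore behave exactly as before the move.

```python
import sys
import json as top_level_json  # bound at module scope


def local_import_sees_same_module() -> bool:
    import json  # bound at function scope; resolved via the sys.modules cache

    return json is top_level_json and json is sys.modules["json"]


assert local_import_sees_same_module()  # one module object, two bindings
```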
```diff
@@ -21,7 +21,6 @@ from transformers.activations import ACT2FN
 from transformers.configuration_utils import PretrainedConfig
 from typing import Optional, List, Tuple, Any
 from loguru import logger
-from text_generation_server.layers.gptq import GPTQWeight
 from text_generation_server.utils.import_utils import SYSTEM
 
 if SYSTEM != "xpu":
@@ -198,6 +197,8 @@ def _load_gqa(config, prefix: str, weights):
     v_stop = v_offset + (rank + 1) * kv_block_size
 
     if config.quantize in ["gptq", "awq"]:
+        from text_generation_server.layers.gptq import GPTQWeight
+
         try:
             qweight_slice = weights._get_slice(f"{prefix}.qweight")
             q_qweight = qweight_slice[:, q_start:q_stop]
```
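For intuition on the `_load_gqa` hunk above: the packed `qweight` tensor is sliced column-wise into per-rank blocks for tensor parallelism, which is why the local import sits just before the slicing `try` block. A toy sketch of that slicing; the shapes, `world_size`, and the packing factor of 8 are illustrative assumptions on my part, not values from this diff:

```python
import torch

world_size, rank = 4, 1
in_features, out_features = 512, 256

# GPTQ-style packed weight: several low-bit values per int32 element.
qweight = torch.randint(
    0, 2**31 - 1, (in_features // 8, out_features), dtype=torch.int32
)

block = out_features // world_size
q_start, q_stop = rank * block, (rank + 1) * block
shard = qweight[:, q_start:q_stop]  # this rank's columns, as in q_qweight above
assert shard.shape == (in_features // 8, block)
```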
```diff
@@ -5,7 +5,6 @@ from torch import nn
 from transformers.activations import ACT2FN
 from typing import Optional, List, Tuple
 
-from text_generation_server.layers.gptq import GPTQWeight
 from text_generation_server.layers.attention import (
     paged_attention,
     attention,
@@ -39,6 +38,8 @@ def load_multi_mqa(
 def _load_multi_mqa_gptq(
     config, prefix: str, weights, bias: bool, head_size, num_heads, hidden_size
 ):
+    from text_generation_server.layers.gptq import GPTQWeight
+
     if any("c_attn" in k for k in weights.routing.keys()) and not config.transpose:
         world_size = weights.process_group.size()
         rank = weights.process_group.rank()
```
```diff
@@ -7,8 +7,6 @@ import torch
 from loguru import logger
 from huggingface_hub import hf_hub_download
 import json
-from text_generation_server.layers.exl2 import Exl2Weight
-from text_generation_server.layers.gptq import GPTQWeight
 from text_generation_server.utils.log import log_once
 
 
@@ -221,6 +219,8 @@ class Weights:
 
     def get_weights_col(self, prefix: str, quantize: str):
         if quantize == "exl2":
+            from text_generation_server.layers.exl2 import Exl2Weight
+
             try:
                 q_weight = self.get_tensor(f"{prefix}.q_weight")
             except RuntimeError:
@@ -247,6 +247,8 @@ class Weights:
         if quantize == "exl2":
             raise ValueError("get_multi_weights_col is not supported for exl2")
         elif quantize in ["gptq", "awq"]:
+            from text_generation_server.layers.gptq import GPTQWeight
+
             try:
                 qweight = torch.cat(
                     [self.get_sharded(f"{p}.qweight", dim=1) for p in prefixes], dim=1
```
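A quick way to sanity-check the net effect of the refactor is to confirm that importing the touched modules no longer drags the quantization packages into `sys.modules`. A hypothetical smoke test; `text_generation_server.layers.linear` is my assumed path for the module defining `get_linear`, while the `layers.gptq` and `layers.exl2` paths come from the diff itself:

```python
import importlib
import sys

# Adjust this to the actual module that defines get_linear.
importlib.import_module("text_generation_server.layers.linear")

for deferred in (
    "text_generation_server.layers.gptq",
    "text_generation_server.layers.exl2",
):
    assert deferred not in sys.modules, f"{deferred} was imported eagerly"
```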