import os
import torch
import torch.distributed

from torch import nn
from torch.nn import functional as F
from typing import List

HAS_BITS_AND_BYTES = True
try:
    import bitsandbytes as bnb
    from bitsandbytes.nn import Int8Params

except ImportError:
    HAS_BITS_AND_BYTES = False

from accelerate import init_empty_weights

from text_generation_server.utils.gptq.quant_linear import QuantLinear

HAS_EXLLAMA = True
if os.getenv("DISABLE_EXLLAMA") == "True":
    HAS_EXLLAMA = False
try:
    from text_generation_server.utils.gptq.exllama import Ex4bitLinear
except ImportError:
    HAS_EXLLAMA = False

from typing import Optional


# Monkey patching
@classmethod
def load_layer_norm(cls, prefix, weights, eps):
    weight = weights.get_tensor(f"{prefix}.weight")
    bias = weights.get_tensor(f"{prefix}.bias")
    with init_empty_weights():
        ln = cls(weight.shape, eps=eps)

    ln.weight = nn.Parameter(weight)
    ln.bias = nn.Parameter(bias)
    return ln


@classmethod
def load_layer_norm_no_bias(cls, prefix, weights, eps):
    weight = weights.get_tensor(f"{prefix}.weight")
    with init_empty_weights():
        ln = cls(weight.shape, eps=eps)

    ln.weight = nn.Parameter(weight)
    ln.bias = None
    return ln


torch.nn.LayerNorm.load = load_layer_norm
torch.nn.LayerNorm.load_no_bias = load_layer_norm_no_bias

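
# The loaders just above are monkey patched onto `torch.nn.LayerNorm` so model code
# can build a layer norm straight from checkpoint tensors. A minimal calling sketch
# (assumes `weights` is this repo's `Weights` helper exposing `get_tensor`):
#
#     ln = nn.LayerNorm.load("transformer.ln_f", weights, 1e-5)
#     ln_no_bias = nn.LayerNorm.load_no_bias("model.norm", weights, 1e-6)

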
class FastLinear(nn.Module):
    def __init__(
        self,
        weight,
        bias,
    ) -> None:
        super().__init__()
        self.weight = nn.Parameter(weight)
        if bias is not None:
            self.bias = nn.Parameter(bias)
        else:
            self.bias = None

    @classmethod
    def load(cls, config, prefix: str, weights, bias: bool):
        weight = weights.get_tensor(f"{prefix}.weight")
        if bias:
            bias = weights.get_tensor(f"{prefix}.bias")
        else:
            bias = None
        return cls(weight, bias)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return F.linear(input, self.weight, self.bias)

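
# A minimal usage sketch for `FastLinear` (tensors here are illustrative, not part of
# this module): the already-loaded weight is wrapped directly in an `nn.Parameter`,
# skipping `nn.Linear`'s redundant random init entirely.
#
#     w = torch.randn(4096, 4096, dtype=torch.float16)
#     layer = FastLinear(w, bias=None)
#     y = layer(torch.randn(1, 4096, dtype=torch.float16))

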
class Linear8bitLt(nn.Module):
    def __init__(
        self,
        weight,
        bias,
        has_fp16_weights=True,
        memory_efficient_backward=False,
        threshold=0.0,
        index=None,
    ):
        super().__init__()
        assert (
            not memory_efficient_backward
        ), "memory_efficient_backward is no longer required and the argument is deprecated in 0.37.0 and will be removed in 0.39.0"
        self.state = bnb.MatmulLtState()
        self.index = index

        # Necessary for stacked layers
        self.state.threshold = threshold
        self.state.has_fp16_weights = has_fp16_weights
        self.state.memory_efficient_backward = memory_efficient_backward
        if threshold > 0.0 and not has_fp16_weights:
            self.state.use_pool = True

        self.weight = Int8Params(
            weight.data,
            has_fp16_weights=has_fp16_weights,
            requires_grad=has_fp16_weights,
        )
        self.weight.cuda(weight.device)
        self.bias = bias

    def init_8bit_state(self):
        self.state.CB = self.weight.CB
        self.state.SCB = self.weight.SCB
        self.weight.CB = None
        self.weight.SCB = None

    def forward(self, x: torch.Tensor):
        self.state.is_training = self.training
        if self.weight.CB is not None:
            self.init_8bit_state()

        # weights are cast automatically as Int8Params, but the bias has to be cast manually
        if self.bias is not None and self.bias.dtype != x.dtype:
            self.bias.data = self.bias.data.to(x.dtype)

        out = bnb.matmul(x, self.weight, bias=self.bias, state=self.state)

        if not self.state.has_fp16_weights:
            if self.state.CB is not None and self.state.CxB is not None:
                # we converted 8-bit row major to turing/ampere format in the first inference pass
                # we no longer need the row-major weight
                del self.state.CB
                self.weight.data = self.state.CxB
        return out

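
# `Linear8bitLt` implements the bitsandbytes LLM.int8() scheme: the weight is stored
# as int8 (`Int8Params`) and activation outliers above `threshold` are handled in fp16.
# A rough usage sketch (assumes an fp16 CUDA weight tensor; values are illustrative):
#
#     layer = Linear8bitLt(weight, bias, has_fp16_weights=False, threshold=6.0)
#     y = layer(x)  # the first call converts the weight to the GPU-specific layout

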
def get_linear(weight, bias, quantize):
    if quantize is None:
        linear = FastLinear(weight, bias)
    elif quantize == "bitsandbytes":
        linear = Linear8bitLt(
            weight,
            bias,
            has_fp16_weights=False,
            threshold=6.0,
        )
        if bias is not None:
            linear.bias = nn.Parameter(bias)
    elif quantize == "gptq":
        try:
            qweight, qzeros, scales, g_idx, bits, groupsize, use_exllama = weight
        except Exception:
            raise NotImplementedError(
                "The passed weight is not `gptq` compatible, loader needs to be updated."
            )

        if use_exllama:
            linear = Ex4bitLinear(qweight, qzeros, scales, g_idx, bias, bits, groupsize)
        else:
            linear = QuantLinear(
                qweight,
                qzeros,
                scales,
                g_idx,
                bias,
                bits,
                groupsize,
            )
    else:
        raise NotImplementedError(f"Quantization `{quantize}` is not implemented yet.")
    return linear

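
# Dispatch sketch for `get_linear` (illustrative): with `quantize="gptq"` the `weight`
# argument is expected to be the tuple unpacked above, not a plain tensor:
#
#     weight = (qweight, qzeros, scales, g_idx, bits, groupsize, use_exllama)
#     linear = get_linear(weight, bias=None, quantize="gptq")
#
# whereas `quantize=None` and `quantize="bitsandbytes"` take a regular 2D weight tensor.

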
class SuperLayer(nn.Module):
    def __init__(self, linear):
        super().__init__()
        self.linear = linear

    def forward(self, x):
        return self.linear.forward(x)


class TensorParallelHead(SuperLayer):
    def __init__(self, linear, process_group, should_gather: bool):
        super().__init__(linear)
        self.process_group = process_group
        self.should_gather = should_gather

    @staticmethod
    def load(config, prefix: str, weights):
        if weights.process_group.size() > 1:
            try:
                weight = weights.get_sharded(f"{prefix}.weight", dim=0)
                should_gather = True
            except AssertionError:
                # If the vocab size is not divisible by number of shards
                # just load the entire thing.
                weight = weights.get_tensor(f"{prefix}.weight")
                should_gather = False
        else:
            weight = weights.get_tensor(f"{prefix}.weight")
            should_gather = False

        # GPTQ doesn't quantize heads (nor embeddings)
        if config.quantize == "gptq":
            quantize = None
        else:
            quantize = config.quantize
        return TensorParallelHead(
            get_linear(weight, bias=None, quantize=quantize),
            process_group=weights.process_group,
            should_gather=should_gather,
        )

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        if not self.should_gather:
            return super().forward(input)

        world_size = self.process_group.size()
        if len(input.shape) == 2 and isinstance(self.linear, FastLinear):
            out_dim = self.linear.weight.shape[0]

            if input.shape[0] == 1:
                world_out = input.new_empty(1, out_dim * world_size)
                local_out = input.new_empty(1, out_dim)
                gather_input = local_out
            else:
                world_out = input.new_empty(out_dim * world_size, input.shape[0])
                gather_input = input.new_empty(out_dim, input.shape[0])
                local_out = gather_input.T

            torch.mm(input, self.linear.weight.T, out=local_out)

            torch.distributed.all_gather_into_tensor(
                world_out, gather_input, group=self.process_group
            )

            if input.shape[0] == 1:
                return world_out
            return world_out.T

        output = super().forward(input)
        world_output = [
            torch.empty_like(output) for _ in range(self.process_group.size())
        ]
        torch.distributed.all_gather(world_output, output, group=self.process_group)
        world_output = torch.cat(world_output, dim=-1)
        return world_output


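
# Shape walk-through for the gathered forward above (illustrative numbers): with
# world_size=2 and a local shard of out_dim=16000, a single-token input of shape
# (1, hidden) yields local logits of shape (1, 16000), and `all_gather_into_tensor`
# fills `world_out` of shape (1, 32000) directly. For batch > 1 the gather runs on
# the transposed (out_dim, batch) layout so each rank's shard stays contiguous,
# which is why the result is returned as `world_out.T`.

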
class TensorParallelColumnLinear(SuperLayer):
    @classmethod
    def load(cls, config, prefix: str, weights, bias: bool):
        return cls.load_multi(config, [prefix], weights, bias, dim=0)

    @classmethod
    def load_multi(cls, config, prefixes: List[str], weights, bias: bool, dim: int):
        weight = weights.get_multi_weights_col(
            prefixes, quantize=config.quantize, dim=dim
        )

        if bias:
            b = [weights.get_sharded(f"{p}.bias", dim=0) for p in prefixes]
            bias = torch.cat(b, dim=dim)
        else:
            bias = None
        linear = get_linear(weight, bias, config.quantize)
        return cls(linear)

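
# `load_multi` stitches several column-sharded projections into a single matmul.
# An illustrative call for a fused QKV projection (prefix names are examples, not
# fixed by this module):
#
#     qkv_proj = TensorParallelColumnLinear.load_multi(
#         config,
#         prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"],
#         weights=weights,
#         bias=False,
#         dim=0,
#     )

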
class TensorParallelRowLinear(SuperLayer):
    def __init__(self, linear, process_group):
        super().__init__(linear)
        self.process_group = process_group

    @classmethod
    def load(cls, config, prefix: str, weights, bias: bool):
        weight = weights.get_multi_weights_row(prefix, quantize=config.quantize)

        if bias and weights.process_group.rank() == 0:
            # The bias is only loaded on the first rank process
            bias = weights.get_tensor(f"{prefix}.bias")
        else:
            bias = None
        return cls(
            get_linear(weight, bias, config.quantize),
            process_group=weights.process_group,
        )

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        out = super().forward(input)
        if self.process_group.size() > 1:
            torch.distributed.all_reduce(out, group=self.process_group)
        return out

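
# Row-parallel layers shard the input dimension: each rank computes a partial matmul
# over its slice and the `all_reduce` in `forward` sums the partial results. That is
# also why `load` keeps the bias on rank 0 only; adding it on every rank would count
# it `world_size` times after the reduction.

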
class TensorParallelEmbedding(nn.Module):
    def __init__(self, prefix: str, weights, reduce=True):
        super().__init__()
        weight = weights.get_partial_sharded(f"{prefix}.weight", dim=0)
        num_embeddings = weights.get_shape(f"{prefix}.weight")[0]

        process_group = weights.process_group

        world_size = process_group.size()
        rank = process_group.rank()

        block_size = num_embeddings // world_size
        self.min_id = rank * block_size
        self.max_id = min(num_embeddings, (rank + 1) * block_size)
        self.null_idx = block_size
        self.process_group = weights.process_group
        self.reduce = reduce

        """Additional 0 entry used for masking"""
        self.weight = nn.Parameter(F.pad(weight, (0, 0, 0, 1)))

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        # default all out of bounds values to `self.null_idx` that will then be mapped to 0
        # translate for [0, self.max_id - self.min_id[
        input = torch.where(
            (self.min_id > input) | (input >= self.max_id),
            self.null_idx,
            input - self.min_id,
        )
        out = torch.nn.functional.embedding(input, self.weight)
        if self.reduce and self.process_group.size() > 1:
            torch.distributed.all_reduce(out, group=self.process_group)
        return out


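
# Worked example of the id partitioning above (illustrative numbers): with
# num_embeddings=32000 and world_size=2, block_size=16000, so rank 0 owns ids
# [0, 16000) and rank 1 owns [16000, 32000). Ids outside the local range map to
# `null_idx`, the extra zero row appended by `F.pad`, so after the all_reduce each
# token receives its embedding from exactly one rank.

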
try:
    import dropout_layer_norm

    class FastLayerNorm(nn.LayerNorm):
        def forward(self, hidden_states, residual=None):
            if hidden_states.shape[-1] > 8192:
                if residual is not None:
                    hidden_states += residual
                residual = hidden_states

                return super(FastLayerNorm, self).forward(hidden_states), residual
            else:
                (
                    normed_hidden_states,
                    residual,
                    *rest,
                ) = dropout_layer_norm.dropout_add_ln_fwd(
                    hidden_states,
                    residual,
                    self.weight,
                    self.bias,
                    None,
                    None,
                    None,
                    None,
                    0.0,
                    self.eps,
                    1.0,
                    0,
                    None,
                    False,
                    False,
                )
                if residual is None:
                    residual = hidden_states

                return normed_hidden_states, residual

except ImportError:
    pass


try:
    from flash_attn.layers.rotary import RotaryEmbedding
    import rotary_emb

    def _create_inv_freq(dim, base, device):
        inv_freq = 1.0 / (
            base
            ** (torch.arange(0, dim, 2, device=device, dtype=torch.float32) / dim)
        )
        return inv_freq

    def _get_rope_config(config):
        if os.getenv("ROPE_SCALING", None) is not None:
            rope_scaling = {
                "type": os.environ["ROPE_SCALING"],
                "factor": float(os.environ["ROPE_FACTOR"]),
            }
            return rope_scaling
        return getattr(config, "rope_scaling", None)

    class PositionRotaryEmbedding(nn.Module):
        def __init__(self, inv_freq, scaling_factor):
            super().__init__()
            self.inv_freq = inv_freq
            self._seq_len_cached = 0
            self._cos_cached = None
            self._sin_cached = None
            self._cos_k_cached = None
            self._sin_k_cached = None
            self.scaling_factor = scaling_factor
            self.dynamic_args = None

        @classmethod
        def static(cls, config, dim, base, device):
            inv_freq = _create_inv_freq(dim, base, device)
            scaling_factor = None
            rope_scaling = _get_rope_config(config)
            if rope_scaling is not None:
                scaling_factor = rope_scaling["factor"]
                if rope_scaling["type"] == "linear":
                    pass
                elif rope_scaling["type"] == "dynamic":
                    return DynamicPositionRotaryEmbedding(
                        dim=dim,
                        max_position_embeddings=config.max_position_embeddings,
                        base=base,
                        device=inv_freq.device,
                        scaling_factor=scaling_factor,
                    )
                else:
                    raise NotImplementedError(
                        f"rope scaling type {rope_scaling['type']} is not implemented or invalid"
                    )
            return cls(inv_freq, scaling_factor)

        @classmethod
        def load(cls, config, prefix, weights):
            # XXX: Always load this in float32 !
            dtype = weights.dtype
            weights.dtype = torch.float32
            inv_freq = weights.get_tensor(f"{prefix}.inv_freq")
            weights.dtype = dtype

            scaling_factor = None
            rope_scaling = _get_rope_config(config)
            if rope_scaling is not None:
                scaling_factor = rope_scaling["factor"]
                if rope_scaling["type"] == "linear":
                    pass
                elif rope_scaling["type"] == "dynamic":
                    return DynamicPositionRotaryEmbedding(
                        dim=2 * inv_freq.shape[0],
                        max_position_embeddings=config.max_position_embeddings,
                        base=10000.0,
                        device=inv_freq.device,
                        scaling_factor=scaling_factor,
                    )
                else:
                    raise NotImplementedError(
                        f"rope scaling type {rope_scaling['type']} is not implemented or invalid"
                    )
            return cls(inv_freq, scaling_factor)

        def _update_cos_sin_cache(self, dtype, device, seqlen):
            # Reset the tables if the sequence length has changed,
            # or if we're on a new device (possibly due to tracing for instance)
            if (
                seqlen > self._seq_len_cached
                or self._cos_cached.device != device
                or self._cos_cached.dtype != dtype
            ):
                self._seq_len_cached = seqlen
                t = torch.arange(seqlen, device=device, dtype=self.inv_freq.dtype)
                if self.scaling_factor is not None:
                    t /= self.scaling_factor
                # Don't do einsum, it converts fp32 to fp16
                # freqs = torch.einsum("i,j->ij", t, self.inv_freq)

                freqs = torch.outer(t, self.inv_freq.to(device=t.device))
                self._cos_cached = torch.cos(freqs).to(dtype)
                self._sin_cached = torch.sin(freqs).to(dtype)

        def get_cos_sin(
            self, position_ids: torch.Tensor, max_s: int, dtype: torch.dtype
        ):
            """
            Return cos and sin for the asked position ids
            """

            self._update_cos_sin_cache(dtype, position_ids.device, max_s)

            cos = torch.index_select(self._cos_cached, 0, position_ids)
            sin = torch.index_select(self._sin_cached, 0, position_ids)
            return cos.unsqueeze(1), sin.unsqueeze(1)

        def forward(self, x: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor):
            rotary_dim = cos.shape[-1]
            x1 = x[..., :rotary_dim]
            x2 = x[..., rotary_dim : 2 * rotary_dim]

            rotary_emb.apply_rotary(x1, x2, cos, sin, x1, x2, False)
            return x

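
    # A hedged sketch of the expected call order for rotary embeddings (`rope` is an
    # illustrative `PositionRotaryEmbedding` instance; `apply_rotary` mutates in place):
    #
    #     cos, sin = rope.get_cos_sin(position_ids, max_s, query.dtype)
    #     rope(query, cos, sin)
    #     rope(key, cos, sin)
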
    class DynamicPositionRotaryEmbedding(PositionRotaryEmbedding):
        def __init__(self, dim, max_position_embeddings, base, device, scaling_factor):
            inv_freq = _create_inv_freq(dim, base, device)
            super().__init__(inv_freq, scaling_factor)
            self.dim = dim
            self.max_position_embeddings = max_position_embeddings
            self.base = base

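
        # Worked example of the dynamic NTK base adjustment performed in
        # `_update_cos_sin_cache` below (illustrative numbers): with base=10000,
        # scaling_factor=2, max_position_embeddings=2048, dim=128 and seqlen=4096,
        # newbase = 10000 * (2 * 4096 / 2048 - (2 - 1)) ** (128 / 126)
        #         = 10000 * 3 ** (64 / 63) ≈ 30527,
        # which slows the low frequencies enough to cover the longer context.
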
        def _update_cos_sin_cache(self, dtype, device, seqlen):
            # Reset the tables if the sequence length has changed,
            # or if we're on a new device (possibly due to tracing for instance)
            if (
                seqlen > self._seq_len_cached
                or self._cos_cached.device != device
                or self._cos_cached.dtype != dtype
            ):
                if seqlen > self.max_position_embeddings:
                    newbase = self.base * (
                        (self.scaling_factor * seqlen / self.max_position_embeddings)
                        - (self.scaling_factor - 1)
                    ) ** (self.dim / (self.dim - 2))
                    self.inv_freq = _create_inv_freq(
                        self.dim, newbase, self.inv_freq.device
                    )
                self._seq_len_cached = seqlen
                t = torch.arange(seqlen, device=device, dtype=self.inv_freq.dtype)
                if self.scaling_factor is not None:
                    t /= self.scaling_factor
                # Don't do einsum, it converts fp32 to fp16
                # freqs = torch.einsum("i,j->ij", t, self.inv_freq)

                freqs = torch.outer(t, self.inv_freq.to(device=t.device))
                self._cos_cached = torch.cos(freqs).to(dtype)
                self._sin_cached = torch.sin(freqs).to(dtype)

except ImportError:
    pass