Fp8 Support (#1726)
Co-authored-by: Dong Shin <d0104.shin@gmail.com>
Commit 408dbc485c (parent c2fd35d875)
````diff
@@ -66,6 +66,7 @@ Options:
       - bitsandbytes: Bitsandbytes 8bit. Can be applied on any model, will cut the memory requirement in half, but it is known that the model will be much slower to run than the native f16
       - bitsandbytes-nf4: Bitsandbytes 4bit. Can be applied on any model, will cut the memory requirement by 4x, but it is known that the model will be much slower to run than the native f16
       - bitsandbytes-fp4: Bitsandbytes 4bit. nf4 should be preferred in most cases but maybe this one has better perplexity performance for your model
+      - fp8: [FP8](https://developer.nvidia.com/blog/nvidia-arm-and-intel-publish-fp8-specification-for-standardization-as-an-interchange-format-for-ai/) (e4m3) works on H100 and above. This dtype has native ops and should be the fastest if available. This is currently not the fastest because of local unpacking + padding to satisfy matrix multiplication limitations

 ```

 ## SPECULATE
````
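The new option states that fp8 (e4m3) needs H100-class hardware. As a minimal sketch (not part of this PR) of how one might verify that before enabling the launcher's fp8 quantization, the helper below checks for the float8 dtype in the local PyTorch build and for a compute capability of 9.0 or higher, which is an assumption derived from the "works on H100 and above" wording:

```python
import torch


def fp8_available() -> bool:
    """Rough check that fp8 (e4m3) matmuls can run on this machine.

    Assumes fp8 needs an H100-class GPU (compute capability >= 9.0), per the
    option description above, and a PyTorch build exposing float8 dtypes.
    """
    if not hasattr(torch, "float8_e4m3fn") or not torch.cuda.is_available():
        return False
    major, minor = torch.cuda.get_device_capability()
    return (major, minor) >= (9, 0)
```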
```diff
@@ -47,6 +47,11 @@ enum Quantization {
     /// Bitsandbytes 4bit. nf4 should be preferred in most cases but maybe this one has better
     /// perplexity performance for your model
     BitsandbytesFP4,
+    /// [FP8](https://developer.nvidia.com/blog/nvidia-arm-and-intel-publish-fp8-specification-for-standardization-as-an-interchange-format-for-ai/) (e4m3) works on H100 and above
+    /// This dtype has native ops and should be the fastest if available.
+    /// This is currently not the fastest because of local unpacking + padding to satisfy matrix
+    /// multiplication limitations.
+    Fp8,
 }

 impl std::fmt::Display for Quantization {
@@ -73,6 +78,9 @@ impl std::fmt::Display for Quantization {
             Quantization::Eetq => {
                 write!(f, "eetq")
             }
+            Quantization::Fp8 => {
+                write!(f, "fp8")
+            }
         }
     }
 }
```
```diff
@@ -19,6 +19,7 @@ class Quantization(str, Enum):
     gptq = "gptq"
     awq = "awq"
     eetq = "eetq"
+    fp8 = "fp8"


 class Dtype(str, Enum):
```
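For illustration only, a tiny sketch of what the enum addition enables on the Python side; the enum is re-declared locally here rather than imported, since the server's module path is not shown in this diff:

```python
from enum import Enum


# Local re-declaration mirroring the hunk above, just to show the accepted values.
class Quantization(str, Enum):
    gptq = "gptq"
    awq = "awq"
    eetq = "eetq"
    fp8 = "fp8"


# The CLI can now parse "fp8" as a valid quantization choice.
assert Quantization("fp8") is Quantization.fp8
```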
```diff
@@ -182,6 +182,48 @@ class EETQLinear(nn.Module):
         return output


+def fp8_quantize(weight, qdtype=torch.float8_e4m3fn):
+    device = weight.device
+    # weight, scale = quant_weights(weight, torch.int8, False)
+    finfo = torch.finfo(qdtype)
+    # Calculate the scale as dtype max divided by absmax
+    scale = finfo.max / weight.abs().max().clamp(min=1e-12)
+    # scale and clamp the tensor to bring it to
+    # the representative range of float8 data type
+    # (as default cast is unsaturated)
+    qweight = (weight * scale).clamp(min=finfo.min, max=finfo.max)
+    # Return both float8 data and the inverse scale (as float),
+    # as both required as inputs to torch._scaled_mm
+    qweight = qweight.to(qdtype)
+    scale = scale.float().reciprocal()
+    return qweight, scale
+
+
+class Fp8Linear(nn.Module):
+    def __init__(
+        self,
+        weight,
+        bias,
+    ) -> None:
+        super().__init__()
+        self.dtype = weight.dtype
+        self.qweight, self.scale = fp8_quantize(weight)
+
+        self.bias = bias if bias is not None else None
+
+    def forward(self, input: torch.Tensor) -> torch.Tensor:
+        qinput, scale = fp8_quantize(input)
+        output, _ = torch._scaled_mm(
+            qinput,
+            self.qweight.t(),
+            out_dtype=self.dtype,
+            scale_a=scale,
+            scale_b=self.scale,
+            bias=self.bias,
+        )
+        return output
+
+
 class Linear8bitLt(nn.Module):
     def __init__(
         self,
```
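A minimal sketch of how the new helpers behave, assuming `fp8_quantize` and `Fp8Linear` from the hunk above are in scope, a GPU with fp8 support is available, and a PyTorch build that exposes `torch.float8_e4m3fn` and `torch._scaled_mm`. The shapes are arbitrary multiples of 16 (a constraint of the fp8 matmul), and the printed values are only illustrative:

```python
import torch

# Quantize a half-precision weight to float8 e4m3, then reconstruct an approximation.
weight = torch.randn(128, 64, dtype=torch.float16, device="cuda")
qweight, scale = fp8_quantize(weight)
assert qweight.dtype == torch.float8_e4m3fn
# `scale` is the reciprocal of the quantization scale, so multiplying the
# float8 values back by it approximately recovers the original weight.
approx = qweight.to(torch.float16) * scale
print((approx - weight.float()).abs().max())  # small fp8 rounding error

# Forward pass through Fp8Linear: the activation is quantized on the fly and
# the matmul itself runs in float8 via torch._scaled_mm.
layer = Fp8Linear(weight, bias=None)
x = torch.randn(16, 64, dtype=torch.float16, device="cuda")
y = layer(x)
print(y.shape, y.dtype)  # torch.Size([16, 128]) torch.float16
```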
```diff
@@ -293,6 +335,8 @@ def get_linear(weight, bias, quantize):
             raise ImportError(
                 "Please install EETQ from https://github.com/NetEase-FuXi/EETQ"
             )
+    elif quantize == "fp8":
+        linear = Fp8Linear(weight, bias)
     elif quantize == "bitsandbytes":
         warn_deprecate_bnb()
         linear = Linear8bitLt(
```
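With the dispatch above, selecting fp8 quantization routes plain linear layers through `Fp8Linear`. A small sketch, assuming `get_linear` and `Fp8Linear` from this file are in scope and a supported GPU is present:

```python
import torch

weight = torch.randn(256, 128, dtype=torch.float16, device="cuda")
# The new branch builds an Fp8Linear when the quantize string is "fp8".
linear = get_linear(weight, bias=None, quantize="fp8")
assert isinstance(linear, Fp8Linear)
```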