Use symmetric quantization in the `quantize` subcommand (#2120)
Packing of asymmetric quantization is broken: all (q)zeros values of `0` get reset to `1`, resulting in a loss of accuracy. Use symmetric quantization instead. To be able to distinguish models with symmetric and asymmetric quantization, a new config tensor `gptq_sym` is added. If this tensor is not present, we assume `sym=False`.
parent c46eaf707b
commit dbb23fbfa8
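
For intuition, a minimal sketch (not TGI code) of why a zero point silently reset from `0` to `1` corrupts the dequantized weights, and why symmetric quantization never hits the broken path:

```python
import torch

# GPTQ-style dequantization recovers w ≈ scale * (q - zero).
def dequantize(q: torch.Tensor, scale: float, zero: int) -> torch.Tensor:
    return scale * (q - zero)

scale = 0.1
q = torch.tensor([0, 1, 2, 3])

# Asymmetric quantization stores a per-group zero point. If packing
# resets a stored zero of 0 to 1, every weight in that group shifts
# by one full quantization step (scale * 1):
print(dequantize(q, scale, zero=0))  # intended:  [0.0, 0.1, 0.2, 0.3]
print(dequantize(q, scale, zero=1))  # corrupted: [-0.1, 0.0, 0.1, 0.2]

# Symmetric quantization typically fixes the zero point at
# 2 ** (bits - 1) (8 for 4-bit), which is never 0, so a stored zero
# cannot be clobbered by the packing bug.
```
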
@@ -353,6 +353,7 @@ def quantize(
         upload_to_model_id=upload_to_model_id,
         percdamp=percdamp,
         act_order=act_order,
+        sym=True,
     )
 
 
@@ -393,11 +393,15 @@ class GPTQWeightsLoader(WeightsLoader):
         )
 
     def _get_gptq_params(self, weights: Weights):
-        try:
+        if weights._has_tensor("gptq_bits") and weights._has_tensor("gptq_groupsize"):
             self.bits = weights.get_tensor("gptq_bits").item()
             self.groupsize = weights.get_tensor("gptq_groupsize").item()
             self.desc_act = False
-            self.sym = False
+            # `server quantize` used asymmetric quantization unconditionally
+            # before the `gptq_sym` setting tensor was added.
+            self.sym = (
+                weights.get_tensor("gptq_sym").item()
+                if weights._has_tensor("gptq_sym")
+                else False
+            )
             self.quant_method = "gptq"
-        except (SafetensorError, RuntimeError) as e:
-            pass
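
The backward-compatibility rule is easy to check in isolation; a minimal sketch using plain dicts in place of the real `Weights` object (`read_sym` is a hypothetical helper, not TGI code):

```python
import torch

def read_sym(tensors: dict) -> bool:
    # Checkpoints written before `gptq_sym` existed were always
    # asymmetric, so a missing tensor means sym=False.
    return bool(tensors["gptq_sym"].item()) if "gptq_sym" in tensors else False

old_ckpt = {"gptq_bits": torch.LongTensor([4])}                # pre-change output
new_ckpt = {**old_ckpt, "gptq_sym": torch.BoolTensor([True])}  # post-change output

assert read_sym(old_ckpt) is False
assert read_sym(new_ckpt) is True
```
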
@@ -871,6 +871,7 @@ def quantize(
     upload_to_model_id: Optional[str],
     percdamp: float,
     act_order: bool,
+    sym: bool,
 ):
     print("loading model")
     config = AutoConfig.from_pretrained(
@@ -946,6 +947,7 @@ def quantize(
         percdamp=percdamp,
         act_order=act_order,
         hooks=hooks,
+        sym=sym,
     )
     print(time.time() - tick)
 
@@ -957,6 +959,7 @@ def quantize(
     state_dict = {k: v.cpu().contiguous() for k, v in state_dict.items()}
     state_dict["gptq_bits"] = torch.LongTensor([bits])
     state_dict["gptq_groupsize"] = torch.LongTensor([groupsize])
+    state_dict["gptq_sym"] = torch.BoolTensor([sym])
 
     max_shard_size = "10GB"
     shards, index = shard_checkpoint(
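
The three config entries are ordinary tensors in the checkpoint, so they round-trip through safetensors like any weight; a minimal sketch with example values and a hypothetical file name:

```python
import torch
from safetensors.torch import save_file, load_file

config_tensors = {
    "gptq_bits": torch.LongTensor([4]),         # example values, not
    "gptq_groupsize": torch.LongTensor([128]),  # taken from the diff
    "gptq_sym": torch.BoolTensor([True]),
}
save_file(config_tensors, "gptq_config.safetensors")  # hypothetical path

loaded = load_file("gptq_config.safetensors")
print(loaded["gptq_sym"].item())  # True
```
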
@@ -146,6 +146,13 @@ class Weights:
         slice_ = f.get_slice(tensor_name)
         return slice_
 
+    def _has_tensor(self, tensor_name: str):
+        try:
+            self.get_filename(tensor_name)
+        except Exception:
+            return False
+        return True
+
     def get_shape(self, tensor_name: str):
         return self._get_slice(tensor_name).get_shape()
 
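
`_has_tensor` turns the old raise-and-catch probe into a boolean check: `get_filename` raises when no shard contains the tensor, and the helper maps that to `False` without loading any tensor data. A sketch of both styles, assuming a constructed `Weights` instance `weights`:

```python
from safetensors import SafetensorError

# Sketch only; `weights` is assumed to be a Weights instance backed by
# a quantized checkpoint.

# Before: probe for an optional tensor by catching the error raised
# when it is missing (the pattern removed from _get_gptq_params).
try:
    sym = bool(weights.get_tensor("gptq_sym").item())
except (SafetensorError, RuntimeError):
    sym = False

# After: a plain boolean check via the new helper.
sym = weights.get_tensor("gptq_sym").item() if weights._has_tensor("gptq_sym") else False
```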