LinearA8W8Quant is a wrapper class around the torch_npu.npu_quant_matmul API that performs the matrix multiplication of the A8W8 quantization operator.
torch_npu.contrib.module.LinearA8W8Quant(in_features, out_features, *, bias=True, offset=False, pertoken_scale=False, output_dtype=None)
x1 (call input): a device-side Tensor. Supported dtype: INT8. Supported data format: ND; the shape must have 2 to 6 dimensions.
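As a quick illustration of the shape constraint, the following minimal sketch feeds a 3-D batched x1 through the module; the batch sizes and the bias-free float16 configuration are illustrative assumptions, and the assumption that extra leading dimensions propagate to the output follows from the 2-6 dimension constraint above:

```python
import torch
import torch_npu
from torch_npu.contrib.module import LinearA8W8Quant

# x1 may have 2 to 6 dimensions; a 3-D int8 ND tensor is assumed here.
x1 = torch.randint(-1, 1, (8, 16, 512), dtype=torch.int8).npu()
weight = torch.randint(-1, 1, (128, 512), dtype=torch.int8).npu()
scale = torch.randn(1, dtype=torch.float32).npu()

model = LinearA8W8Quant(512, 128, bias=False, offset=False, output_dtype=torch.float16).npu()
model.weight.data = weight
model.scale.data = scale
output = model(x1)  # assumed shape (8, 16, 128), dtype float16
```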
Output: a single Tensor holding the result of the quantized matmul. The supported dtype combinations are listed below:
| x1 (call input) | weight (attribute) | scale (attribute) | offset (attribute) | bias (attribute) | pertoken_scale (attribute) | output_dtype (argument or attribute) |
| --- | --- | --- | --- | --- | --- | --- |
| int8 | int8 | int64/float32 | None | int32/None | None | float16 |
| int8 | int8 | int64/float32 | float32/None | int32/None | None | int8 |
| int8 | int8 | float32/bfloat16 | None | int32/bfloat16/float32/None | float32/None | bfloat16 |
| int8 | int8 | float32 | None | int32/float16/float32/None | float32/None | float16 |

Note: None denotes the case where the corresponding argument or attribute is passed as False.
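The two full examples below exercise the int8-output row. For contrast, here is a minimal sketch of the bfloat16-output combination from the table (float32 scale, int32 bias, no offset, no per-token scale); the tensor shapes are illustrative assumptions:

```python
import torch
import torch_npu
from torch_npu.contrib.module import LinearA8W8Quant

x1 = torch.randint(-1, 1, (1, 512), dtype=torch.int8).npu()
weight = torch.randint(-1, 1, (128, 512), dtype=torch.int8).npu()
scale = torch.randn(1, dtype=torch.float32).npu()
bias = torch.randint(-1, 1, (128,), dtype=torch.int32).npu()

model = LinearA8W8Quant(512, 128, bias=True, offset=False, output_dtype=torch.bfloat16).npu()
model.weight.data = weight
model.scale.data = scale
model.bias.data = bias
output = model(x1)  # assumed dtype torch.bfloat16
```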
```python
# int8 input, eager-mode invocation
import torch
import torch_npu
from torch_npu.contrib.module import LinearA8W8Quant

# Quantized inputs and quantization parameters.
x1 = torch.randint(-1, 1, (1, 512), dtype=torch.int8).npu()
x2 = torch.randint(-1, 1, (128, 512), dtype=torch.int8).npu()
scale = torch.randn(1, dtype=torch.float32).npu()
offset = torch.randn(128, dtype=torch.float32).npu()
bias = torch.randint(-1, 1, (128,), dtype=torch.int32).npu()

in_features = 512
out_features = 128
output_dtype = torch.int8

# Build the module and bind the quantization parameters to it.
model = LinearA8W8Quant(in_features, out_features, bias=True, offset=True, output_dtype=output_dtype)
model = model.npu()
model.weight.data = x2
model.scale.data = scale
model.offset.data = offset
model.bias.data = bias
output = model(x1)
```
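Here x1 has shape (1, 512) and the weight shape (128, 512), so output comes back with shape (1, 128) and, per the requested output_dtype, dtype torch.int8.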
```python
# Graph-mode invocation compiled through the torchair NPU backend
import logging
import os

import torch
import torch_npu
import torchair as tng
from torchair.configs.compiler_config import CompilerConfig
from torchair.core.utils import logger
from torch_npu.contrib.module import LinearA8W8Quant

logger.setLevel(logging.DEBUG)
os.environ["ENABLE_ACLNN"] = "true"

config = CompilerConfig()
npu_backend = tng.get_npu_backend(compiler_config=config)

x1 = torch.randint(-1, 1, (1, 512), dtype=torch.int8).npu()
x2 = torch.randint(-1, 1, (128, 512), dtype=torch.int8).npu()
scale = torch.randn(1, dtype=torch.float32).npu()
offset = torch.randn(128, dtype=torch.float32).npu()
bias = torch.randint(-1, 1, (128,), dtype=torch.int32).npu()

in_features = 512
out_features = 128
output_dtype = torch.int8

model = LinearA8W8Quant(in_features, out_features, bias=True, offset=True, output_dtype=output_dtype)
model = model.npu()
model.weight.data = x2
model.scale.data = scale
model.offset.data = offset
if output_dtype != torch.bfloat16:
    # Applies the npu_trans_quant_param transformation; on Atlas inference series
    # products this also enables the high-bandwidth x2 data layout.
    tng.experimental.inference.use_internal_format_weight(model)
model.bias.data = bias

model = torch.compile(model, backend=npu_backend, dynamic=False)
output = model(x1)
```
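As the in-code comment notes, use_internal_format_weight rewrites scale (via npu_trans_quant_param) and, on Atlas inference series products, the x2 data layout; the example therefore applies it only when output_dtype is not torch.bfloat16, consistent with the table above, where the bfloat16 row takes a float32/bfloat16 scale directly rather than an int64 one.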