Performs quantized matrix multiplication. Inputs with a minimum of 2 dimensions and a maximum of 6 dimensions are supported.
npu_quant_matmul(Tensor x1, Tensor x2, Tensor scale, *, Tensor? offset=None, Tensor? pertoken_scale=None, Tensor? bias=None, ScalarType? output_dtype=None) -> Tensor
A Tensor output that holds the result of the quantized matmul. If output_dtype is torch.float16, the output dtype is FLOAT16; if output_dtype is torch.bfloat16, the output dtype is BFLOAT16; if output_dtype is torch.int8 or None, the output dtype is INT8; any other output_dtype causes an error code to be returned.
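Before the full examples below, here is a minimal sketch of how output_dtype selects the result type (shapes and values are arbitrary, and an available NPU device is assumed):

```python
import torch
import torch_npu

x1 = torch.randint(-5, 5, (32, 64), dtype=torch.int8).npu()   # 2-D input (the minimum supported rank)
x2 = torch.randint(-5, 5, (64, 128), dtype=torch.int8).npu()
scale = torch.randn(128, dtype=torch.float32).npu()

# output_dtype=torch.float16 -> FLOAT16 result
out_fp16 = torch_npu.npu_quant_matmul(x1, x2, scale, output_dtype=torch.float16)

# output_dtype omitted (None) -> INT8 result
out_int8 = torch_npu.npu_quant_matmul(x1, x2, scale)
```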
1. Single-operator mode:

```python
# Atlas inference series products: the high-bandwidth x2 data layout cannot be enabled in
# single-operator mode; for peak performance, use graph mode instead.
import torch
import torch_npu

cpu_x1 = torch.randint(-5, 5, (1, 256, 768), dtype=torch.int8)
cpu_x2 = torch.randint(-5, 5, (31, 768, 16), dtype=torch.int8)
scale = torch.randn(16, dtype=torch.float32)
offset = torch.randn(16, dtype=torch.float32)
bias = torch.randint(-5, 5, (31, 1, 16), dtype=torch.int32)

# Method 1: call npu_quant_matmul directly
npu_out = torch_npu.npu_quant_matmul(cpu_x1.npu(), cpu_x2.npu(), scale.npu(), offset=offset.npu(), bias=bias.npu())

# Method 2: first call npu_trans_quant_param to convert scale and offset from float32 to int64
# when the output dtype is torch.int8 or torch.float16
scale_1 = torch_npu.npu_trans_quant_param(scale.npu(), offset.npu())
npu_out = torch_npu.npu_quant_matmul(cpu_x1.npu(), cpu_x2.npu(), scale_1, bias=bias.npu())
```

2. Graph mode:

2.1 General

```python
import torch
import torch_npu
import torchair as tng
from torchair.configs.compiler_config import CompilerConfig
import logging
from torchair.core.utils import logger
logger.setLevel(logging.DEBUG)
import os
os.environ["ENABLE_ACLNN"] = "true"

config = CompilerConfig()
npu_backend = tng.get_npu_backend(compiler_config=config)

class MyModel(torch.nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, x1, x2, scale, offset, bias):
        scale_1 = torch_npu.npu_trans_quant_param(scale, offset)
        return torch_npu.npu_quant_matmul(x1, x2, scale_1, offset=offset, bias=bias)

cpu_model = MyModel()
model = cpu_model.npu()
cpu_x1 = torch.randint(-1, 1, (15, 1, 512), dtype=torch.int8)
cpu_x2 = torch.randint(-1, 1, (15, 512, 128), dtype=torch.int8)
scale = torch.randn(1, dtype=torch.float32)
offset = torch.randn(1, dtype=torch.float32)
bias = torch.randint(-1, 1, (15, 1, 128), dtype=torch.int32)
model = torch.compile(cpu_model, backend=npu_backend, dynamic=True)
npu_out = model(cpu_x1.npu(), cpu_x2.npu(), scale.npu(), offset.npu(), bias.npu())
```

2.2 High-performance invocation on Atlas inference series products (not available on Atlas A2 training series products)

```python
import torch
import torch_npu
import torchair as tng
from torchair.configs.compiler_config import CompilerConfig
import logging
from torchair.core.utils import logger
logger.setLevel(logging.DEBUG)
import os
os.environ["ENABLE_ACLNN"] = "true"

config = CompilerConfig()
npu_backend = tng.get_npu_backend(compiler_config=config)

class MyModel(torch.nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, x1, x2, scale, offset, bias):
        return torch_npu.npu_quant_matmul(x1, x2.transpose(2, 1), scale, offset=offset, bias=bias)

cpu_model = MyModel()
model = cpu_model.npu()
cpu_x1 = torch.randint(-1, 1, (15, 1, 512), dtype=torch.int8).npu()
cpu_x2 = torch.randint(-1, 1, (15, 512, 128), dtype=torch.int8).npu()
# Cast x2 to the high-bandwidth format (29) offline to improve performance;
# make sure the input is contiguous with a (batch, n, k) layout
cpu_x2_t_29 = torch_npu.npu_format_cast(cpu_x2.transpose(2, 1).contiguous(), 29)
scale = torch.randn(1, dtype=torch.float32).npu()
offset = torch.randn(1, dtype=torch.float32).npu()
bias = torch.randint(-1, 1, (128,), dtype=torch.int32).npu()
# Convert scale from float32 to int64 offline to improve performance
scale_1 = torch_npu.npu_trans_quant_param(scale, offset)
model = torch.compile(cpu_model, backend=npu_backend, dynamic=False)
npu_out = model(cpu_x1, cpu_x2_t_29, scale_1, offset, bias)
```
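As a sanity check on the single-operator results, the quantized matmul can be emulated on the CPU. The sketch below assumes the common dequantization semantics out ≈ (x1 @ x2 + bias) * scale for a float16 output; this formula is an illustrative assumption, not the operator's documented definition, so expect only approximate agreement:

```python
import torch
import torch_npu

x1 = torch.randint(-5, 5, (1, 256, 768), dtype=torch.int8)
x2 = torch.randint(-5, 5, (1, 768, 16), dtype=torch.int8)
scale = torch.randn(16, dtype=torch.float32)
bias = torch.randint(-5, 5, (1, 1, 16), dtype=torch.int32)

npu_out = torch_npu.npu_quant_matmul(
    x1.npu(), x2.npu(), scale.npu(), bias=bias.npu(),
    output_dtype=torch.float16)

# CPU reference under the assumed semantics: accumulate in int32, then dequantize
ref = (x1.to(torch.int32) @ x2.to(torch.int32) + bias).to(torch.float32) * scale
print(torch.allclose(npu_out.cpu().to(torch.float32), ref, atol=1e-2, rtol=1e-2))
```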