Each operator has a two-stage interface: first call the "aclnnXxxGetWorkspaceSize" interface, which takes the input arguments and, based on the computation flow, determines the required workspace size; then call the "aclnnXxx" interface to execute the computation. The two-stage interfaces are as follows:
aclnnStatus aclnnBatchNormBackwardGetWorkspaceSize(const aclTensor *gradOut, const aclTensor *input, const aclTensor *weight, const aclTensor *runningMean, const aclTensor *runningVar, const aclTensor *saveMean, const aclTensor *saveInvstd, bool training, double eps, const aclBoolArray *outputMask, aclTensor *gradInput, aclTensor *gradWeight, aclTensor *gradBias, uint64_t *workspaceSize, aclOpExecutor **executor)
Returns an aclnnStatus status code; see the aclnn return codes for details.
The first-stage interface also validates the input arguments and returns an error code when validation fails; see the aclnn return codes for the specific error scenarios.
aclnnStatus aclnnBatchNormBackward(void *workspace, uint64_t workspaceSize, aclOpExecutor *executor, const aclrtStream stream)
Returns an aclnnStatus status code; see the aclnn return codes for details.
Constraints and limitations: none.
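Before the complete sample below, here is a minimal sketch of the two-stage calling pattern. It is illustrative only: error handling is omitted, and it assumes the input/output aclTensor objects (gradOut, input, ..., gradBias), the outputMask aclBoolArray, and the stream have already been created as in the full example.

// Minimal sketch of the two-stage call (tensors, outputMask and stream assumed to exist already)
uint64_t workspaceSize = 0;
aclOpExecutor* executor = nullptr;
// Stage 1: validate the arguments and query the required workspace size
aclnnStatus ret = aclnnBatchNormBackwardGetWorkspaceSize(
    gradOut, input, weight, runningMean, runningVar, saveMean, saveInvstd,
    /*training=*/true, /*eps=*/1e-5, outputMask,
    gradInput, gradWeight, gradBias, &workspaceSize, &executor);
// Allocate device memory for the workspace, if one is required
void* workspaceAddr = nullptr;
if (workspaceSize > 0) {
  aclrtMalloc(&workspaceAddr, workspaceSize, ACL_MEM_MALLOC_HUGE_FIRST);
}
// Stage 2: launch the computation on the stream
ret = aclnnBatchNormBackward(workspaceAddr, workspaceSize, executor, stream);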
#include <array>
#include <iostream>
#include <vector>
#include "acl/acl.h"
#include "aclnnop/level2/aclnn_batch_norm_backward.h"

#define CHECK_RET(cond, return_expr) \
  do {                               \
    if (!(cond)) {                   \
      return_expr;                   \
    }                                \
  } while (0)

#define LOG_PRINT(message, ...)     \
  do {                              \
    printf(message, ##__VA_ARGS__); \
  } while (0)

int64_t GetShapeSize(const std::vector<int64_t>& shape) {
  int64_t shape_size = 1;
  for (auto i : shape) {
    shape_size *= i;
  }
  return shape_size;
}

int Init(int32_t deviceId, aclrtContext* context, aclrtStream* stream) {
  // Fixed boilerplate: initialize ACL before any other runtime call
  auto ret = aclInit(nullptr);
  CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclInit failed. ERROR: %d\n", ret); return ret);
  ret = aclrtSetDevice(deviceId);
  CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclrtSetDevice failed. ERROR: %d\n", ret); return ret);
  ret = aclrtCreateContext(context, deviceId);
  CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclrtCreateContext failed. ERROR: %d\n", ret); return ret);
  ret = aclrtSetCurrentContext(*context);
  CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclrtSetCurrentContext failed. ERROR: %d\n", ret); return ret);
  ret = aclrtCreateStream(stream);
  CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclrtCreateStream failed. ERROR: %d\n", ret); return ret);
  return 0;
}

template <typename T>
int CreateAclTensor(const std::vector<T>& hostData, const std::vector<int64_t>& shape, void** deviceAddr,
                    aclDataType dataType, aclTensor** tensor) {
  auto size = GetShapeSize(shape) * sizeof(T);
  // Allocate device memory with aclrtMalloc
  auto ret = aclrtMalloc(deviceAddr, size, ACL_MEM_MALLOC_HUGE_FIRST);
  CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclrtMalloc failed. ERROR: %d\n", ret); return ret);
  // Copy the host data into the device memory with aclrtMemcpy
  ret = aclrtMemcpy(*deviceAddr, size, hostData.data(), size, ACL_MEMCPY_HOST_TO_DEVICE);
  CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclrtMemcpy failed. ERROR: %d\n", ret); return ret);
  // Compute the strides of a contiguous tensor
  std::vector<int64_t> strides(shape.size(), 1);
  for (int64_t i = static_cast<int64_t>(shape.size()) - 2; i >= 0; i--) {
    strides[i] = shape[i + 1] * strides[i + 1];
  }
  // Create the aclTensor with aclCreateTensor
  *tensor = aclCreateTensor(shape.data(), shape.size(), dataType, strides.data(), 0, aclFormat::ACL_FORMAT_ND,
                            shape.data(), shape.size(), *deviceAddr);
  return 0;
}

int main() {
  // 1. (Fixed boilerplate) Initialize device/context/stream; see the AscendCL API reference.
  //    Set deviceId according to the device actually in use.
  int32_t deviceId = 0;
  aclrtContext context;
  aclrtStream stream;
  auto ret = Init(deviceId, &context, &stream);
  // Adapt the error handling to your own needs
  CHECK_RET(ret == 0, LOG_PRINT("Init acl failed. ERROR: %d\n", ret); return ret);

  // 2. Construct the inputs and outputs; adapt them to the signature of the API being called
  std::vector<int64_t> inputShape = {2, 3, 2};
  std::vector<int64_t> meanShape = {3};
  void* gradOutDeviceAddr = nullptr;
  void* inputDeviceAddr = nullptr;
  void* weightDeviceAddr = nullptr;
  void* runningMeanDeviceAddr = nullptr;
  void* runningVarDeviceAddr = nullptr;
  void* saveMeanDeviceAddr = nullptr;
  void* saveInvstdDeviceAddr = nullptr;
  void* gradInputDeviceAddr = nullptr;
  void* gradWeightDeviceAddr = nullptr;
  void* gradBiasDeviceAddr = nullptr;
  aclTensor* gradOut = nullptr;
  aclTensor* input = nullptr;
  aclTensor* weight = nullptr;
  aclTensor* runningMean = nullptr;
  aclTensor* runningVar = nullptr;
  aclTensor* saveMean = nullptr;
  aclTensor* saveInvstd = nullptr;
  aclTensor* gradInput = nullptr;
  aclTensor* gradWeight = nullptr;
  aclTensor* gradBias = nullptr;
  std::vector<float> inputHostData = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
  std::vector<float> zeroHostData = {0, 0, 0};
  std::vector<float> oneHostData = {1, 1, 1};
  // Create the gradOut aclTensor
  ret = CreateAclTensor(inputHostData, inputShape, &gradOutDeviceAddr, aclDataType::ACL_FLOAT, &gradOut);
  CHECK_RET(ret == ACL_SUCCESS, return ret);
  // Create the input aclTensor
  ret = CreateAclTensor(inputHostData, inputShape, &inputDeviceAddr, aclDataType::ACL_FLOAT, &input);
  CHECK_RET(ret == ACL_SUCCESS, return ret);
  // Create the weight aclTensor
  ret = CreateAclTensor(oneHostData, meanShape, &weightDeviceAddr, aclDataType::ACL_FLOAT, &weight);
  CHECK_RET(ret == ACL_SUCCESS, return ret);
  // Create the runningMean aclTensor
  ret = CreateAclTensor(zeroHostData, meanShape, &runningMeanDeviceAddr, aclDataType::ACL_FLOAT, &runningMean);
  CHECK_RET(ret == ACL_SUCCESS, return ret);
  // Create the runningVar aclTensor
  ret = CreateAclTensor(oneHostData, meanShape, &runningVarDeviceAddr, aclDataType::ACL_FLOAT, &runningVar);
  CHECK_RET(ret == ACL_SUCCESS, return ret);
  // Create the saveMean aclTensor
  ret = CreateAclTensor(zeroHostData, meanShape, &saveMeanDeviceAddr, aclDataType::ACL_FLOAT, &saveMean);
  CHECK_RET(ret == ACL_SUCCESS, return ret);
  // Create the saveInvstd aclTensor
  ret = CreateAclTensor(zeroHostData, meanShape, &saveInvstdDeviceAddr, aclDataType::ACL_FLOAT, &saveInvstd);
  CHECK_RET(ret == ACL_SUCCESS, return ret);
  // Create the gradInput aclTensor
  ret = CreateAclTensor(inputHostData, inputShape, &gradInputDeviceAddr, aclDataType::ACL_FLOAT, &gradInput);
  CHECK_RET(ret == ACL_SUCCESS, return ret);
  // Create the gradWeight aclTensor
  ret = CreateAclTensor(zeroHostData, meanShape, &gradWeightDeviceAddr, aclDataType::ACL_FLOAT, &gradWeight);
  CHECK_RET(ret == ACL_SUCCESS, return ret);
  // Create the gradBias aclTensor
  ret = CreateAclTensor(zeroHostData, meanShape, &gradBiasDeviceAddr, aclDataType::ACL_FLOAT, &gradBias);
  CHECK_RET(ret == ACL_SUCCESS, return ret);
  // outputMask selects which gradients are computed: gradInput, gradWeight, gradBias
  std::array<bool, 3> value = {true, true, true};
  auto outputMask = aclCreateBoolArray(value.data(), value.size());

  // 3. Call the CANN operator library API (replace with the specific host API as needed)
  uint64_t workspaceSize = 0;
  aclOpExecutor* executor;
  // Call the first-stage interface of aclnnBatchNormBackward
  ret = aclnnBatchNormBackwardGetWorkspaceSize(gradOut, input, weight, runningMean, runningVar, saveMean, saveInvstd,
                                               true, 1e-5, outputMask, gradInput, gradWeight, gradBias,
                                               &workspaceSize, &executor);
  CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclnnBatchNormBackwardGetWorkspaceSize failed. ERROR: %d\n", ret);
            return ret);
  // Allocate device memory for the workspace size reported by the first-stage interface
  void* workspaceAddr = nullptr;
  if (workspaceSize > 0) {
    ret = aclrtMalloc(&workspaceAddr, workspaceSize, ACL_MEM_MALLOC_HUGE_FIRST);
    CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("allocate workspace failed. ERROR: %d\n", ret); return ret);
  }
  // Call the second-stage interface of aclnnBatchNormBackward
  ret = aclnnBatchNormBackward(workspaceAddr, workspaceSize, executor, stream);
  CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclnnBatchNormBackward failed. ERROR: %d\n", ret); return ret);

  // 4. (Fixed boilerplate) Synchronize and wait for the task to finish
  ret = aclrtSynchronizeStream(stream);
  CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("aclrtSynchronizeStream failed. ERROR: %d\n", ret); return ret);

  // 5. Retrieve the output: copy the result from device memory back to the host (adapt to the specific API)
  auto size = GetShapeSize(inputShape);
  std::vector<float> resultData(size, 0);
  ret = aclrtMemcpy(resultData.data(), resultData.size() * sizeof(resultData[0]), gradInputDeviceAddr,
                    size * sizeof(float), ACL_MEMCPY_DEVICE_TO_HOST);
  CHECK_RET(ret == ACL_SUCCESS, LOG_PRINT("copy result from device to host failed. ERROR: %d\n", ret); return ret);
  for (int64_t i = 0; i < size; i++) {
    LOG_PRINT("result[%ld] is: %f\n", i, resultData[i]);
  }

  // 6. Release the aclTensor and aclBoolArray objects (adapt to the specific API)
  aclDestroyTensor(gradOut);
  aclDestroyTensor(input);
  aclDestroyTensor(weight);
  aclDestroyTensor(runningMean);
  aclDestroyTensor(runningVar);
  aclDestroyTensor(saveMean);
  aclDestroyTensor(saveInvstd);
  aclDestroyTensor(gradInput);
  aclDestroyTensor(gradWeight);
  aclDestroyTensor(gradBias);
  aclDestroyBoolArray(outputMask);
  return 0;
}
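A note on what the sample leaves out: for brevity it does not free the device buffers (aclrtFree), destroy the stream and context, reset the device, or call aclFinalize; a long-running application should release these resources once the results have been consumed. Building the sample requires the CANN headers and the AscendCL and operator API libraries from the local installation; the exact include paths and library names depend on the installed toolkit version, so consult the environment's CANN documentation for the compile and link options.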