写点什么

AscendC 从入门到精通系列(二)基于 Kernel 直调开发 AscendC 算子

作者:zjun
  • 2024-12-18
    上海
  • 本文字数:5824 字

    阅读完需:约 19 分钟

AscendC从入门到精通系列(二)基于Kernel直调开发AscendC算子

本次主要讨论下 AscendC 算子的开发流程,基于 Kernel 直调工程的算子开发。

1 AscendC 算子开发的基本流程

使用 Ascend C 完成 Add 算子核函数开发;使用 ICPU_RUN_KF CPU 调测宏完成算子核函数 CPU 侧运行验证;使用<<<>>>内核调用符完成算子核函数 NPU 侧运行验证。在正式的开发之前,还需要先完成环境准备和算子分析工作,开发 Ascend C 算子的基本流程如下图所示:


2 核函数开发

本次以 add_custom.cpp 作为参考用例。Gitee 也有对应工程和完整代码。operator/AddCustomSample/KernelLaunch/AddKernelInvocationNeo · Ascend/samples - 码云 - 开源中国 (gitee.com)

2.1 核函数定义

首先要根据核函数定义 核函数-编程模型-Ascend C 算子开发-算子开发-开发指南-CANN 社区版 8.0.RC3.alpha003 开发文档-昇腾社区 (hiascend.com) 的规则进行核函数的定义,并在核函数中调用算子类的 Init 和 Process 函数。


// 给CPU调用extern "C" __global__ __aicore__ void add_custom(GM_ADDR x, GM_ADDR y, GM_ADDR z){    KernelAdd op;    op.Init(x, y, z);    op.Process();}

// 给NPU调用#ifndef ASCENDC_CPU_DEBUGvoid add_custom_do(uint32_t blockDim, void *stream, uint8_t *x, uint8_t *y, uint8_t *z){ add_custom<<<blockDim, nullptr, stream>>>(x, y, z);}#endif
复制代码

2.2 算子类定义

根据矢量编程范式实现算子类,本样例中定义 KernelAdd 算子类,其具体成员如下:


// Vector-add operator written in the AscendC vector programming paradigm:
// a three-stage CopyIn / Compute / CopyOut pipeline driven by Process().
class KernelAdd {
public:
    __aicore__ inline KernelAdd() {}
    // Initialization: bind Global Memory addresses and allocate queue buffers.
    __aicore__ inline void Init(GM_ADDR x, GM_ADDR y, GM_ADDR z) {}
    // Core driver: implements the operator logic by calling the private
    // CopyIn / Compute / CopyOut stages to form the three-level pipeline.
    __aicore__ inline void Process() {}

private:
    // Stage 1 (CopyIn): move one input tile from Global to Local Memory; called by Process().
    __aicore__ inline void CopyIn(int32_t progress) {}
    // Stage 2 (Compute): run the vector computation on the tile; called by Process().
    __aicore__ inline void Compute(int32_t progress) {}
    // Stage 3 (CopyOut): move the result tile back to Global Memory; called by Process().
    __aicore__ inline void CopyOut(int32_t progress) {}

private:
    AscendC::TPipe pipe;  // Pipe memory-management object
    // Input queues, QuePosition VECIN.
    AscendC::TQue<AscendC::QuePosition::VECIN, BUFFER_NUM> inQueueX, inQueueY;
    // Output queue, QuePosition VECOUT.
    AscendC::TQue<AscendC::QuePosition::VECOUT, BUFFER_NUM> outQueueZ;
    // Global Memory views: xGm/yGm are the inputs, zGm is the output.
    AscendC::GlobalTensor<half> xGm;
    AscendC::GlobalTensor<half> yGm;
    AscendC::GlobalTensor<half> zGm;
};
复制代码


核函数调用关系图


2.3 实现 Init、CopyIn、Compute、CopyOut 这 4 个关键函数

Init 函数初始化输入资源


__aicore__ inline void Init(GM_ADDR x, GM_ADDR y, GM_ADDR z)
{
    // Each core works on its own BLOCK_LENGTH-sized slice, offset by its block index.
    xGm.SetGlobalBuffer((__gm__ half *)x + BLOCK_LENGTH * AscendC::GetBlockIdx(), BLOCK_LENGTH);
    yGm.SetGlobalBuffer((__gm__ half *)y + BLOCK_LENGTH * AscendC::GetBlockIdx(), BLOCK_LENGTH);
    zGm.SetGlobalBuffer((__gm__ half *)z + BLOCK_LENGTH * AscendC::GetBlockIdx(), BLOCK_LENGTH);
    // One TILE_LENGTH * sizeof(half) buffer per queue slot, BUFFER_NUM slots each.
    pipe.InitBuffer(inQueueX, BUFFER_NUM, TILE_LENGTH * sizeof(half));
    pipe.InitBuffer(inQueueY, BUFFER_NUM, TILE_LENGTH * sizeof(half));
    pipe.InitBuffer(outQueueZ, BUFFER_NUM, TILE_LENGTH * sizeof(half));
}

// Process() drives the three pipeline stages as follows:
__aicore__ inline void Process()
{
    // loop count needs to be doubled, due to double buffering
    constexpr int32_t loopCount = TILE_NUM * BUFFER_NUM;
    // tiling strategy, pipeline parallel
    for (int32_t idx = 0; idx < loopCount; idx++) {
        CopyIn(idx);
        Compute(idx);
        CopyOut(idx);
    }
}
复制代码


CopyIn 函数实现。1、使用 DataCopy 接口将 GlobalTensor 数据拷贝到 LocalTensor。2、使用 EnQue 将 LocalTensor 放入 VecIn 的 Queue 中。


__aicore__ inline void CopyIn(int32_t progress)
{
    // allocate tensors from queue memory
    AscendC::LocalTensor<half> xLocal = inQueueX.AllocTensor<half>();
    AscendC::LocalTensor<half> yLocal = inQueueY.AllocTensor<half>();
    // copy the progress-th tile from the global tensors into the local tensors
    AscendC::DataCopy(xLocal, xGm[progress * TILE_LENGTH], TILE_LENGTH);
    AscendC::DataCopy(yLocal, yGm[progress * TILE_LENGTH], TILE_LENGTH);
    // publish the input tensors to the VECIN queues for Compute()
    inQueueX.EnQue(xLocal);
    inQueueY.EnQue(yLocal);
}
复制代码


Compute 函数实现。1、使用 DeQue 从 VecIn 中取出 LocalTensor。2、使用 Ascend C 接口 Add 完成矢量计算。3、使用 EnQue 将计算结果 LocalTensor 放入到 VecOut 的 Queue 中。4、使用 FreeTensor 释放不再使用的 LocalTensor。


__aicore__ inline void Compute(int32_t progress)
{
    // dequeue the input tensors from the VECIN queues
    AscendC::LocalTensor<half> xLocal = inQueueX.DeQue<half>();
    AscendC::LocalTensor<half> yLocal = inQueueY.DeQue<half>();
    AscendC::LocalTensor<half> zLocal = outQueueZ.AllocTensor<half>();
    // invoke the vector Add instruction for the computation
    AscendC::Add(zLocal, xLocal, yLocal, TILE_LENGTH);
    // enqueue the result tensor to the VECOUT queue
    outQueueZ.EnQue<half>(zLocal);
    // free the input tensors so their buffers can be reused
    inQueueX.FreeTensor(xLocal);
    inQueueY.FreeTensor(yLocal);
}
复制代码


CopyOut 函数实现。1、使用 DeQue 接口从 VecOut 的 Queue 中取出 LocalTensor。2、使用 DataCopy 接口将 LocalTensor 拷贝到 GlobalTensor 上。3、使用 FreeTensor 将不再使用的 LocalTensor 进行回收。


 __aicore__ inline void CopyOut(int32_t progress){    // deque output tensor from VECOUT queue    AscendC::LocalTensor<half> zLocal = outQueueZ.DeQue<half>();    // copy progress_th tile from local tensor to global tensor    AscendC::DataCopy(zGm[progress * TILE_LENGTH], zLocal, TILE_LENGTH);    // free output tensor for reuse    outQueueZ.FreeTensor(zLocal);}
复制代码

3 核函数的运行验证

异构计算架构中,NPU(kernel 侧)与 CPU(host 侧)是协同工作的,完成了 kernel 侧核函数开发后,即可编写 host 侧的核函数调用程序,实现从 host 侧的 APP 程序调用算子,执行计算过程。

3.1 编写 CPU 侧调用程序


 // 使用GmAlloc分配共享内存,并进行数据初始化    uint8_t* x = (uint8_t*)AscendC::GmAlloc(inputByteSize);    uint8_t* y = (uint8_t*)AscendC::GmAlloc(inputByteSize);    uint8_t* z = (uint8_t*)AscendC::GmAlloc(outputByteSize);
ReadFile("./input/input_x.bin", inputByteSize, x, inputByteSize); ReadFile("./input/input_y.bin", inputByteSize, y, inputByteSize); // 调用ICPU_RUN_KF调测宏,完成核函数CPU侧的调用 AscendC::SetKernelMode(KernelMode::AIV_MODE); ICPU_RUN_KF(add_custom, blockDim, x, y, z); // use this macro for cpu debug // 输出数据写出 WriteFile("./output/output_z.bin", z, outputByteSize); // 调用GmFree释放申请的资源 AscendC::GmFree((void *)x); AscendC::GmFree((void *)y); AscendC::GmFree((void *)z);
复制代码

3.2 编写 NPU 侧运行算子的调用程序


  // AscendCL初始化    CHECK_ACL(aclInit(nullptr));    // 运行管理资源申请    int32_t deviceId = 0;    CHECK_ACL(aclrtSetDevice(deviceId));    aclrtStream stream = nullptr;    CHECK_ACL(aclrtCreateStream(&stream));    // 分配Host内存    uint8_t *xHost, *yHost, *zHost;    uint8_t *xDevice, *yDevice, *zDevice;    CHECK_ACL(aclrtMallocHost((void**)(&xHost), inputByteSize));    CHECK_ACL(aclrtMallocHost((void**)(&yHost), inputByteSize));    CHECK_ACL(aclrtMallocHost((void**)(&zHost), outputByteSize));    // 分配Device内存    CHECK_ACL(aclrtMalloc((void**)&xDevice, inputByteSize, ACL_MEM_MALLOC_HUGE_FIRST));    CHECK_ACL(aclrtMalloc((void**)&yDevice, inputByteSize, ACL_MEM_MALLOC_HUGE_FIRST));    CHECK_ACL(aclrtMalloc((void**)&zDevice, outputByteSize, ACL_MEM_MALLOC_HUGE_FIRST));    // Host内存初始化    ReadFile("./input/input_x.bin", inputByteSize, xHost, inputByteSize);    ReadFile("./input/input_y.bin", inputByteSize, yHost, inputByteSize);    CHECK_ACL(aclrtMemcpy(xDevice, inputByteSize, xHost, inputByteSize, ACL_MEMCPY_HOST_TO_DEVICE));    CHECK_ACL(aclrtMemcpy(yDevice, inputByteSize, yHost, inputByteSize, ACL_MEMCPY_HOST_TO_DEVICE));    // 用内核调用符<<<>>>调用核函数完成指定的运算,add_custom_do中封装了<<<>>>调用    add_custom_do(blockDim, nullptr, stream, xDevice, yDevice, zDevice);    CHECK_ACL(aclrtSynchronizeStream(stream));    // 将Device上的运算结果拷贝回Host    CHECK_ACL(aclrtMemcpy(zHost, outputByteSize, zDevice, outputByteSize, ACL_MEMCPY_DEVICE_TO_HOST));    WriteFile("./output/output_z.bin", zHost, outputByteSize);    // 释放申请的资源    CHECK_ACL(aclrtFree(xDevice));    CHECK_ACL(aclrtFree(yDevice));    CHECK_ACL(aclrtFree(zDevice));    CHECK_ACL(aclrtFreeHost(xHost));    CHECK_ACL(aclrtFreeHost(yHost));    CHECK_ACL(aclrtFreeHost(zHost));    // AscendCL去初始化    CHECK_ACL(aclrtDestroyStream(stream));    CHECK_ACL(aclrtResetDevice(deviceId));    CHECK_ACL(aclFinalize());
复制代码

3.3 完整 main.cpp

/**
 * @file main.cpp
 *
 * Copyright (C) 2024. Huawei Technologies Co., Ltd. All rights reserved.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 */
#include "data_utils.h"
#ifndef ASCENDC_CPU_DEBUG
#include "acl/acl.h"
extern void add_custom_do(uint32_t blockDim, void *stream, uint8_t *x, uint8_t *y, uint8_t *z);
#else
#include "tikicpulib.h"
extern "C" __global__ __aicore__ void add_custom(GM_ADDR x, GM_ADDR y, GM_ADDR z);
#endif

// Host-side entry point: runs the add_custom kernel either on the CPU
// (ICPU_RUN_KF debug path) or on the NPU (AscendCL path), depending on
// whether ASCENDC_CPU_DEBUG is defined at build time.
int32_t main(int32_t argc, char *argv[])
{
    uint32_t blockDim = 8;
    // 8 blocks x 2048 half-precision elements per tensor.
    size_t inputByteSize = 8 * 2048 * sizeof(uint16_t);
    size_t outputByteSize = 8 * 2048 * sizeof(uint16_t);

#ifdef ASCENDC_CPU_DEBUG
    // CPU debug path: shared memory allocated with GmAlloc.
    uint8_t *x = (uint8_t *)AscendC::GmAlloc(inputByteSize);
    uint8_t *y = (uint8_t *)AscendC::GmAlloc(inputByteSize);
    uint8_t *z = (uint8_t *)AscendC::GmAlloc(outputByteSize);

    ReadFile("./input/input_x.bin", inputByteSize, x, inputByteSize);
    ReadFile("./input/input_y.bin", inputByteSize, y, inputByteSize);

    AscendC::SetKernelMode(KernelMode::AIV_MODE);
    ICPU_RUN_KF(add_custom, blockDim, x, y, z); // use this macro for cpu debug

    WriteFile("./output/output_z.bin", z, outputByteSize);

    AscendC::GmFree((void *)x);
    AscendC::GmFree((void *)y);
    AscendC::GmFree((void *)z);
#else
    // NPU path: initialize AscendCL and acquire device + stream.
    CHECK_ACL(aclInit(nullptr));
    int32_t deviceId = 0;
    CHECK_ACL(aclrtSetDevice(deviceId));
    aclrtStream stream = nullptr;
    CHECK_ACL(aclrtCreateStream(&stream));

    uint8_t *xHost, *yHost, *zHost;
    uint8_t *xDevice, *yDevice, *zDevice;

    // Host and device buffers for the two inputs and the output.
    CHECK_ACL(aclrtMallocHost((void **)(&xHost), inputByteSize));
    CHECK_ACL(aclrtMallocHost((void **)(&yHost), inputByteSize));
    CHECK_ACL(aclrtMallocHost((void **)(&zHost), outputByteSize));
    CHECK_ACL(aclrtMalloc((void **)&xDevice, inputByteSize, ACL_MEM_MALLOC_HUGE_FIRST));
    CHECK_ACL(aclrtMalloc((void **)&yDevice, inputByteSize, ACL_MEM_MALLOC_HUGE_FIRST));
    CHECK_ACL(aclrtMalloc((void **)&zDevice, outputByteSize, ACL_MEM_MALLOC_HUGE_FIRST));

    // Load the inputs on the host and stage them to the device.
    ReadFile("./input/input_x.bin", inputByteSize, xHost, inputByteSize);
    ReadFile("./input/input_y.bin", inputByteSize, yHost, inputByteSize);

    CHECK_ACL(aclrtMemcpy(xDevice, inputByteSize, xHost, inputByteSize, ACL_MEMCPY_HOST_TO_DEVICE));
    CHECK_ACL(aclrtMemcpy(yDevice, inputByteSize, yHost, inputByteSize, ACL_MEMCPY_HOST_TO_DEVICE));

    // Launch the kernel (add_custom_do wraps the <<<>>> call) and wait.
    add_custom_do(blockDim, stream, xDevice, yDevice, zDevice);
    CHECK_ACL(aclrtSynchronizeStream(stream));

    // Copy the result back and persist it.
    CHECK_ACL(aclrtMemcpy(zHost, outputByteSize, zDevice, outputByteSize, ACL_MEMCPY_DEVICE_TO_HOST));
    WriteFile("./output/output_z.bin", zHost, outputByteSize);

    // Release all device/host resources.
    CHECK_ACL(aclrtFree(xDevice));
    CHECK_ACL(aclrtFree(yDevice));
    CHECK_ACL(aclrtFree(zDevice));
    CHECK_ACL(aclrtFreeHost(xHost));
    CHECK_ACL(aclrtFreeHost(yHost));
    CHECK_ACL(aclrtFreeHost(zHost));

    // De-initialize AscendCL.
    CHECK_ACL(aclrtDestroyStream(stream));
    CHECK_ACL(aclrtResetDevice(deviceId));
    CHECK_ACL(aclFinalize());
#endif
    return 0;
}
复制代码


整体运行起来,请参考operator/AddCustomSample/KernelLaunch/AddKernelInvocationNeo · Ascend/samples - 码云 - 开源中国 (gitee.com)

用户头像

zjun

关注

还未添加个人签名 2020-03-06 加入

还未添加个人简介

评论

发布
暂无评论
AscendC从入门到精通系列(二)基于Kernel直调开发AscendC算子_算子_zjun_InfoQ写作社区