
Calling Examples

Example 1: Synchronous Call

This example shows the implementation code of an inference Engine in the single-model case.

If you need to adapt this example to the multi-model case, note the following points (a combined sketch is given after this list):

  • When declaring the preOutBuffer variable, do not use the static keyword. Define it as follows:
    bool preOutBuffer = false;
  • Before calling the Process function, call the AddPara function to set the name of each model:
    ai_context.AddPara("model_name", modelName); // With multiple models, the model name must be set for each one
    ret = ai_model_manager_->Process(ai_context,
            inDataVec, outDataVec_, 0);

The sample code is as follows:

// Implementation of the inference Engine's Process function
HIAI_IMPL_ENGINE_PROCESS("ClassifyNetEngine", ClassifyNetEngine, CLASSIFYNET_ENGINE_INPUT_SIZE)
{
    HIAI_ENGINE_LOG(this, HIAI_OK, "ClassifyNetEngine Process");
    HIAI_StatusT ret = HIAI_OK;
    static bool preOutBuffer = false;
    std::vector<std::shared_ptr<hiai::IAITensor>> inDataVec;

    // Obtain the data passed in from the previous Engine
    std::shared_ptr<EngineTransNewT> input_arg =
        std::static_pointer_cast<EngineTransNewT>(arg0);
    // If the input data is a null pointer, return directly
    if (nullptr == input_arg)
    {
        HIAI_ENGINE_LOG(this, HIAI_INVALID_INPUT_MSG,
            "fail to process invalid message");
        return HIAI_INVALID_INPUT_MSG;
    }

    // Prepare the output data. Allocate its memory with the HIAI_DMalloc interface and hand it to model
    // inference via CreateTensor. When the inference Process is called synchronously, the output memory
    // only needs to be allocated once.
    if (preOutBuffer == false) {
        std::vector<hiai::TensorDimension> inputTensorVec;
        std::vector<hiai::TensorDimension> outputTensorVec;
        ret = ai_model_manager_->GetModelIOTensorDim(modelName, inputTensorVec, outputTensorVec);
        if (ret != hiai::SUCCESS)
        {
            HIAI_ENGINE_LOG(this, HIAI_AI_MODEL_MANAGER_INIT_FAIL,
                "hiai ai model manager init fail");
            return HIAI_AI_MODEL_MANAGER_INIT_FAIL;
        }
        // allocate OutData in advance
        HIAI_StatusT hiai_ret = HIAI_OK;
        for (uint32_t index = 0; index < outputTensorVec.size(); index++) {
            hiai::AITensorDescription outputTensorDesc = hiai::AINeuralNetworkBuffer::GetDescription();
            uint8_t* buffer = nullptr;
            // Memory allocated by HIAI_DMalloc is used for model inference and must be released with
            // HIAI_DFree, which is done when the Engine is destructed
            hiai_ret = hiai::HIAIMemory::HIAI_DMalloc(outputTensorVec[index].size, (void*&)buffer, 1000);
            if (hiai_ret != HIAI_OK || buffer == nullptr) {
                std::cout << "HIAI_DMalloc failed" << std::endl;
                continue;
            }
            outData_.push_back(buffer);
            std::shared_ptr<hiai::IAITensor> outputTensor =
                hiai::AITensorFactory::GetInstance()->CreateTensor(outputTensorDesc, buffer, outputTensorVec[index].size);
            outDataVec_.push_back(outputTensor);
        }
        preOutBuffer = true;
    }

    // Pass the buffer to the framework directly; there is only one input
    hiai::AITensorDescription inputTensorDesc =
        hiai::AINeuralNetworkBuffer::GetDescription();
    std::shared_ptr<hiai::IAITensor> inputTensor =
        hiai::AITensorFactory::GetInstance()->CreateTensor(inputTensorDesc,
        input_arg->trans_buff.get(), input_arg->buffer_size);
    // Fill in the input data for AIModelManager
    inDataVec.push_back(inputTensor);

    hiai::AIContext ai_context;
    // Process work
    ret = ai_model_manager_->Process(ai_context,
        inDataVec, outDataVec_, 0);
    if (hiai::SUCCESS != ret)
    {
        HIAI_ENGINE_LOG(this, HIAI_AI_MODEL_MANAGER_PROCESS_FAIL,
            "Fail to process ai model manager");
        return HIAI_AI_MODEL_MANAGER_PROCESS_FAIL;
    }

    // Convert the generated data into string-type buffers and send them to the next Engine
    for (uint32_t index = 0; index < outDataVec_.size(); index++)
    {
        HIAI_ENGINE_LOG(this, HIAI_OK, "ClassifyNetEngine SendData");
        std::shared_ptr<hiai::AINeuralNetworkBuffer> output_data = std::static_pointer_cast<hiai::AINeuralNetworkBuffer>(outDataVec_[index]);
        std::shared_ptr<std::string> output_string_ptr =
            std::shared_ptr<std::string>(new std::string((char*)output_data->GetBuffer(), output_data->GetSize()));
        hiai::Engine::SendData(0, "string",
            std::static_pointer_cast<void>(output_string_ptr));
    }
    inDataVec.clear();
    return HIAI_OK;
}
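Each output tensor is sent on port 0 as a std::string buffer, so a downstream Engine registered with the matching "string" type can recover the raw result directly. A minimal sketch of such a consumer follows; the Engine name PostProcessEngine and the float output layout are assumptions for illustration, not part of the original sample. The destructor below then releases the pre-allocated output memory.

// Sketch of a downstream Engine consuming the "string" output (illustrative)
HIAI_IMPL_ENGINE_PROCESS("PostProcessEngine", PostProcessEngine, 1)
{
    std::shared_ptr<std::string> result =
        std::static_pointer_cast<std::string>(arg0);
    if (nullptr == result)
    {
        return HIAI_INVALID_INPUT_MSG;
    }
    // Assuming the network outputs float scores, reinterpret the raw bytes
    const float* scores = reinterpret_cast<const float*>(result->data());
    uint32_t count = result->size() / sizeof(float);
    // ... post-process the "count" values in "scores" here ...
    return HIAI_OK;
}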

ClassifyNetEngine::~ClassifyNetEngine() {
    // Release the memory pre-allocated for outData_
    // (iterate by reference so the stored pointers are actually cleared)
    for (auto& buffer : outData_) {
        if (buffer != nullptr) {
            hiai::HIAIMemory::HIAI_DFree(buffer);
            buffer = nullptr;
        }
    }
}
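The Process function above assumes that ai_model_manager_ was already created and the offline model loaded in the Engine's Init function. A minimal sketch of such an Init follows; reading the model path from a graph configuration item named "model_path" is an assumption for illustration and may differ in your graph configuration.

// Sketch of the Engine Init assumed by the Process example (illustrative)
HIAI_StatusT ClassifyNetEngine::Init(const hiai::AIConfig& config,
    const std::vector<hiai::AIModelDescription>& model_desc)
{
    if (ai_model_manager_ == nullptr) {
        ai_model_manager_ = std::make_shared<hiai::AIModelManager>();
    }
    std::string modelPath;
    for (int index = 0; index < config.items_size(); ++index) {
        if (config.items(index).name() == "model_path") { // assumed item name
            modelPath = config.items(index).value();
        }
    }
    hiai::AIModelDescription modelDesc;
    modelDesc.set_path(modelPath); // path of the offline model file
    std::vector<hiai::AIModelDescription> modelDescVec;
    modelDescVec.push_back(modelDesc);
    if (ai_model_manager_->Init(config, modelDescVec) != hiai::SUCCESS) {
        HIAI_ENGINE_LOG(this, HIAI_AI_MODEL_MANAGER_INIT_FAIL,
            "hiai ai model manager init fail");
        return HIAI_AI_MODEL_MANAGER_INIT_FAIL;
    }
    return HIAI_OK;
}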

Example 2: Asynchronous Call

For an asynchronous call example, see the DDK sample in "DDK installation directory/ddk/sample/customop/customop_app/main.cpp".
