更新时间:2021-03-18 GMT+08:00
分享

调用示例

/**
* @file mngr_sample.h
*
* Copyright(c)<2018>, <Huawei Technologies Co.,Ltd>
*
* @version 1.0
*
* @date 2018-4-25
*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>
#include "hiaiengine/ai_model_manager.h"
using namespace std;
using namespace hiai;
// Path to the input image data file (raw binary blob fed to the model).
static const std::string IMAGE_FILE_PATH = "/data/input_zebra.bin";
// Offline Da Vinci model file and the name it is registered under in the manager.
static const char* MODEL_PATH = "/data/ResNet.davincimodel";
static const char* MODEL_NAME = "resnet18";
/**
 * @brief Read an entire binary file into a heap buffer.
 *
 * @param file_name Path of the file to read; may not be null.
 * @param fileSize  Out: number of bytes read; may not be null.
 * @return malloc()-allocated buffer the caller must release with free(),
 *         or nullptr if the file cannot be opened, is empty, or the read fails.
 */
char* ReadBinFile(const char *file_name, uint32_t *fileSize)
{
    if (file_name == nullptr || fileSize == nullptr)
    {
        return nullptr;
    }
    // Open at the end so tellg() yields the file size directly.
    std::ifstream filestr(file_name, std::ios::binary | std::ios::ate);
    if (!filestr)
    {
        return nullptr;
    }
    const std::streamoff size = filestr.tellg();
    if (size <= 0)
    {
        // tellg() failed or the file is empty — nothing to return.
        return nullptr;
    }
    filestr.seekg(0, std::ios::beg);
    // malloc (not new[]) is kept deliberately: callers release with free().
    char *buffer = (char*)malloc(static_cast<size_t>(size));
    if (nullptr == buffer)
    {
        return nullptr;
    }
    if (!filestr.read(buffer, size))
    {
        // Short or failed read — do not leak the buffer.
        free(buffer);
        return nullptr;
    }
    *fileSize = static_cast<uint32_t>(size);
    return buffer;
}
int main(int argc, char* argv[])
{
    vector<shared_ptr<IAITensor>> model_input;
    vector<shared_ptr<IAITensor>> model_output;
    AIModelManager model_mngr;
    AIModelDescription model_desc;
    AIConfig config;
    AIContext context;
    model_desc.set_path(MODEL_PATH);
    model_desc.set_name(MODEL_NAME);
    model_desc.set_type(0);
    vector<AIModelDescription> model_descs;
    model_descs.push_back(model_desc);
    // AIModelManager Init
    AIStatus ret = model_mngr.Init(config, model_descs);
    if (SUCCESS != ret)
    {
        printf("AIModelManager Init failed. ret = %d\n", ret);
        return -1;
    }
    // 输入tensor
    // 输入tensor会在读取图片数据后重新设置,这里的作用只是初始化
    AITensorDescription tensor_desc = AINeuralNetworkBuffer::GetDescription();
    shared_ptr<IAITensor> input_tensor = AITensorFactory::GetInstance()->CreateTensor(tensor_desc);
    if (nullptr == input_tensor)
    {
        printf("Create input_tensor failed.\n");
        return -1;
    }
    // 读取图片数据
    uint32_t image_data_size = 0;
    float* image_data = (float*)ReadBinFile(IMAGE_FILE_PATH.c_str(), &image_data_size);
    if (nullptr == image_data)
    {
        printf("ReadBinFile failed bin file path= %s \n", IMAGE_FILE_PATH.c_str());
        return -1;
    }
    // 将图片数据地址指针及长度,设置给input_simple_tensor
    shared_ptr<AISimpleTensor> input_simple_tensor = static_pointer_cast<AISimpleTensor>(input_tensor);
    input_simple_tensor->SetBuffer((void*)image_data, image_data_size);
    model_input.push_back(input_tensor);
    // 创建输出tensor
    if(model_mngr.IsPreAllocateOutputMem())
    {
        ret = model_mngr.CreateOutputTensor(model_input, model_output);
        if (SUCCESS != ret)
        {
            printf("CreateOutputTensor failed.ret = %d\n", ret);
            delete image_data;
            return -1;
        }
    }
    else
    {
        // 用户创建tensor
        ret = model_mngr.GetModelIOTensorDim(MODEL_NAME, input_tensor_dims, output_tensor_dims);
        std::vector<TensorDimension> input_tensor_dims;
        std::vector<TensorDimension> output_tensor_dims;
        for(TensorDimension & dims : output_tensor_dims)
        {
            shared_ptr<IAITensor> output_tensor = AITensorFactory::GetInstance()->CreateTensor(tensor_desc);
            shared_ptr<AISimpleTensor> output_simple_tensor = static_pointer_cast<AISimpleTensor>(output_tensor);
            output_simple_tensor->setBuffer((void*) new char[dims.size], dims.size);
            model_output.push_back(output_tensor)
        }
    }

    bool dynamic_aipp_flag = true;
    if (dynamic_aipp_flag) 
    {
        const int batch_number = 2;
        AITensorDescription desc = AippDynamicParaTensor::GetDescription(std::to_string(batch_number));
        shared_ptr<IAITensor> tensor = AITensorFactory::GetInstance()->CreateTensor(desc);
        shared_ptr<AippDynamicParaTensor> aipp_params_tensor = static_pointer_cast<AippDynamicParaTensor>(tensor);
        //设置模型的原始输入的类型。
        aipp_params_tensor->SetInputFormat(hiai::YUV420SP_U8);
        //设置色域矩阵参数值
        aipp_params_tensor->SetCscParams(hiai::YUV420SP_U8, hiai::MODEL_BGR888_U8, hiai::JPEG);
        //设置原始图片的宽和高信息
        aipp_params_tensor->SetSrcImageSize(256, 224);
        //如果有多batch的情况下,为每个batch设置AIPP参数。
        for (int i = 0; i < batch_number; i++) {
            //设置抠图参数值
            aipp_params_tensor->SetCropParams(true, 0, 0, 200, 200, i);
            //设置Padding参数值
            aipp_params_tensor->SetPaddingParams(true, 12, 12, 12, 12, i);
            //设置DTC通道均值参数
            aipp_params_tensor->SetDtcPixelMean(104, 117, 123, 0, i);
            //设置DTC通道方差或(max-min)的倒数
            aipp_params_tensor->SetPixelVarReci(1.0, 1.0, 1.0, 1.0, i);
        }
		
	model_mngr.SetInputDynamicAIPP(model_input, aipp_params_tensor);
    }

    // 启动模型推理
    printf("Start process.\n");
    ret = model_mngr.Process(context, model_input, model_output, 0);
    if (SUCCESS != ret)
    {
        printf("Process failed.ret = %d\n", ret);
        return -1;
    }
    // 因未设置监听器,所以模型推理为同步调用,可直接取出模型推理输出数据
    shared_ptr<AISimpleTensor> result_tensor = static_pointer_cast<AISimpleTensor>(model_output[0]);
    printf("Get Result, buffsize is %d",result_tensor->GetSize());
    for(TensorDimension & dims : output_tensor_dims)
    {
    }
    printf("predict ok.\n");
    return 0;
}
分享:

    相关文档

    相关产品