
Calling Example
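The following sample shows how to use AIModelManager to load an offline model, construct the input and output tensors, optionally configure dynamic AIPP parameters, and run synchronous model inference.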

/**
* @file mngr_sample.cpp
*
* Copyright (c) 2018, Huawei Technologies Co., Ltd.
*
* @version 1.0
*
* @date 2018-4-25
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <memory>
#include <string>
#include <vector>
#include <fstream>
#include "hiaiengine/ai_model_manager.h"
using namespace std;
using namespace hiai;
// Path of the input image data
static const std::string IMAGE_FILE_PATH = "/data/input_zebra.bin";
// Path and name of the offline model
static const char* MODEL_PATH = "/data/ResNet.davincimodel";
static const char* MODEL_NAME = "resnet18";
/**
 * @brief Read the binary input file into a malloc'd buffer.
 *        The caller is responsible for releasing the buffer with free().
 */
char* ReadBinFile(const char *file_name, uint32_t *fileSize)
{
    std::filebuf *pbuf;
    std::ifstream filestr;
    size_t size;
    filestr.open(file_name, std::ios::binary);
    if (!filestr)
    {
        return nullptr;
    }
    pbuf = filestr.rdbuf();
    // Determine the file size by seeking to the end, then rewind.
    size = pbuf->pubseekoff(0, std::ios::end, std::ios::in);
    pbuf->pubseekpos(0, std::ios::in);
    char * buffer = (char*)malloc(size);
    if (nullptr == buffer)
    {
        return nullptr;
    }
    pbuf->sgetn(buffer, size);
    *fileSize = size;
    filestr.close();
    return buffer;
}
int main(int argc, char* argv[])
{
    vector<shared_ptr<IAITensor>> model_input;
    vector<shared_ptr<IAITensor>> model_output;
    AIModelManager model_mngr;
    AIModelDescription model_desc;
    AIConfig config;
    AIContext context;
    model_desc.set_path(MODEL_PATH);
    model_desc.set_name(MODEL_NAME);
    model_desc.set_type(0);
    vector<AIModelDescription> model_descs;
    model_descs.push_back(model_desc);
    // Initialize AIModelManager with the model description.
    AIStatus ret = model_mngr.Init(config, model_descs);
    if (SUCCESS != ret)
    {
        printf("AIModelManager Init failed, ret = %d\n", ret);
        return -1;
    }
    // Create the input tensor.
    // The input tensor is reset after the image data is read; the creation here is only for initialization.
    AITensorDescription tensor_desc = AINeuralNetworkBuffer::GetDescription();
    shared_ptr<IAITensor> input_tensor = AITensorFactory::GetInstance()->CreateTensor(tensor_desc);
    if (nullptr == input_tensor)
    {
        printf("Create input_tensor failed.\n");
        return -1;
    }
    // Read the image data.
    uint32_t image_data_size = 0;
    float* image_data = (float*)ReadBinFile(IMAGE_FILE_PATH.c_str(), &image_data_size);
    if (nullptr == image_data)
    {
        printf("ReadBinFile failed, bin file path = %s\n", IMAGE_FILE_PATH.c_str());
        return -1;
    }
    // Attach the image data buffer and its length to input_simple_tensor.
    shared_ptr<AISimpleTensor> input_simple_tensor = static_pointer_cast<AISimpleTensor>(input_tensor);
    input_simple_tensor->SetBuffer((void*)image_data, image_data_size);
    model_input.push_back(input_tensor);
    // Create the output tensors.
    if (model_mngr.IsPreAllocateOutputMem())
    {
        ret = model_mngr.CreateOutputTensor(model_input, model_output);
        if (SUCCESS != ret)
        {
            printf("CreateOutputTensor failed, ret = %d\n", ret);
            free(image_data);
            return -1;
        }
    }
    else
    {
        // Query the model I/O dimensions and create the output tensors manually.
        std::vector<TensorDimension> input_tensor_dims;
        std::vector<TensorDimension> output_tensor_dims;
        ret = model_mngr.GetModelIOTensorDim(MODEL_NAME, input_tensor_dims, output_tensor_dims);
        if (SUCCESS != ret)
        {
            printf("GetModelIOTensorDim failed, ret = %d\n", ret);
            free(image_data);
            return -1;
        }
        for (TensorDimension &dims : output_tensor_dims)
        {
            shared_ptr<IAITensor> output_tensor = AITensorFactory::GetInstance()->CreateTensor(tensor_desc);
            shared_ptr<AISimpleTensor> output_simple_tensor = static_pointer_cast<AISimpleTensor>(output_tensor);
            output_simple_tensor->SetBuffer((void*) new char[dims.size], dims.size);
            model_output.push_back(output_tensor);
        }
    }
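    // Note: depending on the HiAI Engine version, AISimpleTensor may not take
    // ownership of the buffer passed to SetBuffer(); if it does not, release
    // the buffers allocated with new[] above after inference to avoid leaks.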

    // Optionally configure dynamic AIPP parameters.
    bool dynamic_aipp_flag = true;
    if (dynamic_aipp_flag)
    {
        const int batch_number = 2;
        AITensorDescription desc = AippDynamicParaTensor::GetDescription(std::to_string(batch_number));
        shared_ptr<IAITensor> tensor = AITensorFactory::GetInstance()->CreateTensor(desc);
        shared_ptr<AippDynamicParaTensor> aipp_params_tensor = static_pointer_cast<AippDynamicParaTensor>(tensor);
        // Set the original input format of the model.
        aipp_params_tensor->SetInputFormat(hiai::YUV420SP_U8);
        // Set the color space conversion (CSC) matrix parameters.
        aipp_params_tensor->SetCscParams(hiai::YUV420SP_U8, hiai::MODEL_BGR888_U8, hiai::JPEG);
        // Set the width and height of the source image.
        aipp_params_tensor->SetSrcImageSize(256, 224);
        // If there are multiple batches, set the AIPP parameters for each batch.
        for (int i = 0; i < batch_number; i++) {
            // Set the crop parameters.
            aipp_params_tensor->SetCropParams(true, 0, 0, 200, 200, i);
            // Set the padding parameters.
            aipp_params_tensor->SetPaddingParams(true, 12, 12, 12, 12, i);
            // Set the mean values of the DTC channels.
            aipp_params_tensor->SetDtcPixelMean(104, 117, 123, 0, i);
            // Set the variance or (max - min) reciprocal values of the DTC channels.
            aipp_params_tensor->SetPixelVarReci(1.0, 1.0, 1.0, 1.0, i);
        }
		
        // Bind the dynamic AIPP parameters to the model input.
        model_mngr.SetInputDynamicAIPP(model_input, aipp_params_tensor);
    }

    // Start model inference.
    printf("Start process.\n");
    ret = model_mngr.Process(context, model_input, model_output, 0);
    if (SUCCESS != ret)
    {
        printf("Process failed, ret = %d\n", ret);
        free(image_data);
        return -1;
    }
    // Because no listener is set, inference runs synchronously, so the output data can be read directly.
    shared_ptr<AISimpleTensor> result_tensor = static_pointer_cast<AISimpleTensor>(model_output[0]);
    printf("Get result, buffer size is %u.\n", result_tensor->GetSize());
    printf("predict ok.\n");
    free(image_data);
    return 0;
}
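
To interpret the inference result of a classification model such as resnet18, the output buffer can be scanned for the top score. The fragment below is a minimal, illustrative sketch that continues from result_tensor in the sample above; it assumes the output is a flat array of float32 scores and that AISimpleTensor exposes the raw buffer through GetBuffer(). Verify both assumptions against your HiAI Engine version.

// Hypothetical post-processing sketch: find the top-1 class in result_tensor.
// Assumes float32 scores and the AISimpleTensor::GetBuffer() accessor.
float *scores = static_cast<float *>(result_tensor->GetBuffer());
uint32_t count = result_tensor->GetSize() / sizeof(float);
uint32_t best = 0;
for (uint32_t i = 1; i < count; ++i)
{
    if (scores[i] > scores[best])
    {
        best = i;
    }
}
printf("Top-1 class index: %u, score: %f\n", best, scores[best]);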