IAINNNode
Updated on 2022-03-13 GMT+08:00
NN node API, which is implemented by the service provider. The IAINNNode class is defined in ai_nn_node.h.
class IAINNNode {
public:
    virtual ~IAINNNode() {}

    /*
     * @brief Initialization interface, in which the service implements model loading or other initialization actions.
     * @param [in] config Configuration parameters
     * @param [in] model_descs Model information. If no model is required, an empty vector is passed.
     * @return SUCCESS Success
     * @return Other: failure
     */
    virtual AIStatus Init(const AIConfig &config,
                          const std::vector<AIModelDescription> &model_descs = {}) = 0;

    /*
     * @brief Set the listener.
     * @param [in] listener If the listener is set to a null pointer, the Process interface is called synchronously. Otherwise, the Process interface is called asynchronously.
     * @return SUCCESS Success
     * @return Other: failure
     */
    virtual AIStatus SetListener(std::shared_ptr<IAIListener> listener) = 0;

    /*
     * @brief Compute interface.
     * @param [in] context Context information, including variable parameter configurations when the NN node is running
     * @param [in] in_data Input data
     * @param [out] out_data Output data
     * @param [in] timeout Timeout interval, which is ignored when the call is made in synchronous mode
     * @return SUCCESS Success
     * @return Other: failure
     */
    virtual AIStatus Process(AIContext &context,
                             const std::vector<std::shared_ptr<IAITensor>> &in_data,
                             std::vector<std::shared_ptr<IAITensor>> &out_data,
                             uint32_t timeout) = 0;

    /*
     * @brief Create a list of output tensors.
     * @param [in] in_data List of input tensors, which may be used for calculating the output
     * @param [out] out_data List of output tensors
     * @return SUCCESS Success
     * @return Other: failure
     */
    virtual AIStatus CreateOutputTensor(
        const std::vector<std::shared_ptr<IAITensor>> &in_data,
        std::vector<std::shared_ptr<IAITensor>> &out_data)
    {
        (void)in_data;
        (void)out_data;
        return SUCCESS;
    }

    /*
     * @brief Whether to pre-allocate the output memory. This interface is implemented by the NNNode service. The default value is true.
     */
    virtual bool IsPreAllocateOutputMem() { return true; }

    /*
     * @brief Check whether the NN node is valid.
     */
    virtual AIStatus IsValid() { return SUCCESS; }

    /*
     * @brief Query the execution mode supported by the node.
     * @return BOTH Both synchronous and asynchronous modes are supported.
     * @return ASYNC Only the asynchronous mode is supported.
     * @return SYNC Only the synchronous mode is supported.
     */
    virtual AI_NODE_EXEC_MODE GetSupportedExecMode() { return AI_NODE_EXEC_MODE::BOTH; }

#ifdef __LITE__
    /*
     * @brief Release resources when an NN node is in the idle state and times out. This API is user-defined and applies to the lite scenario. If the model manager is used, the corresponding IdleRelease function should be called.
     */
    virtual AIStatus IdleRelease() { return SUCCESS; }

    /*
     * @brief Resume services after an NN node times out, including enabling a device and loading a model. This API is user-defined and applies to the lite scenario. If the model manager is used, the corresponding IdleResume function should be called.
     */
    virtual AIStatus IdleResume() { return SUCCESS; }

    /*
     * @brief Set the maximum idle time. If the maximum idle time is exceeded, the system automatically destroys and releases resources. The default idle time is 60s.
     *        Called in the service constructor.
     *        Implemented in AIServiceBase.
     * @param [in] time Maximum idle time, in ms
     */
    virtual void SetMaxIdleTime(const int32_t time) { (void)time; }
#endif // __LITE__

    /*
     * @brief Obtain the maximum memory used by the service.
     * @return Maximum size of the memory used by the service
     */
    virtual uint32_t GetMaxUsedMemory() { return 0; }
};
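The class above is the interface contract only; a service provider supplies the implementation. The following is a minimal, illustrative sketch of a custom node and a synchronous call into it. The class name MyNNNode, the pass-through Process body, and the RunOnce helper are hypothetical and not part of the Matrix API; only the IAINNNode virtual functions and the types AIConfig, AIContext, AIModelDescription, AIStatus, IAIListener, IAITensor, and SUCCESS come from ai_nn_node.h and its dependencies.

#include <memory>
#include <vector>
#include "ai_nn_node.h"

// Hypothetical service-side implementation; everything except the
// overridden IAINNNode virtuals is illustrative.
class MyNNNode : public IAINNNode {
public:
    AIStatus Init(const AIConfig &config,
                  const std::vector<AIModelDescription> &model_descs) override {
        // A real service would load the models described in model_descs
        // and apply the settings carried in config here.
        (void)config;
        (void)model_descs;
        return SUCCESS;
    }

    AIStatus SetListener(std::shared_ptr<IAIListener> listener) override {
        listener_ = listener;  // null listener => Process is called synchronously
        return SUCCESS;
    }

    AIStatus Process(AIContext &context,
                     const std::vector<std::shared_ptr<IAITensor>> &in_data,
                     std::vector<std::shared_ptr<IAITensor>> &out_data,
                     uint32_t timeout) override {
        (void)context;
        (void)timeout;        // timeout is ignored in synchronous mode
        out_data = in_data;   // placeholder: real code would run inference here
        return SUCCESS;
    }

private:
    std::shared_ptr<IAIListener> listener_;
};

// Hypothetical caller side (synchronous path); input preparation elided.
AIStatus RunOnce(MyNNNode &node, AIContext &context,
                 const std::vector<std::shared_ptr<IAITensor>> &inputs) {
    std::vector<std::shared_ptr<IAITensor>> outputs;
    node.SetListener(nullptr);  // null pointer selects the synchronous mode
    if (node.IsPreAllocateOutputMem()) {
        node.CreateOutputTensor(inputs, outputs);  // prepare output tensors up front
    }
    return node.Process(context, inputs, outputs, 0 /* unused in sync mode */);
}

Because the sketch does not override GetSupportedExecMode(), it inherits the default BOTH, so the synchronous path above and an asynchronous path with a non-null listener would both be permitted.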