AIModelManager
Model manager class. This class is defined in ai_model_manager.h.
class AIModelManager : public IAINNNode
{
public:
    AIModelManager();

    /*
     * @brief Sets the dynamic batch feature. This API does not need to be called by users.
     * @param [in] inputDim      Input dimensions of the model
     * @param [in] input         Input data
     * @param [in] inputIndex    Index of the input whose dynamic batch is set, starting from 0
     * @param [in] batchNumber   Dynamic batch size
     * @return SUCCESS   Success
     * @return Other     Failure
     */
    AIStatus SetInputDynamicBatch(const std::vector<TensorDimension>& inputDim,
        std::vector<std::shared_ptr<IAITensor>>& input,
        uint32_t inputIndex, uint32_t batchNumber);

    /*
     * @brief Indicates whether the output memory is pre-allocated. This API is implemented by the NNNode service. The default value is true.
     */
    virtual bool IsPreAllocateOutputMem() override;

    /*
     * @brief Obtains the AINNNodeDescription object.
     */
    static AINNNodeDescription GetDescription();

    /*
     * @brief Obtains the input and output dimensions of a loaded model.
     * @param [in] model_name       Model name
     * @param [out] input_tensor    Input dimensions of the model
     * @param [out] output_tensor   Output dimensions of the model
     * @return SUCCESS   Success
     * @return Other     Failure
     */
    AIStatus GetModelIOTensorDim(const std::string& model_name,
        std::vector<TensorDimension>& input_tensor, std::vector<TensorDimension>& output_tensor);

    /*
     * @brief Sets the inference request ID of the thread.
     * @param [in] request_id   Request ID
     * @return None
     */
    static void SetRequestId(uint64_t request_id);

    ~AIModelManager();

#ifdef __LITE__
    /*
     * @brief Releases resources when the node is idle and times out. UnloadModels is called for implementation.
     */
    virtual AIStatus IdleRelease();

    /*
     * @brief Resumes services after a timeout, including enabling the device and loading the model. LoadModels is called for implementation.
     */
    virtual AIStatus IdleResume();
#endif // __LITE__

private:
    AIModelManagerImpl* impl_;
};
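The snippet below is a minimal usage sketch, not part of this reference: it assumes a model named my_model has already been loaded through the manager (model loading is covered elsewhere) and that SUCCESS is the success status documented above.

    #include <memory>
    #include <string>
    #include <vector>
    #include "ai_model_manager.h"

    // Hypothetical example: query the I/O dimensions of an already-loaded model.
    // The model name "my_model" and the surrounding setup are assumptions.
    AIStatus QueryModelDims(AIModelManager& manager)
    {
        std::vector<TensorDimension> inputDims;
        std::vector<TensorDimension> outputDims;

        // Fills inputDims/outputDims with one TensorDimension per model input/output.
        AIStatus ret = manager.GetModelIOTensorDim("my_model", inputDims, outputDims);
        if (ret != SUCCESS) {
            // Any value other than SUCCESS indicates failure; handle it as the application requires.
            return ret;
        }

        // The fields of TensorDimension are not enumerated on this page, so the
        // sketch stops at counting the returned tensors:
        // inputDims.size()  == number of model inputs
        // outputDims.size() == number of model outputs
        return SUCCESS;
    }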
   Parent topic: Other Compilation Dependent APIs
  