#include "ActsPlugins/Onnx/OnnxRuntimeBase.hpp"
◆ OnnxRuntimeBase() [1/2]

ActsPlugins::OnnxRuntimeBase::OnnxRuntimeBase() = default

Default constructor.
◆ OnnxRuntimeBase() [2/2]

ActsPlugins::OnnxRuntimeBase::OnnxRuntimeBase(Ort::Env& env, const char* modelPath)

Parametrized constructor.

Parameters
| env       | the ONNX runtime environment              |
| modelPath | the path to the ML model in *.onnx format |
◆ ~OnnxRuntimeBase()

ActsPlugins::OnnxRuntimeBase::~OnnxRuntimeBase() = default

Default destructor.
◆ runONNXInference() [1/2]

std::vector<std::vector<float>> ActsPlugins::OnnxRuntimeBase::runONNXInference(NetworkBatchInput& inputTensorValues) const

Run the ONNX inference function for a batch of input.

Parameters
| inputTensorValues | Vector of the input feature values of all the inputs used for prediction |

Returns
The vector of output (predicted) values
◆ runONNXInference() [2/2]

std::vector<float> ActsPlugins::OnnxRuntimeBase::runONNXInference(std::vector<float>& inputTensorValues) const

Run the ONNX inference function.

Parameters
| inputTensorValues | The input feature values used for prediction |

Returns
The output (predicted) values
◆ runONNXInferenceMultiOutput()

std::vector<std::vector<std::vector<float>>> ActsPlugins::OnnxRuntimeBase::runONNXInferenceMultiOutput(NetworkBatchInput& inputTensorValues) const

Run the multi-output ONNX inference function for a batch of input.

Parameters
| inputTensorValues | Vector of the input feature values of all the inputs used for prediction |

Returns
The vector of output (predicted) values, one for each output