Example #1
def run_onnxruntime(self, model_path, inputs, output_names):
    """Run test against onnxruntime backend."""
    from onnxruntime_customops import get_library_path
    import onnxruntime as rt
    opt = rt.SessionOptions()
    # register the custom-op library so models that use contrib ops
    # (e.g. string/tokenizer operators) can be loaded
    opt.register_custom_ops_library(get_library_path())
    m = rt.InferenceSession(model_path, opt)
    results = m.run(output_names, inputs)
    return results
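A minimal usage sketch (hedged; the instance `tc`, the input/output tensor names, and the model file are hypothetical): `inputs` is a dict of numpy arrays keyed by the model's input names, and `output_names` selects which outputs to fetch (passing None returns all outputs).
import numpy as np

# hypothetical test-case instance exposing run_onnxruntime
inputs = {"input:0": np.zeros((1, 3, 224, 224), dtype=np.float32)}
results = tc.run_onnxruntime("model.onnx", inputs, output_names=["output:0"])
print(results[0].shape)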
Example #2
def run_onnxruntime(self,
                    model_path,
                    inputs,
                    output_names,
                    use_custom_ops=False):
    """Run test against onnxruntime backend."""
    import os
    import onnxruntime as rt
    providers = ['CPUExecutionProvider']
    if rt.get_device() == "GPU":
        gpus = os.environ.get("CUDA_VISIBLE_DEVICES")
        # use CUDA unless CUDA_VISIBLE_DEVICES pins execution to a single GPU
        if gpus is None or len(gpus) > 1:
            providers = ['CUDAExecutionProvider']
    opt = rt.SessionOptions()
    if use_custom_ops:
        from onnxruntime_customops import get_library_path
        opt.register_custom_ops_library(get_library_path())
    # in case of issues with the runtime, one can enable more logging
    # opt.log_severity_level = 0
    # opt.log_verbosity_level = 255
    # opt.enable_profiling = True
    m = rt.InferenceSession(model_path, opt, providers=providers)
    results = m.run(output_names, inputs)
    return results
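Usage sketch for the custom-ops path (hedged; `tc`, the model file, and the tensor names are illustrative): pass use_custom_ops=True when the model contains onnxruntime-extensions contrib ops, such as string operators, which take numpy object arrays as string inputs.
import numpy as np

# hypothetical model containing string custom ops
inputs = {"text:0": np.array(["hello world"], dtype=object)}
results = tc.run_onnxruntime("tokenizer.onnx", inputs,
                             output_names=["tokens:0"],
                             use_custom_ops=True)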