Example #1
 def run_onnxruntime(self,
                     name,
                     model_proto,
                     inputs,
                     external_tensor_storage=None):
     """Run test against onnxruntime backend."""
     import onnxruntime as rt
     model_path = utils.save_onnx_model(
         TEMP_DIR,
         name,
         inputs,
         model_proto,
         include_test_data=True,
         as_text=utils.is_debug_mode(),
         external_tensor_storage=external_tensor_storage)
     logger.info("Model saved to %s", model_path)
     if self.use_custom_ops:
         from ortcustomops import get_library_path
         opt = rt.SessionOptions()
         opt.register_custom_ops_library(get_library_path())
         m = rt.InferenceSession(model_path, opt)
     else:
         m = rt.InferenceSession(model_path)
     # This first run produces the returned results and doubles as a
     # warm-up before the timed loop below.
     results = m.run(self.output_names, inputs)
     if self.perf:
         start = time.time()
         for _ in range(PERFITER):
             _ = m.run(self.output_names, inputs)
         self.onnx_runtime = time.time() - start
     return results
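The helper above times a fixed number of runs (PERFITER) after one untimed
warm-up call. A minimal standalone sketch of the same pattern, assuming a
"model.onnx" file and an input tensor named "x" (both names are hypothetical):

 import time
 import numpy as np
 import onnxruntime as rt

 sess = rt.InferenceSession("model.onnx")
 feeds = {"x": np.zeros((1, 4), dtype=np.float32)}
 results = sess.run(None, feeds)  # None fetches all outputs; also warms up
 start = time.time()
 for _ in range(100):
     sess.run(None, feeds)
 print("%.2f ms/inference" % (1000 * (time.time() - start) / 100))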
Example #2
 def run_onnxruntime(self, model_path, inputs, output_names):
     """Run test against onnxruntime backend."""
     from ortcustomops import get_library_path
     import onnxruntime as rt
     opt = rt.SessionOptions()
     opt.register_custom_ops_library(get_library_path())
     m = rt.InferenceSession(model_path, opt)
     results = m.run(output_names, inputs)
     return results
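Called from a test case, this helper might be exercised like so; the model
path, feed names and output names are assumptions for illustration:

 import numpy as np

 def test_custom_op(self):
     feeds = {"input:0": np.random.rand(2, 2).astype(np.float32)}
     results = self.run_onnxruntime("model.onnx", feeds, ["output:0"])
     self.assertEqual(len(results), 1)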
Example #3
 def run_onnxruntime(self,
                     name,
                     model_proto,
                     inputs,
                     outputs,
                     external_tensor_storage=None):
     """Run test against onnxruntime backend."""
     import onnxruntime as rt
     model_path = utils.save_onnx_model(
         TEMP_DIR,
         name,
         inputs,
         model_proto,
         include_test_data=True,
         as_text=utils.is_debug_mode(),
         external_tensor_storage=external_tensor_storage)
     logger.info("Model saved to %s", model_path)
     opt = rt.SessionOptions()
     if self.use_custom_ops:
         from ortcustomops import get_library_path
         opt.register_custom_ops_library(get_library_path())
     if self.ort_profile is not None:
         # Profiling must be enabled before the session is created.
         opt.enable_profiling = True
     m = rt.InferenceSession(model_path, opt)
     results = m.run(outputs, inputs)
     if self.perf:
         # Run for roughly PERF_TIME seconds in batches of PERF_STEP, then
         # report the mean latency in milliseconds per inference.
         n = 0
         start = time.time()
         stop = start + PERF_TIME
         while time.time() < stop:
             for _ in range(PERF_STEP):
                 _ = m.run(outputs, inputs)
             n += PERF_STEP
         self.onnx_runtime = 1000 * (time.time() - start) / n
         logger.info("ORT perf {:.2f}ms/inference, n={}".format(
             self.onnx_runtime, n))
     if self.ort_profile is not None:
         tmp_path = m.end_profiling()
         shutil.move(tmp_path, self.ort_profile)
     return results
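The ort_profile branch above uses onnxruntime's built-in profiler. A minimal
sketch of that flow in isolation, with "model.onnx" and the input name "x"
as assumptions:

 import numpy as np
 import onnxruntime as rt

 opt = rt.SessionOptions()
 opt.enable_profiling = True  # must be set before the session is created
 sess = rt.InferenceSession("model.onnx", opt)
 sess.run(None, {"x": np.zeros((1, 4), dtype=np.float32)})
 trace_path = sess.end_profiling()  # writes a JSON trace, returns its path
 print("profile written to", trace_path)  # inspect with chrome://tracing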
Example #4
 @classmethod
 def setUpClass(cls):
     try:
         from ortcustomops import (onnx_op, PyCustomOpDef, get_library_path)
     except ImportError:
         # ortcustomops is optional; skip registration if it is unavailable.
         return
     import numpy as np
     import scipy.linalg

     @onnx_op(op_type="SolveFloat",
              inputs=[PyCustomOpDef.dt_float, PyCustomOpDef.dt_float],
              outputs=[PyCustomOpDef.dt_float])
     def solveopf(a, b):
         # The user's custom op implementation.
         return scipy.linalg.solve(a, b).astype(np.float32)

     @onnx_op(op_type="SolveDouble",
              inputs=[PyCustomOpDef.dt_double, PyCustomOpDef.dt_double],
              outputs=[PyCustomOpDef.dt_double])
     def solveopd(a, b):
         # The user's custom op implementation.
         return scipy.linalg.solve(a, b).astype(np.float64)

     cls.path = get_library_path()
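A hedged sketch of exercising the registered SolveFloat op end to end,
assuming ortcustomops places Python custom ops under its default
"ai.onnx.contrib" domain; the model built here is purely illustrative:

 import numpy as np
 import onnxruntime as rt
 from onnx import helper, TensorProto
 from ortcustomops import get_library_path

 node = helper.make_node("SolveFloat", ["a", "b"], ["x"],
                         domain="ai.onnx.contrib")
 graph = helper.make_graph(
     [node], "solve_test",
     [helper.make_tensor_value_info("a", TensorProto.FLOAT, [2, 2]),
      helper.make_tensor_value_info("b", TensorProto.FLOAT, [2])],
     [helper.make_tensor_value_info("x", TensorProto.FLOAT, [2])])
 model = helper.make_model(graph, opset_imports=[
     helper.make_opsetid("", 13),
     helper.make_opsetid("ai.onnx.contrib", 1)])

 opt = rt.SessionOptions()
 opt.register_custom_ops_library(get_library_path())
 sess = rt.InferenceSession(model.SerializeToString(), opt)
 a = np.array([[3., 1.], [1., 2.]], dtype=np.float32)
 b = np.array([9., 8.], dtype=np.float32)
 print(sess.run(None, {"a": a, "b": b})[0])  # ~ [2., 3.]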