Example #1
    def save_onnx_model(self, model_proto, feed_dict):
        save_path = os.path.join(type(self).TMPPATH, self._testMethodName)
        target_path = utils.save_onnx_model(save_path, self._testMethodName, feed_dict,
                                            model_proto, include_test_data=self.debug_mode())

        self.log.debug("create model file: %s", target_path)
        return target_path
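Across these examples, `utils.save_onnx_model` always takes the same positional arguments: a target directory, a base file name, the feed dict, and the ModelProto, with the optional flags (`include_test_data`, `as_text`, `external_tensor_storage`) seen in the later examples. A minimal, self-contained sketch of a direct call, using a throwaway identity model rather than anything from the original tests:

import numpy as np
from onnx import TensorProto, helper
from tf2onnx import utils

# A trivial Identity model so the call below has something to serialize.
inp = helper.make_tensor_value_info("input:0", TensorProto.FLOAT, [1, 3])
out = helper.make_tensor_value_info("output:0", TensorProto.FLOAT, [1, 3])
node = helper.make_node("Identity", ["input:0"], ["output:0"])
model_proto = helper.make_model(helper.make_graph([node], "demo", [inp], [out]))

feed_dict = {"input:0": np.zeros((1, 3), dtype=np.float32)}

target_path = utils.save_onnx_model(
    "./models",              # directory to write into
    "example_case",          # base name for the saved model file
    feed_dict,               # feed dict, written alongside the model when requested
    model_proto,             # the ONNX model to serialize
    include_test_data=True,  # dump the feed tensors as test data
    as_text=False)           # debug-mode flag in the examples below; see Example #3
print(target_path)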
Example #2
 def run_onnxruntime(self,
                     name,
                     model_proto,
                     inputs,
                     external_tensor_storage=None):
     """Run test against onnxruntime backend."""
     import onnxruntime as rt
     model_path = utils.save_onnx_model(
         TEMP_DIR,
         name,
         inputs,
         model_proto,
         include_test_data=True,
         as_text=utils.is_debug_mode(),
         external_tensor_storage=external_tensor_storage)
     logger.info("Model saved to %s", model_path)
     if self.use_custom_ops:
         from ortcustomops import get_library_path
         opt = rt.SessionOptions()
         opt.register_custom_ops_library(get_library_path())
         m = rt.InferenceSession(model_path, opt)
     else:
         m = rt.InferenceSession(model_path)
     results = m.run(self.output_names, inputs)
     if self.perf:
         start = time.time()
         for _ in range(PERFITER):
             _ = m.run(self.output_names, inputs)
         self.onnx_runtime = time.time() - start
     return results
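The `inputs` dict does double duty here: it is written out as test data by `utils.save_onnx_model` and then passed unchanged as the feed to `InferenceSession.run`. An illustrative call from inside a test built on this helper (the names and shapes are placeholders, `model_proto` comes from the conversion under test, and `self.output_names` is assumed to be set elsewhere):

import numpy as np

# Inside a test method of the class that defines run_onnxruntime above.
inputs = {"input:0": np.random.rand(1, 28, 28, 1).astype(np.float32)}
results = self.run_onnxruntime("my_test_case", model_proto, inputs)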
Example #3
    def save_onnx_model(self, model_proto, feed_dict, postfix=""):
        target_path = utils.save_onnx_model(self.test_data_directory, self._testMethodName + postfix, feed_dict,
                                            model_proto, include_test_data=self.config.is_debug_mode,
                                            as_text=self.config.is_debug_mode)

        self.logger.debug("create model file: %s", target_path)
        return target_path
Example #4
 def run_onnxruntime(self, name, model_proto, inputs):
     """Run test against msrt-next backend."""
     import onnxruntime as rt
     model_path = utils.save_onnx_model(TMPPATH, name, inputs, model_proto)
     m = rt.InferenceSession(model_path)
     results = m.run(self.output_names, inputs)
     if self.perf:
         start = time.time()
         for _ in range(PERFITER):
             _ = m.run(self.output_names, inputs)
         self.onnx_runtime = time.time() - start
     return results
Example #5
 def run_onnxruntime(self, name, model_proto, inputs):
     """Run test against msrt-next backend."""
     import onnxruntime as rt
     model_path = utils.save_onnx_model(TEMP_DIR, name, inputs, model_proto, include_test_data=True)
     logger.info("Model saved to %s", model_path)
     m = rt.InferenceSession(model_path)
     results = m.run(self.output_names, inputs)
     if self.perf:
         start = time.time()
         for _ in range(PERFITER):
             _ = m.run(self.output_names, inputs)
         self.onnx_runtime = time.time() - start
     return results
Example #6
    def run_onnxruntime(self,
                        name,
                        model_proto,
                        inputs,
                        outputs,
                        external_tensor_storage=None):
        """Run test against onnxruntime backend."""
        import onnxruntime as rt
        model_path = utils.save_onnx_model(
            TEMP_DIR,
            name,
            inputs,
            model_proto,
            include_test_data=True,
            as_text=utils.is_debug_mode(),
            external_tensor_storage=external_tensor_storage)
        logger.info("Model saved to %s", model_path)
        providers = ['CPUExecutionProvider']
        if rt.get_device() == "GPU":
            gpus = os.environ.get("CUDA_VISIBLE_DEVICES")
            if gpus is None or len(gpus) > 1:
                providers = ['CUDAExecutionProvider']

        opt = rt.SessionOptions()
        if self.use_custom_ops:
            from onnxruntime_extensions import get_library_path
            opt.register_custom_ops_library(get_library_path())
        if self.ort_profile is not None:
            opt.enable_profiling = True
        m = rt.InferenceSession(model_path,
                                sess_options=opt,
                                providers=providers)
        results = m.run(outputs, inputs)
        if self.perf:
            n = 0
            start = time.time()
            stop = start + PERF_TIME
            while time.time() < stop:
                for _ in range(PERF_STEP):
                    _ = m.run(outputs, inputs)
                n += PERF_STEP
            self.onnx_runtime = 1000 * (time.time() - start) / n
            logger.info("ORT perf {:.2f}ms/inference, n={}".format(
                self.onnx_runtime, n))
        if self.ort_profile is not None:
            tmp_path = m.end_profiling()
            shutil.move(tmp_path, self.ort_profile)
        return results
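This variant picks exactly one execution provider based on `rt.get_device()` and `CUDA_VISIBLE_DEVICES`. onnxruntime also accepts an ordered provider list and falls back along it, so an equivalent hand-rolled session setup (provider selection only; custom ops and profiling omitted) could look like this sketch:

import onnxruntime as rt

# model_path: the path returned by utils.save_onnx_model, as in the example above.
if rt.get_device() == "GPU":
    providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
else:
    providers = ["CPUExecutionProvider"]
sess = rt.InferenceSession(model_path, providers=providers)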
Example #7
 def run_onnxruntime(self,
                     name,
                     model_proto,
                     inputs,
                     outputs,
                     external_tensor_storage=None):
     """Run test against onnxruntime backend."""
     import onnxruntime as rt
     model_path = utils.save_onnx_model(
         TEMP_DIR,
         name,
         inputs,
         model_proto,
         include_test_data=True,
         as_text=utils.is_debug_mode(),
         external_tensor_storage=external_tensor_storage)
     logger.info("Model saved to %s", model_path)
     opt = rt.SessionOptions()
      if self.use_custom_ops:
          from ortcustomops import get_library_path
          opt.register_custom_ops_library(get_library_path())
      if self.ort_profile is not None:
          opt.enable_profiling = True
      # create the session once, after custom-op and profiling options are set
      m = rt.InferenceSession(model_path, opt)
     results = m.run(outputs, inputs)
     if self.perf:
         n = 0
         start = time.time()
         stop = start + PERF_TIME
         while time.time() < stop:
             for _ in range(PERF_STEP):
                 _ = m.run(outputs, inputs)
             n += PERF_STEP
         self.onnx_runtime = 1000 * (time.time() - start) / n
         logger.info("ORT perf {:.2f}ms/inference, n={}".format(
             self.onnx_runtime, n))
     if self.ort_profile is not None:
         tmp_path = m.end_profiling()
         shutil.move(tmp_path, self.ort_profile)
     return results
Example #8
# [array([[[-0.05846359 -0.06566401  0.02254938 -0.26033643 -0.07923548]],
#         [[ 0.04879569  0.04215769 -0.06720451 -0.60583305  0.06223793]],
#         [[-0.05626901 -0.06627436  0.00422506 -0.5533649  -0.0767431 ]]], dtype=float32)]
# Imports assumed by this snippet (tf2onnx 1.x with the TF1 API).
import tensorflow as tf

from tf2onnx import optimizer, utils
from tf2onnx.tfonnx import process_tf_graph, tf_optimize

with tf.Session() as sess:
    # output_dict: get tensor by output name
    output_dict = []
    for out_name in output_names_with_port:
        output_dict.append(sess.graph.get_tensor_by_name(out_name))

    expected = sess.run(output_dict, feed_dict={"input_1:0": x_val})

# tf optimize
graph_def = tf_optimize(input_names_with_port,
                        output_names_with_port,
                        sess.graph_def,
                        fold_constant=True)

tf.reset_default_graph()
tf.import_graph_def(graph_def, name='')

# convert to onnx
with tf.Session() as sess:
    g = process_tf_graph(sess.graph, output_names=output_names_with_port)
    g = optimizer.optimize_graph(g)
    model_proto = g.make_model("lstm")
    utils.save_onnx_model("./models",
                          "lstm",
                          feed_dict={"input_1:0": x_val},
                          model_proto=model_proto)
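A natural follow-up that the snippet stops short of, sketched here as an assumption rather than part of the original test: run the converted model with onnxruntime and compare it against the TensorFlow outputs computed earlier. This assumes the converted graph keeps the TF tensor names ("input_1:0" and the entries of output_names_with_port).

import numpy as np
import onnxruntime as rt

# Load the converted model directly from its serialized bytes.
sess = rt.InferenceSession(model_proto.SerializeToString())
actual = sess.run(output_names_with_port, {"input_1:0": x_val})
for tf_out, onnx_out in zip(expected, actual):
    np.testing.assert_allclose(tf_out, onnx_out, rtol=1e-4, atol=1e-4)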