    def run_test(self,
                 name,
                 backend="caffe2",
                 onnx_file=None,
                 opset=None,
                 extra_opset=None,
                 perf=None,
                 fold_const=None):
        """Run complete test against backend."""
        self.perf = perf

        # get the model
        if self.url:
            _, dir_name = self.download_model()
            logger.info("Downloaded to %s", dir_name)
            model_path = os.path.join(
                dir_name, self.local) if self.local != "." else dir_name
        else:
            model_path = self.local

        logger.info("Load model from %s", model_path)
        input_names = list(self.input_names.keys())
        outputs = self.output_names
        if self.model_type in ["checkpoint"]:
            graph_def, input_names, outputs = tf_loader.from_checkpoint(
                model_path, input_names, outputs)
        elif self.model_type in ["saved_model"]:
            loaded = tf_loader.from_saved_model(
                model_path,
                input_names,
                outputs,
                self.tag,
                self.signatures,
                self.concrete_function,
                self.large_model,
                return_concrete_func=self.large_model)
            if self.large_model:
                # Must maintain ref to imported since concrete_func uses weak refs
                # pylint: disable=unused-variable
                graph_def, input_names, outputs, concrete_func, imported = loaded
            else:
                graph_def, input_names, outputs = loaded
        elif self.model_type in ["keras"]:
            graph_def, input_names, outputs = tf_loader.from_keras(
                model_path, input_names, outputs)
        else:
            graph_def, input_names, outputs = tf_loader.from_graphdef(
                model_path, input_names, outputs)

        if utils.is_debug_mode():
            utils.save_protobuf(
                os.path.join(TEMP_DIR, name + "_after_tf_optimize.pb"),
                graph_def)

        if self.large_model:
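            # for large models, run the concrete function eagerly to get TF reference outputs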
            inputs = {}
            for k in input_names:
                v = self.input_names[k]
                inputs[k.split(":")[0]] = tf.constant(self.make_input(v))
            tf_func = tf.function(concrete_func)
            logger.info("Running TF")
            tf_results_d = tf_func(**inputs)
            if self.structured_outputs is None:
                tf_results = list(tf_results_d.values())
            else:
                tf_results = [
                    tf_results_d[output] for output in self.structured_outputs
                ]
            if self.perf:
                logger.info("Running TF perf")
                start = time.time()
                for _ in range(PERFITER):
                    _ = concrete_func(**inputs)
                self.tf_runtime = time.time() - start
            logger.info("TensorFlow OK")

        inputs = {}
        shape_override = {}
        tf_reset_default_graph()

        from tf2onnx.tf_utils import compress_graph_def
        const_node_values = None
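        # for large models, compress_graph_def pulls big constant values out of graph_def
        # (kept in const_node_values) so the graph can be imported and converted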
        with tf.Graph().as_default() as tf_graph:
            if self.large_model:
                const_node_values = compress_graph_def(graph_def)
            tf.import_graph_def(graph_def, name='')

        with tf_session(graph=tf_graph) as sess:
            # create the input data
            for k in input_names:
                v = self.input_names[k]
                t = sess.graph.get_tensor_by_name(k)
                expected_dtype = tf.as_dtype(t.dtype).name
                if isinstance(v, six.text_type) and v.startswith("np."):
                    np_value = eval(v)  # pylint: disable=eval-used
                    if expected_dtype != np_value.dtype:
                        logger.warning(
                            "dtype mismatch for input %s: expected=%s, actual=%s",
                            k, expected_dtype, np_value.dtype)
                    inputs[k] = np_value.astype(expected_dtype)
                else:
                    inputs[k] = self.make_input(v).astype(expected_dtype)

            if self.force_input_shape:
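                # pin the converter's input shapes to the concrete shapes of the generated data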
                for k, v in inputs.items():
                    shape_override[k] = list(v.shape)

            # run the model with tensorflow
            if self.skip_tensorflow:
                logger.info("TensorFlow SKIPPED")
            elif not self.large_model:
                tf_results = self.run_tensorflow(sess, inputs)
                logger.info("TensorFlow OK")

        model_proto = None
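        # either load a previously converted model from disk or convert the TF graph now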
        if self.skip_conversion:
            if self.large_model:
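                # large models are stored as a zip containing the model plus external tensor data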
                external_tensor_storage = ExternalTensorStorage()
                model_proto = utils.model_proto_from_zip(
                    self.converted_model, external_tensor_storage)
            else:
                external_tensor_storage = None
                model_proto = utils.model_proto_from_file(self.converted_model)
            logger.info("ONNX loaded from file")
        else:
            try:
                # convert model to onnx
                onnx_graph = self.to_onnx(sess.graph,
                                          opset=opset,
                                          extra_opset=extra_opset,
                                          shape_override=shape_override,
                                          input_names=inputs.keys(),
                                          const_node_values=const_node_values)
                onnx_graph = optimizer.optimize_graph(onnx_graph)
                print("ONNX", onnx_graph.dump_node_statistics())
                external_tensor_storage = (
                    ExternalTensorStorage() if self.large_model else None)
                model_proto = onnx_graph.make_model(
                    "converted from tf2onnx",
                    external_tensor_storage=external_tensor_storage)
                logger.info("To_ONNX, OK")
                if onnx_file:
                    self.create_onnx_file(name, model_proto, inputs, onnx_file,
                                          external_tensor_storage)
                if self.converted_model:
                    if self.large_model:
                        utils.save_onnx_zip(self.converted_model, model_proto,
                                            external_tensor_storage)
                    else:
                        utils.save_protobuf(self.converted_model, model_proto)
                    logger.info("Created %s", self.converted_model)

            except Exception:
                logger.error("To_ONNX FAIL", exc_info=1)
                return False

        try:
            onnx_results = None
            if backend == "caffe2":
                onnx_results = self.run_caffe2(name, model_proto, inputs)
            elif backend == "onnxruntime":
                onnx_results = self.run_onnxruntime(name, model_proto, inputs,
                                                    external_tensor_storage)
            else:
                raise ValueError("unknown backend")
            logger.info("Run_ONNX OK")

            try:
                if self.skip_tensorflow:
                    logger.info("Results: skipped tensorflow")
                else:
                    if self.check_only_shape:
                        for tf_res, onnx_res in zip(tf_results, onnx_results):
                            np.testing.assert_array_equal(
                                tf_res.shape, onnx_res.shape)
                    else:
                        for tf_res, onnx_res in zip(tf_results, onnx_results):
                            np.testing.assert_allclose(tf_res,
                                                       onnx_res,
                                                       rtol=self.rtol,
                                                       atol=self.atol)
                    logger.info("Results: OK")
                return True
            except Exception:
                logger.error("Results", exc_info=1)

        except Exception:
            logger.error("Run_ONNX FAIL", exc_info=1)

        return False

    def run_test(self,
                 name,
                 backend="onnxruntime",
                 onnx_file=None,
                 opset=None,
                 extra_opset=None,
                 perf=None):
        """Run complete test against backend."""
        self.perf = perf

        # get the model
        if self.url:
            _, dir_name = self.download_model()
            logger.info("Downloaded to %s", dir_name)
            model_path = os.path.join(
                dir_name, self.local) if self.local != "." else dir_name
        else:
            model_path = self.local

        logger.info("Load model from %s", model_path)
        input_names = list(self.input_names.keys())
        initialized_tables = {}
        outputs = self.output_names
        tflite_path = None
        to_rename = None
        if self.model_type in ["checkpoint"]:
            graph_def, input_names, outputs = tf_loader.from_checkpoint(
                model_path, input_names, outputs)
        elif self.model_type in ["saved_model"]:
            loaded = tf_loader.from_saved_model(
                model_path,
                None,
                None,
                self.tag,
                self.signatures,
                self.concrete_function,
                self.large_model,
                return_concrete_func=not self.run_tf_frozen,
                return_initialized_tables=True,
                return_tensors_to_rename=True)
            if not self.run_tf_frozen:
                # Must maintain ref to imported since concrete_func uses weak refs
                # pylint: disable=unused-variable
                graph_def, input_names, outputs, concrete_func, imported, initialized_tables, to_rename = loaded
            else:
                graph_def, input_names, outputs, initialized_tables, to_rename = loaded
        elif self.model_type in ["keras"]:
            graph_def, input_names, outputs = tf_loader.from_keras(
                model_path, input_names, outputs)
        elif self.model_type in ["tflite"]:
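            # tflite models are handed to the converter as a file path; there is no TF graph_def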
            tflite_path = model_path
            graph_def = None
        else:
            graph_def, input_names, outputs = tf_loader.from_graphdef(
                model_path, input_names, outputs)

        if utils.is_debug_mode() and graph_def is not None:
            utils.save_protobuf(
                os.path.join(TEMP_DIR, name + "_after_tf_optimize.pb"),
                graph_def)

        if tflite_path is not None:
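            # run the model with the TFLite interpreter to get reference results (and perf timings)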
            inputs = {}
            for k in input_names:
                v = self.input_names[k]
                inputs[k] = self.make_input(v)

            interpreter = tf.lite.Interpreter(tflite_path)
            input_details = interpreter.get_input_details()
            output_details = interpreter.get_output_details()
            input_name_to_index = {
                n['name'].split(':')[0]: n['index']
                for n in input_details
            }
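            # resize the interpreter inputs to the generated data shapes before allocating tensors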
            for k, v in inputs.items():
                interpreter.resize_tensor_input(input_name_to_index[k],
                                                v.shape)
            interpreter.allocate_tensors()

            def run_tflite():
                for k, v in inputs.items():
                    interpreter.set_tensor(input_name_to_index[k], v)
                interpreter.invoke()
                result = [
                    interpreter.get_tensor(output['index'])
                    for output in output_details
                ]
                return result

            tf_results = run_tflite()
            if self.perf:
                logger.info("Running TFLite perf")
                n = 0
                start = time.time()
                stop = start + PERF_TIME
                while time.time() < stop:
                    for _ in range(PERF_STEP):
                        _ = run_tflite()
                    n += PERF_STEP
                self.tf_runtime = 1000 * (time.time() - start) / n
                logger.info("TFLite perf {:.2f}ms/inference, n={}".format(
                    self.tf_runtime, n))
            logger.info("TFLite OK")

        if not self.run_tf_frozen:
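            # run the saved-model concrete function eagerly instead of a frozen graph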
            inputs = {}
            for k in input_names:
                v = self.input_names[k]
                inputs[k.split(":")[0]] = tf.constant(self.make_input(v))
            tf_func = tf.function(concrete_func)
            logger.info("Running TF")
            tf_results_d = tf_func(**inputs)
            # If there is only a single output a dict might not be returned
            if isinstance(tf_results_d, tf.Tensor):
                tf_results = [tf_results_d]
            else:
                tf_results = [
                    tf_results_d[k] for k in sorted(tf_results_d.keys())
                ]
            tf_results = [tf_res.numpy() for tf_res in tf_results]
            if self.perf:
                logger.info("Running TF perf")
                n = 0
                start = time.time()
                stop = start + PERF_TIME
                if self.tf_profile is not None:
                    tf.profiler.experimental.start(self.tf_profile)
                while time.time() < stop:
                    for _ in range(PERF_STEP):
                        _ = concrete_func(**inputs)
                    n += PERF_STEP
                if self.tf_profile is not None:
                    tf.profiler.experimental.stop()
                self.tf_runtime = 1000 * (time.time() - start) / n
                logger.info("TF perf {:.2f}ms/inference, n={}".format(
                    self.tf_runtime, n))
            logger.info("TensorFlow OK")

        shape_override = {}
        const_node_values = None
        tf_graph = None

        if graph_def is not None:
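            # import graph_def into a fresh graph to build the input feed and (optionally) run frozen TF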
            inputs = {}
            tf_reset_default_graph()

            with tf.Graph().as_default() as tf_graph:
                from tf2onnx.tf_utils import compress_graph_def
                if self.large_model:
                    const_node_values = compress_graph_def(graph_def)
                tf.import_graph_def(graph_def, name='')

            with tf_session(graph=tf_graph) as sess:
                # create the input data
                for k in input_names:
                    v = self.input_names[k]
                    t = sess.graph.get_tensor_by_name(k)
                    expected_dtype = tf.as_dtype(t.dtype).name
                    if isinstance(v, six.text_type) and v.startswith("np."):
                        np_value = eval(v)  # pylint: disable=eval-used
                        if expected_dtype != np_value.dtype:
                            logger.warning(
                                "dtype mismatch for input %s: expected=%s, actual=%s",
                                k, expected_dtype, np_value.dtype)
                        inputs[k] = np_value.astype(expected_dtype)
                    else:
                        if expected_dtype == "string":
                            # np.str / np.object were removed from numpy; the plain builtins behave the same
                            inputs[k] = self.make_input(v).astype(
                                str).astype(object)
                        else:
                            inputs[k] = self.make_input(v).astype(
                                expected_dtype)

                if self.force_input_shape:
                    for k, v in inputs.items():
                        shape_override[k] = list(v.shape)

                # run the model with tensorflow
                if self.skip_tensorflow:
                    logger.info("TensorFlow SKIPPED")
                elif self.run_tf_frozen:
                    if self.tf_profile is not None:
                        tf.profiler.experimental.start(self.tf_profile)
                    tf_results = self.run_tensorflow(sess, inputs)
                    if self.tf_profile is not None:
                        tf.profiler.experimental.stop()
                    logger.info("TensorFlow OK")
                tf_graph = sess.graph

        model_proto = None
        if self.skip_conversion:
            if self.large_model:
                external_tensor_storage = ExternalTensorStorage()
                model_proto = utils.model_proto_from_zip(
                    self.converted_model, external_tensor_storage)
            else:
                external_tensor_storage = None
                model_proto = utils.model_proto_from_file(self.converted_model)
            logger.info("ONNX loaded from file")
        else:
            try:
                # convert model to onnx
                onnx_graph = self.to_onnx(
                    tf_graph,
                    opset=opset,
                    extra_opset=extra_opset,
                    shape_override=shape_override,
                    input_names=inputs.keys(),
                    const_node_values=const_node_values,
                    initialized_tables=initialized_tables,
                    tflite_path=tflite_path,
                    tensors_to_rename=to_rename)
                onnx_graph = optimizer.optimize_graph(onnx_graph)
                print("ONNX", onnx_graph.dump_node_statistics())
                external_tensor_storage = (
                    ExternalTensorStorage() if self.large_model else None)
                model_proto = onnx_graph.make_model(
                    "converted from tf2onnx",
                    external_tensor_storage=external_tensor_storage)
                logger.info("To_ONNX, OK")
                if onnx_file:
                    self.create_onnx_file(name, model_proto, inputs, onnx_file,
                                          external_tensor_storage)
                if self.converted_model:
                    if self.large_model:
                        utils.save_onnx_zip(self.converted_model, model_proto,
                                            external_tensor_storage)
                    else:
                        utils.save_protobuf(self.converted_model, model_proto)
                    logger.info("Created %s", self.converted_model)

            except Exception:
                logger.error("To_ONNX FAIL", exc_info=1)
                return False

        try:
            onnx_results = None
            if backend == "onnxruntime":
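                # map requested output names through the rename table so onnxruntime fetches the right tensors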
                if to_rename is None:
                    struc_outputs = self.output_names
                else:
                    struc_outputs = [
                        to_rename.get(k, k) for k in self.output_names
                    ]
                onnx_results = self.run_onnxruntime(name, model_proto, inputs,
                                                    struc_outputs,
                                                    external_tensor_storage)
            else:
                raise ValueError("unknown backend")
            logger.info("Run_ONNX OK")

            try:
                if self.skip_tensorflow:
                    logger.info("Results: skipped tensorflow")
                else:
                    if self.check_only_shape:
                        for tf_res, onnx_res in zip(tf_results, onnx_results):
                            np.testing.assert_array_equal(
                                tf_res.shape, onnx_res.shape)
                    else:
                        for tf_res, onnx_res in zip(tf_results, onnx_results):
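                            # tolerate up to ptol percent of mismatched elements before reporting a failure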
                            good_cnt = np.count_nonzero(
                                np.isclose(tf_res,
                                           onnx_res,
                                           rtol=self.rtol,
                                           atol=self.atol))
                            bad_cnt = tf_res.size - good_cnt
                            if bad_cnt > self.ptol / 100 * tf_res.size:
                                # Prints a nice error message with stats
                                np.testing.assert_allclose(tf_res,
                                                           onnx_res,
                                                           rtol=self.rtol,
                                                           atol=self.atol)
                    logger.info("Results: OK")
                return True
            except Exception:
                logger.error("Results", exc_info=1)

        except Exception:
            logger.error("Run_ONNX FAIL", exc_info=1)

        return False