Example #1
def load():
    config = TestConfig()
    # if not launched by pytest, parse console arguments to override config
    if "pytest" not in sys.argv[0]:
        parser = argparse.ArgumentParser()
        parser.add_argument("--backend", default=config.backend,
                            choices=["caffe2", "onnxruntime"],
                            help="backend to test against")
        parser.add_argument("--opset", type=int, default=config.opset, help="opset to test against")
        parser.add_argument("--target", default=",".join(config.target), choices=constants.POSSIBLE_TARGETS,
                            help="target platform")
        parser.add_argument("--verbose", "-v", help="verbose output, option is additive", action="count")
        parser.add_argument("--debug", help="output debugging information", action="store_true")
        parser.add_argument("--temp_dir", help="temp dir")
        parser.add_argument("unittest_args", nargs='*')

        args = parser.parse_args()
        if args.debug:
            utils.set_debug_mode(True)

        config.backend = args.backend
        config.opset = args.opset
        config.target = args.target.split(',')
        config.log_level = logging.get_verbosity_level(args.verbose, config.log_level)
        if args.temp_dir:
            config.temp_dir = args.temp_dir

        # Now set the sys.argv to the unittest_args (leaving sys.argv[0] alone)
        sys.argv[1:] = args.unittest_args

    return config
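
A minimal sketch of the TestConfig object that load() expects. The field names are taken from the code above; the default values and the class layout are hypothetical:

class TestConfig:
    def __init__(self):
        self.backend = "onnxruntime"   # hypothetical default backend
        self.opset = 13                # hypothetical default opset
        self.target = []               # target platforms, joined/split on ","
        self.log_level = 20            # logging.INFO equivalent; raised by --verbose
        self.temp_dir = None           # set from --temp_dir when given

config = load()   # parses console flags unless the process was started by pytest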
Example #2
def main():
    args = get_args()
    logging.basicConfig(level=logging.get_verbosity_level(args.verbose))
    if args.debug:
        utils.set_debug_mode(True)

    logger = logging.getLogger(constants.TF2ONNX_PACKAGE_NAME)

    extra_opset = args.extra_opset or []
    custom_ops = {}
    if args.custom_ops:
        # default custom ops for tensorflow-onnx are in the "tf" namespace
        custom_ops = {op: (default_custom_op_handler, []) for op in args.custom_ops.split(",")}
        extra_opset.append(constants.TENSORFLOW_OPSET)

    # get the frozen tensorflow model from graphdef, checkpoint or saved_model.
    if args.graphdef:
        graph_def, inputs, outputs = loader.from_graphdef(args.graphdef, args.inputs, args.outputs)
        model_path = args.graphdef
    if args.checkpoint:
        graph_def, inputs, outputs = loader.from_checkpoint(args.checkpoint, args.inputs, args.outputs)
        model_path = args.checkpoint
    if args.saved_model:
        graph_def, inputs, outputs = loader.from_saved_model(
            args.saved_model, args.inputs, args.outputs, args.signature_def)
        model_path = args.saved_model

    if args.verbose:
        logger.info("inputs: %s", inputs)
        logger.info("outputs: %s", outputs)

    # TODO: consider enabling const folding by default
    graph_def = tf_optimize(inputs, outputs, graph_def, args.fold_const)

    with tf.Graph().as_default() as tf_graph:
        tf.import_graph_def(graph_def, name='')
    with tf.Session(graph=tf_graph):
        g = process_tf_graph(tf_graph,
                             continue_on_error=args.continue_on_error,
                             target=args.target,
                             opset=args.opset,
                             custom_op_handlers=custom_ops,
                             extra_opset=extra_opset,
                             shape_override=args.shape_override,
                             input_names=inputs,
                             output_names=outputs,
                             inputs_as_nchw=args.inputs_as_nchw)

    onnx_graph = optimizer.optimize_graph(g)
    model_proto = onnx_graph.make_model("converted from {}".format(model_path))

    # write onnx graph
    logger.info("")
    logger.info("Successfully converted TensorFlow model %s to ONNX", model_path)
    if args.output:
        utils.save_protobuf(args.output, model_proto)
        logger.info("ONNX model is saved at %s", args.output)
    else:
        logger.info("To export ONNX model to file, please run with `--output` option")
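
Once the converter has written the ONNX file, a quick sanity check is to load it with onnxruntime and run a dummy input through it. A minimal sketch; the model path and the input shape are hypothetical:

import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession("model.onnx")              # the path passed via --output
input_name = sess.get_inputs()[0].name
dummy = np.zeros((1, 224, 224, 3), dtype=np.float32)   # hypothetical input shape
outputs = sess.run(None, {input_name: dummy})          # None = fetch all outputs
print([o.shape for o in outputs])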
Example #3
def main():
    args = get_args()
    logging.basicConfig(level=logging.get_verbosity_level(args.verbose))
    if args.debug:
        utils.set_debug_mode(True)

    Test.cache_dir = args.cache
    Test.target = args.target
    tests = load_tests_from_yaml(args.config)
    if args.list:
        logger.info(sorted(tests.keys()))
        return 0
    if args.tests:
        test_keys = args.tests.split(",")
    else:
        test_keys = list(tests.keys())

    failed = 0
    count = 0
    for test in test_keys:
        logger.info("===================================")

        t = tests[test]
        if args.tests is None:
            if t.disabled and not args.include_disabled:
                logger.info("Skip %s: disabled", test)
                continue

            condition, reason = t.check_opset_constraints(args.opset, args.extra_opset)
            if not condition:
                logger.info("Skip %s: %s", test, reason)
                continue

        count += 1
        try:
            logger.info("Running %s", test)
            ret = t.run_test(test, backend=args.backend, onnx_file=args.onnx_file,
                             opset=args.opset, extra_opset=args.extra_opset, perf=args.perf,
                             fold_const=args.fold_const)
        except Exception:
            logger.error("Failed to run %s", test, exc_info=1)
            ret = None
        finally:
            if not utils.is_debug_mode():
                utils.delete_directory(TEMP_DIR)
        if not ret:
            failed += 1

    logger.info("===================================")
    logger.info("RESULT: %s failed of %s, backend=%s", failed, count, args.backend)

    if args.perf:
        with open(args.perf, "w") as f:
            f.write("test,tensorflow,onnx\n")
            for test in test_keys:
                t = tests[test]
                if t.perf:
                    f.write("{},{},{}\n".format(test, t.tf_runtime, t.onnx_runtime))
    return failed
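
The --perf option above writes a small CSV with one row per test (columns test, tensorflow, onnx, holding the measured runtimes). A sketch of reading it back; the file name mirrors whatever was passed as args.perf and is hypothetical:

import csv

with open("perf.csv") as f:
    for row in csv.DictReader(f):
        speedup = float(row["tensorflow"]) / float(row["onnx"])
        print("{}: {:.2f}x".format(row["test"], speedup))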
Example #4
def main():
    args = get_args()
    logging.basicConfig(level=logging.get_verbosity_level(args.verbose))
    if args.debug:
        utils.set_debug_mode(True)

    Test.cache_dir = args.cache
    Test.target = args.target
    tests = tests_from_yaml(args.config)
    if args.list:
        print(sorted(tests.keys()))
        return 0
    if args.tests:
        test_keys = args.tests.split(",")
    else:
        test_keys = list(tests.keys())

    failed = 0
    count = 0
    for test in test_keys:
        t = tests[test]
        if args.tests is None and t.disabled and not args.include_disabled:
            continue
        count += 1
        try:
            ret = t.run_test(test,
                             backend=args.backend,
                             onnx_file=args.onnx_file,
                             opset=args.opset,
                             extra_opset=args.extra_opset,
                             perf=args.perf,
                             fold_const=args.fold_const)
        except Exception as ex:
            ret = None
            tb = traceback.format_exc()
            print(ex, tb)
        finally:
            if not utils.is_debug_mode():
                utils.delete_directory(TEMP_DIR)
        if not ret:
            failed += 1

    print("=== RESULT: {} failed of {}, backend={}".format(
        failed, count, args.backend))

    if args.perf:
        with open(args.perf, "w") as f:
            f.write("test,tensorflow,onnx\n")
            for test in test_keys:
                t = tests[test]
                if t.perf:
                    f.write("{},{},{}\n".format(test, t.tf_runtime,
                                                t.onnx_runtime))
    return failed
Example #5
def convert_onnx(sess, graph_def, input_path, inputs_op, outputs_op):

    graphdef = input_path

    shape_override = None
    if inputs_op:
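        # split_nodename_and_shape splits an optional shape suffix
        # (e.g. "input:0[1,224,224,3]") off the tensor name as a shape override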
        inputs_op, shape_override = utils.split_nodename_and_shape(inputs_op)
    if outputs_op:
        outputs_op = outputs_op.split(",")

    logging.basicConfig(level=logging.get_verbosity_level(True))

    utils.set_debug_mode(True)

    logger = logging.getLogger(constants.TF2ONNX_PACKAGE_NAME)

    graph_def, inputs_op, outputs_op = from_graphdef(sess, graph_def, graphdef,
                                                     inputs_op, outputs_op)
    model_path = graphdef

    graph_def = tf_optimize(inputs_op, outputs_op, graph_def, True)

    with tf.Graph().as_default() as tf_graph:
        tf.import_graph_def(graph_def, name='')
    with tf.Session(graph=tf_graph):
        g = process_tf_graph(tf_graph,
                             continue_on_error=False,
                             target=",".join(constants.DEFAULT_TARGET),
                             opset=10,
                             custom_op_handlers=None,
                             extra_opset=None,
                             shape_override=shape_override,
                             input_names=inputs_op,
                             output_names=outputs_op,
                             inputs_as_nchw=None)

    onnx_graph = optimizer.optimize_graph(g)
    model_proto = onnx_graph.make_model("converted from {}".format(model_path))

    # write onnx graph
    logger.info("")
    logger.info("Successfully converted TensorFlow model %s to ONNX",
                model_path)
    # if args.output:
    output_path = input_path.replace(".pb", ".onnx")
    utils.save_protobuf(output_path, model_proto)
    logger.info("ONNX model is saved at %s", output_path)
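
A rough sketch of how convert_onnx might be invoked for a frozen graph. The file name and tensor names are hypothetical, and the snippet assumes the same TF1-style session API and from_graphdef helper used above:

tf_v1 = tf.compat.v1   # on TensorFlow 1.x this is just tf

graph_def = tf_v1.GraphDef()
with tf_v1.gfile.GFile("frozen_model.pb", "rb") as f:   # hypothetical frozen graph
    graph_def.ParseFromString(f.read())

with tf_v1.Session() as sess:
    convert_onnx(sess, graph_def, "frozen_model.pb",
                 "input:0[1,224,224,3]",   # hypothetical input name with shape override
                 "logits:0")               # hypothetical output name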
Example #6
def main():
    args = get_args()
    logging.basicConfig(level=logging.get_verbosity_level(args.verbose))
    if args.debug:
        utils.set_debug_mode(True)

    logger = logging.getLogger(constants.TF2ONNX_PACKAGE_NAME)

    extra_opset = args.extra_opset or []
    custom_ops = {}
    initialized_tables = None
    if args.custom_ops:
        # default custom ops for tensorflow-onnx are in the "tf" namespace
        custom_ops = {
            op: (default_custom_op_handler, [])
            for op in args.custom_ops.split(",")
        }
        extra_opset.append(constants.TENSORFLOW_OPSET)

    # get the frozen tensorflow model from graphdef, checkpoint or saved_model.
    if args.graphdef:
        graph_def, inputs, outputs = tf_loader.from_graphdef(
            args.graphdef, args.inputs, args.outputs)
        model_path = args.graphdef
    if args.checkpoint:
        graph_def, inputs, outputs = tf_loader.from_checkpoint(
            args.checkpoint, args.inputs, args.outputs)
        model_path = args.checkpoint
    if args.saved_model:
        graph_def, inputs, outputs, initialized_tables = tf_loader.from_saved_model(
            args.saved_model,
            args.inputs,
            args.outputs,
            args.tag,
            args.signature_def,
            args.concrete_function,
            args.large_model,
            return_initialized_tables=True)
        model_path = args.saved_model
    if args.keras:
        graph_def, inputs, outputs = tf_loader.from_keras(
            args.keras, args.inputs, args.outputs)
        model_path = args.keras

    if args.verbose:
        logger.info("inputs: %s", inputs)
        logger.info("outputs: %s", outputs)

    with tf.Graph().as_default() as tf_graph:
        const_node_values = None
        if args.large_model:
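            # for --large_model, big constants are separated out of the GraphDef here
            # so they can be written as external tensors later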
            const_node_values = compress_graph_def(graph_def)
        if args.output_frozen_graph:
            utils.save_protobuf(args.output_frozen_graph, graph_def)
        tf.import_graph_def(graph_def, name='')
    with tf_loader.tf_session(graph=tf_graph):
        g = process_tf_graph(tf_graph,
                             continue_on_error=args.continue_on_error,
                             target=args.target,
                             opset=args.opset,
                             custom_op_handlers=custom_ops,
                             extra_opset=extra_opset,
                             shape_override=args.shape_override,
                             input_names=inputs,
                             output_names=outputs,
                             inputs_as_nchw=args.inputs_as_nchw,
                             const_node_values=const_node_values,
                             initialized_tables=initialized_tables)

    onnx_graph = optimizer.optimize_graph(g)

    tensor_storage = ExternalTensorStorage() if args.large_model else None
    model_proto = onnx_graph.make_model("converted from {}".format(model_path),
                                        external_tensor_storage=tensor_storage)

    # write onnx graph
    logger.info("")
    logger.info("Successfully converted TensorFlow model %s to ONNX",
                model_path)
    if args.output:
        if args.large_model:
            utils.save_onnx_zip(args.output, model_proto, tensor_storage)
            logger.info(
                "Zipped ONNX model is saved at %s. Unzip before opening in onnxruntime.",
                args.output)
        else:
            utils.save_protobuf(args.output, model_proto)
            logger.info("ONNX model is saved at %s", args.output)
    else:
        logger.info(
            "To export ONNX model to file, please run with `--output` option")
Example #7
def main():
    args = get_args()
    logging.basicConfig(level=logging.get_verbosity_level(args.verbose))
    if args.debug:
        utils.set_debug_mode(True)

    logger = logging.getLogger(constants.TF2ONNX_PACKAGE_NAME)

    extra_opset = args.extra_opset or []
    tflite_path = None
    custom_ops = {}
    initialized_tables = None
    tensors_to_rename = {}
    if args.custom_ops:
        using_tf_opset = False
        for op in args.custom_ops.split(","):
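            # each entry is either "OpName" or "OpName:domain"; bare names fall back to the tf opset below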
            if ":" in op:
                op, domain = op.split(":")
            else:
                # default custom ops for tensorflow-onnx are in the "tf" namespace
                using_tf_opset = True
                domain = constants.TENSORFLOW_OPSET.domain
            custom_ops[op] = (make_default_custom_op_handler(domain), [])
        if using_tf_opset:
            extra_opset.append(constants.TENSORFLOW_OPSET)

    if any(opset.domain == constants.CONTRIB_OPS_DOMAIN
           for opset in extra_opset):
        try:
            import tensorflow_text  # pylint: disable=import-outside-toplevel
        except ModuleNotFoundError:
            logger.warning(
                "tensorflow_text not installed. Model will fail to load if tensorflow_text ops are used."
            )

    # get the frozen tensorflow model from graphdef, checkpoint or saved_model.
    graph_def = None
    inputs = None
    outputs = None
    model_path = None

    if args.graphdef:
        graph_def, inputs, outputs = tf_loader.from_graphdef(
            args.graphdef, args.inputs, args.outputs)
        model_path = args.graphdef
    if args.checkpoint:
        graph_def, inputs, outputs = tf_loader.from_checkpoint(
            args.checkpoint, args.inputs, args.outputs)
        model_path = args.checkpoint
    if args.saved_model:
        graph_def, inputs, outputs, initialized_tables, tensors_to_rename = tf_loader.from_saved_model(
            args.saved_model,
            args.inputs,
            args.outputs,
            args.tag,
            args.signature_def,
            args.concrete_function,
            args.large_model,
            return_initialized_tables=True,
            return_tensors_to_rename=True)
        model_path = args.saved_model
    if args.keras:
        graph_def, inputs, outputs = tf_loader.from_keras(
            args.keras, args.inputs, args.outputs)
        model_path = args.keras
    if args.tflite:
        # inputs/outputs are optional for tflite models, but are used to cut the graph if provided.
        inputs = args.inputs
        outputs = args.outputs
        tflite_path = args.tflite
        model_path = tflite_path

    if args.verbose:
        logger.info("inputs: %s", inputs)
        logger.info("outputs: %s", outputs)

    if args.rename_inputs:
        tensors_to_rename.update(zip(inputs, args.rename_inputs))
    if args.rename_outputs:
        tensors_to_rename.update(zip(outputs, args.rename_outputs))

    with tf.device("/cpu:0"):
        model_proto, _ = _convert_common(
            graph_def,
            name=model_path,
            continue_on_error=args.continue_on_error,
            target=args.target,
            opset=args.opset,
            custom_op_handlers=custom_ops,
            extra_opset=extra_opset,
            shape_override=args.shape_override,
            input_names=inputs,
            output_names=outputs,
            inputs_as_nchw=args.inputs_as_nchw,
            large_model=args.large_model,
            tensors_to_rename=tensors_to_rename,
            ignore_default=args.ignore_default,
            use_default=args.use_default,
            tflite_path=tflite_path,
            dequantize=args.dequantize,
            initialized_tables=initialized_tables,
            output_frozen_graph=args.output_frozen_graph,
            output_path=args.output)

    # write onnx graph
    logger.info("")
    logger.info("Successfully converted TensorFlow model %s to ONNX",
                model_path)

    logger.info("Model inputs: %s", [n.name for n in model_proto.graph.input])
    logger.info("Model outputs: %s",
                [n.name for n in model_proto.graph.output])
    if args.output:
        if args.large_model:
            logger.info(
                "Zipped ONNX model is saved at %s. Unzip before opening in onnxruntime.",
                args.output)
        else:
            logger.info("ONNX model is saved at %s", args.output)
    else:
        logger.info(
            "To export ONNX model to file, please run with `--output` option")
Example #8
def main():
    args = get_args()
    logging.basicConfig(level=logging.get_verbosity_level(args.verbose))
    if args.debug:
        utils.set_debug_mode(True)

    logger = logging.getLogger(constants.TF2ONNX_PACKAGE_NAME)

    extra_opset = args.extra_opset or []
    tflite_path = None
    custom_ops = {}
    initialized_tables = None
    if args.custom_ops:
        using_tf_opset = False
        for op in args.custom_ops.split(","):
            if ":" in op:
                op, domain = op.split(":")
            else:
                # default custom ops for tensorflow-onnx are in the "tf" namespace
                using_tf_opset = True
                domain = constants.TENSORFLOW_OPSET.domain
            custom_ops[op] = (make_default_custom_op_handler(domain), [])
        if using_tf_opset:
            extra_opset.append(constants.TENSORFLOW_OPSET)

    if any(opset.domain == constants.CONTRIB_OPS_DOMAIN
           for opset in extra_opset):
        try:
            import tensorflow_text  # pylint: disable=import-outside-toplevel
        except ModuleNotFoundError:
            logger.warning(
                "tensorflow_text not installed. Model will fail to load if tensorflow_text ops are used."
            )

    # get the frozen tensorflow model from graphdef, checkpoint or saved_model.
    if args.graphdef:
        graph_def, inputs, outputs = tf_loader.from_graphdef(
            args.graphdef, args.inputs, args.outputs)
        model_path = args.graphdef
    if args.checkpoint:
        graph_def, inputs, outputs = tf_loader.from_checkpoint(
            args.checkpoint, args.inputs, args.outputs)
        model_path = args.checkpoint
    if args.saved_model:
        graph_def, inputs, outputs, initialized_tables = tf_loader.from_saved_model(
            args.saved_model,
            args.inputs,
            args.outputs,
            args.tag,
            args.signature_def,
            args.concrete_function,
            args.large_model,
            return_initialized_tables=True)
        model_path = args.saved_model
    if args.keras:
        graph_def, inputs, outputs = tf_loader.from_keras(
            args.keras, args.inputs, args.outputs)
        model_path = args.keras
    if args.tflite:
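        # tflite models are read directly from the flatbuffer via tflite_path below,
        # so no TensorFlow GraphDef is loaded here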
        graph_def = None
        inputs = None
        outputs = None
        tflite_path = args.tflite
        model_path = tflite_path

    if args.verbose:
        logger.info("inputs: %s", inputs)
        logger.info("outputs: %s", outputs)

    tf_graph = None
    const_node_values = None
    if graph_def is not None:
        with tf.Graph().as_default() as tf_graph:
            const_node_values = None
            if args.large_model:
                const_node_values = compress_graph_def(graph_def)
            if args.output_frozen_graph:
                utils.save_protobuf(args.output_frozen_graph, graph_def)
            tf.import_graph_def(graph_def, name='')

    with tf_loader.tf_session(graph=tf_graph):
        g = process_tf_graph(tf_graph,
                             continue_on_error=args.continue_on_error,
                             target=args.target,
                             opset=args.opset,
                             custom_op_handlers=custom_ops,
                             extra_opset=extra_opset,
                             shape_override=args.shape_override,
                             input_names=inputs,
                             output_names=outputs,
                             inputs_as_nchw=args.inputs_as_nchw,
                             ignore_default=args.ignore_default,
                             use_default=args.use_default,
                             const_node_values=const_node_values,
                             initialized_tables=initialized_tables,
                             tflite_path=tflite_path,
                             dequantize=args.dequantize)

    onnx_graph = optimizer.optimize_graph(g)

    tensor_storage = ExternalTensorStorage() if args.large_model else None
    model_proto = onnx_graph.make_model("converted from {}".format(model_path),
                                        external_tensor_storage=tensor_storage)

    # write onnx graph
    logger.info("")
    logger.info("Successfully converted TensorFlow model %s to ONNX",
                model_path)
    if args.output:
        if args.large_model:
            utils.save_onnx_zip(args.output, model_proto, tensor_storage)
            logger.info(
                "Zipped ONNX model is saved at %s. Unzip before opening in onnxruntime.",
                args.output)
        else:
            utils.save_protobuf(args.output, model_proto)
            logger.info("ONNX model is saved at %s", args.output)
    else:
        logger.info(
            "To export ONNX model to file, please run with `--output` option")