Example no. 1
0
    def __init__(self,
                 nodes,
                 output_shapes=None,
                 dtypes=None,
                 target=None,
                 opset=None,
                 extra_opset=None,
                 output_names=None):
        """Create Graph.

        Args:
            nodes: list of Node()
            output_shapes: dict of tensorflow output shapes
            dtypes: dict of tensorflow dtype
        """
        # Bookkeeping containers must exist before any Node() is created,
        # since nodes register themselves against this graph.
        self._nodes = []
        self._initializers = {}
        self._nodes_by_name = {}
        self.shapes = {}
        self._model_inputs = {}
        self._target = set(target) if target is not None else set()
        self._dtypes = dtypes

        # override tf original output type, only used in make_model.
        # for some ops such as TopK, the 2nd output must be int64 in onnx
        # but has type int32 in tf
        self._dtypes_override = {}

        self._output_shapes = output_shapes
        wrapped = [Node(raw_node, self) for raw_node in nodes]
        self.set_nodes(wrapped)
        self._opset = find_opset(opset)
        self._extra_opset = extra_opset
        self.output_names = output_names
Example no. 2
0
    def __init__(self,
                 nodes,
                 output_shapes=None,
                 dtypes=None,
                 target=None,
                 opset=None,
                 extra_opset=None,
                 output_names=None):
        """Create Graph.

        Args:
            nodes: list of Node()
            output_shapes: dict of tensorflow output shapes
            dtypes: dict of tensorflow dtype
        """
        # Initialize all lookup tables first; Node() construction below may
        # register entries into them.
        self._nodes = []
        self._initializers = {}
        self._nodes_by_name = {}
        self._output_to_node_name = {}
        self.shapes = {}
        self._model_inputs = {}
        self._target = set(target if target is not None else [])
        self._dtypes = dtypes
        self._output_shapes = output_shapes

        wrapped_nodes = [Node(raw, self) for raw in nodes]
        self.set_nodes(wrapped_nodes)

        self._opset = find_opset(opset)
        self._extra_opset = extra_opset
        self.output_names = output_names
Example no. 3
0
    def __init__(self, nodes, output_shapes=None, dtypes=None, target=None, opset=None, extra_opset=None,
                 output_names=None):
        """Create Graph.

        Wraps the raw nodes into Node() objects and appends an Identity node
        behind every graph output, so outputs keep their public names even if
        the producing node is renamed during conversion.

        Args:
            nodes: list of Node()
            output_shapes: dict of tensorflow output shapes
            dtypes: dict of tensorflow dtype
            target: list of platform workaround targets; stored as a set
            opset: requested onnx opset, resolved via find_opset()
            extra_opset: optional list of extra opsets; must be a list if given
            output_names: list of graph output names
        """
        if target is None:
            target = []
        self._nodes = []
        self._nodes_by_name = {}
        self._output_to_node_name = {}
        self.shapes = {}

        self._target = set(target)
        self._dtypes = dtypes

        self._output_shapes = output_shapes
        self._opset = find_opset(opset)

        if extra_opset is not None:
            utils.make_sure(isinstance(extra_opset, list), "invalid extra_opset")
        self._extra_opset = extra_opset

        self._order_sensitive_inputs = []
        self.outputs = output_names if output_names is not None else []

        # subgraph support: parent link and any nested graphs held by nodes.
        self.parent_graph = None
        self.contained_graphs = {}  # {node_name: {node_attribute_name: Graph}}

        ops = [Node(node, self) for node in nodes]
        self.reset_nodes(ops)

        # add identity node after each output, in case it is renamed during conversion.
        for o in self.outputs:
            n = self.get_node_by_output_in_current_graph(o)
            new_output_name = port_name(n.name + "_" + utils.make_name("raw_output_"))
            n_shapes = n.output_shapes
            n_dtypes = n.output_dtypes
            # detach any body graphs (e.g. Loop/If) before removing the node,
            # so they can be re-attached to the rebuilt node below.
            body_graphs = n.graph.contained_graphs.pop(n.name, None)
            self.remove_node(n.name)

            # rebuild the producer with the matching output renamed.
            new_outputs = [output if output != o else new_output_name for output in n.output]
            # domain should be passed to new node
            new_node = self.make_node(n.type, n.input, outputs=new_outputs, attr=n.attr, name=n.name,
                                      skip_conversion=n._skip_conversion, dtypes=n_dtypes, shapes=n_shapes,
                                      domain=n.domain)

            if body_graphs:
                for attr_name, body_graph in body_graphs.items():
                    body_graph.parent_graph = self
                    new_node.set_body_graph_as_attr(attr_name, body_graph)

            # rewire all consumers to the renamed output, then expose the
            # original output name again through an Identity node.
            self.replace_all_inputs(self.get_nodes(), o, new_output_name)
            self.make_node("Identity", [new_output_name], outputs=[o], op_name_scope=n.name + "_" + "graph_outputs")
            self.copy_shape(new_output_name, o)
            self.copy_dtype(new_output_name, o)
Example no. 4
0
    def __init__(self, nodes, output_shapes=None, dtypes=None, target=None, opset=None, extra_opset=None,
                 output_names=None):
        """Create Graph.

        Wraps the raw nodes into Node() objects; for every node output that is
        also a graph output, the output is renamed in place and an Identity
        node is appended that restores the public output name.

        Args:
            nodes: list of Node()
            output_shapes: dict of tensorflow output shapes
            dtypes: dict of tensorflow dtype
            target: list of platform workaround targets; stored as a set
            opset: requested onnx opset, resolved via find_opset()
            extra_opset: optional list of extra opsets
            output_names: list of graph output names
        """
        if target is None:
            target = []
        self._nodes = []
        self._initializers = {}
        self._nodes_by_name = {}
        self._output_to_node_name = {}
        self.shapes = {}

        self._target = set(target)
        self._dtypes = dtypes

        self._output_shapes = output_shapes
        self._opset = find_opset(opset)
        self._extra_opset = extra_opset

        self.inputs = []
        self.outputs = output_names

        # subgraph support: parent link and any nested graphs held by nodes.
        self.parent_graph = None
        self.contained_graphs = {}  # {node_name: {node_attribute_name: Graph}}

        ops = [Node(node, self) for node in nodes]

        # add identity node after each output, in case it is renamed during conversion.
        if self.outputs:
            to_append = []
            for n in ops:
                raw_outputs = n.output
                # base name is created lazily, only for nodes that actually
                # feed a graph output; index_out numbers that node's renamed
                # output ports.
                new_output_base_name = None
                index_out = 0
                for i, o in enumerate(raw_outputs):
                    if o in output_names:
                        if not new_output_base_name:
                            new_output_base_name = utils.make_name("raw_output_")
                        new_out = port_name(new_output_base_name, index_out)
                        # rewire all consumers, then rename the producer's port.
                        self.replace_all_inputs(ops, o, new_out)
                        n.output[i] = new_out
                        index_out += 1
                        # Identity restores the public output name.
                        new_output_node = self.make_node("Identity", [new_out], outputs=[o])
                        to_append.append(new_output_node)

                        # carry shape/dtype over to the renamed output.
                        self.copy_shape(o, new_out)
                        self.set_dtype(new_out, self.get_dtype(o))

                self.set_node_by_name(n)
            ops.extend(to_append)

        self.set_nodes(ops)
Example no. 5
0
def process_tf_graph(tf_graph,
                     continue_on_error=False,
                     verbose=False,
                     target=None,
                     opset=None,
                     custom_op_handlers=None,
                     custom_rewriter=None,
                     extra_opset=None,
                     shape_override=None,
                     inputs_as_nchw=None,
                     input_names=None,
                     output_names=None):
    """Convert tensorflow graph to onnx graph.
        Args:
            tf_graph: tensorflow graph
            continue_on_error: if an op can't be processed (aka there is no mapping), continue
            verbose: print summary stats
            target: list of workarounds applied to help certain platforms
            opset: the opset to be used (int, default is latest)
            custom_op_handlers: dictionary of custom ops handlers
            custom_rewriter: list of custom graph rewriters
            extra_opset: list of extra opset's, for example the opset's used by custom ops
            shape_override: dict with inputs that override the shapes given by tensorflow
            inputs_as_nchw: transpose inputs in list from nhwc to nchw
            input_names: list of input node names in graph, input name format as node_name:port_id
            output_names: list of output node names in graph, output name format as node_name:port_id
        Return:
            onnx graph
    """
    opset = utils.find_opset(opset)
    print("using tensorflow={}, onnx={}, opset={}, tfonnx={}/{}".format(
        tf.__version__, utils.get_onnx_version(), opset, tf2onnx.__version__,
        tf2onnx.version.git_version[:6]))

    # warn (not fail) if the installed onnx can't represent the requested opset.
    if opset > schemas.get_max_supported_opset_version():
        log.warning(
            "currently installed onnx package %s is too low to support opset %s, "
            "please upgrade onnx package to avoid potential conversion issue.",
            utils.get_onnx_version(), opset)

    if shape_override is None:
        shape_override = {}
    if inputs_as_nchw is None:
        inputs_as_nchw = []
    if target is None:
        target = constants.DEFAULT_TARGET

    # first pass: translate the tf graph_def into onnx-style nodes plus
    # shape/dtype maps keyed by tensor name.
    onnx_nodes, op_cnt, attr_cnt, output_shapes, dtypes = tensorflow_to_onnx(
        tf_graph, shape_override)

    io_to_check = []
    if input_names:
        io_to_check.extend(input_names)
    if output_names:
        io_to_check.extend(output_names)

    if io_to_check:
        # check output existence in case user passed in wrong output ids
        non_exists = set(io_to_check) - set(output_shapes.keys())
        if non_exists:
            log.error(
                "\nFailed to convert: inputs/outputs specified do not exist, make sure your passed"
                "in format: input/output_node_name:port_id. Problematical inputs/outputs are: %s \n",
                non_exists)
            raise ValueError("Inputs/Outputs Not Found")

    g = Graph(onnx_nodes, output_shapes, dtypes, target, opset, extra_opset,
              output_names)

    # create ops mapping for the desired opsets
    ops_mapping = handler.tf_op.create_mapping(g.opset, g.extra_opset)

    # apply custom ops on top of the assembled opset. We can either complement the opset
    # or override existing ops with a custom op.
    if custom_op_handlers is not None:
        # below is a bit tricky since there are a few api's:
        # 1. the future way we want custom ops to be registered with the @tf_op decorator. Those handlers will be
        #     registered via the decorator on load of the module ... nothing is required here.
        # 2. the old custom op api: a dictionary of {name: (func, args[])
        #     We deal with this by using a compat_handler that wraps the old handler with a new style handler.
        #     This is temporary to give people time to move to the new api and after tf2onnx-1.5 we want to remove this
        custom_opset = {}
        for k, v in custom_op_handlers.items():
            # FIXME: remove this after tf2onnx-1.5
            def compat_handler(ctx, node, **kwargs):
                # wrap old handler
                name = node.name
                args = kwargs["args"]
                func = kwargs["func"]
                return func(ctx, node, name, args)

            args = v[1]
            kwargs = {"func": v[0]}
            if args:
                onnx_op = args[0]
                kwargs["onnx_op"] = onnx_op
                args = args[1:]
            kwargs["args"] = args
            new_handler = handler.tf_op(
                k, domain=constants.TENSORFLOW_OPSET.domain, kwargs=kwargs)
            new_handler.register_compat_handler(compat_handler, 1)
            custom_opset[k] = (compat_handler, kwargs)
        ops_mapping.update(custom_opset)

    infer_shape_for_graph(g)

    if inputs_as_nchw:
        transpose_inputs(g, inputs_as_nchw)

    # pre-processing graph rewrites
    # bi-directional re-writer should be placed after single directional re-writer
    rewriters = [
        rewrite_transpose, rewrite_flatten, rewrite_random_uniform,
        rewrite_random_uniform_fold_const, rewrite_random_normal,
        rewrite_dropout, rewrite_leakyrelu, rewrite_conv2d_with_pad,
        rewrite_single_direction_lstm, rewrite_bi_direction_lstm,
        rewrite_single_direction_gru, rewrite_bi_direction_gru,
        rewrite_custom_rnn_cell, rewrite_generic_loop, rewrite_cond
    ]

    if custom_rewriter is not None:
        rewriters.extend(custom_rewriter)

    run_rewriters(g, rewriters, continue_on_error)

    # some nodes may already copied into inner Graph, so remove them from main Graph.
    g.delete_unused_nodes(output_names)
    topological_sort(g, continue_on_error)

    # core step: map every tf node to its onnx equivalent via ops_mapping.
    mapped_op, unmapped_op = tensorflow_onnx_mapping(g, continue_on_error,
                                                     ops_mapping)

    # post-processing rewriters
    late_rewriters = []
    if constants.TARGET_RS5 in target:
        late_rewriters.append(rewrite_incomplete_type_support_rs5)
    if constants.TARGET_RS6 in target:
        late_rewriters.append(rewrite_incomplete_type_support_rs6)
    if late_rewriters:
        run_rewriters(g, late_rewriters, continue_on_error)

    # onnx requires topological sorting
    topological_sort(g, continue_on_error)

    g.update_proto()

    if verbose:
        print("tensorflow ops: {}".format(op_cnt))
        print("tensorflow attr: {}".format(attr_cnt))
        print("onnx mapped: {}".format(mapped_op))
        print("onnx unmapped: {}".format(unmapped_op))

    return g
Example no. 6
0
    def __init__(self,
                 nodes,
                 output_shapes=None,
                 dtypes=None,
                 target=None,
                 opset=None,
                 extra_opset=None,
                 output_names=None):
        """Create Graph.

        Wraps the raw nodes into Node() objects and appends an Identity node
        behind every graph output, so outputs keep their public names even if
        the producing node is renamed during conversion. Nodes that feed more
        than one graph output are left untouched (see TODO below).

        Args:
            nodes: list of Node()
            output_shapes: dict of tensorflow output shapes
            dtypes: dict of tensorflow dtype
            target: list of platform workaround targets; stored as a set
            opset: requested onnx opset, resolved via find_opset()
            extra_opset: optional list of extra opsets
            output_names: list of graph output names
        """
        if target is None:
            target = []
        self._nodes = []
        self._nodes_by_name = {}
        self._output_to_node_name = {}
        self.shapes = {}

        self._target = set(target)
        self._dtypes = dtypes

        self._output_shapes = output_shapes
        self._opset = find_opset(opset)
        self._extra_opset = extra_opset

        self._order_sensitive_inputs = []
        self.outputs = output_names if output_names is not None else []

        # subgraph support: parent link and any nested graphs held by nodes.
        self.parent_graph = None
        self.contained_graphs = {}  # {node_name: {node_attribute_name: Graph}}

        ops = [Node(node, self) for node in nodes]
        self.reset_nodes(ops)

        # add identity node after each output, in case it is renamed during conversion.
        # First collect nodes that produce more than one graph output; the
        # rename below cannot handle those, so they are kept intact.
        nodes_seen = set()
        multi_output_nodes = set()
        for o in self.outputs:
            n = self.get_node_by_output_in_current_graph(o)
            if n in nodes_seen:
                multi_output_nodes.add(n)
            else:
                nodes_seen.add(n)

        for o in self.outputs:
            n = self.get_node_by_output_in_current_graph(o)
            # TODO: below doesn't work for nodes with multiple outputs. A work around, keep those intact.
            if n in multi_output_nodes:
                continue
            new_output_name = port_name(n.name + "_" +
                                        utils.make_name("raw_output_"))
            n_shapes = n.output_shapes
            n_dtypes = n.output_dtypes
            # detach any body graphs (e.g. Loop/If) before removing the node,
            # so they can be re-attached to the rebuilt node below.
            body_graphs = n.graph.contained_graphs.pop(n.name, None)
            self.remove_node(n.name)

            # BUG FIX: this comprehension previously read
            # "o if o != output else new_output_name", which replaced every
            # non-matching output of the node with the graph output name `o`
            # (duplicating `o` and dropping the node's other outputs). Keep
            # each output as-is and rename only the one equal to `o`.
            new_outputs = [
                output if output != o else new_output_name for output in n.output
            ]
            new_node = self.make_node(n.type,
                                      n.input,
                                      outputs=new_outputs,
                                      attr=n.attr,
                                      name=n.name,
                                      skip_conversion=n._skip_conversion,
                                      dtypes=n_dtypes,
                                      shapes=n_shapes)

            if body_graphs:
                for attr_name, body_graph in body_graphs.items():
                    body_graph.parent_graph = self
                    new_node.set_body_graph_as_attr(attr_name, body_graph)

            # rewire all consumers to the renamed output, then expose the
            # original output name again through an Identity node.
            self.replace_all_inputs(self.get_nodes(), o, new_output_name)
            self.make_node("Identity", [new_output_name],
                           outputs=[o],
                           op_name_scope=n.name + "_" + "graph_outputs")
            self.copy_shape(new_output_name, o)
            self.copy_dtype(new_output_name, o)