Example #1
    def test_tensor_data(self):
        tensors = {
            "empty_tensor": np.array([], dtype=np.float32),
            "multi_dim_empty_tensor": np.array([[], []], dtype=np.float32),
            "scalar": np.array(1., dtype=np.float32),
            "one_item_array": np.array([1.], dtype=np.float32),
            "normal_array": np.array([[1., 2.], [2., 3.]], dtype=np.float32)
        }
        tf.reset_default_graph()
        with tf.Session() as sess:
            for n, data in tensors.items():
                tf.constant(data, dtype=tf.float32, name=n)

        for tf_node in sess.graph.get_operations():
            name = tf_node.name
            self.assertTrue(name in tensors.keys())

            self.assertTrue("value" in tf_node.node_def.attr)
            # convert to onnx tensor value
            tensor_value = utils.tf_to_onnx_tensor(
                utils.get_tf_node_attr(tf_node, "value"),
                name=utils.port_name(tf_node.name))
            attr = helper.make_attribute("value", tensor_value)
            # same as node.get_tensor_value(is_list=False)
            actual = numpy_helper.to_array(helper.get_attribute_value(attr))

            expected = tensors[name]

            self.assertTrue(np.array_equal(expected, actual))
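The test above references several modules without showing its imports; a minimal, hedged reconstruction of the surrounding setup (module paths inferred from the identifiers used, not taken from the listing) would be:

import numpy as np
import tensorflow as tf              # TF 1.x API: tf.Session, tf.reset_default_graph
from onnx import helper, numpy_helper
from tf2onnx import utils            # assumed home of tf_to_onnx_tensor, get_tf_node_attr, port_name

Note that the verification loop runs after the with-block has exited; this still works because the default graph and its operations outlive the closed session.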
Example #2
def tensorflow_to_onnx(graph):
    """
    Load a tensorflow graph into an onnx graph with minimal rewrites so
    we can use the onnx graph as an intermediate graph.
    """

    # ignore the following attributes
    ignored_attr = ["unknown_rank", "_class", "Tidx", "Tshape", "use_cudnn_on_gpu", "Index",
                    "Tpaddings", "TI", "Tparams", "Tindices", "Tlen", "Tdim", "dynamic_size", "element_shape",
                    "Tmultiples", "output_dtype", "Tblock_shape", "Tcrops", "index_type"]
    # some stats
    op_cnt = collections.Counter()
    attr_cnt = collections.Counter()
    onnx_nodes = []
    output_shapes = {}
    dtypes = {}

    # get all operations in the graph
    ops = graph.get_operations()

    # create dict mapping output names to shapes
    for node in ops:
        for out in node.outputs:
            try:
                shape = out.get_shape().as_list()
            except Exception as ex:
                shape = []
            output_shapes[out.name] = shape

    # minimal conversion of attributes
    for node in ops:
        attr = {}
        takeit = True
        op_cnt[node.type] += 1
        for a in node.node_def.attr:
            attr_cnt[a] += 1
            if a == "dtype":
                attr[a] = utils.get_tf_dtype(node)
            elif a == "T":
                dtype = node.get_attr("T")
                if dtype:
                    if not isinstance(dtype, list):
                        dtypes[node.name] = utils.TF_TO_ONNX_DTYPE.get(dtype)
            elif a == "output_type":
                out_type = node.get_attr("output_type")
                out_type = utils.TF_TO_ONNX_DTYPE[out_type]
                attr[a] = out_type
            elif a == "out_type":
                out_type = node.get_attr("out_type")
                out_type = utils.TF_TO_ONNX_DTYPE[out_type]
                attr[a] = out_type
            elif a == "shape":
                attr[a] = utils.get_shape(node)
            elif a == "Tperm":
                pass
            elif a == "_output_shapes":
                attr[a] = utils.get_shape(node)
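            # constants: convert the TF tensor proto into an ONNX TensorProto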
            elif a == "value":
                onnx_tensor = utils.tf_to_onnx_tensor(node.get_attr(a), name=node.name + ":0")
                attr[a] = onnx_tensor
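            # tf Cast stores its target type in DstT; ONNX Cast expects a "to"
            # attribute, so map the dtype to its ONNX name and rename it here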
            elif a == "DstT":
                dst = node.get_attr("DstT")
                dst = tf2onnx.utils.TF_TO_ONNX_DTYPE[dst]
                dst = tf2onnx.utils.ONNX_DTYPE_NAMES[dst]
                attr["to"] = dst
            elif a == "SrcT":
                continue
            elif a in ignored_attr:
                continue
            else:
                attr[a] = node.get_attr(a)

        if takeit:
            try:
                input_names = [i.name for i in node.inputs]
                output_names = [i.name for i in node.outputs]
                onnx_node = helper.make_node(node.type, input_names, output_names, name=node.name, **attr)
                onnx_nodes.append(onnx_node)
            except Exception as ex:
                log.error("pass1 convert failed for %s, ex=%s", node, ex)
                raise

    return onnx_nodes, op_cnt, attr_cnt, output_shapes, dtypes
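A short usage sketch for this pass, assuming a TF 1.x graph built in the default graph; the toy graph, the prints, and the import are illustrative assumptions, not part of the source:

import tensorflow as tf

# build a trivial graph: one placeholder, one constant, one Add
tf.reset_default_graph()
x = tf.placeholder(tf.float32, shape=[None, 2], name="input")
y = tf.add(x, tf.constant([[1.0, 2.0]], dtype=tf.float32), name="output")

with tf.Session() as sess:
    onnx_nodes, op_cnt, attr_cnt, output_shapes, dtypes = tensorflow_to_onnx(sess.graph)

print(op_cnt)           # e.g. Counter({'Placeholder': 1, 'Const': 1, 'Add': 1})
print(output_shapes)    # output names such as "input:0" mapped to their shapes

The returned onnx_nodes are only a first pass over the graph; the shape, dtype, and counter dictionaries come back separately so that later rewrite passes can patch the intermediate graph.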
Example #3
def tflist_to_onnx(node_list, shape_override):
    """
    Convert the tf-node list into an onnx graph with minimal rewrites so
    we can use the onnx graph as an intermediate graph.
    """

    # ignore the following attributes
    ignored_attr = [
        "unknown_rank", "_class", "Tshape", "use_cudnn_on_gpu", "Index",
        "Tpaddings", "TI", "Tparams", "Tindices", "Tlen", "Tdim",
        "dynamic_size", "Tmultiples", "Tblock_shape", "Tcrops", "index_type",
        "Taxis", "U", "maxval", "Tout", "Tlabels", "Tindex", "element_shape"
    ]
    # some stats
    op_cnt = collections.Counter()
    attr_cnt = collections.Counter()
    onnx_nodes = []
    output_shapes = {}
    dtypes = {}

    # operations to convert
    ops = node_list

    # create dicts mapping output names to shapes and dtypes
    for node in ops:
        for out in node.outputs:
            shape = shape_override.get(out.name)
            if shape is None:
                try:
                    shape = out.get_shape().as_list()
                except Exception as ex:
                    shape = None
            dtypes[out.name] = utils.map_tf_dtype(out.dtype)
            output_shapes[out.name] = shape

    # minimal conversion of attributes
    for node in ops:
        attr = {}
        takeit = True
        op_cnt[node.type] += 1
        for a in node.node_def.attr:
            attr_cnt[a] += 1
            if a == "dtype":
                attr[a] = utils.map_tf_dtype(
                    utils.get_tf_node_attr(node, "dtype"))
            elif a == "T":
                dtype = utils.get_tf_node_attr(node, "T")
                if dtype:
                    if not isinstance(dtype, list):
                        dtypes[node.name] = utils.map_tf_dtype(dtype)
            elif a in [
                    "output_type", "output_dtype", "out_type", "Tidx",
                    "out_idx"
            ]:
                # Tidx is used by Range
                # out_idx is used by ListDiff
                attr[a] = utils.map_tf_dtype(utils.get_tf_node_attr(node, a))
            elif a == "shape":
                attr[a] = utils.get_shape(node)
            elif a == "Tperm":
                pass
            elif a == "_output_shapes":
                attr[a] = utils.get_shape(node)
            elif a == "value":
                onnx_tensor = utils.tf_to_onnx_tensor(
                    utils.get_tf_node_attr(node, a), name=port_name(node.name))
                attr[a] = onnx_tensor
            elif a == "DstT":
                attr["to"] = utils.map_tf_dtype(
                    utils.get_tf_node_attr(node, "DstT"))
            elif a == "SrcT":
                continue
            elif a in ignored_attr:
                continue
            else:
                attr[a] = utils.get_tf_node_attr(node, a)

        if takeit:
            try:
                input_names = [i.name for i in node.inputs]
                output_names = [i.name for i in node.outputs]
                onnx_node = helper.make_node(node.type,
                                             input_names,
                                             output_names,
                                             name=node.name,
                                             **attr)
                onnx_nodes.append(onnx_node)
            except Exception as ex:
                log.error("pass1 convert failed for %s, ex=%s", node, ex)
                raise

    return onnx_nodes, op_cnt, attr_cnt, output_shapes, dtypes
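Usage is analogous to Example #2, except the caller passes the operation list itself plus a shape-override map; a minimal sketch under the same TF 1.x assumption (the graph and the override values are illustrative):

import tensorflow as tf

tf.reset_default_graph()
x = tf.placeholder(tf.float32, shape=[2, 3], name="x")
y = tf.identity(x, name="y")

with tf.Session() as sess:
    ops = sess.graph.get_operations()

# an empty override means "use the shapes recorded in the graph";
# entries such as {"x:0": [2, 3]} would take precedence over them
onnx_nodes, op_cnt, attr_cnt, output_shapes, dtypes = tflist_to_onnx(ops, {})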