def add_const_proto_explicit(self,
                                 name,
                                 value,
                                 np_dtype=None,
                                 tf_dtype=None,
                                 onnx_dtype=None):
        dtype_mask = [
            1 if val else 0 for val in [np_dtype, tf_dtype, onnx_dtype]
        ]
        num_type_set = sum(dtype_mask)
        assert num_type_set == 1, "One and only one type must be set, but {} were set.".format(
            num_type_set)

        if np_dtype:
            onnx_dtype = mapping.NP_TYPE_TO_TENSOR_TYPE[np_dtype]
        if tf_dtype:
            onnx_dtype = data_type.tf2onnx(tf_dtype)

        const_dim = len(value.shape)

        if const_dim == 0:
            raw_values = [value.tolist()]
            values = [value]
        else:
            raw_values = value.flatten().tolist()
            values = value

        shape = np.array(values).shape
        const_proto = make_tensor(name=name,
                                  data_type=onnx_dtype,
                                  dims=shape,
                                  vals=raw_values)
        self._consts_proto.append(const_proto)
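
The rank-0 versus rank-n handling above reduces to the following standalone sketch (the constant name is illustrative; only numpy and onnx are assumed):

import numpy as np
from onnx import mapping
from onnx.helper import make_tensor

value = np.asarray(3.0, np.float32)  # a rank-0 constant
# Rank-0 arrays need their single value wrapped in a list; higher ranks are flattened.
raw_values = [value.tolist()] if value.ndim == 0 else value.flatten().tolist()
const_proto = make_tensor(name="const_a",
                          data_type=mapping.NP_TYPE_TO_TENSOR_TYPE[value.dtype],
                          dims=value.shape,
                          vals=raw_values)
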
    @staticmethod
    def _make_major_transpose_nodes(inputs, scope, node_dict, prev_node, post):
        """Make major transpose nodes if the input is batch major.

        Args:
          inputs: Input names.
          scope: Name scope.
          node_dict: Node dict.
          prev_node: Previous node.
          post: Whether this is a post transpose.

        Returns:
          Perm node.
          Transpose node.
        """
        input_shape = node_dict[inputs[0]].attr["_output_shapes"][0]
        input_rank = len(input_shape)

        perm_node = TensorflowNode(
            op_type="Const",
            name="/".join([scope, "transpose", "perm",
                           get_unique_suffix()]),
            attr={
                "value": np.asarray([1, 0] + list(range(input_rank))[2:],
                                    np.int32),
                "dtype": data_type.tf2onnx(tf.int32),
                "_output_shapes": [input_rank]
            })

        if post:
            input_shape = [input_shape[i] for i in perm_node.attr["value"]]
            prev_node.attr["_output_shapes"] = [input_shape]

        trans_node = TensorflowNode(
            op_type="Transpose",
            name="/".join([scope, "transpose",
                           get_unique_suffix()]),
            inputs=[inputs[0] if not post else prev_node.name, perm_node.name],
            attr={
                "dtype":
                data_type.tf2onnx(node_dict[inputs[0]].attr["T"]),
                "_output_shapes":
                [[input_shape[i] for i in perm_node.attr["value"]]]
            })
        return [perm_node, trans_node]
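
A quick worked example of the permutation built above: [1, 0] + range(rank)[2:] swaps the first two axes (batch and time) and leaves the rest alone.

input_rank = 4
perm = [1, 0] + list(range(input_rank))[2:]   # -> [1, 0, 2, 3]
shape = [8, 16, 32, 64]
print([shape[i] for i in perm])               # -> [16, 8, 32, 64]
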
    @classmethod
    def process_kernel_and_bias(cls, nodes, cell_dict, node_dict):
        new_kernel = None
        new_bias = None
        scopes = cell_dict["kernel"].split("/")
        scope = "/".join(scopes[:scopes.index("kernel")])
        for key, value in [("kernel", node_dict[cell_dict["kernel"][0]]),
                           ("bias", node_dict[cell_dict["bias"][0]])]:
            output_shape = node_dict[value.name].attr["_output_shapes"][0]
            if key == "kernel":
                hidden_size = output_shape[1]
                input_size = output_shape[0] - hidden_size
                transposed_shape = output_shape[::-1]
                transpose_node = TensorflowNode(
                    op_type="Transpose",
                    name="/".join(
                        [scope, key, "transpose_" + get_unique_suffix()]),
                    inputs=[value.name, None],
                    attr={"_output_shapes": [transposed_shape]})

                split_const_node = TensorflowNode(
                    op_type="Const",
                    name="/".join(
                        [scope, key, "split_const_" + get_unique_suffix()]),
                    attr={
                        "value": np.asarray([input_size, hidden_size],
                                            np.int32),
                        "dtype": data_type.tf2onnx(tf.int32),
                        "_output_shapes": [[1]]
                    })

                split_node = TensorflowNode(
                    op_type="SplitV",
                    name="/".join([scope, key,
                                   "split_" + get_unique_suffix()]),
                    inputs=transpose_node.outputs + split_const_node.outputs +
                    [CONST_ONE_INT32],
                    attr={
                        "num_split":
                        2,
                        "_output_shapes": [[hidden_size, input_size],
                                           [hidden_size, hidden_size]]
                    })

                nodes.extend([transpose_node, split_const_node, split_node])
                new_kernel = split_node.outputs
            else:
                new_bias = [value.name]
        return new_kernel + new_bias
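
What the Transpose + SplitV pair above computes, as a standalone numpy sketch: TF fuses the input and recurrent weights into one kernel of shape [input_size + hidden_size, hidden_size], which gets transposed and split back apart (sizes here are illustrative).

import numpy as np

input_size, hidden_size = 5, 3
kernel = np.zeros([input_size + hidden_size, hidden_size], np.float32)
w, r = np.split(kernel.T, [input_size], axis=1)
print(w.shape, r.shape)   # -> (3, 5) (3, 3), matching the SplitV output shapes
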
Example #4
    def test(self):
        _model = onnx.load(self.model_path)
        print("Total node count in model: ", len(_model.graph.node))

        # The input tensors could be provided as constants.
        # The example below illustrates how such a dictionary could be
        # provided for models with unknown input shapes. Since
        # MNIST has a known input shape, we don't provide input tensors.
        # input_tensors = {'Input3': tf.constant(0, dtype = tf.float32,
        #                    name='Input3',
        #                    shape=[1, 1, 28, 28])}
        input_tensors = {}
        tensor_dict = otf.prepare(_model,
                                  gen_tensor_dict=True,
                                  input_tensor_dict=input_tensors).tensor_dict
        more_outputs = []
        output_to_check = []
        for node in _model.graph.node:
            # add the first output of each node to the model output
            output_tensor = None
            for i in range(len(_model.graph.value_info)):
                if _model.graph.value_info[i].name == node.output[0]:
                    output_tensor = _model.graph.value_info[i]

            for i in range(len(_model.graph.initializer)):
                if _model.graph.initializer[i].name == node.output[0]:
                    output_tensor = _model.graph.initializer[i]

            # assume the first output is a tensor
            tensor = tensor_dict[node.output[0]]
            output_tensor = helper.make_tensor_value_info(
                node.output[0], data_type.tf2onnx(tensor.dtype),
                tensor.shape) if output_tensor is None else output_tensor
            more_outputs.append(output_tensor)
            output_to_check.append(node.output[0])
        _model.graph.output.extend(more_outputs)

        tf_rep = otf.prepare(_model)
        rt_rep = ort.prepare(_model)

        # prepare input data
        mnist = tf.keras.datasets.mnist
        (x_train, y_train), (x_test, y_test) = mnist.load_data()
        x_train, x_test = x_train / 255.0, x_test / 255.0
        sample = x_test[:1].reshape(1, 1, 28, 28).astype(np.float32)

        inputs = [sample]
        my_out = tf_rep.run(inputs)
        rt_out = rt_rep.run(inputs)

        for op in output_to_check:
            for i in range(len(my_out)):
                # find the index of output in the list
                if my_out[op] is my_out[i]:

                    try:
                        np.savetxt(op.replace("/", "__") + ".rt",
                                   rt_out[i].flatten(),
                                   delimiter='\t')
                        np.savetxt(op.replace("/", "__") + ".tf",
                                   my_out[i].flatten(),
                                   delimiter='\t')
                        np.testing.assert_allclose(my_out[i],
                                                   rt_out[i],
                                                   rtol=1e-2)
                        print(
                            op,
                            "results of this layer are correct within tolerence."
                        )
                    except Exception as e:
                        np.set_printoptions(threshold=np.inf)
                        mismatch_percent = (find_between(
                            str(e), "(mismatch", "%)"))
                        print(
                            op, "mismatch with percentage {} %".format(
                                mismatch_percent))
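
The core trick of this test is extending the graph outputs with every node's first output so the two backends can be compared layer by layer; a minimal standalone sketch (the model path is illustrative):

import onnx
from onnx import TensorProto, helper

model = onnx.load("model.onnx")  # illustrative path
first = model.graph.node[0].output[0]
model.graph.output.extend(
    [helper.make_tensor_value_info(first, TensorProto.FLOAT, None)])
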
Example #5
from tensorflow.python.framework.tensor_util import MakeNdarray

from onnx_tf.common import data_type
from onnx_tf.common import get_tf_shape_as_list  # assumed home of this helper

# Keyed by old attribute names.
__tf_attr_translator = {
    "_output_shapes":
    lambda x: list(
        map(lambda shape: get_tf_shape_as_list(shape.dim), x.list.shape)),
    "shape":
    lambda x: get_tf_shape_as_list(x.shape.dim),
    "T":
    lambda x: data_type.tf2onnx(x.type),
    "dtype":
    lambda x: data_type.tf2onnx(x.type),
    "value":
    lambda x: MakeNdarray(x.tensor),
    "seed2":
    lambda x: float(x.i),
    "seed":
    lambda x: float(x.i),
    "keep_dims":
    lambda x: int(x.b),
    "squeeze_dims":
    lambda x: list(x.list.i),
}
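
A table like this is typically consumed through a small dispatch helper that falls back to the identity for keys without a translator (the helper name here is illustrative):

def translate_tf_attr(key, attr_value):
    # Unknown attribute keys pass through unchanged.
    return __tf_attr_translator.get(key, lambda x: x)(attr_value)
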

__onnx_attr_translator = {
    "axis": lambda x: int(x),
    "axes": lambda x: [int(a) for a in x],
    "dtype": lambda x: data_type.onnx2tf(x),

    @classmethod
    def process_kernel_and_bias(cls, nodes, cell_dict, node_dict):
        new_kernel = None
        new_bias = None
        scopes = cell_dict["kernel"][0].split("/")
        scope = "/".join(scopes[:scopes.index("kernel")])
        for key, value in [[
                "kernel",
            [node_dict[kernel] for kernel in cell_dict["kernel"]]
        ], ["bias", [node_dict[bias] for bias in cell_dict["bias"]]]]:
            gate_output_shape = node_dict[
                value[0].name].attr["_output_shapes"][0]
            candidate_output_shape = node_dict[
                value[1].name].attr["_output_shapes"][0]
            last_idx = len(gate_output_shape) - 1
            concat_output_shapes = [
                g if i != last_idx else g + c for i, (g, c) in enumerate(
                    zip(gate_output_shape, candidate_output_shape))
            ]
            concat_node = TensorflowNode(
                op_type="ConcatV2",
                name="/".join([scope, key, "concat_" + get_unique_suffix()]),
                inputs=[value[0].name, value[1].name, CONST_MINUS_ONE_INT32],
                attr={"_output_shapes": [concat_output_shapes]})
            nodes.append(concat_node)

            if key == "kernel":
                hidden_size = gate_output_shape[1] // 2
                input_size = gate_output_shape[0] - hidden_size
                transposed_shape = concat_output_shapes[::-1]
                transpose_node = TensorflowNode(
                    op_type="Transpose",
                    name="/".join(
                        [scope, key, "transpose_" + get_unique_suffix()]),
                    inputs=concat_node.outputs + [None],
                    attr={"_output_shapes": [transposed_shape]})

                split_const_node = TensorflowNode(
                    op_type="Const",
                    name="/".join(
                        [scope, key, "split_const_" + get_unique_suffix()]),
                    attr={
                        "value": np.asarray([input_size, hidden_size],
                                            np.int32),
                        "dtype": data_type.tf2onnx(tf.int32),
                        "_output_shapes": [[1]]
                    })

                split_node = TensorflowNode(
                    op_type="Split",
                    name="/".join([scope, key,
                                   "split_" + get_unique_suffix()]),
                    inputs=[CONST_ZERO_INT32] + transpose_node.outputs,
                    attr={
                        "num_split":
                        3,
                        "_output_shapes":
                        [[int(transposed_shape[0] / 3), transposed_shape[1]]
                         for _ in range(3)]
                    })

                re_concat_node = TensorflowNode(
                    op_type="ConcatV2",
                    name="/".join(
                        [scope, key, "re_concat_" + get_unique_suffix()]),
                    inputs=[
                        split_node.outputs[1], split_node.outputs[0],
                        CONST_ZERO_INT32
                    ],
                    attr={
                        "_output_shapes": [[
                            int(transposed_shape[0] / 3 * 2),
                            transposed_shape[1]
                        ]]
                    })

                nodes.extend([
                    transpose_node, split_const_node, split_node,
                    re_concat_node
                ])
                new_kernel = re_concat_node.outputs + [split_node.outputs[2]]
            else:
                new_bias = concat_node.outputs

        return new_kernel + new_bias
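
The Split followed by the re-Concat above swaps TF's GRU gate order into the one ONNX expects (update before reset, with the candidate kept separate); as a numpy sketch with illustrative sizes:

import numpy as np

hidden_size, width = 3, 8
kernel_t = np.zeros([3 * hidden_size, width], np.float32)
r, z, h = np.split(kernel_t, 3, axis=0)   # TF order: reset, update, candidate
zr = np.concatenate([z, r], axis=0)       # ONNX GRU wants update before reset
print(zr.shape, h.shape)                  # -> (6, 8) (3, 8)
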
from tensorflow.python.framework.tensor_util import MakeNdarray

from onnx_tf.common import data_type
from onnx_tf.common import get_tf_shape_as_list  # assumed home of this helper

# Keyed by old attribute names.
__tf_attr_translator = {
    "_output_shapes":
    lambda x: list(
        map(lambda shape: get_tf_shape_as_list(shape.dim), x.list.shape)),
    "shape":
    lambda x: get_tf_shape_as_list(x.shape.dim),
    "T":
    lambda x: data_type.tf2onnx(list(x.list.type) or x.type),
    "dtype":
    lambda x: data_type.tf2onnx(list(x.list.type) or x.type),
    "value":
    lambda x: MakeNdarray(x.tensor),
    "seed2":
    lambda x: float(x.i),
    "seed":
    lambda x: float(x.i),
    "keep_dims":
    lambda x: int(x.b),
    "squeeze_dims":
    lambda x: list(x.list.i),
}
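
The only change from the earlier table is the list(x.list.type) or x.type fallback, which handles ops whose T attribute holds a list of types; an empty type list falls back to the scalar type field:

from tensorflow.core.framework import attr_value_pb2

attr = attr_value_pb2.AttrValue(type=1)    # DT_FLOAT in the scalar type field
print(list(attr.list.type) or attr.type)   # empty list is falsy -> 1
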

__onnx_attr_translator = {
    "axis": lambda x: int(x),
    "axes": lambda x: [int(a) for a in x],
    "dtype": lambda x: data_type.onnx2tf(x),