def version_1(cls, node, **kwargs):
        blocksize = node.attr["block_size"]
        data_format = node.attr.get("data_format", "NHWC").decode()

        if data_format == "NHWC":
            transpose_unique_suffix = get_unique_suffix()
            space_to_depth_unique_suffix = get_unique_suffix()
            transpose_name = node.inputs[0] + "_T_" + transpose_unique_suffix
            space_to_depth_name = node.inputs[
                0] + "_T_STD_" + space_to_depth_unique_suffix
            before_transpose_node = cls.make_node_from_tf_node(
                node, [node.inputs[0]], [transpose_name],
                perm=get_perm_from_formats(data_format, "NCHW"),
                op_type="Transpose",
                name=transpose_name)
            space_to_depth_node = cls.make_node_from_tf_node(
                node, [transpose_name], [space_to_depth_name],
                blocksize=blocksize,
                name=space_to_depth_name)
            after_transpose_node = cls.make_node_from_tf_node(
                node, [space_to_depth_name],
                perm=get_perm_from_formats("NCHW", data_format),
                op_type="Transpose")
            return [
                before_transpose_node, space_to_depth_node,
                after_transpose_node
            ]

        return cls.make_node_from_tf_node(node, [node.inputs[0]],
                                          blocksize=blocksize)
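
A minimal sketch of the layout-permutation helper used above, assuming the onnx_tf.common implementation of get_perm_from_formats:

def get_perm_from_formats(from_format, to_format):
    # Position of each target axis label in the source layout,
    # e.g. ("NHWC", "NCHW") -> [0, 3, 1, 2].
    return [from_format.find(axis) for axis in to_format]

assert get_perm_from_formats("NHWC", "NCHW") == [0, 3, 1, 2]
assert get_perm_from_formats("NCHW", "NHWC") == [0, 2, 3, 1]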
Example #2
 def _common(cls, node, **kwargs):
     transpose_a = node.attr.get("transpose_a", False)
     transpose_b = node.attr.get("transpose_b", False)
     input_a = node.inputs[0]
     input_b = node.inputs[1]
     nodes = []
     if transpose_a:
         unique_suffix_a = get_unique_suffix()
         transposed_a = cls.make_node_from_tf_node(
             node, [node.inputs[0]],
             [node.inputs[0] + "_T_" + unique_suffix_a],
             op_type="Transpose",
             name=node.inputs[0] + "_T_" + unique_suffix_a)
         input_a = node.inputs[0] + "_T_" + unique_suffix_a
         nodes.append(transposed_a)
     if transpose_b:
         unique_suffix_b = get_unique_suffix()
         transposed_b = cls.make_node_from_tf_node(
             node, [node.inputs[1]],
             [node.inputs[1] + "_T_" + unique_suffix_b],
             op_type="Transpose",
             name=node.inputs[1] + "_T_" + unique_suffix_b)
         input_b = node.inputs[1] + "_T_" + unique_suffix_b
         nodes.append(transposed_b)
     nodes.append(cls.make_node_from_tf_node(node, [input_a, input_b]))
     return nodes
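
All of these handlers rely on get_unique_suffix to keep generated node names collision-free. A minimal sketch of such a helper (the onnx-tf version is uuid-based, roughly):

import uuid

def get_unique_suffix():
    # The first 8 characters of a random UUID are enough to
    # disambiguate generated node names within one graph.
    return str(uuid.uuid4())[:8]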
Example #3
        def validate_initializer_name(name):
            # Prepend a unique suffix if the leading character is "_"
            name = get_unique_suffix() + name if name[0] == "_" else name

            # Replace ":" with "_tf_" and append a unique suffix for
            # traceability
            return name.replace(":", "_tf_") + "_" + get_unique_suffix(
            ) if ":" in name else name
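
A quick illustration of the two renaming rules (output values are examples only, since the suffix is random):

validate_initializer_name("weights")         # -> "weights" (unchanged)
validate_initializer_name("_hidden")         # -> e.g. "3f2a9c1b_hidden"
validate_initializer_name("dense/kernel:0")  # -> e.g. "dense/kernel_tf_0_7d41e08a"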
Example #4
    def process_kernel_and_bias(cls, nodes, cell_dict, node_dict):
        new_kernel = None
        new_bias = None
        scopes = cell_dict["kernel"][0].split("/")
        scope = "/".join(scopes[:scopes.index("kernel")])
        for key, value in [("kernel", node_dict[cell_dict["kernel"][0]]),
                           ("bias", node_dict[cell_dict["bias"][0]])]:
            output_shape = node_dict[value.name].attr["_output_shapes"][0]
            if key == "kernel":
                hidden_size = output_shape[1]
                input_size = output_shape[0] - hidden_size
                transposed_shape = output_shape[::-1]
                transpose_node = TensorflowNode(
                    op_type="Transpose",
                    name="/".join(
                        [scope, key, "transpose_" + get_unique_suffix()]),
                    inputs=[value.name, None],
                    attr={"_output_shapes": [transposed_shape]})

                split_const_node = TensorflowNode(
                    op_type="Const",
                    name="/".join(
                        [scope, key, "split_const_" + get_unique_suffix()]),
                    attr={
                        "value": np.asarray([input_size, hidden_size],
                                            np.int32),
                        "dtype": data_type.tf2onnx(tf.int32),
                        "_output_shapes": [[1]]
                    })

                split_node = TensorflowNode(
                    op_type="SplitV",
                    name="/".join([scope, key,
                                   "split_" + get_unique_suffix()]),
                    inputs=transpose_node.outputs + split_const_node.outputs +
                    [CONST_ONE_INT32],
                    attr={
                        "num_split":
                        2,
                        "_output_shapes": [[hidden_size, input_size],
                                           [hidden_size, hidden_size]]
                    })

                nodes.extend([transpose_node, split_const_node, split_node])
                new_kernel = split_node.outputs
            else:
                new_bias = [value.name]
        return new_kernel + new_bias
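
The Transpose + SplitV pair above converts TF's fused RNN kernel, stored as [input_size + hidden_size, hidden_size], into the separate input and recurrent weights that ONNX expects. A minimal numpy check with hypothetical sizes:

import numpy as np

input_size, hidden_size = 3, 4
kernel = np.random.randn(input_size + hidden_size, hidden_size)

transposed = kernel.T                              # [hidden, input + hidden]
w, r = np.split(transposed, [input_size], axis=1)  # split sizes [input, hidden]
assert w.shape == (hidden_size, input_size)
assert r.shape == (hidden_size, hidden_size)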
Example #5
  def _conv(cls, node, d, **kwargs):
    auto_pad = node.attr["padding"].decode("UTF-8")
    auto_pad = "SAME_UPPER" if auto_pad == "SAME" else auto_pad
    data_format = node.attr["data_format"].decode("UTF-8")
    spatial_indices = [
        i for i in range(len(data_format)) if data_format[i] not in ["N", "C"]
    ]
    strides = list(map(lambda i: node.attr["strides"][i], spatial_indices))
    dilations = list(
        map(lambda i: node.attr.get("dilations", [1] * (d + 2))[i],
            spatial_indices))
    node_dict = kwargs["node_dict"]
    kernel_shape = node_dict[node.inputs[1]].attr["_output_shapes"][0][:d]
    output_shape = list(
        map(lambda i: node.attr["_output_shapes"][0][i], spatial_indices))
    input_shape = list(
        map(lambda i: node_dict[node.inputs[0]].attr["_output_shapes"][0][i],
            spatial_indices))
    pads = cls._cal_pads(auto_pad, len(spatial_indices), input_shape,
                         output_shape, strides, kernel_shape)
    unique_suffix = get_unique_suffix()
    transpose_node = helper.make_node(
        "Transpose", [node.inputs[1]], [node.inputs[1] + "_T_" + unique_suffix],
        perm=[d + 1, d] + list(range(d)))
    conv_node = helper.make_node(
        "Conv", [node.inputs[0], node.inputs[1] + "_T_" + unique_suffix],
        [node.name],
        pads=pads,
        kernel_shape=kernel_shape,
        strides=strides,
        dilations=dilations)

    return [transpose_node, conv_node]
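
The kernel permutation [d + 1, d] + list(range(d)) maps TF's spatial-first kernel layout to ONNX's. For a 2-D convolution:

d = 2
perm = [d + 1, d] + list(range(d))
assert perm == [3, 2, 0, 1]
# Applied to a TF kernel laid out [H, W, in_ch, out_ch], this permutation
# yields the ONNX Conv weight layout [out_ch, in_ch, H, W].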
Example #6
    def version_7(cls, node, **kwargs):
        data_format = node.attr.get("data_format", "NHWC")
        channel_first = chr(data_format[1]) == "C" if isinstance(
            data_format[1], int) else data_format[1] == "C"
        axis = 1 if channel_first else -1

        unsqueeze_suffix = get_unique_suffix()
        if axis == 1:
            # In this case, we manually unsqueeze the bias term
            # to facilitate broadcasting.
            num_sp_dim = len(data_format) - 2
            unsqueeze_axes = [i + 1 for i in range(num_sp_dim)]
            reshape_node = cls.make_node_from_tf_node(
                node, [node.inputs[1]],
                [node.inputs[1] + "_" + unsqueeze_suffix],
                axes=unsqueeze_axes,
                op_type="Unsqueeze",
                name=node.inputs[1] + unsqueeze_suffix)
            node_update_input = copy.deepcopy(node)
            node_update_input.inputs = [
                node.inputs[0], node.inputs[1] + "_" + unsqueeze_suffix
            ]
            return [
                reshape_node,
                cls.arithmetic_op(node_update_input, **kwargs)
            ]
        else:
            return cls.arithmetic_op(node, **kwargs)
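
For a channel-first input the bias is a rank-1 [C] tensor, so unsqueezing one axis per spatial dimension makes it broadcast along the channel axis. A minimal numpy check with hypothetical shapes:

import numpy as np

x = np.zeros((2, 3, 4, 5))          # [N, C, H, W]
bias = np.arange(3, dtype=x.dtype)  # [C]

# Unsqueeze at axes [1, 2] gives [C, 1, 1], which broadcasts
# against [N, C, H, W] along the channel axis.
assert (x + bias.reshape(3, 1, 1)).shape == (2, 3, 4, 5)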
Example #7
  def version_1(cls, node, **kwargs):
    begin_mask = node.attr.get("begin_mask", 0)
    end_mask = node.attr.get("end_mask", 0)
    ellipsis_mask = node.attr.get("ellipsis_mask", 0)
    new_axis_mask = node.attr.get("new_axis_mask", 0)
    shrink_axis_mask = node.attr.get("shrink_axis_mask", 0)

    only_support = (int(begin_mask) == 0 and int(end_mask) == 0 and
                    int(ellipsis_mask) == 0 and int(new_axis_mask) == 0)
    assert only_support, "limited strided slice support"

    # Assert that strides are all ones, since we have limited support.
    const_strides = kwargs["consts"][node.inputs[3]]
    np.testing.assert_array_equal(np.ones_like(const_strides), const_strides)

    need_post_processing = (shrink_axis_mask > 0 or begin_mask > 0 or
                            end_mask > 0 or ellipsis_mask > 0 or
                            new_axis_mask > 0)

    slice_suffix = "_" + get_unique_suffix() if need_post_processing else ""
    slice_output_name = node.outputs[0]
    slice_node = cls.make_node("DynamicSlice", node.inputs[0:3],
                               [slice_output_name + slice_suffix],
                               node.name + slice_suffix)

    if not need_post_processing:
      return [slice_node]

    shrink_axis = cls._int_to_set_pos_list(shrink_axis_mask)
    squeeze_node = cls.make_node(
        "Squeeze", [slice_output_name + slice_suffix],
        node.outputs,
        node.name,
        axes=shrink_axis)
    return [slice_node, squeeze_node]
Example #8
    def _make_major_transpose_nodes(inputs, scope, node_dict, prev_node, post):
        """Make major transpose nodes if is batch major.

    Args:
      inputs: Input names.
      scope: Name scope.
      node_dict: Node dict.
      prev_node: Previous node.
      post: Whether this is the post transpose.

    Returns:
      Perm node.
      Transpose node.

    """
        input_shape = node_dict[inputs[0]].attr["_output_shapes"][0]
        input_rank = len(input_shape)

        perm_node = TensorflowNode(
            op_type="Const",
            name="/".join([scope, "transpose", "perm",
                           get_unique_suffix()]),
            attr={
                "value": np.asarray([1, 0] + list(range(input_rank))[2:],
                                    np.int32),
                "dtype": data_type.tf2onnx(tf.int32),
                "_output_shapes": [input_rank]
            })

        if post:
            input_shape = [input_shape[i] for i in perm_node.attr["value"]]
            prev_node.attr["_output_shapes"] = [input_shape]

        trans_node = TensorflowNode(
            op_type="Transpose",
            name="/".join([scope, "transpose",
                           get_unique_suffix()]),
            inputs=[inputs[0] if not post else prev_node.name, perm_node.name],
            attr={
                "dtype":
                data_type.tf2onnx(node_dict[inputs[0]].attr["T"]),
                "_output_shapes":
                [[input_shape[i] for i in perm_node.attr["value"]]]
            })
        return [perm_node, trans_node]
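
The perm constant simply swaps the first two axes, converting between batch-major [batch, time, ...] and time-major [time, batch, ...] layouts:

input_rank = 3
perm = [1, 0] + list(range(input_rank))[2:]
assert perm == [1, 0, 2]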
Example #9
 def version_1(cls, node, **kwargs):
     axis = node.attr.get("axis", 0)
     unsqueeze_outputs = [
         i + "_Unsqueeze_" + get_unique_suffix() for i in node.inputs
     ]
     nodes = []
     for i, o in zip(node.inputs, unsqueeze_outputs):
         nodes.append(
             cls.make_node("Unsqueeze", [i], [o],
                           node.name + "_Unsqueeze_" + get_unique_suffix(),
                           axes=[axis],
                           version=1))
     concat = cls.make_node("Concat",
                            unsqueeze_outputs,
                            node.outputs,
                            node.name,
                            axis=axis,
                            version=4)
     return nodes + [concat]
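
This handler decomposes TF's Pack (stack) into per-input Unsqueeze nodes followed by a Concat, which is equivalent to stacking. A minimal numpy check:

import numpy as np

a, b = np.ones((2, 3)), np.zeros((2, 3))
axis = 0
via_concat = np.concatenate([np.expand_dims(t, axis) for t in (a, b)],
                            axis=axis)
assert np.array_equal(via_concat, np.stack((a, b), axis=axis))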
Example #10
 def version_1(cls, node, **kwargs):
     axis = node.attr.get("axis", 0)
     outputs = node.outputs
     split_outputs = [o + "_Split_" + get_unique_suffix() for o in outputs]
     splited = cls.make_node("Split",
                             node.inputs,
                             split_outputs,
                             node.name,
                             axis=axis,
                             split=[1] * node.attr["num"],
                             version=2)
     nodes = [splited]
     for split_output, output in zip(split_outputs, outputs):
         nodes.append(
             cls.make_node("Squeeze", [split_output], [output],
                           node.name + "_Squeeze_" + get_unique_suffix(),
                           axes=[axis],
                           version=1))
     return nodes
Example #11
 def version_1(cls, node, **kwargs):
     output = "unclipped" + get_unique_suffix()
     nodes = [
         cls.make_node("Relu", node.inputs, [output], version=1),
         cls.make_node("Clip", [output],
                       node.outputs,
                       min=0.0,
                       max=6.0,
                       version=1),
     ]
     return nodes
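
Relu followed by Clip(0, 6) reproduces tf.nn.relu6; with min=0.0 the Clip alone would already suffice, the explicit Relu just keeps the decomposition obvious. A minimal numpy check:

import numpy as np

x = np.array([-2.0, 3.0, 9.0])
assert np.array_equal(np.clip(np.maximum(x, 0.0), 0.0, 6.0),
                      np.array([0.0, 3.0, 6.0]))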
Example #12
    def _onnx_graph_to_tensorflow_rep(cls, graph_def, opset, strict):
        """ Convert ONNX graph to TensorflowRep.

    :param graph_def: ONNX GraphProto object.
    :param opset: ONNX OperatorSetIdProto list.
    :param strict: whether to enforce semantic equivalence between the original model
      and the converted tensorflow model.
    :return: TensorflowRep object.
    """
        handlers = cls._get_handlers(opset)

        # initializer: TensorProtos representing the values to initialize
        # a given tensor.
        # initialized: A list of names of the initialized tensors.

        if graph_def.initializer:
            initialized = {init.name for init in graph_def.initializer}
        else:
            initialized = set()

        module = BackendTFModule(handlers, opset, strict, graph_def, cls)
        signatures = dict()

        for value_info in graph_def.input:
            if value_info.name in initialized:
                continue
            shape = list(d.dim_value if (
                d.dim_value > 0 and d.dim_param == "") else None
                         for d in value_info.type.tensor_type.shape.dim)
            value_info_name = value_info.name.replace(
                ":", "_tf_") + "_" + get_unique_suffix(
                ) if ":" in value_info.name else value_info.name

            tf_spec = tf.TensorSpec(
                shape,
                data_type.onnx2tf(value_info.type.tensor_type.elem_type),
                value_info_name)
            signatures[value_info.name] = tf_spec

        tf_rep = TensorflowRep()
        tf_rep.inputs = [
            value_info.name for value_info in graph_def.input
            if value_info.name not in initialized
        ]
        tf_rep.outputs = [value_info.name for value_info in graph_def.output]
        module.outputs = tf_rep.outputs
        tf_rep.tf_module = module
        tf_rep.signatures = signatures
        return tf_rep
Example #13
    def conv_op(cls, node, d=2, is_depthwise=False, **kwargs):
        auto_pad = node.attr["padding"].decode("UTF-8")
        auto_pad = "SAME_UPPER" if auto_pad == "SAME" else auto_pad
        data_format = node.attr["data_format"].decode("UTF-8")
        spatial_indices = [
            i for i in range(len(data_format))
            if data_format[i] not in ["N", "C"]
        ]
        strides = list(map(lambda i: node.attr["strides"][i], spatial_indices))
        dilations = list(
            map(lambda i: node.attr.get("dilations", [1] * (d + 2))[i],
                spatial_indices))
        node_dict = kwargs["node_dict"]
        kernel_shape = node_dict[node.inputs[1]].attr["_output_shapes"][0][:d]
        n_groups = 1
        if is_depthwise:
            n_groups = kernel_shape[-1]
        output_shape = list(
            map(lambda i: node.attr["_output_shapes"][0][i], spatial_indices))
        input_shape = list(
            map(
                lambda i: node_dict[node.inputs[0]].attr["_output_shapes"][0][
                    i], spatial_indices))
        pads = cls.cal_pads(auto_pad, len(spatial_indices), input_shape,
                            output_shape, strides, kernel_shape)

        w_unique_suffix = get_unique_suffix()
        w_transpose_node = Transpose.handle_node_proto(
            make_node("Transpose", [node.inputs[1], "perm"],
                      [node.inputs[1] + "_T_" + w_unique_suffix],
                      name=node.inputs[1] + "_T_" + w_unique_suffix),
            consts={"perm": [d + 1, d] + list(range(d))})

        conv_node = cls.make_node_from_tf_node(
            node, [node.inputs[0], w_transpose_node.output[0]],
            pads=pads,
            group=n_groups,
            kernel_shape=kernel_shape,
            strides=strides,
            dilations=dilations,
            data_format_auto_convert=True)

        if not isinstance(conv_node, list):
            conv_node = [conv_node]
        return [w_transpose_node] + conv_node
Example #14
    def version_1(cls, node, **kwargs):
        # tf.size out_type could be int32 or int64
        need_cast = node.attr['out_type'] == tf.int32

        size_suffix = "_" + get_unique_suffix() if need_cast else ""
        size_output_name = cls.get_outputs_names(node)[0] + size_suffix
        size_node = cls.make_node_from_tf_node(node, [node.inputs[0]],
                                               outputs=[size_output_name],
                                               name=node.name + size_suffix)

        if not need_cast:
            return [size_node]

        cast_node = Cast.handle(
            make_node("Cast", [size_output_name],
                      outputs=cls.get_outputs_names(node),
                      name=node.name,
                      DstT=node.attr['out_type']))
        return [size_node, cast_node]
Example #15
 def version_1(cls, node, **kwargs):
   div_suffix = '_' + get_unique_suffix()
   div_output_name = node.outputs[0] + div_suffix
   div_node = Div.handle(
       TensorflowNode(
           name='Div',
           inputs=node.inputs[0:2],
           outputs=[div_output_name],
           attr=node.attr,
           domain=node.domain,
           op_type='Div'), **kwargs)
   floor_node = Floor.handle(
       TensorflowNode(
           name='Floor',
           inputs=[div_output_name],
           outputs=node.outputs,
           attr=node.attr,
           domain=node.domain,
           op_type='Floor'))
   return [div_node, floor_node]
Example #16
  def version_1(cls, node, **kwargs):
    rsqrt_suffix = "_" + get_unique_suffix()
    rsqrt_output_name = cls.get_outputs_names(node)[0] + rsqrt_suffix

    sqrt_node = Sqrt.handle(
        TensorflowNode(
            op_type='Sqrt',
            name=node.name + rsqrt_suffix,
            inputs=[node.inputs[0]],
            outputs=[rsqrt_output_name],
            attr=node.attr), **kwargs)

    reciprocal_node = Reciprocal.handle(
        TensorflowNode(
            op_type='Reciprocal',
            inputs=[rsqrt_output_name],
            outputs=cls.get_outputs_names(node),
            name=node.name,
            attr=node.attr), **kwargs)
    return [sqrt_node, reciprocal_node]
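
Rsqrt is decomposed as Reciprocal(Sqrt(x)), i.e. 1 / sqrt(x). A minimal numpy check:

import numpy as np

x = np.array([1.0, 4.0, 16.0])
assert np.allclose(1.0 / np.sqrt(x), np.array([1.0, 0.5, 0.25]))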
Example #17
    def version_9(cls, node, **kwargs):
        indices = node.inputs[0]
        depth = node.inputs[1]
        axis = node.attr.get('axis', -1)

        on_value = kwargs['consts'][node.inputs[2]].item(0)
        off_value = kwargs['consts'][node.inputs[3]].item(0)
        values = np.array([off_value, on_value])
        constant_output_name = node.outputs[0] + '_' + get_unique_suffix()

        constant_node = make_node('Constant',
                                  inputs=[],
                                  outputs=[constant_output_name],
                                  value=from_array(values))

        onehot_node = cls.make_node_from_tf_node(
            node, [indices, depth, constant_output_name], axis=axis)

        return [constant_node, onehot_node]
Example #18
    def conv_op(cls, node, d=2, is_depthwise=False, **kwargs):
        auto_pad = node.attr["padding"].decode("UTF-8")
        auto_pad = "SAME_UPPER" if auto_pad == "SAME" else auto_pad
        data_format = node.attr["data_format"].decode("UTF-8")
        spatial_indices = [
            i for i in range(len(data_format))
            if data_format[i] not in ["N", "C"]
        ]
        strides = list(map(lambda i: node.attr["strides"][i], spatial_indices))
        dilations = list(
            map(lambda i: node.attr.get("dilations", [1] * (d + 2))[i],
                spatial_indices))
        node_dict = kwargs["node_dict"]
        kernel_shape = node_dict[node.inputs[1]].attr["_output_shapes"][0][:d]
        n_groups = 1
        if is_depthwise:
            n_groups = kernel_shape[-1]
        output_shape = list(
            map(lambda i: node.attr["_output_shapes"][0][i], spatial_indices))
        input_shape = list(
            map(
                lambda i: node_dict[node.inputs[0]].attr["_output_shapes"][0][
                    i], spatial_indices))
        pads = cls.cal_pads(auto_pad, len(spatial_indices), input_shape,
                            output_shape, strides, kernel_shape)
        unique_suffix = get_unique_suffix()
        transpose_node = cls.make_node_from_tf_node(
            node, [node.inputs[1]], [node.inputs[1] + "_T_" + unique_suffix],
            perm=[d + 1, d] + list(range(d)),
            op_type="Transpose",
            name=node.inputs[1] + "_T_" + unique_suffix)
        conv_node = cls.make_node_from_tf_node(
            node, [node.inputs[0], node.inputs[1] + "_T_" + unique_suffix],
            pads=pads,
            group=n_groups,
            kernel_shape=kernel_shape,
            strides=strides,
            dilations=dilations)

        return [transpose_node, conv_node]
Example #19
    def version_1(cls, node, **kwargs):
        # tf.size out_type could be int32 or int64
        need_cast = node.attr['out_type'] == tf.int32

        size_suffix = "_" + get_unique_suffix() if need_cast else ""
        size_output_name = node.outputs[0] + size_suffix
        size_node = cls.make_node_from_tf_node(node, [node.inputs[0]],
                                               outputs=[size_output_name],
                                               name=node.name + size_suffix)

        if not need_cast:
            return [size_node]

        attrs = {}
        attrs['DstT'] = node.attr['out_type']

        cast_node = Cast.handle(
            TensorflowNode(name=node.name,
                           inputs=[size_output_name],
                           outputs=node.outputs,
                           op_type='Cast',
                           attr=attrs))
        return [size_node, cast_node]
Example #20
  def _common(cls, node, **kwargs):
    tensor_dict = kwargs["tensor_dict"]
    x = tensor_dict[node.inputs[0]]
    input_shape = x.get_shape().as_list()
    input_size = len(node.inputs)
    hidden_size = node.attrs["hidden_size"]
    direction = node.attrs.get("direction", "forward")
    num_directions = 2 if direction == "bidirectional" else 1

    # removed from version 7, default is 0
    output_sequence = node.attrs.get("output_sequence", 0)

    # TODO(fumihwh): check if prev node is one of RNN
    # process input if it comes from other previous cell
    # which has shape [seq_length, num_directions, batch_size, hidden_size]
    if len(input_shape) == 4 and input_shape[1] == 1:
      x = tf.squeeze(x)

    sequence_length = None
    if input_size >= 5 and node.inputs[4] in tensor_dict:
      sequence_length = tensor_dict[node.inputs[4]]

    cell_kwargs = {}

    tf_activations = [tf.nn.tanh]
    if "activations" in node.attrs:
      activations = list(map(lambda x: x.lower(), node.attrs["activations"]))
      activation_alpha = node.attrs.get("activation_alpha", [None] * 4)
      activation_beta = node.attrs.get("activation_beta", [None] * 4)
      tf_activations = [
          cls.rnn_get_activation(activations[1], activation_alpha[1],
                                 activation_beta[1])
      ]
      if num_directions == 2:
        tf_activations.append(
            cls.rnn_get_activation(activations[3], activation_alpha[3],
                                   activation_beta[3]))

    # TODO(fumihwh): check if reverse and bidirectional works
    with tf.variable_scope(
        "GRU_" + get_unique_suffix(),
        custom_getter=partial(
            cls._custom_getter,
            node=node,
            tensor_dict=tensor_dict,
            is_bidirectional=num_directions == 2)):

      cell_kwargs["num_units"] = hidden_size
      if input_size < 4 or node.inputs[3] not in tensor_dict:
        cell_kwargs["bias_initializer"] = tf.zeros_initializer
      initial_state = None
      initial_state_bw = None
      if input_size == 6:
        initial_h = tensor_dict.get(node.inputs[5], None)
        if initial_h is not None:
          initial_state = (initial_h[0],)
          if num_directions == 2:
            initial_state_bw = (initial_h[1],)

      rnn_kwargs = {}
      if num_directions == 1:
        rnn_kwargs["initial_state"] = initial_state
      elif num_directions == 2:
        rnn_kwargs["initial_state_fw"] = initial_state
        rnn_kwargs["initial_state_bw"] = initial_state_bw
      rnn_kwargs["sequence_length"] = sequence_length
      rnn_kwargs["time_major"] = True
      rnn_kwargs["dtype"] = tf.float32

      outputs, states = cls.rnn(x, tf.nn.rnn_cell.GRUCell, cell_kwargs,
                                rnn_kwargs, tf_activations, direction)

    if num_directions == 1:
      state = states[0]
      h = tf.expand_dims(state, 0)
      output = tf.expand_dims(outputs, 1)
    else:
      state_fw = states[0][0]
      state_bw = states[1][0]
      output_fw = outputs[0]
      output_bw = outputs[1]
      h_fw = tf.expand_dims(state_fw, 0)
      h_bw = tf.expand_dims(state_bw, 0)
      h = tf.concat((h_fw, h_bw), axis=0)
      output_fw = tf.expand_dims(output_fw, 1)
      output_bw = tf.expand_dims(output_bw, 1)
      output = tf.concat((output_fw, output_bw), axis=1)

    return [output, h] if output_sequence == 0 else [h]
Example #21
    def _common(cls, node, **kwargs):
        tensor_dict = kwargs["tensor_dict"]
        x = tensor_dict[node.inputs[0]]
        input_shape = x.get_shape().as_list()
        input_size = len(node.inputs)
        hidden_size = node.attrs["hidden_size"]
        direction = node.attrs.get("direction", "forward")
        num_directions = 2 if direction == "bidirectional" else 1

        # removed from version 7, default is 0
        output_sequence = node.attrs.get("output_sequence", 0)

        # TODO(fumihwh): check if prev node is one of RNN
        # process input if it comes from other previous cell
        # which has shape [seq_length, num_directions, batch_size, hidden_size]
        if len(input_shape) == 4 and input_shape[1] == 1:
            x = tf.squeeze(x)

        sequence_length = None
        if input_size >= 5 and node.inputs[4] in tensor_dict:
            sequence_length = tensor_dict[node.inputs[4]]

        cell_kwargs = {}

        if "clip" in node.attrs:
            cell_kwargs["cell_clip"] = node.attrs["clip"]

        tf_activations = [tf.nn.tanh] * num_directions
        if "activations" in node.attrs:
            activations = list(
                map(lambda x: x.lower(), node.attrs["activations"]))
            activation_alpha = node.attrs.get("activation_alpha", [None] * 6)
            activation_beta = node.attrs.get("activation_beta", [None] * 6)

            # tf only supports customizing the hidden state activation
            # function, which corresponds to the activations specified at
            # positions 1 and 4 in onnx's activations attribute.
            activation_idxs = [1, 4] if num_directions == 2 else [1]
            tf_activations = [
                cls.rnn_get_activation(activations[i], activation_alpha[i],
                                       activation_beta[i])
                for i in activation_idxs
            ]

        # TODO(fumihwh): check if reverse and bidirectional works
        with tf.compat.v1.variable_scope(
                "LSTM_" + get_unique_suffix(),
                custom_getter=partial(cls._custom_getter,
                                      node=node,
                                      tensor_dict=tensor_dict,
                                      is_bidirectional=num_directions == 2)):
            cell_kwargs["use_peepholes"] = input_size == 8 and node.inputs[
                7] in tensor_dict
            cell_kwargs["forget_bias"] = 0.
            cell_kwargs["num_units"] = hidden_size
            initial_state = None
            initial_state_bw = None
            if input_size >= 6:
                initial_h = tensor_dict.get(node.inputs[5], None)
                initial_c = tensor_dict.get(
                    node.inputs[6],
                    None) if input_size >= 7 else tf.zeros_like(initial_h)
                if initial_h is not None and initial_c is not None:
                    initial_state = (tf.compat.v1.nn.rnn_cell.LSTMStateTuple(
                        initial_c[0], initial_h[0]), )
                    if num_directions == 2:
                        initial_state_bw = (
                            tf.compat.v1.nn.rnn_cell.LSTMStateTuple(
                                initial_c[1], initial_h[1]), )

            rnn_kwargs = {}
            if num_directions == 1:
                rnn_kwargs["initial_state"] = initial_state
            elif num_directions == 2:
                rnn_kwargs["initial_state_fw"] = initial_state
                rnn_kwargs["initial_state_bw"] = initial_state_bw
            rnn_kwargs["sequence_length"] = sequence_length
            rnn_kwargs["time_major"] = True
            rnn_kwargs["dtype"] = tf.float32

            outputs, states = cls.rnn(x, tf.compat.v1.nn.rnn_cell.LSTMCell,
                                      cell_kwargs, rnn_kwargs, tf_activations,
                                      direction)

        if num_directions == 1:
            state = states[0]
            c = tf.expand_dims(state[0], 0)
            h = tf.expand_dims(state[1], 0)
            output = tf.expand_dims(outputs, 1)
        else:
            state_fw = states[0][0]
            state_bw = states[1][0]
            output_fw = outputs[0]
            output_bw = outputs[1]
            c_fw = tf.expand_dims(state_fw[0], 0)
            c_bw = tf.expand_dims(state_bw[0], 0)
            c = tf.concat((c_fw, c_bw), axis=0)
            h_fw = tf.expand_dims(state_fw[1], 0)
            h_bw = tf.expand_dims(state_bw[1], 0)
            h = tf.concat((h_fw, h_bw), axis=0)
            output_fw = tf.expand_dims(output_fw, 1)
            output_bw = tf.expand_dims(output_bw, 1)
            output = tf.concat((output_fw, output_bw), axis=1)

        return [output, h, c] if output_sequence == 0 else [h, c]
Example #22
    def _onnx_graph_to_tensorflow_rep(cls, graph_def, opset, strict, **kwargs):
        """ Convert ONNX graph to TensorflowRep.

    :param graph_def: ONNX GraphProto object.
    :param opset: ONNX OperatorSetIdProto list.
    :param strict: whether to enforce semantic equivalence between the original model
      and the converted tensorflow model.
    :param kwargs: additional arguments to generate tensor_dict for model debugging.
    :return: TensorflowRep object.
    """
        # To generate tensor_dict or not, default is False
        gen_tensor_dict = kwargs[
            'gen_tensor_dict'] if 'gen_tensor_dict' in kwargs else False
        # User-provided input tensors, for the case where the model inputs have unknown shapes
        input_tensor_dict = kwargs[
            'input_tensor_dict'] if 'input_tensor_dict' in kwargs else dict()

        handlers = cls._get_handlers(opset)

        # initializer: TensorProtos representing the values to initialize
        # a given tensor.
        # initialized: A list of names of the initialized tensors.

        if graph_def.initializer:
            initialized = {init.name for init in graph_def.initializer}
        else:
            initialized = set()

        input_dict = dict()

        module = BackendTFModule(handlers, opset, strict, graph_def, cls)
        signatures = dict()
        for value_info in graph_def.input:
            if value_info.name in initialized:
                continue
            shape = list(d.dim_value if (
                d.dim_value > 0 and d.dim_param == "") else None
                         for d in value_info.type.tensor_type.shape.dim)
            value_info_name = value_info.name.replace(
                ":", "_tf_") + "_" + get_unique_suffix(
                ) if ":" in value_info.name else value_info.name

            tf_spec = tf.TensorSpec(
                shape,
                data_type.onnx2tf(value_info.type.tensor_type.elem_type),
                value_info_name)
            signatures[value_info.name] = tf_spec

            if gen_tensor_dict:
                x = tf.constant(
                    0,
                    dtype=data_type.onnx2tf(
                        value_info.type.tensor_type.elem_type),
                    name=value_info_name,
                    shape=shape
                ) if value_info.name not in input_tensor_dict else input_tensor_dict[
                    value_info.name]
                input_dict[value_info.name] = x

        tf_rep = TensorflowRep()
        tf_rep.inputs = [
            value_info.name for value_info in graph_def.input
            if value_info.name not in initialized
        ]
        tf_rep.outputs = [value_info.name for value_info in graph_def.output]
        module.outputs = tf_rep.outputs
        tf_rep.tf_module = module
        tf_rep.signatures = signatures
        tf_rep.tensor_dict = module.gen_tensor_dict(
            input_dict) if gen_tensor_dict else None
        tf_rep.onnx_op_list = cls._get_onnx_op_list(graph_def)
        return tf_rep
Example #23
    def _onnx_graph_to_tensorflow_rep(cls, graph_def, opset, strict):
        """ Convert ONNX graph to TensorflowRep.

    :param graph_def: ONNX GraphProto object.
    :param opset: ONNX OperatorSetIdProto list.
    :param strict: whether to enforce semantic equivalence between the original model
      and the converted tensorflow model.
    :return: TensorflowRep object.
    """
        handlers = cls._get_handlers(opset)

        tf_rep_graph = tf.Graph()
        with tf_rep_graph.as_default():
            # initializer: TensorProtos representing the values to initialize
            # a given tensor.
            # initialized: A list of names of the initialized tensors.
            if graph_def.initializer:
                input_dict_items = cls._onnx_initializer_to_input_dict_items(
                    graph_def.initializer)
                initialized = {init.name for init in graph_def.initializer}
            else:
                input_dict_items = []
                initialized = set()

            # creating placeholders for currently unknown inputs
            for value_info in graph_def.input:
                if value_info.name in initialized:
                    continue
                shape = list(d.dim_value if (
                    d.dim_value > 0 and d.dim_param == "") else None
                             for d in value_info.type.tensor_type.shape.dim)
                value_info_name = value_info.name.replace(
                    ":", "_tf_") + "_" + get_unique_suffix(
                    ) if ":" in value_info.name else value_info.name

                x = tf.compat.v1.placeholder(data_type.onnx2tf(
                    value_info.type.tensor_type.elem_type),
                                             name=value_info_name,
                                             shape=shape)
                input_dict_items.append((value_info.name, x))

            # tensor dict: this dictionary is a map from variable names
            # to the latest produced TF tensors of the given name.
            # This dictionary will get updated as we build the graph to
            # record the names of newly produced tensors.
            tensor_dict = dict(input_dict_items)
            # Since tensor dict may be updated, we need to keep a copy
            # of the original input dict where we track the earliest
            # defined tensors so we can have access to the placeholders
            # to feed in input tensors when we run the graph.
            input_dict = dict(input_dict_items)

            for node in graph_def.node:
                onnx_node = OnnxNode(node)
                output_ops = cls._onnx_node_to_tensorflow_op(onnx_node,
                                                             tensor_dict,
                                                             handlers,
                                                             opset=opset,
                                                             strict=strict)
                curr_node_output_map = dict(zip(onnx_node.outputs, output_ops))
                tensor_dict.update(curr_node_output_map)

        tf_rep = TensorflowRep()
        tf_rep.graph = tf_rep_graph
        tf_rep.inputs = [
            value_info.name for value_info in graph_def.input
            if value_info.name not in initialized
        ]
        tf_rep.outputs = [value_info.name for value_info in graph_def.output]
        tf_rep.tensor_dict = tensor_dict
        return tf_rep
Example #24
    def _onnx_graph_to_tensorflow_rep(cls, graph_def, opset, strict, **kwargs):
        """ Convert ONNX graph to TensorflowRep.

    :param graph_def: ONNX GraphProto object.
    :param opset: ONNX OperatorSetIdProto list.
    :param strict: whether to enforce semantic equivalence between the original model
      and the converted tensorflow model.
    :param kwargs: additional arguments to generate tensor_dict for model debugging.
    :return: TensorflowRep object.
    """
        # To generate tensor_dict or not, default is False
        gen_tensor_dict = kwargs[
            'gen_tensor_dict'] if 'gen_tensor_dict' in kwargs else False
        # User-provided input tensors, for the case where the model inputs have unknown shapes
        input_tensor_dict = kwargs[
            'input_tensor_dict'] if 'input_tensor_dict' in kwargs else dict()
        training_mode = kwargs[
            'training_mode'] if 'training_mode' in kwargs else False

        handlers = cls._get_handlers(opset)

        # initializer: TensorProtos representing the values to initialize
        # a given tensor.
        # initialized: A list of names of the initialized tensors.

        if graph_def.initializer:
            initialized = {init.name for init in graph_def.initializer}
        else:
            initialized = set()

        input_dict = dict()

        module = BackendTFModule(handlers, opset, strict, graph_def, cls)
        signatures = dict()
        tf_rep_graph = tf.Graph()
        with tf_rep_graph.as_default():
            for value_info in graph_def.input:
                if value_info.name in initialized:
                    continue
                shape = list(d.dim_value if (
                    d.dim_value > 0 and d.dim_param == "") else None
                             for d in value_info.type.tensor_type.shape.dim)
                value_info_name = value_info.name.replace(
                    ":", "_tf_") + "_" + get_unique_suffix(
                    ) if ":" in value_info.name else value_info.name

                tf_spec = tf.TensorSpec(
                    shape,
                    data_type.onnx2tf(value_info.type.tensor_type.elem_type),
                    value_info_name)
                signatures[value_info.name] = tf_spec

                if gen_tensor_dict or training_mode:
                    x = tf.compat.v1.placeholder(
                        data_type.onnx2tf(
                            value_info.type.tensor_type.elem_type),
                        name=value_info_name,
                        shape=shape
                    ) if value_info.name not in input_tensor_dict else input_tensor_dict[
                        value_info.name]
                    input_dict[value_info.name] = x

            if gen_tensor_dict or training_mode:
                input_dict_items = cls._onnx_initializer_to_input_dict_items(
                    graph_def.initializer, training_mode=True)
                tensor_dict = dict(input_dict)
                tensor_dict.update(input_dict_items)
                tensor_dict[
                    training_flag_name] = tf.compat.v1.placeholder_with_default(
                        False, shape=[])
                for node in graph_def.node:
                    onnx_node = OnnxNode(node)
                    output_ops = cls._onnx_node_to_tensorflow_op(onnx_node,
                                                                 tensor_dict,
                                                                 handlers,
                                                                 opset=opset,
                                                                 strict=strict)
                    curr_node_output_map = dict(
                        zip(onnx_node.outputs, output_ops))
                    tensor_dict.update(curr_node_output_map)

        tf_rep = TensorflowRep()
        tf_rep.inputs = [
            value_info.name for value_info in graph_def.input
            if value_info.name not in initialized
        ]
        tf_rep.outputs = [value_info.name for value_info in graph_def.output]
        module.outputs = tf_rep.outputs
        tf_rep.tf_module = module
        tf_rep.signatures = signatures
        if gen_tensor_dict or training_mode:
            tf_rep.tensor_dict = tensor_dict
        if training_mode:
            tf_rep.graph = tf_rep_graph
        tf_rep.onnx_op_list = cls._get_onnx_op_list(graph_def)
        return tf_rep
Example #25
 def validate_initializer_name(name):
     # Replace ":" with "_tf_" and append a unique suffix for
     # traceability
     return name.replace(":", "_tf_") + "_" + get_unique_suffix(
     ) if ":" in name else name
Example #26
def convert(infile, outfile, convert_to, graph=None, **kwargs):
    """Convert pb.

  Args:
    infile: Input path.
    outfile: Output path.
    convert_to: Target format, "tf" or "onnx".
    graph: Inference graph.
    **kwargs: Other args for converting.

  Returns:
    None.
  """
    if convert_to == "tf":
        logger.info("Start converting onnx pb to tf pb:")
        onnx_model = onnx.load(infile)
        tf_rep = backend.prepare(onnx_model, **kwargs)
        tf_rep.export_graph(outfile)
    elif convert_to == "onnx":
        ext = os.path.splitext(infile)[1]
        logger.info("Start converting tf pb to onnx pb:")
        if ext == ".pb":
            with open(infile, "rb") as f:
                graph_def = graph_pb2.GraphDef()
                graph_def.ParseFromString(f.read())
        elif ext == ".ckpt":
            latest_ckpt = tf.train.latest_checkpoint(os.path.dirname(infile))
            saver = tf.train.import_meta_graph(latest_ckpt + ".meta")
            temp_file_suffix = get_unique_suffix()
            workdir = 'onnx-tf_workdir_{}'.format(temp_file_suffix)
            with tf.Session() as sess:
                sess.run([
                    tf.global_variables_initializer(),
                    tf.local_variables_initializer()
                ])
                saver.restore(sess, latest_ckpt)
                # Take users' hint or deduce output node automatically.
                kwargs["output"] = kwargs.get(
                    "output", None) or TensorflowGraph.get_output_node_names(
                        sess.graph.as_graph_def())

                # Save the graph to disk for freezing.
                tf.train.write_graph(sess.graph.as_graph_def(add_shapes=True),
                                     workdir,
                                     "input_model.pb",
                                     as_text=False)

            # Freeze graph:
            freeze_graph.freeze_graph(
                input_graph=graph or workdir + "/input_model.pb",
                input_saver="",
                input_binary=True,
                input_checkpoint=latest_ckpt,
                output_node_names=",".join(kwargs["output"]),
                restore_op_name="",
                filename_tensor_name="",
                output_graph=workdir + "/frozen_model.pb",
                clear_devices=True,
                initializer_nodes="")

            # Load back the frozen graph.
            with open(workdir + "/frozen_model.pb", "rb") as f:
                graph_def = graph_pb2.GraphDef()
                graph_def.ParseFromString(f.read())

            # Remove work directory.
            shutil.rmtree(workdir)
        else:
            raise ValueError(
                "Input file is not supported. Should be .pb or .ckpt, but got {}"
                .format(ext))

        if "rnn_type" in kwargs:
            onnx_model = experiment_frontend.rnn_tf_graph_to_onnx_model(
                graph_def, **kwargs)
        else:
            onnx_model = frontend.tensorflow_graph_to_onnx_model(
                graph_def, **kwargs)
        onnx.save(onnx_model, outfile)
    logger.info("Converting completes successfully.")
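
Typical invocations might look like the following (paths are placeholders):

convert("model.onnx", "model.pb", convert_to="tf")
convert("frozen_graph.pb", "model.onnx", convert_to="onnx")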
Example #27
    def process_kernel_and_bias(cls, nodes, cell_dict, node_dict):
        new_kernel = None
        new_bias = None
        scopes = cell_dict["kernel"][0].split("/")
        scope = "/".join(scopes[:scopes.index("kernel")])
        for key, value in [[
                "kernel",
            [node_dict[kernel] for kernel in cell_dict["kernel"]]
        ], ["bias", [node_dict[bias] for bias in cell_dict["bias"]]]]:
            gate_output_shape = node_dict[
                value[0].name].attr["_output_shapes"][0]
            candidate_output_shape = node_dict[
                value[1].name].attr["_output_shapes"][0]
            last_idx = range(len(gate_output_shape))[-1]
            concat_output_shapes = [
                g if i != last_idx else g + c for i, (g, c) in enumerate(
                    zip(gate_output_shape, candidate_output_shape))
            ]
            concat_node = TensorflowNode(
                op_type="ConcatV2",
                name="/".join([scope, key, "concat_" + get_unique_suffix()]),
                inputs=[value[0].name, value[1].name, CONST_MINUS_ONE_INT32],
                attr={"_output_shapes": [concat_output_shapes]})
            nodes.append(concat_node)

            if key == "kernel":
                hidden_size = gate_output_shape[1] // 2
                input_size = gate_output_shape[0] - hidden_size
                transposed_shape = concat_output_shapes[::-1]
                transpose_node = TensorflowNode(
                    op_type="Transpose",
                    name="/".join(
                        [scope, key, "transpose_" + get_unique_suffix()]),
                    inputs=concat_node.outputs + [None],
                    attr={"_output_shapes": [transposed_shape]})

                split_const_node = TensorflowNode(
                    op_type="Const",
                    name="/".join(
                        [scope, key, "split_const_" + get_unique_suffix()]),
                    attr={
                        "value": np.asarray([input_size, hidden_size],
                                            np.int32),
                        "dtype": data_type.tf2onnx(tf.int32),
                        "_output_shapes": [[1]]
                    })

                split_node = TensorflowNode(
                    op_type="Split",
                    name="/".join([scope, key,
                                   "split_" + get_unique_suffix()]),
                    inputs=[CONST_ZERO_INT32] + transpose_node.outputs,
                    attr={
                        "num_split":
                        3,
                        "_output_shapes":
                        [[int(transposed_shape[0] / 3), transposed_shape[1]]
                         for _ in range(3)]
                    })

                re_concat_node = TensorflowNode(
                    op_type="ConcatV2",
                    name="/".join(
                        [scope, key, "re_concat_" + get_unique_suffix()]),
                    inputs=[
                        split_node.outputs[1], split_node.outputs[0],
                        CONST_ZERO_INT32
                    ],
                    attr={
                        "_output_shapes": [[
                            int(transposed_shape[0] / 3 * 2),
                            transposed_shape[1]
                        ]]
                    })

                nodes.extend([
                    transpose_node, split_const_node, split_node,
                    re_concat_node
                ])
                new_kernel = re_concat_node.outputs + [split_node.outputs[2]]
            else:
                new_bias = concat_node.outputs

        return new_kernel + new_bias
Example #28
    def conv_op(cls, node, d=2, is_depthwise=False, **kwargs):
        auto_pad = node.attr["padding"].decode("UTF-8")
        auto_pad = "SAME_UPPER" if auto_pad == "SAME" else auto_pad
        data_format = node.attr["data_format"].decode("UTF-8")
        spatial_indices = [
            i for i in range(len(data_format))
            if data_format[i] not in ["N", "C"]
        ]
        strides = list(map(lambda i: node.attr["strides"][i], spatial_indices))
        dilations = list(
            map(lambda i: node.attr.get("dilations", [1] * (d + 2))[i],
                spatial_indices))
        node_dict = kwargs["node_dict"]
        kernel_shape = node_dict[node.inputs[1]].attr["_output_shapes"][0][:d]
        n_groups = 1
        if is_depthwise:
            n_groups = kernel_shape[-1]
        output_shape = list(
            map(lambda i: node.attr["_output_shapes"][0][i], spatial_indices))
        input_shape = list(
            map(
                lambda i: node_dict[node.inputs[0]].attr["_output_shapes"][0][
                    i], spatial_indices))
        pads = cls.cal_pads(auto_pad, len(spatial_indices), input_shape,
                            output_shape, strides, kernel_shape)

        w_unique_suffix = get_unique_suffix()
        w_transpose_node = Transpose.handle(
            make_node("Transpose", [node.inputs[1], "perm"],
                      [node.inputs[1] + "_T_" + w_unique_suffix],
                      name=node.inputs[1] + "_T_" + w_unique_suffix),
            consts={"perm": [d + 1, d] + list(range(d))})

        if data_format[-1] == "C":
            c_first_data_format = data_format[0] + "C" + data_format[1:-1]
            pre_unique_suffix = get_unique_suffix()
            pre_transpose_node = Transpose.handle(
                make_node("Transpose", [node.inputs[0], "perm"],
                          [node.inputs[0] + "_T_" + pre_unique_suffix],
                          name=node.inputs[0] + "_T_" + pre_unique_suffix),
                consts={
                    "perm": get_perm_from_formats(data_format,
                                                  c_first_data_format)
                })

            conv_unique_suffix = get_unique_suffix()
            conv_output = cls.get_outputs_names(node)[0]
            conv_node = cls.make_node_from_tf_node(
                node,
                [pre_transpose_node.output[0], w_transpose_node.output[0]],
                [conv_output + "_C_" + conv_unique_suffix],
                pads=pads,
                group=n_groups,
                kernel_shape=kernel_shape,
                strides=strides,
                dilations=dilations)

            post_unique_suffix = get_unique_suffix()
            post_transpose_node = Transpose.handle(
                make_node("Transpose", [conv_node.output[0], "perm"],
                          [conv_output],
                          name=conv_output + "_C_" + conv_unique_suffix +
                          "_T_" + post_unique_suffix),
                consts={
                    "perm": get_perm_from_formats(c_first_data_format,
                                                  data_format)
                })
            post_transpose_node.output.pop()
            post_transpose_node.output.append(conv_output)
            return [
                pre_transpose_node, w_transpose_node, conv_node,
                post_transpose_node
            ]
        else:
            conv_node = cls.make_node_from_tf_node(
                node, [node.inputs[0], w_transpose_node.output[0]],
                pads=pads,
                group=n_groups,
                kernel_shape=kernel_shape,
                strides=strides,
                dilations=dilations)
            return [w_transpose_node, conv_node]
Example #29
    def make_node_from_tf_node(cls,
                               node,
                               inputs=None,
                               outputs=None,
                               op_type=None,
                               name=None,
                               doc_string=None,
                               version=0,
                               should_check=True,
                               data_format_auto_convert=False,
                               **kwargs):
        """ Helper method to make node.
    The main api is almost same to onnx.helper.make_node with default value
    from TensorflowNode given.

    :param node: TensorflowNode object.
    :param inputs: Inputs names. Default is node.inputs.
    :param outputs: Outputs name. Default is node.outputs.
    :param op_type: ONNX op name. Default is cls.ONNX_OP.
    :param name: Node name. Default is node.name.
    :param doc_string: optional documentation string.
    :param version: Version used for check node. Default is cls.VERSION.
    :param should_check: Should check flag.
    Should be set to False for an unimplemented custom op.
    :param data_format_auto_convert: Add pre and post transposes if the data format is channel-last.
    :param kwargs: Other args.
    :return: NodeProto.
    """
        from .frontend.transpose import Transpose

        inputs = inputs if inputs is not None else node.inputs
        outputs = outputs if outputs is not None else node.outputs
        data_format = node.attr.get("data_format", b"").decode("UTF-8")
        need_transpose = data_format_auto_convert and data_format.find(
            "C") not in (-1, 1)

        nodes = []

        if need_transpose:
            # Add pre transpose
            c_first_data_format = data_format[0] + "C" + data_format[1:-1]
            pre_unique_suffix = get_unique_suffix()
            pre_transpose_node = Transpose.handle_node_proto(
                helper.make_node("Transpose", [node.inputs[0], "perm"],
                                 [node.inputs[0] + "_T_" + pre_unique_suffix],
                                 name=node.inputs[0] + "_T_" +
                                 pre_unique_suffix),
                consts={
                    "perm": get_perm_from_formats(data_format,
                                                  c_first_data_format)
                })
            nodes.append(pre_transpose_node)
            inputs[0] = pre_transpose_node.output[0]

            # Process inputs, outputs name
            # Assume real input is always the first
            onnx_node_suffix = get_unique_suffix()
            onnx_node_output = node.outputs[0]
            inputs = [pre_transpose_node.output[0]] + inputs[1:]
            outputs = [onnx_node_output + "_" + onnx_node_suffix] + outputs[1:]

        onnx_node = helper.make_node(
            op_type if op_type is not None else cls.ONNX_OP,
            inputs,
            outputs,
            name=name if name is not None else node.name,
            doc_string=doc_string,
            **kwargs)

        if should_check:
            cls.check_node(onnx_node, version)
        else:
            warnings.warn("Skipped check for {}.".format(node.op_type))

        if need_transpose:
            nodes.append(onnx_node)
            # Add post transpose
            post_unique_suffix = get_unique_suffix()
            post_transpose_node = Transpose.handle_node_proto(
                helper.make_node("Transpose", [onnx_node.output[0], "perm"],
                                 [onnx_node_output],
                                 name=onnx_node_output + "_" +
                                 onnx_node_suffix + "_T_" +
                                 post_unique_suffix),
                consts={
                    "perm": get_perm_from_formats(c_first_data_format,
                                                  data_format)
                })
            nodes.append(post_transpose_node)
            return nodes
        return onnx_node
Example #30
    def version_9(cls, node, **kwargs):
        unique_suffix = get_unique_suffix()

        # Convert to NCHW:
        transpose_node = Transpose.handle(TensorflowNode(
            name='transpose_input_to_nchw_' + unique_suffix,
            inputs=node.inputs[:1] + ["perm"],
            outputs=["transposed_input_" + unique_suffix]),
                                          consts={"perm": [0, 3, 1, 2]})

        # Get shape of NCHW input tensor:
        input_shape_node = Shape.handle(
            TensorflowNode(name='get_input_shape_' + unique_suffix,
                           inputs=transpose_node.output,
                           outputs=["input_shape_" + unique_suffix],
                           attr=node.attr))

        util_one = OnnxGraph.CONST_ONE_FP32

        output_shape_tensor = node.inputs[1]

        # Cast output shape (HW dim only) to float32:
        out_shape_float = Cast.handle(
            TensorflowNode(
                name='cast_output_shape_to_fp32_' + unique_suffix,
                inputs=[output_shape_tensor],
                outputs=["output_shape_float_partial_" + unique_suffix],
                attr={"DstT": tf.float32}))

        # Cast input shape to float32:
        in_shape_cast = Cast.handle(
            TensorflowNode(name='cast_input_shape_to_fp32_' + unique_suffix,
                           inputs=input_shape_node.output,
                           outputs=["input_shape_float_" + unique_suffix],
                           attr={"DstT": tf.float32}))

        slice_const_items = [
            ("begin", np.array([2]).astype(np.int32)),
            ("end", np.array([4]).astype(np.int32)),
            ("strides", np.array([1]).astype(np.int32)),
        ]

        slice_const_proto = {}

        for k, v in slice_const_items:
            const_name = "{}_".format(k) + unique_suffix
            slice_const_proto[k] = make_node(
                "Constant", [], [const_name],
                value=make_tensor(const_name,
                                  any_dtype_to_onnx_dtype(np_dtype=v.dtype),
                                  v.shape, v))

        in_shape_slices = StridedSlice.handle(
            TensorflowNode(
                name="stridedslice_input_shape_" + unique_suffix,
                inputs=list(in_shape_cast.output) +
                [slice_const_proto[k].output[0] for k, v in slice_const_items],
                outputs=["sliced_input_shape_" + unique_suffix]),
            consts={
                slice_const_proto[k].output[0]: v
                for k, v in slice_const_items
            },
            add_consts=True)

        # Divide input shape with output shape to get scaling factor:
        div_node = Div.handle(
            TensorflowNode(name='div_to_get_scale_factor_' + unique_suffix,
                           inputs=list(out_shape_float.output) +
                           list(in_shape_slices[-1].output),
                           outputs=["hw_scale_" + unique_suffix]))

        # Prepend 1's in the N, C dimension:
        full_scale = Concat.handle(TensorflowNode(
            name='prepend_ones_to_scale_factor_' + unique_suffix,
            inputs=[util_one, util_one] + list(div_node.output) +
            ["concat_axis"],
            outputs=["scale_" + unique_suffix]),
                                   consts={"concat_axis": 0})

        # Upsample with the computed scaling factor:
        upsample_node = cls.make_node_from_tf_node(
            node,
            op_type="Upsample",
            mode="bilinear",
            inputs=list(transpose_node.output) + list(full_scale.output),
            outputs=["upsample_to_tranpose_" + unique_suffix])

        # Transpose back to NHWC:
        transpose_output_node = Transpose.handle(TensorflowNode(
            name='transpose_output_to_nhwc_' + unique_suffix,
            inputs=list(upsample_node.output) + ["perm"],
            outputs=node.outputs),
                                                 consts={"perm": [0, 2, 3, 1]})

        transpose_and_get_shapes = [
            transpose_node, input_shape_node, out_shape_float, in_shape_cast
        ]
        slice_shape = list(slice_const_proto.values()) + in_shape_slices
        get_scale_and_upsample_and_transpose = [
            div_node, full_scale, upsample_node, transpose_output_node
        ]

        return transpose_and_get_shapes + slice_shape + get_scale_and_upsample_and_transpose