Code Example #1
  def version_1(cls, node, **kwargs):
    blocksize = node.attr["block_size"]
    # the default must be bytes: TF stores string attrs as bytes, and a
    # plain-str default would break the .decode() call
    data_format = node.attr.get("data_format", b"NHWC").decode()

    if data_format == "NHWC":
      transpose_unique_suffix = get_unique_suffix()
      space_to_depth_unique_suffix = get_unique_suffix()
      transpose_name = node.inputs[0] + "_T_" + transpose_unique_suffix
      space_to_depth_name = node.inputs[0] + "_T_STD_" + space_to_depth_unique_suffix
      before_transpose_node = cls.make_node_from_tf_node(
          node, [node.inputs[0]], [transpose_name],
          perm=get_perm_from_formats(data_format, "NCHW"),
          op_type="Transpose",
          name=transpose_name)
      space_to_depth_node = cls.make_node_from_tf_node(
          node, [transpose_name], [space_to_depth_name],
          blocksize=blocksize,
          name=space_to_depth_name)
      after_transpose_node = cls.make_node_from_tf_node(
          node, [space_to_depth_name],
          perm=get_perm_from_formats("NCHW", data_format),
          op_type="Transpose")
      return [before_transpose_node, space_to_depth_node, after_transpose_node]

    # input is already NCHW: a single SpaceToDepth node suffices
    return cls.make_node_from_tf_node(
        node, [node.inputs[0]], blocksize=blocksize)
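
The NHWC branch above leans on get_perm_from_formats to build the Transpose permutations. A minimal sketch of such a helper, assuming it simply maps one layout string onto another (the project's actual implementation may differ):

def get_perm_from_formats(src, dst):
  # permutation that carries a tensor from layout src to layout dst,
  # e.g. get_perm_from_formats("NHWC", "NCHW") == [0, 3, 1, 2]
  return [src.find(d) for d in dst]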
Code Example #2
File: conv_mixin.py  Project: minhoolee/onnx-darknet
  def conv_op(cls, node, d=2, **kwargs):
    auto_pad = node.attr["padding"].decode("UTF-8")
    auto_pad = "SAME_UPPER" if auto_pad == "SAME" else auto_pad
    data_format = node.attr["data_format"].decode("UTF-8")
    # indices of the spatial axes (everything that is not batch or channel)
    spatial_indices = [
        i for i in range(len(data_format)) if data_format[i] not in ["N", "C"]
    ]
    strides = list(map(lambda i: node.attr["strides"][i], spatial_indices))
    dilations = list(
        map(lambda i: node.attr.get("dilations", [1] * (d + 2))[i],
            spatial_indices))
    node_dict = kwargs["node_dict"]
    kernel_shape = node_dict[node.inputs[1]].attr["_output_shapes"][0][:d]
    output_shape = list(
        map(lambda i: node.attr["_output_shapes"][0][i], spatial_indices))
    input_shape = list(
        map(lambda i: node_dict[node.inputs[0]].attr["_output_shapes"][0][i],
            spatial_indices))
    pads = cls.cal_pads(auto_pad, len(spatial_indices), input_shape,
                        output_shape, strides, kernel_shape)
    unique_suffix = get_unique_suffix()
    # TF stores conv kernels as [*spatial, C_in, C_out]; ONNX Conv expects
    # [C_out, C_in, *spatial], hence the [d + 1, d, 0, ..., d - 1] permutation
    transpose_node = cls.make_node_from_tf_node(
        node, [node.inputs[1]], [node.inputs[1] + "_T_" + unique_suffix],
        perm=[d + 1, d] + list(range(d)),
        op_type="Transpose",
        name=node.inputs[1] + "_T_" + unique_suffix)
    conv_node = cls.make_node_from_tf_node(
        node, [node.inputs[0], node.inputs[1] + "_T_" + unique_suffix],
        pads=pads,
        kernel_shape=kernel_shape,
        strides=strides,
        dilations=dilations)

    return [transpose_node, conv_node]
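
cal_pads itself is not shown in this snippet. A hedged sketch of what it might compute for the SAME_UPPER case, matching the argument order at the call site (names and body assumed, dilations ignored; not the project's code):

def cal_pads(auto_pad, spatial_dims, input_shape, output_shape, strides,
             kernel_shape):
  # ONNX pads layout: [x1_begin, x2_begin, ..., x1_end, x2_end, ...]
  pads = [0] * (spatial_dims * 2)
  if auto_pad == "SAME_UPPER":
    for i in range(spatial_dims):
      total = max(
          (output_shape[i] - 1) * strides[i] + kernel_shape[i] -
          input_shape[i], 0)
      pads[i] = total // 2  # SAME_UPPER puts the extra pixel at the end
      pads[i + spatial_dims] = total - total // 2
  return pads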
Code Example #3
File: matmul.py  Project: minhoolee/onnx-darknet
  def version_1(cls, node, **kwargs):
    transpose_a = node.attr.get("transpose_a", False)
    transpose_b = node.attr.get("transpose_b", False)
    input_a = node.inputs[0]
    input_b = node.inputs[1]
    nodes = []
    # ONNX MatMul has no transpose flags, so emit an explicit Transpose
    # node for each input that TF marks as transposed
    if transpose_a:
      unique_suffix_a = get_unique_suffix()
      transposed_a = cls.make_node_from_tf_node(
          node, [node.inputs[0]], [node.inputs[0] + "_T_" + unique_suffix_a],
          op_type="Transpose",
          name=node.inputs[0] + "_T_" + unique_suffix_a)
      input_a = node.inputs[0] + "_T_" + unique_suffix_a
      nodes.append(transposed_a)
    if transpose_b:
      unique_suffix_b = get_unique_suffix()
      transposed_b = cls.make_node_from_tf_node(
          node, [node.inputs[1]], [node.inputs[1] + "_T_" + unique_suffix_b],
          op_type="Transpose",
          name=node.inputs[1] + "_T_" + unique_suffix_b)
      input_b = node.inputs[1] + "_T_" + unique_suffix_b
      nodes.append(transposed_b)
    nodes.append(cls.make_node_from_tf_node(node, [input_a, input_b]))
    return nodes
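
get_unique_suffix only needs to return a token that will not collide with existing tensor names, so a generated name like dense/kernel_T_3f9c1a2b stays unique in the graph. One plausible sketch (the real helper may differ):

import uuid

def get_unique_suffix():
  # short random token appended to generated node/tensor names
  return str(uuid.uuid4())[:8]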
Code Example #4
    def _common(cls, node, **kwargs):
        tensor_dict = kwargs["tensor_dict"]
        x = tensor_dict[node.inputs[0]]
        input_shape = x.get_shape().as_list()
        input_size = len(node.inputs)
        hidden_size = node.attrs["hidden_size"]
        direction = node.attrs.get("direction", "forward")
        num_directions = 2 if direction == "bidirectional" else 1

        # the output_sequence attribute was removed in opset version 7;
        # its default is 0
        output_sequence = node.attrs.get("output_sequence", 0)

        # TODO(fumihwh): check if prev node is one of RNN
        # if the input comes from a previous RNN cell, it has shape
        # [seq_length, num_directions, batch_size, hidden_size]; drop only
        # the num_directions axis, so a batch dimension of size 1 survives
        if len(input_shape) == 4 and input_shape[1] == 1:
            x = tf.squeeze(x, axis=[1])

        sequence_length = None
        if input_size >= 5 and node.inputs[4] in tensor_dict:
            sequence_length = tensor_dict[node.inputs[4]]

        cell_kwargs = {}

        if "clip" in node.attrs:
            cell_kwargs["cell_clip"] = node.attrs["clip"]

        tf_activations = [tf.nn.tanh]
        if "activations" in node.attrs:
            activations = list(
                map(lambda x: x.lower(), node.attrs["activations"]))
            activation_alpha = node.attrs.get("activation_alpha", [None] * 6)
            activation_beta = node.attrs.get("activation_beta", [None] * 6)
            tf_activations = [
                cls.rnn_get_activation(activations[1], activation_alpha[1],
                                       activation_beta[1])
            ]
            if num_directions == 2:
                tf_activations.append(
                    cls.rnn_get_activation(activations[4], activation_alpha[4],
                                           activation_beta[4]))

        # TODO(fumihwh): check if reverse and bidirectional works
        with tf.variable_scope("LSTM_" + get_unique_suffix(),
                               custom_getter=partial(
                                   cls._custom_getter,
                                   node=node,
                                   tensor_dict=tensor_dict,
                                   is_bidirectional=num_directions == 2)):

            cell_kwargs["use_peepholes"] = input_size == 8 and node.inputs[
                7] in tensor_dict
            cell_kwargs["forget_bias"] = 0.
            cell_kwargs["num_units"] = hidden_size
            initial_state = None
            initial_state_bw = None
            if input_size >= 7:
                initial_h = tensor_dict.get(node.inputs[5], None)
                initial_c = tensor_dict.get(node.inputs[6], None)
                if initial_h is not None and initial_c is not None:
                    initial_state = (tf.nn.rnn_cell.LSTMStateTuple(
                        initial_c[0], initial_h[0]), )
                    # the backward direction gets index 1; the chained
                    # assignment here previously clobbered the forward state
                    if num_directions == 2:
                        initial_state_bw = (tf.nn.rnn_cell.LSTMStateTuple(
                            initial_c[1], initial_h[1]), )

            rnn_kwargs = {}
            if num_directions == 1:
                rnn_kwargs["initial_state"] = initial_state
            elif num_directions == 2:
                rnn_kwargs["initial_state_fw"] = initial_state
                rnn_kwargs["initial_state_bw"] = initial_state_bw
            rnn_kwargs["sequence_length"] = sequence_length
            rnn_kwargs["time_major"] = True
            rnn_kwargs["dtype"] = tf.float32

            outputs, states = cls.rnn(x, tf.nn.rnn_cell.LSTMCell, cell_kwargs,
                                      rnn_kwargs, tf_activations, direction)

        if num_directions == 1:
            state = states[0]
            c = tf.expand_dims(state[0], 0)
            h = tf.expand_dims(state[1], 0)
            output = tf.expand_dims(outputs, 1)
        else:
            state_fw = states[0][0]
            state_bw = states[1][0]
            output_fw = outputs[0]
            output_bw = outputs[1]
            c_fw = tf.expand_dims(state_fw[0], 0)
            c_bw = tf.expand_dims(state_bw[0], 0)
            c = tf.concat((c_fw, c_bw), axis=0)
            h_fw = tf.expand_dims(state_fw[1], 0)
            h_bw = tf.expand_dims(state_bw[1], 0)
            h = tf.concat((h_fw, h_bw), axis=0)
            output_fw = tf.expand_dims(output_fw, 1)
            output_bw = tf.expand_dims(output_bw, 1)
            output = tf.concat((output_fw, output_bw), axis=1)

        return [output, h, c] if output_sequence == 0 else [h, c]
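
cls.rnn comes from a mixin that is not shown here. A hypothetical sketch of its dispatch, written to match the indexing above (states[0] for one direction, states[0][0] / states[1][0] for two) by wrapping each cell in a one-element MultiRNNCell; the project's actual helper may differ, and the reverse direction is left out:

import tensorflow as tf

def rnn(x, cell_class, cell_kwargs, rnn_kwargs, tf_activations, direction):
  cell_fw = tf.nn.rnn_cell.MultiRNNCell(
      [cell_class(activation=tf_activations[0], **cell_kwargs)])
  if direction == "bidirectional":
    cell_bw = tf.nn.rnn_cell.MultiRNNCell(
        [cell_class(activation=tf_activations[-1], **cell_kwargs)])
    # outputs == (output_fw, output_bw); states == (states_fw, states_bw),
    # each a one-element tuple holding an LSTMStateTuple(c, h)
    return tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, x, **rnn_kwargs)
  # states is a one-element tuple: (LSTMStateTuple(c, h),)
  return tf.nn.dynamic_rnn(cell_fw, x, **rnn_kwargs)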