Example #1
    def test_caffe_conv2d_dynamic_input_infer(self):
        graph = build_graph(nodes_attributes,
                            [('conv_input', 'conv_node'),
                             ('conv_weights', 'conv_node'),
                             ('conv_node', 'conv_output'),
                             ('conv_output', 'op_output')
                             ],
                            {'conv_output': {'shape': None},
                             'conv_input': {'shape': shape_array([1, 3, dynamic_dimension_value, 227])},
                             'conv_weights': {'shape': np.array([64, 3, 3, 3]),
                                              'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']},
                             'conv_node': {'pad_spatial_shape': np.array([[0, 0], [0, 0]]),
                                           'conv_pad': np.array([[0, 0], [0, 0], [0, 0], [0, 0]]),
                                           'dilation': np.array([1, 1, 1, 1]), 'bias_addable': True, 'bias_term': False,
                                           'output_spatial_shape': None, 'output_shape': None,
                                           'stride': np.array([1, 1, 1, 1]), 'group': 1,
                                           'kernel_spatial_idx': np.array([2, 3]),
                                           'input_feature_channel': 1,
                                           'output_feature_channel': 0,
                                           'output': 64, 'kernel_spatial': np.array([3, 3]),
                                           'spatial_dims': np.array([2, 3]), 'channel_dims': np.array([1]),
                                           'batch_dims': np.array([0])}
                             })

        conv_node = Node(graph, 'conv_node')
        Convolution.infer(conv_node)
        exp_shape = shape_array([1, 64, dynamic_dimension_value, 225])
        res_shape = graph.node['conv_output']['shape']
        self.assertTrue(strict_compare_tensors(exp_shape, res_shape))
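The expected shape follows the standard convolution output formula, and the dynamic dimension simply propagates through it. A minimal sketch of the arithmetic this test relies on, using a hypothetical helper in plain Python (independent of MO helpers such as shape_array):

# out = (in + pad_begin + pad_end - dilation * (kernel - 1) - 1) // stride + 1
def conv_out_dim(in_dim, kernel, stride=1, pad=0, dilation=1):
    if in_dim is None:  # stand-in for dynamic_dimension_value
        return None     # a dynamic input dimension stays dynamic
    return (in_dim + 2 * pad - dilation * (kernel - 1) - 1) // stride + 1

assert conv_out_dim(227, kernel=3) == 225    # static width: 227 -> 225
assert conv_out_dim(None, kernel=3) is None  # dynamic height propagates as-is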
Example #2
    def backend_attrs(self):
        # the same attributes as in a regular convolution plus the additional attributes
        # 'deformable_group', 'group' and 'bilinear_interpolation_pad'
        attrs = Convolution(self.graph, {}).backend_attrs() + ['deformable_group', 'group']
        if self.get_opset() == 'opset8':
            attrs.append('bilinear_interpolation_pad')
        return attrs
Example #3
    def extract(cls, node):
        final_pads = get_pads(node.module)

        # Extract strides attribute
        strides = node.module.stride
        final_strides = np.array([1, 1, *strides], dtype=np.int64)

        # Extract dilations attribute
        dilations = node.module.dilation
        if isinstance(dilations, int):
            dilations = [dilations, dilations]
        final_dilations = np.array([1, 1, *dilations], dtype=np.int64)
        attrs = {
            'type': 'Deconvolution',
            'pad': final_pads,
            'stride': final_strides,
            'dilation': final_dilations,
            'group': node.module.groups,
            'kernel_spatial': np.array(node.module.kernel_size,
                                       dtype=np.int64),
            'input_feature_channel': 1,
            'output_feature_channel': 0,
            'channel_dims': np.array([1], dtype=np.int64),
            'batch_dims': np.array([0], dtype=np.int64),
            'layout': 'NCHW',
            'in_ports_count': 2,
            'output': node.module.out_channels,
        }

        # update the attributes of the node
        Convolution.update_node_stat(node, attrs)
        return cls.enabled
Example #4
    def test_caffe_conv2d_infer_wrong_input_shape(self):
        graph = build_graph(nodes_attributes,
                            [('conv_input', 'conv_node'),
                             ('conv_weights', 'conv_node'),
                             ('conv_node', 'conv_output'),
                             ('conv_output', 'op_output')
                             ],
                            {'conv_output': {'shape': None},
                             'conv_input': {'shape': np.array([1, 3, 1, 1])},
                             'conv_weights': {'shape': np.array([64, 3, 3, 3]),
                                              'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']},
                             'conv_node': {'pad_spatial_shape': np.array([[0, 0], [0, 0]]),
                                           'conv_pad': np.array([[0, 0], [0, 0], [0, 0], [0, 0]]),
                                           'dilation': np.array([1, 1, 1, 1]), 'bias_addable': True, 'bias_term': False,
                                           'output_spatial_shape': None, 'output_shape': None,
                                           'stride': np.array([1, 1, 1, 1]), 'group': 1,
                                           'kernel_spatial_idx': np.array([2, 3]),
                                           'input_feature_channel': 1,
                                           'output_feature_channel': 0,
                                           'output': 64, 'kernel_spatial': np.array([3, 3]),
                                           'spatial_dims': np.array([2, 3]), 'channel_dims': np.array([1]),
                                           'batch_dims': np.array([0])}
                             })

        conv_node = Node(graph, 'conv_node')
        with self.assertRaises(Error):
            Convolution.infer(conv_node)
Example #5
    def test_deconv_infer_no_shape(self):
        graph = build_graph(nodes_attributes,
                            [('conv_input', 'conv_node'),
                             ('conv_weights', 'conv_node'),
                             ('conv_node', 'conv_output'),
                             ('conv_output', 'op_output')
                             ],
                            {'conv_output': {'shape': None},
                             'conv_input': {'shape': None},
                             'conv_weights': {'shape': np.array([1, 21, 16, 16]),
                                              'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']},
                             'conv_node': {'spatial_dims': np.array([2, 3]), 'batch_dims': np.array([0]),
                                           'channel_dims': np.array([1]),
                                           'pad_spatial_shape': np.array([[0, 0], [0, 0]]),
                                           'kernel_spatial': np.array([4, 4]), 'output_spatial_shape': None,
                                           'kernel_spatial_idx': np.array([2, 3]),
                                           'input_feature_channel': 1,
                                           'output_feature_channel': 0,
                                           'type': 'Deconvolution', 'output': 21, 'dilation': np.array([1, 1, 1, 1]),
                                           'group': 1, 'stride': np.array([1, 1, 2, 2]), 'output_shape': None}
                             })

        deconv_node = Node(graph, 'conv_node')
        with self.assertRaisesRegex(Error, "Input data shape is None for node.*"):
            Convolution.infer(deconv_node)
Example #6
    def test_caffe_conv2d_infer(self):
        graph = build_graph(nodes_attributes,
                            [('conv_input', 'conv_node'),
                             ('conv_weights', 'conv_node'),
                             ('conv_node', 'conv_output'),
                             ('conv_output', 'op_output')
                             ],
                            {'conv_output': {'shape': None},
                             'conv_input': {'shape': np.array([1, 3, 227, 227])},
                             'conv_weights': {'shape': np.array([64, 3, 3, 3]),
                                              'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']},
                             'conv_node': {'pad_spatial_shape': np.array([[0, 0], [0, 0]]),
                                           'conv_pad': np.array([[0, 0], [0, 0], [0, 0], [0, 0]]),
                                           'dilation': np.array([1, 1, 1, 1]), 'bias_addable': True, 'bias_term': False,
                                           'output_spatial_shape': None, 'output_shape': None,
                                           'stride': np.array([1, 1, 1, 1]), 'group': 1,
                                           'kernel_spatial_idx': np.array([2, 3]),
                                           'input_feature_channel': 1,
                                           'output_feature_channel': 0,
                                           'output': 64, 'kernel_spatial': np.array([3, 3]),
                                           'spatial_dims': np.array([2, 3]), 'channel_dims': np.array([1]),
                                           'batch_dims': np.array([0])}
                             })

        conv_node = Node(graph, 'conv_node')
        Convolution.infer(conv_node)
        exp_shape = np.array([1, 64, 225, 225])
        res_shape = graph.node['conv_output']['shape']
        for i in range(0, len(exp_shape)):
            self.assertEqual(exp_shape[i], res_shape[i])
Example #7
    def extract(cls, node):
        proto_layer, model_layer = node.pb, node.model_pb

        if not proto_layer:
            raise Error('Protobuf layer can not be empty')

        deconv_param = proto_layer.convolution_param

        params = conv_set_params(deconv_param, 'Deconv2D')
        attrs = conv_create_attrs(params)
        attrs.update({
            'type': 'Deconvolution',
            'op': 'Deconv2D',
            'get_group': lambda node: node.group,
            'get_output_feature_dim': lambda node: node.output,
            'input_feature_channel': 0,
            'output_feature_channel': 1,
        })

        # Embed weights and biases as attributes.
        # They will be moved to separate nodes in a special pass.
        attrs.update(weights_biases(deconv_param.bias_term, model_layer))
        attrs.update(layout_attrs())

        # update the attributes of the node
        Convolution.update_node_stat(node, attrs)
        return cls.enabled
Example #8
    def extract(cls, node):
        attrs = tf_create_attrs(node, 2, 3)

        def get_num_groups(node):
            if 'group' in node:
                return node.group
            elif node.in_node(0).shape is not None and node.kernel_shape is not None \
                    and node.in_node(0).shape[node.channel_dims[0]] is not dynamic_dimension \
                    and node.kernel_shape[node.input_feature_channel] is not dynamic_dimension:
                # if group attribute is not defined, number of groups is calculated
                # from number of input channels and filter channel size
                return node.in_node(0).shape[node.channel_dims] // node.kernel_shape[node.input_feature_channel]
            else:
                return 1

        attrs.update({
            'op': __class__.op,
            'get_group': get_num_groups,
            'get_output_feature_dim': lambda node: node.kernel_shape[node.output_feature_channel],
            'get_weights_permute': PermuteAttrs.Permutation(perm=int64_array([3, 2, 0, 1]),
                                                            inv=int64_array([2, 3, 1, 0]))
        })

        # update the attributes of the node
        Convolution.update_node_stat(node, attrs)
        return cls.enabled
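The group deduction above is plain integer arithmetic on shapes. A standalone sketch with hypothetical TF-style shapes (NHWC input, HWIO kernel, so the input feature channel is axis 2, as set by tf_create_attrs(node, 2, 3)):

import numpy as np

input_shape = np.array([1, 56, 56, 32])   # NHWC: 32 input channels
kernel_shape = np.array([3, 3, 8, 64])    # HWIO: per-group filter depth of 8

# the same rule as get_num_groups when no explicit 'group' attribute is set
groups = input_shape[3] // kernel_shape[2]
assert groups == 4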
Example #9
    def extract(cls, node):
        proto_layer, model_layer = node.pb, node.model_pb

        if not proto_layer:
            raise Error('Protobuf layer can not be empty')

        conv_param = proto_layer.convolution_param
        conv_type = 'ConvND' if len(proto_layer.bottom) > 1 else 'Conv2D'

        params = conv_set_params(conv_param, conv_type)
        attrs = conv_create_attrs(params)
        attrs.update({
            'op': __class__.op,
            'get_group': lambda node: node.group,
            'get_output_feature_dim': lambda node: node.output,
            'weights_index': 1 if conv_type == 'Conv2D' else 2
        })

        # Embed weights and biases as attributes.
        # They will be moved to separate nodes in a special pass.
        attrs.update(weights_biases(conv_param.bias_term, model_layer,
                                    start_index=len(proto_layer.bottom), proto=conv_param))
        attrs.update(layout_attrs())

        # update the attributes of the node
        Convolution.update_node_stat(node, attrs)
        return cls.enabled
Example #10
    def extract(cls, node: Node) -> bool:
        """
        Extract conv parameters from node.parameters.
        node.parameters like file descriptor object.
        :param node: Convolution node
        :return:
        """
        pb = node.parameters
        kernel = read_token_value(pb, b'<PatchDim>')
        stride = read_token_value(pb, b'<PatchStep>')
        patch_stride = read_token_value(pb, b'<PatchStride>')

        read_learning_info(pb)

        collect_until_whitespace(pb)
        weights, weights_shape = read_binary_matrix(pb)

        collect_until_whitespace(pb)
        biases = read_binary_vector(pb)

        if (patch_stride - kernel) % stride != 0:
            raise Error(
                'Kernel size and stride do not correspond to the `patch_stride` attribute of the Convolution layer. ' +
                refer_to_faq_msg(93))

        output = biases.shape[0]
        if weights_shape[0] != output:
            raise Error('Weights shape does not correspond to the `output` attribute of Convolution layer. ' +
                        refer_to_faq_msg(93))

        mapping_rule = {
            'output': output,
            'patch_stride': patch_stride,
            'bias_term': None,
            'pad': np.array([[0, 0], [0, 0], [0, 0], [0, 0]], dtype=np.int64),
            'pad_spatial_shape': np.array([[0, 0], [0, 0]], dtype=np.int64),
            'dilation': np.array([1, 1, 1, 1], dtype=np.int64),
            'kernel': np.array([1, 1, 1, kernel], dtype=np.int64),
            'stride': np.array([1, 1, 1, stride], dtype=np.int64),
            'kernel_spatial': np.array([1, kernel], dtype=np.int64),
            'input_feature_channel': 1,
            'output_feature_channel': 0,
            'kernel_spatial_idx': [2, 3],
            'group': 1,
            'reshape_kernel': True,
        }

        mapping_rule.update(layout_attrs())
        embed_input(mapping_rule, 1, 'weights', weights)
        embed_input(mapping_rule, 2, 'biases', biases)

        mapping_rule['bias_addable'] = len(biases) > 0

        Convolution.update_node_stat(node, mapping_rule)
        return cls.enabled
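The divisibility check above ensures the 1D kernel tiles a patch evenly, i.e. that (patch_stride - kernel) / stride + 1 is a whole number of kernel positions. A tiny worked example with made-up values:

patch_stride, kernel, stride = 11, 3, 2  # hypothetical Kaldi attribute values
assert (patch_stride - kernel) % stride == 0
num_positions = (patch_stride - kernel) // stride + 1  # 5 kernel positions per patch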
Example #11
def add_convolution_to_swap_xy_coordinates(graph: Graph, input_node: Node,
                                           coordinates_size: int):
    """
    The function add convolution node after the node 'input_node' to swap xy coordinates of the boxes produced
    by the node 'input_node'. It is expected that box coordinates are located in the fastest changing dimension of the
    'input_node' output, i.e. the input tensor could be reshaped to [num_boxes, 4] or [num_boxes, 5]. If the size is 5,
    then the 0-th element for each of num_boxes blocks is not changed and element 1 is swapped with element 2, element 3
    is swapped with element 4. This is the case when boxes coordinates are produced by the layer "Proposal". The exact
    amount of elements in each block is equal to the 'coordinates_size' parameter.
    :param graph: graph to operate on.
    :param input_node: node producing boxes coordinates.
    :param coordinates_size: integer value equal to 4 or 5.
    :return convolution node that swaps coordinates.
    """
    # swap of input tensor with 4 or 5 numbers describing boxes are supported
    assert (coordinates_size in [4, 5])

    input_reshape_4d_node = create_op_node_with_second_input(
        graph, Reshape, int64_array([-1, 1, 1, coordinates_size]),
        dict(name=input_node.name + '/reshape_4d'), input_node)
    mark_input_as_in_correct_layout(input_reshape_4d_node, 0)
    # do not mark second input because the reshape works in initial model layout and needs to be transformed to NCHW
    mark_output_as_in_correct_layout(input_reshape_4d_node, 0)

    if coordinates_size == 5:
        # the zero-indexed element is not a box coordinate ("batch id" in the case of Proposal)
        conv_filter_data = mo_array([[[[1, 0, 0, 0, 0], [0, 0, 1, 0, 0], [0, 1, 0, 0, 0],
                                       [0, 0, 0, 0, 1], [0, 0, 0, 1, 0]]]], dtype=np.float32)
    else:
        conv_filter_data = mo_array([[[[0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]]]],
                                    dtype=np.float32)

    conv_filter_data = np.transpose(conv_filter_data, [2, 3, 0, 1])

    conv_filter_const_op = Const(graph, dict(value=conv_filter_data))
    conv_filter_const_node = conv_filter_const_op.create_node(
        [], dict(name=input_node.name + '/weights'))

    conv_op = Convolution(
        graph, {
            'bias_addable': True,
            'channel_dims': mo_array([3]),
            'batch_dims': mo_array([0]),
            'input_feature_channel': 0,
            'output_feature_channel': 1,
            'group': 1,
            'layout': 'NHWC',
        })
    return conv_op.create_node([input_reshape_4d_node, conv_filter_const_node],
                               dict(name=input_node.name + "/conv"))
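The 1x1 convolution constructed here is equivalent to multiplying each coordinates_size-element block by a permutation matrix. A minimal NumPy sketch, outside the graph machinery, for the 4-coordinate case:

import numpy as np

# the same 4x4 matrix as conv_filter_data above, rows indexed by input element
swap = np.array([[0, 1, 0, 0],
                 [1, 0, 0, 0],
                 [0, 0, 0, 1],
                 [0, 0, 1, 0]], dtype=np.float32)

box = np.array([10., 20., 30., 40.])  # [a1, b1, a2, b2]
print(box @ swap)                     # [20. 10. 40. 30.] -> each (a, b) pair swapped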
Example #12
    def extract(cls, node):
        attr = get_mxnet_layer_attrs(node.symbol_dict)

        kernel = attr.tuple("kernel", int, None)
        stride = attr.tuple("stride", int, tuple(np.ones(len(kernel), dtype=np.int64)))
        padding = attr.tuple("pad", int, tuple(np.zeros(len(kernel), dtype=np.int64)))
        dilate = attr.tuple("dilate", int, tuple(np.ones(len(kernel), dtype=np.int64)))
        group = attr.int("num_group", 1)
        output = attr.int("num_filter", None)
        bias_term = not attr.bool("no_bias", True)
        target_shape = attr.tuple("target_shape", int, None)
        if target_shape:
            target_shape = int64_array(target_shape)

        final_dilations = int64_array([1, 1, *dilate]) if dilate is not None else None
        node_attrs = {
            'op': __class__.op,
            'type': 'Deconvolution',
            'bias_addable': True,
            'bias_term': bias_term,
            'pad': int64_array([[0, 0], [0, 0], *[[pad, pad] for pad in padding]]),
            'pad_spatial_shape': int64_array([[pad, pad] for pad in padding]),
            'dilation': final_dilations,
            'output_spatial_shape': target_shape,
            'original_output_spatial_shape': target_shape,
            'output_shape': None,
            'stride': int64_array([1, 1, *stride]),
            'group': group,
            'output': output,
            'kernel_spatial': int64_array(kernel),
            'input_feature_channel': 1,
            'output_feature_channel': 0,
            'kernel_spatial_idx': None,
            'reshape_kernel': True,
            'spatial_dims': None,
            'channel_dims': int64_array([1]),
            'batch_dims': int64_array([0]),
            'layout': 'NCHW',
            'get_pad': DeconvFrontExtractor.get_pad,
        }

        output_padding = attr.tuple("adj", int, None)
        if target_shape is None and output_padding:
            node_attrs["output_padding"] = int64_array(
                [0, 0, *[s for s in output_padding]])

        # update the attributes of the node
        Convolution.update_node_stat(node, node_attrs)
        return cls.enabled
Example #13
    def extract(cls, node):
        attrs = get_mxnet_layer_attrs(node.symbol_dict)
        scale = attrs.int("scale", 1)
        num_filter = attrs.int("num_filter", 0)
        mode = attrs.str("sample_type", None)
        if mode == 'nearest':
            node_attrs = {
                'factor': attrs.int("scale", 1),
                'mode': mode,
                'antialias': 0,
                'axes': int64_array([2, 3]),
            }
            Interpolate.update_node_stat(node, node_attrs)
        elif mode == 'bilinear':
            """
            Bilinear UpSampling uses deconvolution algorithm under the hood.
            For MXNet Bilinear UpSampling op just wrapper over Deconvolution op.
            Inputs data:
                input1 - input data
                input2 - deconvolution weight
            """
            kernel = 2 * scale - scale % 2
            stride = scale
            pad = math.ceil((scale - 1) / 2)
            num_group = num_filter

            node_attrs = {
                'op': __class__.op,
                'type': 'Deconvolution',
                'bias_addable': True,
                'bias_term': False,
                'pad': int64_array([[0, 0], [0, 0], [pad, pad], [pad, pad]]),
                'pad_spatial_shape': int64_array([[pad, pad], [pad, pad]]),
                'dilation': None,
                'output_spatial_shape': None,
                'output_shape': None,
                'stride': int64_array([1, 1, stride, stride]),
                'group': num_group,
                'output': num_filter,
                'kernel_spatial': int64_array([kernel, kernel]),
                'input_feature_channel': 0,
                'output_feature_channel': 1,
                'kernel_spatial_idx': None,
                'reshape_kernel': True,
                'spatial_dims': None,
                'channel_dims': int64_array([1]),
                'batch_dims': int64_array([0]),
                'layout': 'NCHW',
                'get_pad': DeconvFrontExtractor.get_pad,
            }
            Convolution.update_node_stat(node, node_attrs)
        return cls.enabled
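For the bilinear branch, the deconvolution parameters follow directly from the formulas above, and the standard transposed-convolution size relation then yields an output exactly scale times larger. A quick sketch of that arithmetic for scale = 2:

import math

scale = 2
kernel = 2 * scale - scale % 2    # 4
stride = scale                    # 2
pad = math.ceil((scale - 1) / 2)  # 1

# transposed convolution: out = (in - 1) * stride - 2 * pad + kernel
in_size = 16
out_size = (in_size - 1) * stride - 2 * pad + kernel
assert out_size == scale * in_size  # 32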
Example #14
    def extract(cls, node):
        attrs = tf_create_attrs(node, 3, 4)
        attrs.update({
            'op': __class__.op,
            'get_group': lambda node: 1,
            'get_output_feature_dim': lambda node: node.kernel_shape[node.output_feature_channel],
            'get_weights_permute': PermuteAttrs.Permutation(perm=int64_array([4, 3, 0, 1, 2]),
                                                            inv=int64_array([2, 3, 4, 1, 0]))
        })

        # update the attributes of the node
        Convolution.update_node_stat(node, attrs)
        return cls.enabled
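The permutation [4, 3, 0, 1, 2] maps a TF-style 3D kernel, assumed to be laid out as [D, H, W, C_in, C_out], to the OIDHW layout expected by the IR. A small NumPy check of that shape mapping:

import numpy as np

w = np.zeros((1, 7, 7, 3, 64))             # [D, H, W, C_in, C_out] (assumed TF layout)
w_perm = np.transpose(w, [4, 3, 0, 1, 2])  # -> [C_out, C_in, D, H, W]
assert w_perm.shape == (64, 3, 1, 7, 7)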
Example #15
    def extract(cls, node):
        attrs = tf_create_attrs(node, 2, 2)
        attrs.update({
            'op': __class__.op,
            'kernel_spatial_idx': np.array([0, 1], dtype=np.int64),
            'get_group': lambda node: node.kernel_shape[node.output_feature_channel],
            'get_output_feature_dim': lambda node: node.kernel_shape[-1] * node.kernel_shape[-2],
            'get_weights_permute': PermuteAttrs.Permutation(perm=int64_array([2, 3, 0, 1]),
                                                            inv=int64_array([2, 3, 0, 1]))
        })

        # update the attributes of the node
        Convolution.update_node_stat(node, attrs)
        return cls.enabled
Example #16
    def extract(cls, node):
        attrs = tf_create_attrs(node, 2, 3)
        attrs.update({
            'op': __class__.op,
            'get_group': lambda node: node.group if 'group' in node and node.group is not None
                         else node.in_node(0).shape[node.channel_dims] // node.kernel_shape[node.input_feature_channel],
            'get_output_feature_dim': lambda node: node.kernel_shape[node.output_feature_channel],
            'get_weights_permute': PermuteAttrs.Permutation(perm=int64_array([3, 2, 0, 1]),
                                                            inv=int64_array([2, 3, 1, 0]))
        })

        # update the attributes of the node
        Convolution.update_node_stat(node, attrs)
        return cls.enabled
Example #17
    def test_deconv_infer_ideal(self):
        graph = build_graph(nodes_attributes,
                            [('conv_input', 'conv_node'),
                             ('conv_weights', 'conv_node'),
                             ('conv_node', 'conv_output'),
                             ('conv_output', 'op_output')
                             ],
                            {'conv_output': {'shape': None},
                             'conv_input': {'shape': np.array([1, 21, 16, 16])},
                             'conv_weights': {'shape': np.array([1, 21, 4, 4]),
                                              'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']},
                             'conv_node': {'channel_dims': np.array([1]), 'bias_addable': True, 'bias_term': False,
                                           'batch_dims': np.array([0]),
                                           'pad_spatial_shape': np.array([[0, 0], [0, 0]]),
                                           'kernel_spatial': np.array([4, 4]), 'output_spatial_shape': None,
                                           'kernel_spatial_idx': np.array([2, 3]),
                                           'input_feature_channel': 1,
                                           'output_feature_channel': 0,
                                           'output_padding': np.array([0, 0, 1, 1]),
                                           'type': 'Deconvolution', 'output': 21, 'dilation': np.array([1, 1, 1, 1]),
                                           'group': 1, 'stride': np.array([1, 1, 2, 2]), 'output_shape': None}
                             })

        deconv_node = Node(graph, 'conv_node')

        Convolution.infer(deconv_node)
        res_shape = deconv_node['output_shape']
        exp_shape = np.array([1, 21, 35, 35])

        for i in range(0, len(exp_shape)):
            self.assertEqual(exp_shape[i], res_shape[i])

        # Check that the shape and pad attributes do not change after a second infer pass
        Convolution.infer(deconv_node)

        for i in range(0, len(exp_shape)):
            self.assertEqual(exp_shape[i], res_shape[i])
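The expected spatial size of 35 matches the standard transposed-convolution size relation, given the test's values (input 16, stride 2, kernel 4, zero pads, output_padding 1). A sketch of that arithmetic:

in_size, stride, kernel, pad, out_pad = 16, 2, 4, 0, 1
out_size = stride * (in_size - 1) + kernel - 2 * pad + out_pad
assert out_size == 35  # matches exp_shape = [1, 21, 35, 35]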
Example #18
    def replace_timeheightconv(self, graph: Graph, node: Node):
        req_time_offsets = node.soft_get('time_offsets')
        offsets = node.soft_get("offsets", [[]])
        all_time_offsets = list(set(offsets[:, 0]))
        all_time_offsets.sort()
        in_name = node.soft_get('name', node.id)
        rename_node(node, in_name + '/to_delete')

        # create MemoryOffset nodes to gather the temporal context;
        # a Concat is needed when there is more than one time offset
        concat = Concat(graph, attrs={'name': in_name + '/Concat',
                                      'in_ports_count': len(all_time_offsets)}).create_node()
        i = 0
        for t in all_time_offsets:
            # if the time offset is in required_time_offsets, no default value is needed
            has_default = t not in req_time_offsets
            memoff = MemoryOffset(graph, attrs={'name': in_name + '/MemoryOffset_' + str(i),
                                                't': t,
                                                'has_default': has_default,
                                                'splitted': False,
                                                'pair_name': in_name + '/MemoryOffset_pair_' + str(i)}).create_node()
            concat.in_port(i).connect(memoff.out_port(0))
            memoff.in_port(0).connect(node.in_port(0).get_source())
            i = i + 1

        stride = node.soft_get("height_subsample", 1)

        kernel = int64_array([0, 0])
        kernel[0] = len(set(offsets[:, 0]))
        kernel[1] = len(set(offsets[:, 1]))

        pad_h = int64_array([0, 0])
        pad_h[0] = -min(offsets[:, 1]) if min(offsets[:, 1]) < 0 else 0
        pad_h[1] = stride * node.height_out - (node.height_in - max([max(offsets[:, 1]), 0]))

        dilation_t = (max(offsets[:, 0]) - min(offsets[:, 0])) / (kernel[0] - 1) if kernel[0] > 1 else 1
        # guard on kernel[1] here (not kernel[0]) to avoid a zero division for a one-element-high kernel
        dilation_h = (max(offsets[:, 1]) - min(offsets[:, 1])) / (kernel[1] - 1) if kernel[1] > 1 else 1

        conv_attrs = {
            'name': in_name,
            'output': node['out_channels'],
            'height_in': node.height_in,
            'bias_term': None,
            'pad': int64_array([[0, 0], [0, 0], [0, 0], pad_h]),
            'pad_spatial_shape': int64_array([[0, 0], pad_h]),
            'dilation': int64_array([1, 1, dilation_t, dilation_h]),
            'kernel': int64_array([node.out_channels, node.in_channels, kernel[0], kernel[1]]),
            'stride': int64_array([1, 1, 1, stride]),
            'kernel_spatial': kernel,
            'input_feature_channel': 1,
            'output_feature_channel': 0,
            'channel_dims': int64_array([1]),
            'spatial_dims': int64_array([2, 3]),
            'batch_dims': int64_array([0]),
            'kernel_spatial_idx': int64_array([2, 3]),
            'group': 1,
            'reshape_kernel': True,
            'bias_addable': True,
        }
        conv = Convolution(graph, attrs=conv_attrs).create_node()
        conv.in_port(0).connect(concat.out_port(0))
        conv.in_port(1).connect(node.in_port(1).get_source())

        # change the weights layout from OHWI to OIHW;
        # in the future this should be replaced by the common Permute mechanics
        weights = conv.in_port(1).get_source().node.value
        weights = weights.reshape(int64_array([node.out_channels, -1, node.in_channels]))
        weights = weights.transpose(int64_array([0, 2, 1]))
        weights = weights.flatten()
        conv.in_port(1).get_source().node.value = weights

        conv.in_port(2).connect(node.in_port(2).get_source())
        node.out_port(0).get_connection().set_source(conv.out_port(0))
        graph.remove_node(node.id)
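The kernel and dilation derivation above counts the distinct time and height offsets and spreads them over the offset range. A worked example with a hypothetical offsets array:

import numpy as np

offsets = np.array([[-1, -1], [-1, 0], [-1, 1],
                    [1, -1], [1, 0], [1, 1]])  # hypothetical (time, height) pairs

kernel_t = len(set(offsets[:, 0]))  # 2 distinct time offsets
kernel_h = len(set(offsets[:, 1]))  # 3 distinct height offsets

dilation_t = (offsets[:, 0].max() - offsets[:, 0].min()) / (kernel_t - 1)  # 2.0
dilation_h = (offsets[:, 1].max() - offsets[:, 1].min()) / (kernel_h - 1)  # 1.0
assert (kernel_t, kernel_h, dilation_t, dilation_h) == (2, 3, 2.0, 1.0)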
Example #19
    def test_conv_infer_3D_convolution(self):
        graph = build_graph(nodes_attributes,
                            [
                                ('conv_input', 'conv_node'),
                                ('conv_weights', 'conv_node'),
                                ('conv_node', 'conv_output'),
                                ('conv_output', 'op_output')
                            ],
                            {
                                'conv_output': {
                                    'shape': None
                                },
                                'conv_input': {
                                    'shape': int64_array([1, 3, 16, 224, 224])
                                },
                                'conv_weights': {
                                    'shape': int64_array([3, 64, 1, 7, 7]),
                                    'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']
                                },
                                'conv_node': {
                                    'type': 'Convolution',
                                    'bias_term': None,
                                    'stride': None,
                                    'dilation': None,

                                    'batch_dims': int64_array([0]),
                                    'channel_dims': int64_array([1]),

                                    'output_spatial_shape': None,

                                    'input_feature_channel': 0,
                                    'output_feature_channel': 1,

                                    'group': 1,
                                    'output_shape': None,
                                    'layout': 'NCHW'
                                }
                            })

        conv_node = Node(graph, 'conv_node')
        conv_output = Node(graph, 'conv_output')

        Convolution.infer(conv_node)

        # Check bias_term attribute
        self.assertTrue(conv_node.has_valid('bias_term'))
        self.assertTrue(not conv_node.bias_term)
        # Check kernel_spatial_idx attr detection
        self.assertTrue(conv_node.has_valid('kernel_spatial_idx'))
        self.assertTrue(np.array_equal(int64_array([2, 3, 4]), conv_node.kernel_spatial_idx))
        # Check spatial_dims attr detection
        self.assertTrue(conv_node.has_valid('spatial_dims'))
        self.assertTrue(np.array_equal(int64_array([2, 3, 4]), conv_node.spatial_dims))
        # Check kernel_spatial attr detection
        self.assertTrue(conv_node.has_valid('kernel_spatial'))
        self.assertTrue(np.array_equal(int64_array([1, 7, 7]), conv_node.kernel_spatial))
        # Check output attribute
        self.assertTrue(conv_node.has_valid('output'))
        self.assertEqual(64, conv_node.output)
        # Check dilation value. Should be set to default
        self.assertTrue(conv_node.has_valid('dilation'))
        self.assertTrue(np.array_equal(int64_array([1, 1, 1, 1, 1]), conv_node.dilation))
        # Check stride value. Should be set to default
        self.assertTrue(conv_node.has_valid('stride'))
        self.assertTrue(np.array_equal(int64_array([1, 1, 1, 1, 1]), conv_node.stride))
        # Check pad value. Should be set to default
        self.assertTrue(conv_node.has_valid('pad'))
        self.assertTrue(np.array_equal(int64_array([[0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]), conv_node.pad))
        # Check pad_spatial_shape
        self.assertTrue(conv_node.has_valid('pad_spatial_shape'))
        self.assertTrue(np.array_equal(int64_array([[0, 0], [0, 0], [0, 0]]), conv_node.pad_spatial_shape))
        # Check resulting output shape
        self.assertTrue(np.array_equal(int64_array([1, 64, 16, 218, 218]), conv_output.shape))
Example #20
    def extract(cls, node):
        # Extract pads attribute
        # If 'pads' is not specified, it is set to its default in the infer function
        pads = onnx_attr(node, 'pads', 'ints', default=None, dst_type=lambda x: int64_array(x))
        assert pads is None or len(pads) % 2 == 0
        final_pad = None
        if pads is not None:
            pads = pads.reshape([2, -1])
            pads = np.transpose(pads)
            final_pad = int64_array([[0, 0], [0, 0], *pads])

        # Extract dilations attribute
        # If 'dilations' is not specified, it is set to its default in the infer function
        dilations = onnx_attr(node, 'dilations', 'ints', default=None, dst_type=lambda x: int64_array(x))
        final_dilations = int64_array([1, 1, *dilations]) if dilations is not None else None

        # Extract strides attribute
        # If 'strides' is not specified, it is set to its default in the infer function
        strides = onnx_attr(node, 'strides', 'ints', default=None, dst_type=lambda x: int64_array(x))
        final_strides = int64_array([1, 1, *strides]) if strides is not None else None

        kernel_shape = onnx_attr(node, 'kernel_shape', 'ints', default=None)
        auto_pad = onnx_attr(node, 'auto_pad', 's', default=None, dst_type=get_onnx_autopad)
        group = onnx_attr(node, 'group', 'i', default=1, dst_type=lambda x: int64_array(x))

        attrs = {
            'op': __class__.op,
            'auto_pad': auto_pad,
            'bias_addable': True,
            'bias_term': None,
            'pad': final_pad,
            'pad_spatial_shape': int64_array(pads) if pads is not None else None,
            'dilation': final_dilations,
            'output_spatial_shape': None,
            'output_shape': None,
            'stride': final_strides,
            'group': group,
            'output': None,
            'kernel_spatial': int64_array(kernel_shape) if kernel_shape is not None else None,
            'input_feature_channel': 1,
            'output_feature_channel': 0,
            'kernel_spatial_idx': None,  # will be calculated in the infer function (np.array([2, 3]))
            'spatial_dims': None,  # will be calculated in the infer function
            'channel_dims': int64_array([1]),
            'batch_dims': int64_array([0]),
            'layout': 'NCHW'
        }

        # update the attributes of the node
        Convolution.update_node_stat(node, attrs)
        return cls.enabled
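The pads handling above converts the flat ONNX layout [b1, b2, ..., e1, e2, ...] into per-axis [begin, end] pairs with a reshape and a transpose. A standalone sketch of that transformation:

import numpy as np

pads = np.array([1, 2, 3, 4], dtype=np.int64)  # flat ONNX order: begins first, then ends
pads = np.transpose(pads.reshape([2, -1]))     # [[1, 3], [2, 4]]: per-axis [begin, end]
final_pad = np.array([[0, 0], [0, 0], *pads])  # prepend zero pads for the N and C axes
assert final_pad.tolist() == [[0, 0], [0, 0], [1, 3], [2, 4]]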
Example #21
    def extract(cls, node):
        pads = onnx_attr(node, 'pads', 'ints', dst_type=int64_array)
        auto_pad = onnx_attr(node, 'auto_pad', 's', default=None, dst_type=get_onnx_autopad)

        if pads is not None:
            if len(pads) % 2 != 0:
                raise Error(
                    'ConvTranspose node {} specifies pads = {} which has an odd number of elements. '
                    'The model is not correct.', node.soft_get('name'), pads)
            pads = pads.reshape([2, -1])
            pads = np.transpose(pads)

        final_pads = int64_array([[0, 0], [0, 0], *pads]) if pads is not None else None

        dilations = onnx_attr(node, 'dilations', 'ints', default=None)
        final_dilations = int64_array([1, 1, *dilations]) if dilations is not None else None

        strides = onnx_attr(node, 'strides', 'ints', default=None)
        final_strides = int64_array([1, 1, *strides]) if strides is not None else None

        kernel_shape = onnx_attr(node, 'kernel_shape', 'ints', dst_type=int64_array)

        if kernel_shape is None:
            raise Error(
                "ConvTranspose node {} doesn't have an explicitly defined kernel_shape. It is not supported.",
                node.soft_get('name'))

        output_padding = onnx_attr(node, 'output_padding', 'ints', default=None)
        final_output_padding = int64_array([0, 0, *output_padding]) if output_padding is not None else None

        output_shape = onnx_attr(node, 'output_shape', 'ints', default=None, dst_type=int64_array)

        attrs = {
            'type': 'Deconvolution',
            'op': 'Deconv2D',
            'auto_pad': auto_pad,
            'bias_addable': True,
            'bias_term': None,  # will be deduced later; not really needed
            'pad': final_pads,
            'dilation': final_dilations,
            'output_spatial_shape': output_shape,
            'original_output_spatial_shape': output_shape,
            'output_shape': None,
            'output_padding': final_output_padding,
            'stride': final_strides,
            'group': onnx_attr(node, 'group', 'i', default=1),
            'output': None,
            'spatial_dims': None,  # will be calculated in the infer function
            'channel_dims': int64_array([1]),
            'batch_dims': int64_array([0]),
            'layout': 'NCHW',
            'input_feature_channel': 0,
            'output_feature_channel': 1,
            'get_pad': ConvTransposeFrontExtractor.get_pad,
            'get_output_feature_dim': lambda node: node.kernel_shape[node.output_feature_channel] * node.group,
        }

        # update the attributes of the node
        Convolution.update_node_stat(node, attrs)
        return cls.enabled