Example #1
    def test_deconv_infer_ideal(self):
        graph = build_graph(
            nodes_attributes,
            [('conv_input', 'conv_node'), ('conv_weights', 'conv_node'),
             ('conv_node', 'conv_output')],
            {
                'conv_output': {
                    'is_output': True,
                    'shape': None
                },
                'conv_input': {
                    'shape': np.array([1, 21, 16, 16])
                },
                'conv_weights': {
                    'shape': np.array([1, 21, 4, 4]),
                    'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']
                },
                'conv_node': {  # 'spatial_dims': np.array([2, 3]), 'batch_dims': np.array([0]),
                    'channel_dims': np.array([1]),
                    'bias_addable': True,
                    'bias_term': False,
                    'batch_dims': np.array([0]),
                    'pad_spatial_shape': np.array([[0, 0], [0, 0]]),
                    'kernel_spatial': np.array([4, 4]),
                    'output_spatial_shape': None,
                    'kernel_spatial_idx': np.array([2, 3]),
                    'input_feature_channel': 1,
                    'output_feature_channel': 0,
                    'output_padding': np.array([0, 0, 1, 1]),
                    'type': 'Deconvolution',
                    'output': 21,
                    'dilation': np.array([1, 1, 1, 1]),
                    'group': 1,
                    'stride': np.array([1, 1, 2, 2]),
                    'output_shape': None
                }
            })

        deconv_node = Node(graph, 'conv_node')

        Convolution.infer(deconv_node)
        res_shape = deconv_node['output_shape']
        exp_shape = np.array([1, 21, 35, 35])

        for i in range(0, len(exp_shape)):
            self.assertEqual(exp_shape[i], res_shape[i])

        # Check that the shape and pad attributes do not change after a second infer
        Convolution.infer(deconv_node)
        res_shape = deconv_node['output_shape']

        for i in range(0, len(exp_shape)):
            self.assertEqual(exp_shape[i], res_shape[i])
Example #2

    def extract(cls, node: Node) -> bool:
        """
        Extract conv parameters from node.parameters.
        node.parameters like file descriptor object.
        :param node: Convolution node
        :return:
        """
        pb = node.parameters
        kernel = read_token_value(pb, b'<PatchDim>')
        stride = read_token_value(pb, b'<PatchStep>')
        patch_stride = read_token_value(pb, b'<PatchStride>')

        read_learning_info(pb)

        collect_until_whitespace(pb)
        weights, weights_shape = read_binary_matrix(pb)

        collect_until_whitespace(pb)
        biases = read_binary_vector(pb)

        if (patch_stride - kernel) % stride != 0:
            raise Error(
                'Kernel size and stride do not correspond to the `patch_stride` attribute of the Convolution layer. ' +
                refer_to_faq_msg(93))

        output = biases.shape[0]
        if weights_shape[0] != output:
            raise Error('Weights shape does not correspond to the `output` attribute of the Convolution layer. ' +
                        refer_to_faq_msg(93))

        mapping_rule = {
            'output': output,
            'patch_stride': patch_stride,
            'bias_term': None,
            'pad': np.array([[0, 0], [0, 0], [0, 0], [0, 0]], dtype=np.int64),
            'pad_spatial_shape': np.array([[0, 0], [0, 0]], dtype=np.int64),
            'dilation': np.array([1, 1, 1, 1], dtype=np.int64),
            'kernel': np.array([1, 1, 1, kernel], dtype=np.int64),
            'stride': np.array([1, 1, 1, stride], dtype=np.int64),
            'kernel_spatial': np.array([1, kernel], dtype=np.int64),
            'input_feature_channel': 1,
            'output_feature_channel': 0,
            'kernel_spatial_idx': [2, 3],
            'group': 1,
            'reshape_kernel': True,
        }

        mapping_rule.update(layout_attrs())
        embed_input(mapping_rule, 1, 'weights', weights)
        embed_input(mapping_rule, 2, 'biases', biases)

        mapping_rule['bias_addable'] = len(biases) > 0

        Convolution.update_node_stat(node, mapping_rule)
        return cls.enabled
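A quick numeric illustration (hypothetical values) of the `patch_stride` consistency check performed above:

# Sketch: the extractor requires (patch_stride - kernel) to be divisible by stride.
kernel, stride, patch_stride = 8, 2, 16
assert (patch_stride - kernel) % stride == 0   # consistent: extraction proceeds

kernel, stride, patch_stride = 8, 3, 16
assert (patch_stride - kernel) % stride != 0   # inconsistent: the extractor raises Error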
Example #3
def add_convolution_to_swap_xy_coordinates(graph: Graph, input_node: Node,
                                           coordinates_size: int):
    """
    The function add convolution node after the node 'input_node' to swap xy coordinates of the boxes produced
    by the node 'input_node'. It is expected that box coordinates are located in the fastest changing dimension of the
    'input_node' output, i.e. the input tensor could be reshaped to [num_boxes, 4] or [num_boxes, 5]. If the size is 5,
    then the 0-th element for each of num_boxes blocks is not changed and element 1 is swapped with element 2, element 3
    is swapped with element 4. This is the case when boxes coordinates are produced by the layer "Proposal". The exact
    amount of elements in each block is equal to the 'coordinates_size' parameter.
    :param graph: graph to operate on.
    :param input_node: node producing boxes coordinates.
    :param coordinates_size: integer value equal to 4 or 5.
    :return convolution node that swaps coordinates.
    """
    # only the swap of input tensors with 4 or 5 numbers describing each box is supported
    assert coordinates_size in [4, 5]

    input_reshape_4d_node = create_op_node_with_second_input(
        graph, Reshape, int64_array([-1, 1, 1, coordinates_size]),
        dict(name=input_node.name + '/reshape_4d'), input_node)
    mark_input_as_in_correct_layout(input_reshape_4d_node, 0)
    # do not mark second input because the reshape works in initial model layout and needs to be transformed to NCHW
    mark_output_as_in_correct_layout(input_reshape_4d_node, 0)

    if coordinates_size == 5:
        # the zero-indexed element is not a box coordinate ("batch id" in case of Proposal)
        conv_filter_data = np.array([[[[1, 0, 0, 0, 0], [0, 0, 1, 0, 0], [0, 1, 0, 0, 0],
                                       [0, 0, 0, 0, 1], [0, 0, 0, 1, 0]]]],
                                    dtype=np.float32)
    else:
        conv_filter_data = np.array([[[[0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]]]],
                                    dtype=np.float32)

    conv_filter_data = np.transpose(conv_filter_data, [2, 3, 0, 1])

    conv_filter_const_op = Const(graph, dict(value=conv_filter_data))
    conv_filter_const_node = conv_filter_const_op.create_node(
        [], dict(name=input_node.name + '/weights'))

    conv_op = Convolution(
        graph, {
            'bias_addable': True,
            'channel_dims': np.array([3]),
            'batch_dims': np.array([0]),
            'input_feature_channel': 0,
            'output_feature_channel': 1,
            'group': 1,
            'layout': 'NHWC',
        })
    return conv_op.create_node([input_reshape_4d_node, conv_filter_const_node],
                               dict(name=input_node.name + "/conv"))
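The filter above acts as a permutation matrix that a 1x1 convolution applies to each block of coordinates. A standalone NumPy sketch (toy values) of the 5-element case:

# Sketch: the 5x5 filter keeps element 0 and swaps elements (1, 2) and (3, 4).
import numpy as np

perm = np.array([[1, 0, 0, 0, 0],
                 [0, 0, 1, 0, 0],
                 [0, 1, 0, 0, 0],
                 [0, 0, 0, 0, 1],
                 [0, 0, 0, 1, 0]], dtype=np.float32)
box = np.array([7, 10, 20, 30, 40], dtype=np.float32)  # hypothetical [batch_id, x1, y1, x2, y2]
assert np.array_equal(perm @ box, [7, 20, 10, 40, 30])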
Example #4
    def extract(node):
        # Extract pads attribute
        # If 'pads' is not specified, it will be set to its default in the infer function
        pads = onnx_attr(node, 'pads', 'ints', default=None, dst_type=lambda x: np.array(x, dtype=np.int64))
        assert pads is None or len(pads) % 2 == 0
        final_pad = None
        if pads is not None:
            pads = pads.reshape([2, -1])
            pads = np.transpose(pads)
            final_pad = np.array([[0, 0], [0, 0], *pads], dtype=np.int64)

        # Extract dilations attribute
        # If 'dilations' is not specified, it will be set to the default (1) in the infer function
        dilations = onnx_attr(node, 'dilations', 'ints', default=None, dst_type=lambda x: np.array(x, dtype=np.int64))
        final_dilations = np.array([1, 1, *dilations], dtype=np.int64) if dilations is not None else None

        # Extract strides attribute
        # If 'strides' is not specified, it will be set to the default (1) in the infer function
        strides = onnx_attr(node, 'strides', 'ints', default=None, dst_type=lambda x: np.array(x, dtype=np.int64))
        final_strides = np.array([1, 1, *strides], dtype=np.int64) if strides is not None else None

        kernel_shape = onnx_attr(node, 'kernel_shape', 'ints', default=None)
        auto_pad = onnx_attr(node, 'auto_pad', 's', default=None, dst_type=get_onnx_autopad)
        group = onnx_attr(node, 'group', 'i', default=1, dst_type=lambda x: np.array(x, dtype=np.int64))

        attrs = {
            'op': __class__.op,
            'auto_pad': auto_pad,
            'bias_addable': True,
            'bias_term': None,
            'pad': final_pad,
            'pad_spatial_shape': np.array(pads, dtype=np.int64) if pads is not None else None,
            'dilation': final_dilations,
            'output_spatial_shape': None,
            'output_shape': None,
            'stride': final_strides,
            'group': group,
            'output': None,
            'kernel_spatial': np.array(kernel_shape, dtype=np.int64) if kernel_shape is not None else None,

            'input_feature_channel': 1,
            'output_feature_channel': 0,
            'kernel_spatial_idx': None,  # Will be calculated in infer function (np.array([2, 3]))

            'spatial_dims': None,  # Will be calculated in infer function
            'channel_dims': np.array([1], dtype=np.int64),
            'batch_dims': np.array([0], dtype=np.int64),
            'layout': 'NCHW'
        }

        # update the attributes of the node
        Convolution.update_node_stat(node, attrs)
        return __class__.enabled
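A standalone sketch (hypothetical pad values) of how the reshape([2, -1]) + transpose above regroups the flat ONNX pads list into per-axis [begin, end] pairs:

# ONNX stores pads as [x1_begin, x2_begin, ..., x1_end, x2_end, ...].
import numpy as np

pads = np.array([1, 2, 3, 4], dtype=np.int64)   # [h_begin, w_begin, h_end, w_end]
pads = np.transpose(pads.reshape([2, -1]))      # -> [[1, 3], [2, 4]] per axis
final_pad = np.array([[0, 0], [0, 0], *pads], dtype=np.int64)
assert final_pad.tolist() == [[0, 0], [0, 0], [1, 3], [2, 4]]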
Example #5
    def extract(cls, node):
        attrs = get_mxnet_layer_attrs(node.symbol_dict)
        scale = attrs.int("scale", 1)
        num_filter = attrs.int("num_filter", 0)
        mode = attrs.str("sample_type", None)
        if mode == 'nearest':
            node_attrs = {
                'factor': attrs.int("scale", 1),
                'mode': mode,
                'antialias': 0,
                'axes': int64_array([2, 3]),
            }
            Interpolate.update_node_stat(node, node_attrs)
        elif mode == 'bilinear':
            """
            Bilinear UpSampling uses deconvolution algorithm under the hood.
            For MXNet Bilinear UpSampling op just wrapper over Deconvolution op.
            Inputs data:
                input1 - input data
                input2 - deconvolution weight
            """
            kernel = 2 * scale - scale % 2
            stride = scale
            pad = math.ceil((scale - 1) / 2)
            num_group = num_filter

            node_attrs = {
                'op': __class__.op,
                'type': 'Deconvolution',
                'bias_addable': True,
                'bias_term': False,
                'pad': int64_array([[0, 0], [0, 0], [pad, pad], [pad, pad]]),
                'pad_spatial_shape': int64_array([[pad, pad], [pad, pad]]),
                'dilation': None,
                'output_spatial_shape': None,
                'output_shape': None,
                'stride': int64_array([1, 1, stride, stride]),
                'group': num_group,
                'output': num_filter,
                'kernel_spatial': int64_array([kernel, kernel]),
                'input_feature_channel': 0,
                'output_feature_channel': 1,
                'kernel_spatial_idx': None,
                'reshape_kernel': True,
                'spatial_dims': None,
                'channel_dims': int64_array([1]),
                'batch_dims': int64_array([0]),
                'layout': 'NCHW',
                'get_pad': DeconvFrontExtractor.get_pad,
            }
            Convolution.update_node_stat(node, node_attrs)
        return cls.enabled
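A worked sketch of the deconvolution parameters derived above for the bilinear mode:

import math

# kernel = 2 * scale - scale % 2, stride = scale, pad = ceil((scale - 1) / 2)
for scale, expected in ((2, (4, 2, 1)), (3, (5, 3, 1))):
    kernel = 2 * scale - scale % 2
    stride = scale
    pad = math.ceil((scale - 1) / 2)
    assert (kernel, stride, pad) == expected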
Example #6
    def test_deconv_dynamic_infer_ideal(self):
        graph = build_graph(
            nodes_attributes,
            [('conv_input', 'conv_node'), ('conv_weights', 'conv_node'),
             ('conv_node', 'conv_output'), ('conv_output', 'op_output')],
            {
                'conv_output': {
                    'shape': None
                },
                'conv_input': {
                    'shape': shape_array([1, 21, dynamic_dimension_value, 16])
                },
                'conv_weights': {
                    'shape': np.array([1, 21, 4, 4]),
                    'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']
                },
                'conv_node': {  # 'spatial_dims': np.array([2, 3]), 'batch_dims': np.array([0]),
                    'channel_dims': np.array([1]),
                    'bias_addable': True,
                    'bias_term': False,
                    'batch_dims': np.array([0]),
                    'pad_spatial_shape': np.array([[0, 0], [0, 0]]),
                    'kernel_spatial': np.array([4, 4]),
                    'output_spatial_shape': None,
                    'kernel_spatial_idx': np.array([2, 3]),
                    'input_feature_channel': 1,
                    'output_feature_channel': 0,
                    'output_padding': np.array([0, 0, 1, 1]),
                    'type': 'Deconvolution',
                    'output': 21,
                    'dilation': np.array([1, 1, 1, 1]),
                    'group': 1,
                    'stride': np.array([1, 1, 2, 2]),
                    'output_shape': None
                }
            })

        deconv_node = Node(graph, 'conv_node')

        Convolution.infer(deconv_node)
        res_shape = deconv_node['output_shape']
        exp_shape = shape_array([1, 21, dynamic_dimension_value, 35])

        self.assertTrue(strict_compare_tensors(exp_shape, res_shape))

        # Check that the shape and pad attributes do not change after a second infer
        Convolution.infer(deconv_node)
        res_shape = deconv_node['output_shape']

        self.assertTrue(strict_compare_tensors(exp_shape, res_shape))
Example #7
    def extract(cls, node):
        attr = get_mxnet_layer_attrs(node.symbol_dict)

        kernel = attr.tuple("kernel", int, None)
        stride = attr.tuple("stride", int, tuple(np.ones(len(kernel), dtype=np.int64)))
        padding = attr.tuple("pad", int, tuple(np.zeros(len(kernel), dtype=np.int64)))
        dilate = attr.tuple("dilate", int, tuple(np.ones(len(kernel), dtype=np.int64)))
        group = attr.int("num_group", 1)
        output = attr.int("num_filter", None)
        bias_term = not attr.bool("no_bias", True)
        target_shape = attr.tuple("target_shape", int, None)
        if target_shape:
            target_shape = np.array(target_shape, dtype=np.int64)

        final_dilations = np.array([1, 1, *[d for d in dilate]], dtype=np.int64) if dilate is not None else None
        node_attrs = {
            'op': __class__.op,
            'type': 'Deconvolution',
            'bias_addable': True,
            'bias_term': bias_term,
            'pad': np.array([[0, 0], [0, 0], *[[pad, pad] for pad in padding]], dtype=np.int64),
            'pad_spatial_shape': np.array([[pad, pad] for pad in padding], dtype=np.int64),
            'dilation': final_dilations,
            'output_spatial_shape': target_shape,
            'original_output_spatial_shape': target_shape,
            'output_shape': None,
            'stride': np.array([1, 1, *[s for s in stride]], dtype=np.int64),
            'group': group,
            'output': output,
            'kernel_spatial': np.array([k for k in kernel], dtype=np.int64),
            'input_feature_channel': 1,
            'output_feature_channel': 0,
            'kernel_spatial_idx': None,
            'reshape_kernel': True,

            'spatial_dims': None,
            'channel_dims': np.array([1], dtype=np.int64),
            'batch_dims': np.array([0], dtype=np.int64),
            'layout': 'NCHW',
            'get_pad': DeconvFrontExtractor.get_pad,
        }

        output_padding = attr.tuple("adj", int, None)
        if target_shape is None and output_padding:
            node_attrs["output_padding"] = np.array([0, 0, *[s for s in output_padding]], dtype=np.int64)

        # update the attributes of the node
        Convolution.update_node_stat(node, node_attrs)
        return cls.enabled
Example #8
    def test_caffe_conv2d_infer(self):
        graph = build_graph(
            nodes_attributes, [('conv_input', 'conv_node'),
                               ('conv_weights', 'conv_node'),
                               ('conv_node', 'conv_output'),
                               ('conv_output', 'op_output')],
            {
                'conv_output': {
                    'shape': None
                },
                'conv_input': {
                    'shape': np.array([1, 3, 227, 227])
                },
                'conv_weights': {
                    'shape': np.array([64, 3, 3, 3]),
                    'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']
                },
                'conv_node': {
                    'pad_spatial_shape': np.array([[0, 0], [0, 0]]),
                    'conv_pad': np.array([[0, 0], [0, 0], [0, 0], [0, 0]]),
                    'dilation': np.array([1, 1, 1, 1]),
                    'bias_addable': True,
                    'bias_term': False,
                    'output_spatial_shape': None,
                    'output_shape': None,
                    'stride': np.array([1, 1, 1, 1]),
                    'group': 1,
                    'kernel_spatial_idx': np.array([2, 3]),
                    'input_feature_channel': 1,
                    'output_feature_channel': 0,
                    'output': 64,
                    'kernel_spatial': np.array([3, 3]),
                    'spatial_dims': np.array([2, 3]),
                    'channel_dims': np.array([1]),
                    'batch_dims': np.array([0])
                }
            })

        conv_node = Node(graph, 'conv_node')
        Convolution.infer(conv_node)
        exp_shape = np.array([1, 64, 225, 225])
        res_shape = graph.node['conv_output']['shape']
        for i in range(0, len(exp_shape)):
            self.assertEqual(exp_shape[i], res_shape[i])
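A minimal sketch (not part of the test) of the expected shape, assuming the standard convolution output formula:

# out = (in + 2 * pad - kernel) // stride + 1 per spatial dimension
in_spatial, kernel, stride, pad = 227, 3, 1, 0
out_spatial = (in_spatial + 2 * pad - kernel) // stride + 1
assert out_spatial == 225   # -> exp_shape [1, 64, 225, 225]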
Example #9
    def extract(cls, node):
        attrs = tf_create_attrs(node, 3, 4)
        attrs.update({
            'op': __class__.op,
            'get_group': lambda node: 1,
            'get_output_feature_dim': lambda node: node.kernel_shape[node.output_feature_channel],
            'get_weights_permute': PermuteAttrs.Permutation(perm=int64_array([4, 3, 0, 1, 2]),
                                                            inv=int64_array([2, 3, 4, 1, 0]))
        })

        # update the attributes of the node
        Convolution.update_node_stat(node, attrs)
        return cls.enabled
Example #10
    def test_caffe_conv2d_dynamic_input_infer(self):
        graph = build_graph(
            nodes_attributes, [('conv_input', 'conv_node'),
                               ('conv_weights', 'conv_node'),
                               ('conv_node', 'conv_output'),
                               ('conv_output', 'op_output')],
            {
                'conv_output': {
                    'shape': None
                },
                'conv_input': {
                    'shape': shape_array([1, 3, dynamic_dimension_value, 227])
                },
                'conv_weights': {
                    'shape': np.array([64, 3, 3, 3]),
                    'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']
                },
                'conv_node': {
                    'pad_spatial_shape': np.array([[0, 0], [0, 0]]),
                    'conv_pad': np.array([[0, 0], [0, 0], [0, 0], [0, 0]]),
                    'dilation': np.array([1, 1, 1, 1]),
                    'bias_addable': True,
                    'bias_term': False,
                    'output_spatial_shape': None,
                    'output_shape': None,
                    'stride': np.array([1, 1, 1, 1]),
                    'group': 1,
                    'kernel_spatial_idx': np.array([2, 3]),
                    'input_feature_channel': 1,
                    'output_feature_channel': 0,
                    'output': 64,
                    'kernel_spatial': np.array([3, 3]),
                    'spatial_dims': np.array([2, 3]),
                    'channel_dims': np.array([1]),
                    'batch_dims': np.array([0])
                }
            })

        conv_node = Node(graph, 'conv_node')
        Convolution.infer(conv_node)
        exp_shape = shape_array([1, 64, dynamic_dimension_value, 225])
        res_shape = graph.node['conv_output']['shape']
        self.assertTrue(strict_compare_tensors(exp_shape, res_shape))
Example #11
    def test_caffe_conv2d_infer_wrong_input_shape(self):
        graph = build_graph(
            nodes_attributes, [('conv_input', 'conv_node'),
                               ('conv_weights', 'conv_node'),
                               ('conv_node', 'conv_output'),
                               ('conv_output', 'op_output')],
            {
                'conv_output': {
                    'shape': None
                },
                'conv_input': {
                    'shape': np.array([1, 3, 1, 1])
                },
                'conv_weights': {
                    'shape': np.array([64, 3, 3, 3]),
                    'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']
                },
                'conv_node': {
                    'pad_spatial_shape': np.array([[0, 0], [0, 0]]),
                    'conv_pad': np.array([[0, 0], [0, 0], [0, 0], [0, 0]]),
                    'dilation': np.array([1, 1, 1, 1]),
                    'bias_addable': True,
                    'bias_term': False,
                    'output_spatial_shape': None,
                    'output_shape': None,
                    'stride': np.array([1, 1, 1, 1]),
                    'group': 1,
                    'kernel_spatial_idx': np.array([2, 3]),
                    'input_feature_channel': 1,
                    'output_feature_channel': 0,
                    'output': 64,
                    'kernel_spatial': np.array([3, 3]),
                    'spatial_dims': np.array([2, 3]),
                    'channel_dims': np.array([1]),
                    'batch_dims': np.array([0])
                }
            })

        conv_node = Node(graph, 'conv_node')
        with self.assertRaises(Error):
            Convolution.infer(conv_node)
Example #12
    def extract(cls, node):
        attrs = tf_create_attrs(node, 2, 2)
        attrs.update({
            'op': __class__.op,
            'kernel_spatial_idx': np.array([0, 1], dtype=np.int64),
            'get_group': lambda node: node.kernel_shape[node.output_feature_channel],
            'get_output_feature_dim': lambda node: node.kernel_shape[-1] * node.kernel_shape[-2],
            'get_weights_permute': PermuteAttrs.Permutation(perm=int64_array([2, 3, 0, 1]),
                                                            inv=int64_array([2, 3, 0, 1]))
        })

        # update the attributes of the node
        Convolution.update_node_stat(node, attrs)
        return cls.enabled
Example #13
    def test_deconv_infer_no_shape(self):
        graph = build_graph(
            nodes_attributes, [('conv_input', 'conv_node'),
                               ('conv_weights', 'conv_node'),
                               ('conv_node', 'conv_output'),
                               ('conv_output', 'op_output')],
            {
                'conv_output': {
                    'shape': None
                },
                'conv_input': {
                    'shape': None
                },
                'conv_weights': {
                    'shape': np.array([1, 21, 16, 16]),
                    'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']
                },
                'conv_node': {
                    'spatial_dims': np.array([2, 3]),
                    'batch_dims': np.array([0]),
                    'channel_dims': np.array([1]),
                    'pad_spatial_shape': np.array([[0, 0], [0, 0]]),
                    'kernel_spatial': np.array([4, 4]),
                    'output_spatial_shape': None,
                    'kernel_spatial_idx': np.array([2, 3]),
                    'input_feature_channel': 1,
                    'output_feature_channel': 0,
                    'type': 'Deconvolution',
                    'output': 21,
                    'dilation': np.array([1, 1, 1, 1]),
                    'group': 1,
                    'stride': np.array([1, 1, 2, 2]),
                    'output_shape': None
                }
            })

        deconv_node = Node(graph, 'conv_node')
        Convolution.infer(deconv_node)
        res_shape = deconv_node['output_shape']
        self.assertIsNone(res_shape)
Example #14
    def extract(cls, node):
        attrs = tf_create_attrs(node, 2, 3)
        attrs.update({
            'op': __class__.op,
            'get_group': lambda node: node.group
                if 'group' in node and node.group is not None
                else node.in_node(0).shape[node.channel_dims] // node.kernel_shape[node.input_feature_channel],
            'get_output_feature_dim': lambda node: node.kernel_shape[node.output_feature_channel],
            'get_weights_permute': PermuteAttrs.Permutation(perm=int64_array([3, 2, 0, 1]),
                                                            inv=int64_array([2, 3, 1, 0]))
        })

        # update the attributes of the node
        Convolution.update_node_stat(node, attrs)
        return cls.enabled
Example #15
    def extract(node):
        attr = get_mxnet_layer_attrs(node.symbol_dict)

        kernel = attr.tuple("kernel", int, None)
        stride = attr.tuple("stride", int, tuple(np.ones(len(kernel), dtype=np.int64)))
        padding = attr.tuple("pad", int, tuple(np.zeros(len(kernel), dtype=np.int64)))
        dilate = attr.tuple("dilate", int, tuple(np.ones(len(kernel), dtype=np.int64)))
        group = attr.int("num_group", 1)
        output = attr.int("num_filter", None)
        bias_term = attr.str("no_bias", 'False') == 'False'

        final_dilations = np.array([1, 1, *[d for d in dilate]], dtype=np.int64) if dilate is not None else None

        node_attrs = {
            'op': __class__.op,
            'bias_addable': True,
            'bias_term': bias_term,
            'pad': np.array([[0, 0], [0, 0], *[[pad, pad] for pad in padding]], dtype=np.int64),
            'pad_spatial_shape': np.array([[pad, pad] for pad in padding], dtype=np.int64),
            'dilation': final_dilations,
            'output_spatial_shape': None,
            'output_shape': None,
            'stride': np.array([1, 1, *[s for s in stride]], dtype=np.int64),
            'group': group,
            'output': output,
            'kernel_spatial': np.array([k for k in kernel], dtype=np.int64),

            'input_feature_channel': 1,
            'output_feature_channel': 0,
            'kernel_spatial_idx': None,
            'reshape_kernel': True,

            'spatial_dims': None,
            'channel_dims': np.array([1], dtype=np.int64),
            'batch_dims': np.array([0], dtype=np.int64),
            'layout': 'NCHW',
        }

        # update the attributes of the node
        Convolution.update_node_stat(node, node_attrs)
        return __class__.enabled
Example #16
    def test_caffe_conv2d_infer_no_shape(self):
        graph = build_graph(
            nodes_attributes, [('conv_input', 'conv_node'),
                               ('conv_weights', 'conv_node'),
                               ('conv_node', 'conv_output'),
                               ('conv_output', 'op_output')],
            {
                'conv_output': {
                    'shape': None
                },
                'conv_input': {
                    'shape': None
                },
                'conv_weights': {
                    'shape': None,
                    'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']
                },
                'conv_node': {
                    'pad_spatial_shape': np.array([[0, 0], [0, 0]]),
                    'conv_pad': np.array([[0, 0], [0, 0], [0, 0], [0, 0]]),
                    'dilation': np.array([1, 1, 1, 1]),
                    'bias_addable': True,
                    'bias_term': False,
                    'output_spatial_shape': None,
                    'output_shape': None,
                    'stride': np.array([1, 1, 1, 1]),
                    'group': 1,
                    'output': 64,
                    'kernel_spatial': np.array([3, 3]),
                    'spatial_dims': np.array([2, 3]),
                    'channel_dims': np.array([1]),
                    'batch_dims': np.array([0])
                }
            })

        conv_node = Node(graph, 'conv_node')
        Convolution.infer(conv_node)
        res_shape = graph.node['conv_output']['shape']
        self.assertIsNone(res_shape)
Example #17
    def replace_op(self, graph: nx.MultiDiGraph, node: Node):
        input_node = node.in_node(0)
        port = graph.get_edge_data(input_node.id, node.id)[0]['out']
        input_reshape_node = Reshape(
            graph, {
                'name': '/Reshape/' + node.name,
                'axis': 1,
                'infer': Reshape.kaldi_infer
            }).create_node([(input_node, port)])

        convolution_node = Convolution(graph, node.attrs()).create_node(
            [input_reshape_node])

        output_reshape_node = Reshape(
            graph, {
                'name': node.name + '/Reshape/',
                'axis': 1,
                'infer': Reshape.kaldi_infer
            }).create_node([convolution_node])

        return [output_reshape_node.id]
Example #18
 def backend_attrs(self):
     # the same attributes as in a regular convolution, plus the additional attributes 'deformable_group' and 'group'
     return Convolution(self.graph, {}).backend_attrs() + ['deformable_group', 'group']
Example #19
    def extract(cls, node):
        pads = onnx_attr(node, 'pads', 'ints', dst_type=int64_array)
        auto_pad = onnx_attr(node,
                             'auto_pad',
                             's',
                             default=None,
                             dst_type=get_onnx_autopad)

        if pads is not None:
            if len(pads) % 2 != 0:
                raise Error(
                    'ConvTranspose node {} specifies pads = {} which has an odd number of elements. The model is not correct.',
                    node.soft_get('name'), pads)
            pads = pads.reshape([2, -1])
            pads = np.transpose(pads)

        final_pads = int64_array([[0, 0], [0, 0], *pads]) if pads is not None else None

        dilations = onnx_attr(node, 'dilations', 'ints', default=None)
        final_dilations = int64_array([1, 1, *dilations]) if dilations is not None else None

        strides = onnx_attr(node, 'strides', 'ints', default=None)
        final_strides = int64_array([1, 1, *strides]) if strides is not None else None

        kernel_shape = onnx_attr(node,
                                 'kernel_shape',
                                 'ints',
                                 dst_type=int64_array)

        if kernel_shape is None:
            raise Error(
                'ConvTranspose node {} doesn\'t have explicitly defined kernel_shape. It is not supported.',
                node.soft_get('name'))

        output_padding = onnx_attr(node,
                                   'output_padding',
                                   'ints',
                                   default=None)
        final_output_padding = int64_array([0, 0, *output_padding]) if output_padding is not None else None

        output_shape = onnx_attr(node,
                                 'output_shape',
                                 'ints',
                                 default=None,
                                 dst_type=int64_array)

        attrs = {
            'type': 'Deconvolution',
            'op': 'Deconv2D',
            'auto_pad': auto_pad,
            'bias_addable': True,
            'bias_term': None,  # will be deduced later; not really needed
            'pad': final_pads,
            'dilation': final_dilations,
            'output_spatial_shape': output_shape,
            'output_shape': None,
            'output_padding': final_output_padding,
            'stride': final_strides,
            'group': onnx_attr(node, 'group', 'i', default=1),
            'output': None,
            'spatial_dims': None,  # Will be calculated in infer function
            'channel_dims': int64_array([1]),
            'batch_dims': int64_array([0]),
            'layout': 'NCHW',
            'input_feature_channel': 0,
            'output_feature_channel': 1,
            'get_pad': ConvTransposeFrontExtractor.get_pad,
            'get_output_feature_dim': lambda node: node.kernel_shape[node.output_feature_channel] * node.group,
        }

        # update the attributes of the node
        Convolution.update_node_stat(node, attrs)
        return cls.enabled
Example #20
    def extract(node):

        int64array = lambda x: np.array(x, dtype=np.int64)

        pads = onnx_attr(node, 'pads', 'ints', dst_type=int64array)
        auto_pad = onnx_attr(node, 'auto_pad', 's', default=None, dst_type=get_onnx_autopad)

        if pads is None:
            pads = np.array([0, 0, 0, 0], dtype=np.int64)

        if len(pads) % 2 != 0:
            raise Error(
                'ConvTranspose node {} specifies pads = {} which has an odd number of elements. The model is not correct.',
                node.soft_get('name'),
                pads
            )

        pads = pads.reshape([2, -1])
        pads = np.transpose(pads)
        dilations = int64array(onnx_attr(node, 'dilations', 'ints', default=[1, 1]))
        strides = int64array(onnx_attr(node, 'strides', 'ints', default=[1, 1]))
        kernel_shape = onnx_attr(node, 'kernel_shape', 'ints', dst_type=int64array)

        if kernel_shape is None:
            raise Error(
                'ConvTranspose node {} doesn\'t have explicitly defined kernel_shape. It is not supported.',
                node.soft_get('name')
            )

        output_padding = onnx_attr(node, 'output_padding', 'ints', default=[0, 0])

        output_shape = onnx_attr(node, 'output_shape', 'ints', default=None, dst_type=int64array)

        attrs = {
            'type': 'Deconvolution',
            'op': 'Deconv2D',
            'auto_pad': auto_pad,
            'bias_addable': True,
            'bias_term': None,  # will be deduced later; not really needed
            'pad': int64array([[0, 0], [0, 0], pads[0], pads[1]]),
            'pad_spatial_shape': int64array([pads[0], pads[1]]),
            'dilation': int64array([1, 1, dilations[0], dilations[1]]),
            'output_spatial_shape': output_shape,
            'output_shape': None,
            'output_padding': int64array([0, 0, output_padding[0], output_padding[1]]),
            'stride': int64array([1, 1, strides[0], strides[1]]),
            'group': onnx_attr(node, 'group', 'i', default=1),
            'output': None,
            'spatial_dims': int64array([2, 3]),
            'channel_dims': int64array([1]),
            'batch_dims': int64array([0]),
            'kernel_spatial': int64array([kernel_shape[0], kernel_shape[1]]),  # TODO WARNING Don't misuse X/Y

            'input_feature_channel': 0,
            'output_feature_channel': 1,
            'kernel_spatial_idx': np.array([2, 3]),
            'get_pad': ConvTransposeFrontExtractor.get_pad
        }
        attrs.update(layout_attrs())

        # update the attributes of the node
        Convolution.update_node_stat(node, attrs)
        return __class__.enabled
Example #21
    def test_conv_infer_3D_convolution(self):
        graph = build_graph(
            nodes_attributes, [('conv_input', 'conv_node'),
                               ('conv_weights', 'conv_node'),
                               ('conv_node', 'conv_output'),
                               ('conv_output', 'op_output')],
            {
                'conv_output': {
                    'shape': None
                },
                'conv_input': {
                    'shape': int64_array([1, 3, 16, 224, 224])
                },
                'conv_weights': {
                    'shape': int64_array([3, 64, 1, 7, 7]),
                    'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']
                },
                'conv_node': {
                    'type': 'Convolution',
                    'bias_term': None,
                    'stride': None,
                    'dilation': None,
                    'batch_dims': int64_array([0]),
                    'channel_dims': int64_array([1]),
                    'output_spatial_shape': None,
                    'input_feature_channel': 0,
                    'output_feature_channel': 1,
                    'group': 1,
                    'output_shape': None,
                    'layout': 'NCHW'
                }
            })

        conv_node = Node(graph, 'conv_node')
        conv_output = Node(graph, 'conv_output')

        Convolution.infer(conv_node)

        # Check bias_term attribute
        self.assertTrue(conv_node.has_valid('bias_term'))
        self.assertTrue(not conv_node.bias_term)
        # Check kernel_spatial_idx attr detection
        self.assertTrue(conv_node.has_valid('kernel_spatial_idx'))
        self.assertTrue(
            np.array_equal(int64_array([2, 3, 4]),
                           conv_node.kernel_spatial_idx))
        # Check spatial_dims attr detection
        self.assertTrue(conv_node.has_valid('spatial_dims'))
        self.assertTrue(
            np.array_equal(int64_array([2, 3, 4]), conv_node.spatial_dims))
        # Check kernel_spatial attr detection
        self.assertTrue(conv_node.has_valid('kernel_spatial'))
        self.assertTrue(
            np.array_equal(int64_array([1, 7, 7]), conv_node.kernel_spatial))
        # Check output attribute
        self.assertTrue(conv_node.has_valid('output'))
        self.assertEqual(64, conv_node.output)
        # Check dilation value. Should be set to default
        self.assertTrue(conv_node.has_valid('dilation'))
        self.assertTrue(
            np.array_equal(int64_array([1, 1, 1, 1, 1]), conv_node.dilation))
        # Check stride value. Should be set to default
        self.assertTrue(conv_node.has_valid('stride'))
        self.assertTrue(
            np.array_equal(int64_array([1, 1, 1, 1, 1]), conv_node.stride))
        # Check pad value. Should be set to default
        self.assertTrue(conv_node.has_valid('pad'))
        self.assertTrue(
            np.array_equal(
                int64_array([[0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]),
                conv_node.pad))
        # Check pad_spatial_shape
        self.assertTrue(conv_node.has_valid('pad_spatial_shape'))
        self.assertTrue(
            np.array_equal(int64_array([[0, 0], [0, 0], [0, 0]]),
                           conv_node.pad_spatial_shape))
        # Check resulting output shape
        self.assertTrue(
            np.array_equal(int64_array([1, 64, 16, 218, 218]),
                           conv_output.shape))
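A minimal sketch of the expected 3D output shape, assuming unit stride, zero padding, and unit dilation (the defaults checked above):

# out = in - kernel + 1 per spatial dimension
spatial_in = (16, 224, 224)
kernel = (1, 7, 7)
spatial_out = tuple(i - k + 1 for i, k in zip(spatial_in, kernel))
assert spatial_out == (16, 218, 218)   # -> output shape [1, 64, 16, 218, 218]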
Example #22
 def backend_attrs(self):
     # the same attributes as in a regular convolution, plus the additional attributes 'deformable_group', 'group'
     # and 'bilinear_interpolation_pad'
     return Convolution(self.graph, {}).backend_attrs() + [
         'deformable_group', 'group', 'bilinear_interpolation_pad'
     ]
Example #23
    def replace_pattern(self, graph: Graph, match: dict):
        """
        Converts specific for NasNet topology subgraph Pad->StridedSlice->AvgPool to Conv->Crop->AvgPool
        """
        input = match['input']

        pad_node = match['pad_op']
        pad_node_name = pad_node.soft_get('name', pad_node.id)

        sslice_node = match['sslice']
        begin = []
        end = []
        stride = []
        for s in sslice_node.slices:
            begin.append(s.start)
            end.append(s.stop)
            stride.append(s.step)

        pads_begin = pad_node.in_port(1).data.get_value()
        pads_end = pad_node.in_port(2).data.get_value()
        if pads_begin is None or pads_end is None:
            log.error('Pad values for node "{}" are not constants'.format(
                pad_node_name))
            return

        if not np.array_equal(pads_begin, int64_array([0, 0, 0, 0])):
            log.error('Pad begin values do not match for node {}!'.format(pad_node_name))
            return

        if not np.array_equal(pads_end, int64_array([0, 1, 1, 0])):
            log.error('Pad end values do not match for node {}!'.format(pad_node_name))
            return

        if not np.array_equal(begin, int64_array([0, 1, 1, 0])):
            log.error("StridedSlice has wrong begin")
            return

        if not np.array_equal(sslice_node.end_mask, int64_array([0, 0, 0, 0])) or \
                not np.array_equal(sslice_node.begin_mask, int64_array([0, 1, 1, 0])):
            log.error("StridedSlice has wrong masks")
            return

        # Pad -> Conv
        conv_name = graph.unique_id(pad_node.name + '/Conv_')
        conv_weights_name = graph.unique_id(pad_node.name + '/ConvW_')
        conv_weights = np.ones((input.shape[3], 1, 1, 1))
        output_shape = int64_array([
            input.shape[0], input.shape[1] + 1, input.shape[2] + 1,
            input.shape[3]
        ])

        conv_node = Convolution(
            graph,
            dict(
                name=conv_name,
                stride=int64_array([1, 1, 1, 1]),
                dilation=int64_array([1, 1, 1, 1]),
                group=input.shape[3],
                bias_addable=True,
                bias_term=False,
                spatial_dims=int64_array([1, 2]),
                kernel_spatial=int64_array([1, 1]),
                pad=int64_array([[0, 0], [0, 1], [0, 1], [0, 0]]),
                output_shape=output_shape,
                batch_dims=int64_array([0]),
                channel_dims=int64_array([3]),
                output=input.shape[3],
                input_feature_channel=1,
                output_feature_channel=0,
            )).create_node()

        weights_const_node = Const(
            graph,
            dict(name=conv_weights_name,
                 value=conv_weights,
                 shape=int64_array(conv_weights.shape))).create_node()

        # StridedSlice -> Crop
        crop_node = Crop(
            graph,
            dict(name=sslice_node.name + '/Crop_',
                 axis=int64_array([1, 2]),
                 dim=int64_array([output_shape[1] - 1, output_shape[2] - 1]),
                 offset=int64_array([1, 1]))).create_node()

        # Connect nodes
        pad_node.in_port(0).get_connection().set_destination(
            conv_node.in_port(0))
        weights_const_node.out_port(0).connect(conv_node.in_port(1))
        conv_node.out_port(0).connect(crop_node.in_port(0))
        sslice_node.out_port(0).get_connection().set_source(
            crop_node.out_port(0))

        conv_node.in_port(1).bin = 'weights'

        # Remove Pad and StridedSlice nodes from graph
        graph.remove_node(pad_node.id)
        graph.remove_node(sslice_node.id)
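The replacement works because a 1x1 convolution with group equal to the number of channels and all-ones weights copies each channel unchanged; only its asymmetric 'pad' attribute has an effect, reproducing the original Pad, while the Crop with offset [1, 1] reproduces the StridedSlice. A toy NumPy sketch of the window equivalence on one spatial plane:

import numpy as np

x = np.arange(9, dtype=np.float32).reshape(3, 3)
padded = np.pad(x, ((0, 1), (0, 1)))    # Pad with pads_end = [1, 1]
sliced = padded[1:, 1:]                 # StridedSlice with begin = [1, 1]
cropped = padded[1:1 + 3, 1:1 + 3]      # Crop with offset = [1, 1], dim = [3, 3]
assert np.array_equal(sliced, cropped)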
    def replace_timeheightconv(self, graph: Graph, node: Node):
        req_time_offsets = node.soft_get('time_offsets')
        offsets = node.soft_get("offsets", [[]])
        all_time_offsets = list(set(offsets[:, 0]))
        all_time_offsets.sort()
        in_name = node.soft_get('name', node.id)
        rename_node(node, in_name + '/to_delete')

        # create MemoryOffset nodes for gathering the temporal context
        # a Concat is needed when there is more than one time offset
        concat = Concat(graph,
                        attrs={
                            'name': in_name + '/Concat',
                            'in_ports_count': len(all_time_offsets)
                        }).create_node()
        i = 0
        for t in all_time_offsets:
            # if time offset included in required_time_offsets we don't need default value
            has_default = t not in req_time_offsets
            memoff = MemoryOffset(graph,
                                  attrs={
                                      'name': in_name + '/MemoryOffset_' + str(i),
                                      't': t,
                                      'has_default': has_default,
                                      'splitted': False,
                                      'pair_name': in_name + '/MemoryOffset_pair_' + str(i)
                                  }).create_node()
            concat.in_port(i).connect(memoff.out_port(0))
            memoff.in_port(0).connect(node.in_port(0).get_source())
            i = i + 1

        stride = node.soft_get("height_subsample", 1)

        kernel = int64_array([0, 0])
        kernel[0] = len(set(offsets[:, 0]))
        kernel[1] = len(set(offsets[:, 1]))

        pad_h = int64_array([0, 0])
        pad_h[0] = -min(offsets[:, 1]) if min(offsets[:, 1]) < 0 else 0
        pad_h[1] = stride * node.height_out - (node.height_in -
                                               max([max(offsets[:, 1]), 0]))

        dilation_t = (max(offsets[:, 0]) - min(offsets[:, 0])) / (kernel[0] - 1) if kernel[0] > 1 else 1
        dilation_h = (max(offsets[:, 1]) - min(offsets[:, 1])) / (kernel[1] - 1) if kernel[1] > 1 else 1

        conv_attrs = {
            'name': in_name,
            'output': node['out_channels'],
            'height_in': node.height_in,
            'bias_term': None,
            'pad': int64_array([[0, 0], [0, 0], [0, 0], pad_h]),
            'pad_spatial_shape': int64_array([[0, 0], pad_h]),
            'dilation': int64_array([1, 1, dilation_t, dilation_h]),
            'kernel': int64_array([node.out_channels, node.in_channels, kernel[0], kernel[1]]),
            'stride': int64_array([1, 1, 1, stride]),
            'kernel_spatial': kernel,
            'input_feature_channel': 1,
            'output_feature_channel': 0,
            'channel_dims': int64_array([1]),
            'spatial_dims': int64_array([2, 3]),
            'batch_dims': int64_array([0]),
            'kernel_spatial_idx': int64_array([2, 3]),
            'group': 1,
            'reshape_kernel': True,
            'bias_addable': True,
        }
        conv = Convolution(graph, attrs=conv_attrs).create_node()
        conv.in_port(0).connect(concat.out_port(0))
        conv.in_port(1).connect(node.in_port(1).get_source())

        # change the weights layout from OHWI to OIHW
        # in the future this should be replaced by the common Permute mechanics
        weights = conv.in_port(1).get_source().node.value
        weights = weights.reshape(
            int64_array([node.out_channels, -1, node.in_channels]))
        weights = weights.transpose(int64_array([0, 2, 1]))
        weights = weights.flatten()
        conv.in_port(1).get_source().node.value = weights

        conv.in_port(2).connect(node.in_port(2).get_source())
        node.out_port(0).get_connection().set_source(conv.out_port(0))
        graph.remove_node(node.id)
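A worked sketch (hypothetical offsets) of the kernel and dilation arithmetic used in replace_timeheightconv above:

import numpy as np

offsets = np.array([[-1, -1], [-1, 1], [1, -1], [1, 1]])   # (time, height) pairs
kernel_t = len(set(offsets[:, 0]))   # 2 distinct time offsets
kernel_h = len(set(offsets[:, 1]))   # 2 distinct height offsets
dilation_t = (offsets[:, 0].max() - offsets[:, 0].min()) / (kernel_t - 1)
dilation_h = (offsets[:, 1].max() - offsets[:, 1].min()) / (kernel_h - 1)
assert (kernel_t, kernel_h, dilation_t, dilation_h) == (2, 2, 2.0, 2.0)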