Code example #1
    def reverse_infer(node: Node):
        out_shape = node.out_port(0).data.get_shape()
        data_shape = node.in_port(0).data.get_shape()
        indices_shape = node.in_port(1).data.get_shape()
        batch_dims = node.batch_dims
        batch_dims = batch_dims + len(indices_shape) if batch_dims < 0 else batch_dims

        axis = node.in_port(2).data.get_value()
        # the axis input of Gather is accepted both as a scalar and as a 1D tensor
        if isinstance(axis, np.ndarray):
            axis = axis.item()
        assert axis is not None, 'axis input is undefined'

        # the data or indices partial shape can be deduced from the output shape formula:
        # out_shape = Concat(data_shape[:axis], indices_shape[batch_dims:batch_dims + indices_rank], data_shape[axis + 1:])

        # data partial shape is unknown
        if out_shape is not None and data_shape is None and indices_shape is not None:
            out_rank = len(out_shape)
            indices_rank = len(indices_shape)

            deduced_data_shape = out_shape.tolist(dynamic_dimension_value)
            for i in range(indices_rank):
                deduced_data_shape.pop(axis)
            deduced_data_shape.insert(axis, dynamic_dimension_value)
            node.in_port(0).data.set_shape(shape_array(deduced_data_shape))

        # indices partial shape is unknown
        if out_shape is not None and indices_shape is None and data_shape is not None:
            out_rank = len(out_shape)
            data_rank = len(data_shape)
            indices_rank = out_rank + 1 - data_rank + batch_dims

            indices_shape = out_shape[axis:axis + indices_rank]
            node.in_port(1).data.set_shape(indices_shape)
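The deduction above inverts the Gather output-shape formula quoted in the comment. As a minimal standalone sketch of that formula (plain Python lists stand in for MO's shape_array, and gather_out_shape is a hypothetical helper, not part of MO):

    def gather_out_shape(data_shape, indices_shape, axis, batch_dims=0):
        # out = data[:axis] ++ indices[batch_dims:] ++ data[axis + 1:]
        return data_shape[:axis] + indices_shape[batch_dims:] + data_shape[axis + 1:]

    # data [2, 3, 4] gathered along axis 1 with indices of shape [5] -> [2, 5, 4]
    assert gather_out_shape([2, 3, 4], [5], axis=1) == [2, 5, 4]

Knowing two of the three shapes in this identity lets reverse_infer recover the third, which is exactly what the two branches above do.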
Code example #2
    def array_infer(node: Node):
        assert len(node.in_nodes()) == 4

        handle = node.in_node(0)
        index = node.in_node(1)
        value = node.in_node(2)
        flow_in = node.in_node(3)

        value_shape = value.shape

        ta_node = Node(node.graph, str(handle.value))
        if ta_node.has_valid('element_shape') and len(ta_node.element_shape) > 0:
            assert match_shapes(ta_node['element_shape'], value.shape), \
                'Shapes are not compatible: {} and {}'.format(ta_node['element_shape'], value.shape)
        ta_node['element_shape'] = value_shape

        output_value = flow_in.value

        for _, out_node in node.graph.out_edges(node.id):
            node.graph.node[out_node]['shape'] = shape_array(flow_in.shape)
            node.graph.node[out_node]['value'] = None if output_value is None else output_value.copy()
Code example #3
    def infer(node: Node):
        name = node.soft_get('name', node.id)
        connected_input_ports = [
            in_port.idx for in_port in node.in_ports().values()
            if not in_port.disconnected()
        ]
        assert len(connected_input_ports) == 3 and [0, 1, 2] == sorted(connected_input_ports), \
            'Range operation should have 3 inputs, {} found for {}'.format(len(connected_input_ports), name)

        start = node.in_port(0).data.get_value()
        limit = node.in_port(1).data.get_value()
        delta = node.in_port(2).data.get_value()

        for inp in (start, limit, delta):  # avoid shadowing the input() builtin
            if inp is not None and not node.has_valid('output_type'):
                node['output_type'] = inp.dtype

        if not all(is_fully_defined(v) for v in (start, limit, delta)):
            node.out_port(0).data.set_shape(shape_array([dynamic_dimension_value]))
        else:
            node.out_port(0).data.set_value(
                np.arange(start, limit, delta, dtype=node['output_type']))
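When start, limit and delta are all fully defined, the branch above reduces to a plain np.arange call; a small numpy-only sketch (the values are illustrative):

    import numpy as np

    start, limit, delta = np.array(2), np.array(11), np.array(3)
    out = np.arange(start, limit, delta, dtype=start.dtype)
    assert out.tolist() == [2, 5, 8]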
Code example #4
    def array_infer(node: Node):
        assert len(node.in_nodes()) == 3

        handle = node.in_node(0)

        ta_node = Node(node.graph, str(handle.value))

        if ta_node.has_valid('element_shape') and ta_node.element_shape is not None \
                and len(ta_node.element_shape) > 0:
            assert symm_match_shapes(ta_node['element_shape'],
                                     node.element_shape)
        else:
            ta_node['element_shape'] = node.element_shape
        data_shape = ta_node['element_shape']

        assert ta_node.has_valid('size')
        size = ta_node['size']

        output_shape = [size] + list(data_shape)

        for _, out_node in node.graph.out_edges(node.id):
            node.graph.node[out_node]['shape'] = shape_array(output_shape)
            node.graph.node[out_node]['value'] = None
Code example #5
    def extend(op: Node):
        assert op.has_valid('element_type'), \
            'Parameter node {} is missing the element_type attribute!'.format(op.name)
        op['data_type'] = destination_type_to_np_data_type(op.element_type)
        if op.shape == '':
            op.shape = int64_array([])
        else:
            Extender.attr_to_list(op, 'shape')
            shape = op.shape.copy()
            has_shapes_with_boundaries = False
            for i, dim in enumerate(op.shape):
                if dim == -1 or (isinstance(dim, str) and ".." in dim):
                    shape[i] = -1
                    # the substring check must be guarded: dim may be an int here
                    if isinstance(dim, str) and ".." in dim:
                        has_shapes_with_boundaries = True
            shape = shape_array(
                [d if d != -1 else dynamic_dimension_value for d in shape])

            if has_shapes_with_boundaries:
                shape_list = []
                for i, dim in enumerate(op.shape):
                    if not isinstance(dim, str):
                        shape_list.append(dim)
                    else:
                        shape_list.append(parse_dimension(dim))

                # This value is used only for serialization of partial shapes with boundaries
                # for Parameter node.
                # 'user_shape' is not used in shape inference, as propagation of partial shapes with boundaries
                # is not implemented in MO.
                op['user_shape'] = tuple(shape_list)

            # If 'user_shape' is not set, 'shape' attribute is used for serialization.
            # 'shape' is also used for shape inference.
            op.shape = shape
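parse_dimension above turns a bounded dimension string such as "1..10" into a structure usable for serialization. A sketch of what such parsing could look like (the actual return type of MO's parse_dimension may differ; this helper is an assumption for illustration):

    def parse_dimension_sketch(dim: str):
        # "1..10" -> (1, 10); "..224" -> (0, 224); "1.." -> (1, -1)
        low, high = dim.split("..")
        return (int(low) if low else 0, int(high) if high else -1)

    assert parse_dimension_sketch("1..10") == (1, 10)
    assert parse_dimension_sketch("..224") == (0, 224)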
Code example #6
File: upsample.py Project: mikhailk62/openvino
    def upsample_infer(node: Node):
        node_name = node.soft_get('name', node.id)
        layout = node.graph.graph['layout']
        assert len(layout) == 4, 'Input tensor rank must be equal to 4 for node "{}"'.format(node_name)

        input_shape = node.in_port(0).data.get_shape()

        if len(node.in_nodes()) == 1:
            in_height = input_shape[get_height_dim(layout, 4)]
            in_width = input_shape[get_width_dim(layout, 4)]
            # node.has() returns a bool, so comparing it with None always passed; check the attrs directly
            assert node.has_valid('width_scale') and node.has_valid('height_scale')
            if in_height is not dynamic_dimension:
                out_height = math.floor(in_height * node.height_scale)
            else:
                out_height = dynamic_dimension
            if in_width is not dynamic_dimension:
                out_width = math.floor(in_width * node.width_scale)
            else:
                out_width = dynamic_dimension
            node.out_port(0).data.set_shape(shape_for_layout(layout,
                                                             batch=input_shape[get_batch_dim(layout, 4)],
                                                             features=input_shape[get_features_dim(layout, 4)],
                                                             height=out_height,
                                                             width=out_width))
        else:
            scales = node.in_port(1).data.get_value()
            assert scales is not None, 'The input with scales for node "{}" is not constant'.format(node_name)
            eps = 1e-5  # small eps so that products landing just below an integer round to it instead of down
            # generic output shape calculation to support 5D input shape case
            output_shape = shape_array([dynamic_dimension for _ in range(len(input_shape))])
            for idx in range(len(output_shape)):
                if input_shape[idx] is not dynamic_dimension:
                    output_shape[idx] = int((input_shape[idx] + eps) * scales[idx])
                else:
                    output_shape[idx] = dynamic_dimension_value
            node.out_port(0).data.set_shape(output_shape)
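A quick standalone illustration of why the eps guard above exists: a float product can land just below an integer, and int() would then truncate one pixel too far (values chosen to hit such a boundary; not from the original source):

    eps = 1e-5
    in_dim, scale = 100, 0.29
    assert int(in_dim * scale) == 28           # 100 * 0.29 == 28.999999999999996
    assert int((in_dim + eps) * scale) == 29   # eps nudges the product over the boundary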
Code example #7
    def test_eltwise_infer(self, value1, shape1, value2, shape2, shape_infer,
                           exp_value, exp_shape):
        graph = build_graph(
            nodes_attributes,
            [('node_1', 'eltw_1'), ('node_2', 'eltw_1'),
             ('eltw_1', 'node_3'), ('node_3', 'op_output')],
            {'node_3': {'shape': None},
             'node_1': {'shape': shape_array(value1).shape if value1 is not None else shape_array(shape1),
                        'value': value1},
             'node_2': {'shape': shape_array(value2).shape if value2 is not None else shape_array(shape2),
                        'value': value2}})

        graph.graph['layout'] = 'NCHW'

        eltwise_node = Node(graph, 'eltw_1')

        eltwise_infer(eltwise_node, shape_infer)
        res_shape = graph.node['node_3']['shape']
        res_value = eltwise_node.out_node().value
        if exp_value is not None:
            self.assertTrue(
                strict_compare_tensors(res_value, shape_array(exp_value)))
        self.assertTrue(
            strict_compare_tensors(res_shape, shape_array(exp_shape)))
Code example #8
File: roialign.py Project: yury-intel/openvino
    def reverse_infer(node):
        set_input_shapes(node,
                         undefined_shape_of_rank(4),
                         shape_array([dynamic_dimension_value, 4]),
                         undefined_shape_of_rank(1))
Code example #9
    def infer(node: Node):
        """
        Infers the shape of a convolution node as it is done in ONNX.
        It is very similar to what Caffe does, but slightly different.
        We made a complete fork of this function because the two variants
        are maintained separately.
        Args:
            node: graph convolution node
        """
        input_shape = node.in_port(0).data.get_shape()
        if input_shape is None:
            raise Error('Input data shape is None for node {}'.format(
                node.soft_get('name', node.id)))

        # bias_term cannot be deduced earlier for frameworks that represent
        # convolution weights/biases as regular inputs, so the number of inputs
        # is checked here to restore the correct value of bias_term and keep
        # the rest of the code unchanged. It will be needed until the per-framework
        # convolution infer functions are merged into a single one.
        if not node.has_valid('bias_term'):
            node['bias_term'] = len(node.in_nodes()) == 3

        weights_index = node.weights_index if node.has_valid('weights_index') else 1
        # Reshape weights kernel to original shape
        # In case of caffe or MXNet framework, values for weights have no structured shape like OIHW
        # so we have to reshape weights to normal shape
        # For this case, Convolution node should have attribute reshape_kernel = True
        if node.has_valid('reshape_kernel') and node.reshape_kernel:
            if not (node.has_valid('output') and node.has_valid('channel_dims')
                    and node.has_valid('group')
                    and node.has_valid('kernel_spatial')):
                log.error(
                    'Cannot reshape kernel because not all required attributes are set for node {}'
                    .format(node.id))
                return
            # layout for Convolution weights is OIHW
            kernel_shape = shape_array([
                node.output,
                input_shape[node.channel_dims].item() / node.group, *[
                    node.kernel_spatial[i]
                    for i in range(len(node.kernel_spatial))
                ]
            ])
            if node.type == 'Deconvolution':  # layout for Deconvolution weights is IOHW
                kernel_shape[[0, 1]] = kernel_shape[[1, 0]]

            if is_fully_defined(kernel_shape) and \
                    np.prod(kernel_shape) != np.prod(node.in_node(weights_index).value.shape):
                log.error(
                    "Size of weights {} does not match kernel shape: {}\n"
                    "".format(np.prod(node.in_node(weights_index).value.shape),
                              kernel_shape) +
                    "    Possible reason is wrong channel number in input shape\n"
                )
                raise Error("Cannot reshape weights to kernel shape")

            if not is_fully_defined(kernel_shape):
                num_undefined = np.count_nonzero(kernel_shape.mask is True)  # pylint: disable=no-member
                if num_undefined > 1:
                    raise Error(
                        'Too many undefined dimensions of the kernel shape for node {}. Use --input_shape '
                        'command line parameter to specify model input shapes'.
                        format(node.soft_get('name', node.id)))
                kernel_size = np.prod(node.in_node(weights_index).value.shape)
                # calculate undefined dimension using fully defined shape of the weights input and known kernel_shape
                # dimensions
                kernel_shape[np.where(kernel_shape == np.ma.masked)[0]
                             [0]] = kernel_size // np.prod(kernel_shape)

            node.in_node(weights_index).shape = shape_array(kernel_shape)
            node.in_node(weights_index).value = np.reshape(
                node.in_node(weights_index).value, kernel_shape)
            node.reshape_kernel = False

        # Pass weights shape to node attribute kernel_shape
        kernel_shape = node.in_node(weights_index).shape
        node['kernel_shape'] = kernel_shape
        # Calculate kernel_spatial_idx and spatial_dims if they are not specified.
        # This is necessary for ONNX because a convolution can be 1D/2D/3D.
        if not node.has_valid('kernel_spatial_idx'):
            node['kernel_spatial_idx'] = np.delete(
                [x for x in range(len(kernel_shape))],
                (node.input_feature_channel, node.output_feature_channel))

        if not node.has_valid('spatial_dims'):
            node['spatial_dims'] = np.delete(
                [x for x in range(len(input_shape))],
                (node.channel_dims[0], node.batch_dims[0]))

        node['kernel_spatial'] = kernel_shape[node.kernel_spatial_idx]

        if not node.has_valid('output'):
            # restore the number of output feature maps from the second argument that is weights
            if node.type in [
                    'Convolution', 'Deconvolution', 'DeformableConvolution',
                    'BinaryConvolution'
            ]:
                node['output'] = kernel_shape[node.output_feature_channel]
            else:
                raise Error(
                    'Convolution infer function was called for a node {} with unsupported type {}',
                    node.soft_get('name'), node.type)

        # Set default values for dilation, strides and pads if not set
        if not node.has_valid('dilation'):
            node['dilation'] = np.full([len(input_shape)], 1, dtype=np.int64)
        if not node.has_valid('stride'):
            node['stride'] = np.full([len(input_shape)], 1, dtype=np.int64)
        if not node.has_valid('pad'):
            node['pad'] = int64_array([[0, 0]] * len(input_shape))
        node['pad_spatial_shape'] = node.pad[node.spatial_dims]

        if not node.has_valid('output_padding'):
            node['output_padding'] = np.full([len(input_shape)],
                                             0,
                                             dtype=np.int64)

        if node.has_valid('output_padding') and len(input_shape) > len(
                node['output_padding']):
            output_padding = np.zeros(len(input_shape), dtype=np.int64)
            for i in range(len(node['output_padding'])):
                output_padding[i] = node['output_padding'][i]
            node['output_padding'] = output_padding

        input_spatial_shape = input_shape[node.spatial_dims]
        stride_spatial_shape = node.stride[node.spatial_dims]

        kernel_extent = node.dilation[node.spatial_dims] * (
            node.kernel_spatial - 1) + 1
        # TensorFlow always has auto_pad attribute that can be either valid or same_upper
        # In ONNX auto_pad attribute is deprecated but appears in some models (could be valid, same_upper or same_lower)
        # Caffe does not use the auto_pad attribute
        if node.has_valid('auto_pad') and node.auto_pad != 'explicit' and \
                not node.has_valid('output_spatial_shape'):
            node['pad_spatial_shape'], node['output_spatial_shape'] = \
                tf_window_op_pad_infer(input_spatial_shape, kernel_extent,
                                       stride_spatial_shape, node.auto_pad,
                                       node.type == 'Deconvolution')

            pad = np.zeros((len(input_shape), 2), dtype=np.int64)
            pad[node.spatial_dims] = node.pad_spatial_shape
            node.pad = pad
        else:
            pad_spatial_shape = np.add.reduce(node.pad_spatial_shape, axis=1)
            if node.type in ('Convolution', 'BinaryConvolution'):
                float_spatial = Convolution.calc_convolution(
                    input_spatial_shape, stride_spatial_shape,
                    pad_spatial_shape, kernel_extent)
                node['output_spatial_shape'] = shape_array(float_spatial)
            elif node.type == 'Deconvolution':
                # In case of given output_spatial_shape we calculate pads spatial
                if node.has_valid('output_spatial_shape'):
                    if node.has_valid('get_pad'):
                        node['pad'] = node.get_pad(node, input_shape,
                                                   kernel_shape)
                    else:
                        log.debug(
                            'Can\'t calculate paddings due to missing lambda get_pad in {} node'
                            .format(node.id))
                        return
                else:
                    output_padding = node.output_padding[node.spatial_dims] \
                        if node.has_valid('output_padding') else None
                    if output_padding is not None and any(output_padding):
                        pad_spatial_shape -= output_padding
                        for dim in range(len(pad_spatial_shape)):
                            node.pad_spatial_shape[dim][1] -= pad_spatial_shape[dim]

                    float_spatial = Convolution.calc_deconvolution(
                        node, input_spatial_shape, pad_spatial_shape,
                        kernel_extent)
                    node['output_spatial_shape'] = shape_array(float_spatial)
            elif node.type == 'DeformableConvolution':
                # get the output spatial shape from the second input with offsets
                node['output_spatial_shape'] = int64_array(
                    [node.in_node(1).shape[2:4]])
            else:
                # a bare assert on a non-empty string never fires; raise instead
                raise Error('Unsupported layer type "{}"'.format(node.type))

        # For cases when group attribute wasn't set in extractor we should specify get_group attribute
        # this attribute should store lambda node: ... (check tf convolution extractor)
        if node.has_valid('get_group'):
            node['group'] = node.get_group(node)
        output_shape = shape_array(
            [dynamic_dimension_value for _ in range(len(input_shape))])
        output_shape[node.batch_dims] = input_shape[node.batch_dims]  # pylint: disable=unsupported-assignment-operation
        output_shape[node.spatial_dims] = node.output_spatial_shape  # pylint: disable=unsupported-assignment-operation

        # For cases when output attribute wasn't set in extractor we should specify get_output_feature_dim attribute
        # this attribute should store lambda node: ... (check tf convolution extractor)
        if node.has_valid('get_output_feature_dim'):
            node['output'] = node.get_output_feature_dim(node)
        output_shape[node.channel_dims] = node.output  # pylint: disable=unsupported-assignment-operation
        node['output_shape'] = output_shape

        node.out_port(0).data.set_shape(output_shape)

        # bin attribute is used for pre-processing, but it will be deleted in BlobNormalizer transformation
        # and the blobs (weights, biases) will be represented as inputs to the node
        mark_input_bins(
            node, start_port=1 if node.type != 'DeformableConvolution' else 2)
        assign_dims_to_weights(node.in_node(weights_index),
                               node.kernel_spatial_idx,
                               node.input_feature_channel,
                               node.output_feature_channel, len(kernel_shape))

        PermuteAttrs.create_permute_attrs(
            node,
            attrs=[
                ('pad', 'input:0'),
                ('stride', 'input:0'),
                ('dilation', 'input:0'),
                ('output_shape', 'input:0'),
                ('batch_dims', 'input:0'),
                ('channel_dims', 'input:0'),
                ('spatial_dims', 'input:0'),
                ('kernel_shape', 'input:{}'.format(weights_index)),
                ('kernel_spatial_idx', 'input:{}'.format(weights_index)),
                ('input_feature_channel', 'input:{}'.format(weights_index)),
                ('output_feature_channel', 'input:{}'.format(weights_index)),
            ])

        # needed to permute Conv weights from the original TF [H, W, C_IN, C_OUT] layout into IE [C_OUT, C_IN, H, W];
        # for other nodes in the weights subgraph, permutations must be turned off
        # by marking them with MarkSubGraphsWithCorrectLayout, even if the graph layout is NCHW.
        PermuteAttrs.set_permutation(
            node.in_node(weights_index), node,
            node.soft_get('get_weights_permute', None))
        PermuteInputs().set_input_permutation(node.in_node(weights_index),
                                              node,
                                              'input:{}'.format(weights_index),
                                              'transpose')
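The Convolution.calc_convolution call above implements the standard spatial-size rule. A minimal numpy sketch of that rule (an illustration of the usual convolution arithmetic, not the MO implementation itself):

    import numpy as np

    def conv_spatial(input_spatial, stride, pad_sum, kernel_extent):
        # floor((I + P - ((K - 1) * D + 1)) / S) + 1, where kernel_extent
        # is already (K - 1) * D + 1 as computed in the infer function above
        return (input_spatial + pad_sum - kernel_extent) // stride + 1

    # 224x224 input, 7x7 kernel, stride 2, symmetric pad 3 -> 112x112
    out = conv_spatial(np.array([224, 224]), np.array([2, 2]),
                       np.array([6, 6]), np.array([7, 7]))
    assert out.tolist() == [112, 112]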
Code example #10
    def test_shape_array(self, data, ref, result):
        self.assertEqual(strict_compare_tensors(shape_array(data), ref), result)
Code example #11
class TestIf(unittest.TestCase):
    @generate(*[
        (np.array([True], dtype=bool), shape_array([3]), shape_array([3])),
        (np.array([False], dtype=bool), shape_array([3]), shape_array([2])),
        (shape_array(dynamic_dimension_value), shape_array([3]),
         shape_array([dynamic_dimension_value])),
    ])
    def test_simple_shape_inf(self, cond, output_port_0_shape,
                              output_port_1_shape):
        then_graph_nodes = {
            **regular_op_with_empty_data(
                'param_1', {
                    'type': 'Parameter',
                    'kind': 'op',
                    'input_id': 1,
                    'shape': None,
                    'infer': Parameter.infer
                }),
            **regular_op_with_empty_data(
                'param_2', {
                    'type': 'Parameter',
                    'kind': 'op',
                    'input_id': 2,
                    'shape': None,
                    'infer': Parameter.infer
                }),
            **regular_op_with_empty_data(
                'add', {
                    'type': 'Add',
                    'kind': 'op',
                    'op': 'Add',
                    'infer': lambda node: eltwise_infer(node, Add.operation)
                }),
            **regular_op_with_empty_data(
                'mul', {
                    'type': 'Mul',
                    'kind': 'op',
                    'op': 'Mul',
                    'infer': lambda node: eltwise_infer(node, Mul.operation)
                }),
            **regular_op_with_empty_data(
                'res1', {
                    'kind': 'op',
                    'type': 'Result',
                    'op': 'Result',
                    'infer': lambda x: 0,
                    'output_id': 0
                }),
            **regular_op_with_empty_data(
                'res2', {
                    'kind': 'op',
                    'type': 'Result',
                    'op': 'Result',
                    'infer': lambda x: 0,
                    'output_id': 1
                })
        }
        then_graph_edges = [
            *connect('param_1', '0:add'),
            *connect('param_2', '1:add'),
            *connect('param_1', '1:mul'),
            *connect('param_2', '0:mul'),
            *connect('add', 'res1'),
            *connect('mul', 'res2'),
        ]

        else_graph_nodes = {
            **regular_op_with_empty_data(
                'param_1', {
                    'type': 'Parameter',
                    'kind': 'op',
                    'input_id': 1,
                    'shape': None,
                    'infer': Parameter.infer
                }),
            **regular_op_with_empty_data(
                'param_2', {
                    'type': 'Parameter',
                    'kind': 'op',
                    'input_id': 3,
                    'shape': None,
                    'infer': Parameter.infer
                }),
            **regular_op_with_empty_data('identity', {
                'kind': 'op',
                'op': 'Identity',
                'infer': Identity.infer
            }),
            **regular_op_with_empty_data('identity_1', {
                'kind': 'op',
                'op': 'Identity',
                'infer': Identity.infer
            }),
            **regular_op_with_empty_data(
                'res1', {
                    'kind': 'op',
                    'type': 'Result',
                    'op': 'Result',
                    'infer': lambda x: 0,
                    'output_id': 0
                }),
            **regular_op_with_empty_data(
                'res2', {
                    'kind': 'op',
                    'type': 'Result',
                    'op': 'Result',
                    'infer': lambda x: 0,
                    'output_id': 1
                })
        }
        else_graph_edges = [
            *connect('param_1', 'identity'),
            *connect('param_2', 'identity_1'),
            *connect('identity_1', 'res2'),
            *connect('identity', 'res1'),
        ]
        then_graph = build_graph_with_edge_attrs(then_graph_nodes,
                                                 then_graph_edges)
        else_graph = build_graph_with_edge_attrs(else_graph_nodes,
                                                 else_graph_edges)
        external_graph_nodes = {
            **valued_const_with_data('cond', cond),
            **valued_const_with_data('input_2', int64_array([3, 2, 1])),
            **valued_const_with_data('input_1', int64_array([1, 2, 3])),
            **valued_const_with_data('input_3', int64_array([8, 4])),
            **regular_op(
                'if', {
                    'kind': 'op',
                    'op': 'If',
                    'then_graph': then_graph,
                    'else_graph': else_graph,
                    'infer': If.infer
                }),
            **empty_data('if_d_1'),
            **empty_data('if_d_2'),
            **result('res_1'),
            **result('res_2')
        }
        external_graph_edges = [
            *connect('cond', '0:if'), *connect('input_1', '1:if'),
            *connect('input_2', '2:if'), *connect('input_3', '3:if'),
            ('if', 'if_d_1', {
                'out': 0
            }), ('if', 'if_d_2', {
                'out': 1
            }), ('if_d_1', 'res_1'), ('if_d_2', 'res_2')
        ]

        graph = build_graph(external_graph_nodes, external_graph_edges)
        graph.stage = 'middle'
        partial_infer(graph)
        if_node = Node(graph, 'if')
        self.assertTrue(
            strict_compare_tensors(
                if_node.out_port(0).data.get_shape(), output_port_0_shape))
        # shape of the "then" branch is [3] and shape of the "else" branch is [2], so the output shape is "[dynamic]"
        self.assertTrue(
            strict_compare_tensors(
                if_node.out_port(1).data.get_shape(), output_port_1_shape))

    def test_fake_results(self):
        then_graph_nodes = {
            **valued_const_with_data('fake_const', int64_array(0)),
            **regular_op_with_empty_data(
                'shapeof', {
                    'kind': 'op',
                    'type': 'ShapeOf',
                    'op': 'ShapeOf',
                    'infer': Shape.infer,
                    'output_type': np.int64
                }),
            **regular_op_with_empty_data(
                'res_1', {
                    'kind': 'op',
                    'type': 'Result',
                    'op': 'Result',
                    'infer': lambda x: 0,
                    'output_id': 0
                })
        }
        then_graph_edges = [
            *connect('fake_const', 'shapeof'),
            *connect('shapeof', 'res_1'),
        ]

        else_graph_nodes = {
            **regular_op_with_empty_data(
                'param_1', {
                    'type': 'Parameter',
                    'kind': 'op',
                    'input_id': 1,
                    'shape': None,
                    'infer': Parameter.infer
                }),
            **regular_op_with_empty_data(
                'res_1', {
                    'kind': 'op',
                    'type': 'Result',
                    'op': 'Result',
                    'infer': lambda x: 0,
                    'output_id': 0
                })
        }
        else_graph_edges = [*connect('param_1', 'res_1')]
        then_graph = build_graph_with_edge_attrs(then_graph_nodes,
                                                 then_graph_edges)
        else_graph = build_graph_with_edge_attrs(else_graph_nodes,
                                                 else_graph_edges)
        external_graph_nodes = {
            **valued_const_with_data('cond',
                                     shape_array([dynamic_dimension_value])),
            **valued_const_with_data(
                'input_1',
                int64_array([1, 2, 3, 3, 2, 3]).reshape((2, 3))),
            **regular_op_with_empty_data(
                'if', {
                    'kind': 'op',
                    'op': 'If',
                    'then_graph': then_graph,
                    'else_graph': else_graph,
                    'infer': If.infer
                }),
            **result('res_1')
        }
        external_graph_edges = [
            *connect('cond', '0:if'), *connect('input_1', '1:if'),
            *connect('if', 'res_1')
        ]

        graph = build_graph(external_graph_nodes, external_graph_edges)
        graph.stage = 'middle'
        partial_infer(graph)
        npt.assert_array_equal(
            Node(graph, 'if').out_port(0).data.get_shape(), int64_array([2,
                                                                         3]))
Code example #12
    def test_v10_group_convolution_resolver_for_dynamic_weights(self):
        num_groups = 2
        C_OUT = 8

        nodes = {
            **regular_op_with_shaped_data(
                'input', shape_array([1, dynamic_dimension_value, 224, 224]), {
                    'type': 'Parameter'
                }),
            **valued_const_with_data('weights',
                                     np.ones([num_groups, C_OUT, 7, 7])),
            **regular_op_with_empty_data('reshape', {'type': 'Reshape'}),
            **regular_op_with_empty_data(
                'ss', {
                    'type': 'StridedSlice',
                    'begin_mask': [1],
                    'end_mask': [0],
                    'new_axis_mask': [0],
                    'shrink_axis_mask': [0],
                    'ellipsis_mask': [0]
                }),
            **regular_op_with_empty_data('weights_shape', {'type': 'ShapeOf'}),
            **regular_op_with_empty_data('input_shape', {'type': 'ShapeOf'}),
            **regular_op_with_empty_data('gather', {'type': 'Gather'}),
            **regular_op_with_empty_data('concat', {'type': 'Concat'}),
            **regular_op_with_empty_data('div', {'type': 'Divide'}),
            **valued_const_with_data(
                'channels_const', int64_array([num_groups, C_OUT / num_groups])),
            **valued_const_with_data('num_groups', int64_array(num_groups)),
            **valued_const_with_data('begin', int64_array([2])),
            **valued_const_with_data('end', int64_array([-1])),
            **valued_const_with_data('channel_index', int64_array([1])),
            **valued_const_with_data('axis', int64_array(0)),
            **regular_op_with_shaped_data('convolution', None, {
                'type': 'Convolution',
                'group': num_groups,
                'output': C_OUT
            }),
            **result(),
        }
        graph = build_graph(nodes, [
            *connect('input', '0:convolution'),
            *connect('weights', '1:convolution'),
            *connect('convolution', 'output'),
        ],
                            nodes_with_edges_only=True)

        V10ConvolutionWithGroupsResolver().find_and_replace_pattern(graph)

        nodes['convolution']['type'] = 'GroupConvolution'
        del nodes['convolution']['group']

        graph_ref = build_graph(nodes, [
            *connect('input', '0:convolution'),
            *connect('weights', '0:reshape'),
            ('input_d', 'input_shape', {
                'in': 0,
                'out': 0
            }),
            ('weights_d', 'weights_shape', {
                'in': 0,
                'out': 0
            }),
            *connect('input_shape', '0:gather'),
            *connect('channel_index', '1:gather'),
            *connect('axis', '2:gather'),
            *connect('weights_shape', '0:ss'),
            *connect('begin', '1:ss'),
            *connect('end', '2:ss'),
            *connect('gather', '0:div'),
            *connect('num_groups', '1:div'),
            *connect('channels_const', '0:concat'),
            *connect('div', '1:concat'),
            *connect('ss', '2:concat'),
            *connect('concat', '1:reshape'),
            *connect('reshape', '1:convolution'),
            *connect('convolution', 'output'),
        ],
                                nodes_with_edges_only=True)

        Const.infer(Node(graph, 'convolution/GroupsAndOutputChannelsSize'))
        Const.infer(Node(graph, 'convolution/Div_input_port_1/value'))

        (flag, resp) = compare_graphs(graph, graph_ref, last_node='output')
        self.assertTrue(flag, resp)
Code example #13
def tf_tensor_shape(pb):
    return shape_array([dim.size if dim.size >= 0 else dynamic_dimension_value for dim in pb.dim])
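tf_tensor_shape maps TensorFlow's "unknown" dimensions (size -1 in TensorShapeProto) to MO's dynamic dimension. A sketch of feeding it a stand-in protobuf (the Dim/Pb namedtuples are hypothetical test doubles, not the real proto classes):

    from collections import namedtuple

    Dim = namedtuple('Dim', ['size'])
    Pb = namedtuple('Pb', ['dim'])

    pb = Pb(dim=[Dim(1), Dim(-1), Dim(224), Dim(224)])
    # tf_tensor_shape(pb) would yield a shape_array with a dynamic (masked)
    # entry in place of the -1: [1, dynamic, 224, 224]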
Code example #14
class TestUnsqueezeOp(unittest.TestCase):
    nodes_attributes = {
        'data_1': {
            'kind': 'data',
            'shape': None,
            'value': None,
        },
        'unsq': {
            'op': 'Unsqueeze',
            'kind': 'op',
        },
        'unsq_dims_const': {
            'op': 'Const',
            'kind': 'op',
        },
        'unsq_dims': {
            'kind': 'data',
        },
        'data_2': {
            'kind': 'data',
            'shape': None,
            'value': None,
        }
    }

    @generate(*[
        (shape_array([1, 3, 64, 64]), int64_array([0, 4]),
         shape_array([1, 1, 3, 64, 1, 64]), int64_array([0, 4]), None, None),
        (shape_array([2, 3, 64, 64]), int64_array([-1]),
         shape_array([2, 3, 64, 64, 1]), int64_array([4]), None, None),
        (shape_array([2, 3, dynamic_dimension_value, 64]), int64_array([0]),
         shape_array([1, 2, 3, dynamic_dimension_value,
                      64]), int64_array([0]), None, None),
        (shape_array([1, 2]), int64_array([-1]), shape_array([1, 2, 1]),
         int64_array([2]), shape_array([5, dynamic_dimension_value]).reshape(
             (1, 2)), shape_array([5, dynamic_dimension_value]).reshape(
                 (1, 2, 1))),
    ])
    def test_unsqueeze_infer(self, input_shape, unsq_dims, output_shape,
                             ref_uns_dims, input_value, output_value):
        graph = build_graph(
            self.nodes_attributes, [('data_1', 'unsq'),
                                    ('unsq_dims_const', 'unsq_dims'),
                                    ('unsq_dims', 'unsq'), ('unsq', 'data_2')],
            {
                'data_1': {
                    'shape': input_shape,
                    'value': input_value
                },
                'unsq_dims': {
                    'value': unsq_dims,
                    'shape': unsq_dims.shape
                },
                'unsq_dims_const': {
                    'value': unsq_dims,
                    'shape': unsq_dims.shape
                },
            })

        graph_ref = build_graph(
            self.nodes_attributes, [('data_1', 'unsq'),
                                    ('unsq_dims_const', 'unsq_dims'),
                                    ('unsq_dims', 'unsq'), ('unsq', 'data_2')],
            {
                'data_1': {
                    'shape': input_shape,
                    'value': input_value
                },
                'unsq_dims': {
                    'value': ref_uns_dims,
                    'shape': ref_uns_dims.shape
                },
                'unsq_dims_const': {
                    'value': ref_uns_dims,
                    'shape': ref_uns_dims.shape
                },
                'data_2': {
                    'shape': output_shape,
                    'value': output_value
                },
            })

        unsqueeze_node = Node(graph, 'unsq')
        Unsqueeze.infer(unsqueeze_node)

        (flag, resp) = compare_graphs(graph, graph_ref, 'data_2')
        self.assertTrue(flag, resp)
        self.assertTrue(
            strict_compare_tensors(
                Node(graph, 'data_2').shape,
                Node(graph_ref, 'data_2').shape))
        if Node(graph_ref, 'data_2').value is not None:
            self.assertTrue(
                strict_compare_tensors(
                    Node(graph, 'data_2').value,
                    Node(graph_ref, 'data_2').value))
Code example #15
File: eltwise_test.py Project: mikhailk62/openvino
class TestEltwiseInfer(unittest.TestCase):
    @generate(*[
        (np.array(2), [], np.array(3), [], lambda a, b: np.multiply(a, b), np.array(6), []),
        (np.array(2), [], np.array(3), [], lambda a, b: np.maximum(a, b), np.array(3), []),
        (np.array(2), [], np.array(3), [], lambda a, b: np.add(a, b), np.array(5), []),
        (None, [1, 5], None, [1, 1], lambda a, b: np.add(a, b), None, [1, 5]),
        (None, [dynamic_dimension_value, 3], None, [1, 1], lambda a, b: np.add(a, b), None,
         [dynamic_dimension_value, 3]),
        (None, [dynamic_dimension_value, 3], None, [1, dynamic_dimension_value], lambda a, b: np.add(a, b), None,
         [dynamic_dimension_value, 3]),
        (None, [4, 5, dynamic_dimension_value, 3], None, [1, dynamic_dimension_value], lambda a, b: np.add(a, b), None,
         [4, 5, dynamic_dimension_value, 3]),
        (None, [1, 10, 20, 30], None, [dynamic_dimension_value, 10, 20, 30], lambda a, b: np.add(a, b), None,
         [dynamic_dimension_value, 10, 20, 30]),
        # dynamic value propagation
        (shape_array([dynamic_dimension_value, 5]), [2], np.array(3), [], lambda a, b: np.add(a, b),
         shape_array([dynamic_dimension_value, 8]), [2]),
        (shape_array([dynamic_dimension_value, 5]), [2], np.array([3, 7]), [], lambda a, b: np.add(a, b),
         shape_array([dynamic_dimension_value, 12]), [2]),
    ])
    def test_eltwise_infer(self, value1, shape1, value2, shape2, shape_infer, exp_value, exp_shape):
        graph = build_graph(nodes_attributes,
                            [('node_1', 'eltw_1'),
                             ('node_2', 'eltw_1'),
                             ('eltw_1', 'node_3'),
                             ('node_3', 'op_output')
                             ],
                            {'node_3': {'shape': None},
                             'node_1': {'shape': shape_array(value1).shape if value1 is not None else shape_array(shape1),
                                        'value': value1},
                             'node_2': {'shape': shape_array(value2).shape if value2 is not None else shape_array(shape2),
                                        'value': value2}
                             })

        graph.graph['layout'] = 'NCHW'

        eltwise_node = Node(graph, 'eltw_1')

        eltwise_infer(eltwise_node, shape_infer)
        res_shape = graph.node['node_3']['shape']
        res_value = eltwise_node.out_node().value
        if exp_value is not None:
            self.assertTrue(strict_compare_tensors(res_value, shape_array(exp_value)))
        self.assertTrue(strict_compare_tensors(res_shape, shape_array(exp_shape)))

    def test_eltwise_infer_none_val(self):
        graph = build_graph(nodes_attributes,
                            [('node_1', 'eltw_1'),
                             ('node_2', 'eltw_1'),
                             ('eltw_1', 'node_3'),
                             ('node_3', 'op_output')
                             ],
                            {'node_3': {'shape': None},
                             'node_1': {'shape': np.array([1, 3, 256, 256]), 'value': None},
                             'node_2': {'shape': np.array([1, 3, 256, 256])}
                             })
        graph.graph['layout'] = 'NCHW'
        eltwise_node = Node(graph, 'eltw_1')

        eltwise_infer(eltwise_node, lambda a, b: a * b)
        exp_shape = np.array([1, 3, 256, 256])
        res_shape = graph.node['node_3']['shape']
        res_value = eltwise_node.out_node().value
        for i in range(0, len(exp_shape)):
            self.assertEqual(exp_shape[i], res_shape[i])

        self.assertIsNone(res_value)

    def test_eltwise_infer_none_min_max(self):
        graph = build_graph(nodes_attributes,
                            [('node_1', 'eltw_1'),
                             ('node_2', 'eltw_1'),
                             ('eltw_1', 'node_3'),
                             ('node_3', 'op_output')
                             ],
                            {'node_3': {'shape': None},
                             'node_1': {'shape': np.array([1, 3, 257, 256])},
                             'node_2': {'shape': np.array([1, 3, 256, 257])}
                             })
        graph.graph['layout'] = 'NCHW'
        eltwise_node = Node(graph, 'eltw_1')

        with self.assertRaisesRegex(Error, 'Input shapes mismatch*'):
            eltwise_infer(eltwise_node)
Code example #16
File: graph.py Project: yury-intel/openvino
def valued_data(name, value, shape=None):
    if value is not None:
        shape = int64_array(value.shape)
    elif value is None and shape is not None:
        shape = shape_array(shape)
    return {name: {'kind': 'data', 'value': value, 'shape': shape}}


regular_op = lambda name, kwargs: {
    name: {
        'kind': 'op',
        'type': 'NoType',
        **kwargs
    }
}

shaped_data = lambda name, shape: {
    name: {
        'kind': 'data',
        'value': None,
        'shape': shape_array(shape) if shape is not None else None
    }
}
empty_data = lambda name: valued_data(name, None)

shaped_parameter = lambda name, shape, kwargs={}: {
    **regular_op(
        name, {
            'op': 'Parameter',
            'type': 'Parameter',
            'shape': shape,
            'infer': Parameter.infer,
            **kwargs
        }),
    **shaped_data(name + '_d', shape)
}
Code example #17
File: graph.py Project: yury-intel/openvino
def valued_data(name, value, shape=None):
    if value is not None:
        shape = int64_array(value.shape)
    elif value is None and shape is not None:
        shape = shape_array(shape)
    return {name: {'kind': 'data', 'value': value, 'shape': shape}}
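A usage sketch for the helper above, assuming valued_data and its imports are available: when a value is given, the shape is derived from it; otherwise the explicit shape argument is used.

    import numpy as np

    d = valued_data('c', np.zeros((2, 3)))
    assert d['c']['shape'].tolist() == [2, 3]
    assert valued_data('d', None, [4, 5])['d']['value'] is None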
Code example #18
class TestUpsampleOp(unittest.TestCase):
    @generate(*[
        (np.array([1., 1., 2., 2.]), shape_array([1, 3, 227, 227]), shape_array([1, 3, 454, 454])),
        (np.array([1., 1., 2.5, 1.5]), shape_array([1, 5, 227, 227]), shape_array([1, 5, 567, 340])),
        (np.array([1., 1., 1.3, 0.7]), shape_array([1, 14, 1023, 713]), shape_array([1, 14, 1329, 499])),
        (np.array([1., 1., 1.3, 0.7]), shape_array([1, 14, dynamic_dimension_value, 713]),
         shape_array([1, 14, dynamic_dimension_value, 499])),
        (np.array([1., 1., 1.3, 0.7]), shape_array([1, 14, 1023, dynamic_dimension_value]),
         shape_array([1, 14, 1329, dynamic_dimension_value])),
    ])
    def test_upsample_with_scales_infer(self, scales, input_shape, expected_shape):
        graph = build_graph(nodes_attributes,
                            [('node_1', 'upsample'),
                             ('upsample', 'node_3'),
                             ('node_3', 'op_output')
                             ],
                            {'node_3': {'shape': None, 'value': None},
                             'node_1': {'shape': input_shape},
                             'upsample': {'mode': 'linear',
                                          'height_scale': scales[2],
                                          'width_scale': scales[3]}
                             })

        graph.graph['layout'] = 'NCHW'
        upsample_node = Node(graph, 'upsample')
        UpsampleOp.upsample_infer(upsample_node)
        res_shape = graph.node['node_3']['shape']
        self.assertTrue(strict_compare_tensors(expected_shape, res_shape))

    @generate(*[
        (np.array([1., 1., 2., 2.]), shape_array([1, 3, 227, 227]), shape_array([1, 3, 454, 454])),
        (np.array([1., 1., 2.5, 1.5]), shape_array([1, 5, 227, 227]), shape_array([1, 5, 567, 340])),
        (np.array([1., 1., 1.3, 0.7]), shape_array([1, 14, 1023, 713]), shape_array([1, 14, 1329, 499])),
        (np.array([1., 1., 1.3, 0.7]), shape_array([1, 14, dynamic_dimension_value, 713]),
         shape_array([1, 14, dynamic_dimension_value, 499])),
        (np.array([1., 1., 1.3, 0.7]), shape_array([1, 14, 1023, dynamic_dimension_value]),
         shape_array([1, 14, 1329, dynamic_dimension_value])),
    ])
    def test_upsample_with_second_input_infer(self, scales, input_shape, expected_shape):
        nodes_attributes['scales'] = {'kind': 'data', 'value': scales}
        graph = build_graph(nodes_attributes,
                            [('node_1', 'upsample'),
                             ('scales', 'upsample'),
                             ('upsample', 'node_3'),
                             ('node_3', 'op_output')
                             ],
                            {'node_3': {'shape': None, 'value': None},
                             'node_1': {'shape': input_shape},
                             'upsample': {'mode': 'linear',
                                          'height_scale': None,
                                          'width_scale': None}
                             })

        graph.graph['layout'] = 'NCHW'
        upsample_node = Node(graph, 'upsample')
        UpsampleOp.upsample_infer(upsample_node)
        res_shape = graph.node['node_3']['shape']
        self.assertTrue(strict_compare_tensors(expected_shape, res_shape))
Code example #19
    def pool_infer(node: Node):
        input_shape = node.in_node(0).shape
        if input_shape is None:
            return

        if not node.has_valid('spatial_dims'):
            node['spatial_dims'] = np.delete(
                [x for x in range(len(input_shape))],
                [node.batch_dims[0], node.channel_dims[0]])

        input_spatial_shape = input_shape[node.spatial_dims]

        # Set default pad and stride attributes in case none are specified
        if not node.has_valid('pad'):
            node['pad'] = int64_array([[0, 0]
                                       for x in range(len(input_shape))])
        if not node.has_valid('pad_spatial_shape'):
            node['pad_spatial_shape'] = node.pad[node.spatial_dims]

        if not node.has_valid('stride'):
            node['stride'] = int64_array([1 for x in range(len(input_shape))])

        if node.has_and_set('global_pool'):
            node['window'] = np.zeros(len(input_shape), dtype=np.int64)
            node.window[node.spatial_dims] = input_spatial_shape

        if not node.has_valid('dilation'):
            node['dilation'] = np.ones(len(input_shape), dtype=np.float32)

        if not node.has_valid('axis'):
            node['axis'] = 0

        if not node.has_valid('index_element_type'):
            node['index_element_type'] = np.int64

        window_spatial_shape = node.window[node.spatial_dims]
        stride_spatial = node.stride[node.spatial_dims]
        dilation_spatial = node.dilation[node.spatial_dims]
        assert any(stride_spatial), 'Stride can not be zero in node {}'.format(
            node.id)

        if node.has_valid('auto_pad') and node.auto_pad != 'explicit':
            node.pad_spatial_shape, node.output_spatial_shape = tf_window_op_pad_infer(
                input=input_spatial_shape,
                window=window_spatial_shape,
                stride=stride_spatial,
                auto_pad=node.auto_pad,
                dilation=dilation_spatial)
            pad = np.zeros((len(input_shape), 2), dtype=np.int64)
            pad[node.spatial_dims] = node.pad_spatial_shape
            node.pad = pad
        else:

            pad_spatial_shape = np.add.reduce(node.pad_spatial_shape, axis=1)

            rounding = np.floor
            if node.soft_get('pooling_convention') == 'full' or node.soft_get(
                    'rounding_type') == 'ceil':
                rounding = np.ceil

            padded_spatial_shape = input_spatial_shape + pad_spatial_shape - (
                (window_spatial_shape - 1) * dilation_spatial + 1)
            if np.any(padded_spatial_shape < 0):
                raise Error("Data after padding has dimension less than window size. " +
                            "Possible reason of error is incorrectly specified model input shape(s).")

            output_spatial_shape = shape_array(
                [dynamic_dimension_value for _ in range(len(padded_spatial_shape))])
            for idx in range(len(padded_spatial_shape)):
                if padded_spatial_shape[idx] is not dynamic_dimension and \
                        stride_spatial[idx] is not dynamic_dimension:
                    output_spatial_shape[idx] = \
                        int(rounding(padded_spatial_shape[idx] / stride_spatial[idx])) + 1

            original_pads = mo_array([i[1] for i in node.pad_spatial_shape])

            for i in range(len(input_spatial_shape)):
                if original_pads[i] and (output_spatial_shape[i] - 1) * stride_spatial[i] >= \
                        input_spatial_shape[i] + original_pads[i]:
                    output_spatial_shape[i] -= 1

            node['output_spatial_shape'] = output_spatial_shape

        output_shape = input_shape.copy()
        output_shape[node.spatial_dims] = node.output_spatial_shape
        node.out_port(0).data.set_shape(output_shape)

        if len(node.out_ports()) == 2 and not node.out_port(1).disconnected():
            node.out_port(1).data.set_shape(output_shape)

        if node.has_and_set('pool_method') and node['pool_method'] == 'max':
            node['remove_values_output'] = True

        # Add permute_attrs
        PermuteAttrs.create_permute_attrs(node,
                                          attrs=[('pad', 'input:0'),
                                                 ('stride', 'input:0'),
                                                 ('window', 'input:0'),
                                                 ('spatial_dims', 'input:0'),
                                                 ('dilation', 'input:0')])
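The floor/ceil switch above is what separates the default pooling convention from Caffe/MXNet's 'full' (ceil) convention. A small numeric sketch of the output-size rule (pooled_dim is a hypothetical helper; dilation omitted for brevity):

    import math

    def pooled_dim(in_dim, pad_sum, window, stride, ceil_mode=False):
        rounding = math.ceil if ceil_mode else math.floor
        return int(rounding((in_dim + pad_sum - window) / stride)) + 1

    assert pooled_dim(7, 0, 3, 2) == 3                   # floor((7 - 3) / 2) + 1
    assert pooled_dim(8, 0, 3, 2) == 3                   # ragged tail dropped
    assert pooled_dim(8, 0, 3, 2, ceil_mode=True) == 4   # ragged tail kept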
Code example #20
File: topkrois_onnx.py Project: yury-intel/openvino
    def reverse_infer(node):
        set_input_shapes(node, shape_array([dynamic_dimension_value, 4]),
                         shape_array([dynamic_dimension_value]))
Code example #21
File: reshape_test.py Project: yury-intel/openvino
class TestReshapeShapeInfer(unittest.TestCase):
    @generate(*[
        (None, shape_array([1, 100, 4]), shape_array([-1, 25]), None, [16, 25]),
        (None, shape_array([5, 100, 4]), shape_array([0, -1, 25]), None, [5, 16, 25]),
        (None, shape_array([5, dynamic_dimension_value, 4]), shape_array([4, -1, 5]), None,
         shape_array([4, dynamic_dimension_value, 5])),
        (None, shape_array([5, dynamic_dimension_value, 4]), shape_array([4, dynamic_dimension_value, 5]), None,
         shape_array([4, dynamic_dimension_value, 5])),
        (None, shape_array([dynamic_dimension_value, 4, 5]), shape_array([0, -1]), None,
         shape_array([dynamic_dimension_value, 20])),
        (None, shape_array([dynamic_dimension_value, 4, 5]), shape_array([5, -1, dynamic_dimension_value]), None,
         shape_array([5, dynamic_dimension_value, dynamic_dimension_value])),
        (None, shape_array([dynamic_dimension_value, 1, 546]), shape_array([dynamic_dimension_value, -1, 91]), None,
         shape_array([dynamic_dimension_value, dynamic_dimension_value, 91])),
        (None, shape_array([5, dynamic_dimension_value, 8]), shape_array([4, -1]), None,
         shape_array([4, dynamic_dimension_value])),
        (None, shape_array([dynamic_dimension_value]), shape_array([5]), None, shape_array([5])),
        (None, shape_array([dynamic_dimension_value]), shape_array([0]), None,
         shape_array([dynamic_dimension_value])),
        (None, shape_array([dynamic_dimension_value]), shape_array([-1]), None,
         shape_array([dynamic_dimension_value])),
        (None, shape_array([dynamic_dimension_value]), shape_array([dynamic_dimension_value]), None,
         shape_array([dynamic_dimension_value])),
        # even though the target shape is dynamic, all the inputs are static, so the output can be calculated
        (None, shape_array([5, 3, 8]), shape_array([4, dynamic_dimension_value]), None, shape_array([4, 30])),
        (None, shape_array([3, 14, 5]), shape_array([dynamic_dimension_value, 2, 0]), None, shape_array([21, 2, 5])),
        (shape_array([1, 2, dynamic_dimension_value, 4, 5, 6]), shape_array([6]), shape_array([-1, 2]),
         shape_array([1, 2, dynamic_dimension_value, 4, 5, 6]).reshape((3, 2)), shape_array([3, 2])),
    ])
    def test_reshape_infer(self, input_value, input_shape, output_shape,
                           ref_value, ref_shape):
        graph = build_graph(
            nodes_attributes, [('input', 'data'), ('data', 'reshape'),
                               ('output_shape', 'output_shape_data'),
                               ('output_shape_data', 'reshape'),
                               ('reshape', 'reshape_out')], {
                                   'data': {
                                       'shape': input_shape,
                                       'value': input_value
                                   },
                                   'output_shape': {
                                       'value': output_shape,
                                       'shape': output_shape.shape
                                   },
                                   'output_shape_data': {
                                       'value': output_shape,
                                       'shape': output_shape.shape
                                   },
                               })
        node = Node(graph, 'reshape')
        Reshape.infer(node)
        if ref_value is not None:
            self.assertTrue(
                strict_compare_tensors(
                    node.out_port(0).data.get_value(), shape_array(ref_value)))
        self.assertTrue(
            strict_compare_tensors(
                node.out_port(0).data.get_shape(), shape_array(ref_shape)))
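
The special values exercised by these cases can be illustrated with a simplified standalone sketch. infer_reshape_shape is a hypothetical helper, None stands in for dynamic_dimension_value, and dynamic values inside the target shape itself are not covered:

import math

def infer_reshape_shape(input_shape, target):
    # 0 copies the input dimension at the same index; -1 is deduced so that
    # the total element count is preserved.
    out = [input_shape[i] if t == 0 else t for i, t in enumerate(target)]
    if -1 in out:
        if None in input_shape or None in out:
            out[out.index(-1)] = None  # -1 cannot be deduced next to dynamic dims
        else:
            known = math.prod(d for d in out if d != -1)
            out[out.index(-1)] = math.prod(input_shape) // known
    return out

assert infer_reshape_shape([1, 100, 4], [-1, 25]) == [16, 25]
assert infer_reshape_shape([5, 100, 4], [0, -1, 25]) == [5, 16, 25]
assert infer_reshape_shape([5, None, 4], [4, -1, 5]) == [4, None, 5]
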
Code example #23
def replace_sequence(seq: List[Node], graph: Graph):
    """
    This function replaces a sequence of consecutive Interpolate layers with one Interpolate layer,
    if modes of all nodes of a sequence are the same.
    :param seq: sequence of Interpolate layers
    :param graph: graph to which nodes of seq belong
    :return: Nothing
    """
    if len(seq) < 2:
        return

    modes = {n.mode for n in seq}
    if len(modes) != 1:
        return

    dims_and_scales_ = []
    # Each element of the list dims_and_scales_ is a pair
    #      (axis, output size for this axis) (opset1)
    # or
    #      (axis, output size for this axis, output scales for this axis) (opset4)
    if seq[0].get_opset() == 'opset1':
        for interp in seq:
            dims_and_scales_.extend(
                zip(Interpolate.get_axes(interp),
                    interp.in_port(1).get_connection().get_source().data.get_value()))

        axis_to_size = sorted(dict(dims_and_scales_).items(), key=lambda x: x[0])
        axes_of_node = int64_array([z[0] for z in axis_to_size])
        sizes = shape_array([z[1] for z in axis_to_size])
        scales = np.ones(len(axis_to_size), dtype=np.float32)
    else:
        for interp in seq:
            dims_and_scales_.extend(
                zip(Interpolate.get_axes(interp),
                    interp.in_port(1).get_connection().get_source().data.get_value(),
                    interp.in_port(2).get_connection().get_source().data.get_value()))

        axis_to_size = sorted(dims_and_scales_, key=lambda x: x[0])
        axes_of_node = int64_array([z[0] for z in axis_to_size])
        sizes = shape_array([z[1] for z in axis_to_size])
        scales = mo_array([z[2] for z in axis_to_size])

    fst_interp_node = seq[0]
    last_interp_node = seq[-1]
    last_interp_node_name = last_interp_node.soft_get('name',
                                                      last_interp_node.id)
    attributes = get_interpolate_attributes(fst_interp_node)

    opset = fst_interp_node.get_opset()
    if opset == 'opset1':
        attributes['axes'] = axes_of_node
        interp_node = create_op_with_const_inputs(graph, Interpolate, {1: sizes},
                                                  attributes)
    else:
        attributes['in_ports_count'] = 4
        interp_node = create_op_with_const_inputs(graph, Interpolate,
                                                  {1: sizes, 2: scales, 3: axes_of_node},
                                                  attributes)

    fst_interp_connection = fst_interp_node.in_port(0).get_connection()
    fst_interp_connection.set_destination(interp_node.in_port(0))

    last_interp_node.out_port(0).get_connection().set_source(interp_node.out_port(0))

    rename_nodes([(last_interp_node, last_interp_node_name + '/delete'),
                  (interp_node, last_interp_node_name)])
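
The opset1 branch relies on dict() keeping the last value per key: if two Interpolate nodes in the fused sequence resize the same axis, the size requested by the later node wins. A small illustration (the pair values are made up):

dims_and_scales_ = [(2, 100), (3, 150), (2, 200)]  # axis 2 is resized twice
axis_to_size = sorted(dict(dims_and_scales_).items(), key=lambda x: x[0])
assert axis_to_size == [(2, 200), (3, 150)]        # the later size, 200, wins
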
Code example #24
    def test_two_outputs_v2(self):
        graph = build_graph_with_edge_attrs(
            {
                'queue_dequeue': {
                    'kind': 'op',
                    'op': 'QueueDequeueV2',
                    'shapes': [shape_array([2, 2]),
                               shape_array([1, 1])],
                    'types': [np.int32, np.float32]
                },
                'sub': {
                    'kind': 'op',
                    'op': 'Sub'
                },
                'add': {
                    'kind': 'op',
                    'op': 'Add'
                },
                'concat': {
                    'kind': 'op',
                    'op': 'Concat'
                }
            }, [('queue_dequeue', 'sub', {'out': 0, 'in': 0}),
                ('queue_dequeue', 'add', {'out': 1, 'in': 0}),
                ('sub', 'concat', {'out': 0, 'in': 0}),
                ('add', 'concat', {'out': 0, 'in': 1})])

        graph_ref = build_graph_with_edge_attrs(
            {
                'parameter_1': {
                    'kind': 'op',
                    'op': 'Parameter',
                    'shape': shape_array([2, 2]),
                    'data_type': np.int32
                },
                'parameter_2': {
                    'kind': 'op',
                    'op': 'Parameter',
                    'shape': shape_array([1, 1]),
                    'data_type': np.float32
                },
                'sub': {
                    'kind': 'op',
                    'op': 'Sub'
                },
                'add': {
                    'kind': 'op',
                    'op': 'Add'
                },
                'concat': {
                    'kind': 'op',
                    'op': 'Concat'
                }
            }, [('parameter_1', 'sub', {'out': 0, 'in': 0}),
                ('parameter_2', 'add', {'out': 0, 'in': 0}),
                ('sub', 'concat', {'out': 0, 'in': 0}),
                ('add', 'concat', {'out': 0, 'in': 1})])

        FIFOQueueDequeueCut().find_and_replace_pattern(graph)

        flag, msg = compare_graphs(graph,
                                   graph_ref,
                                   last_node='concat',
                                   check_op_attrs=True)
        self.assertTrue(flag, msg)
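
Conceptually, the pass replaces each consumed output port of the dequeue node with a Parameter carrying the corresponding shape and type. A data-level sketch of that mapping, using plain dicts instead of mo nodes purely for illustration:

dequeue = {'op': 'QueueDequeueV2',
           'shapes': [[2, 2], [1, 1]],
           'types': ['int32', 'float32']}
parameters = [{'op': 'Parameter', 'shape': shape, 'data_type': data_type}
              for shape, data_type in zip(dequeue['shapes'], dequeue['types'])]
assert parameters[0]['shape'] == [2, 2] and parameters[1]['data_type'] == 'float32'
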
Code example #25
    def replace_pattern(self, graph: Graph, match: dict):
        node = match['node']
        node_name = node.soft_get('name', node.id)

        if 2 in node.in_ports() and not node.in_port(2).disconnected():
            # Third input represents output shape. Cutting its value according to scheme:
            # [N, C, spatial_dim_0, ..., spatial_dim_n] -> [spatial_dim_0, ..., spatial_dim_n]
            in_rank = node.in_port(0).data.get_shape().size

            shape_src = node.in_port(2).get_source()
            node.in_port(2).disconnect()

            ss_0 = create_op_with_const_inputs(
                graph, StridedSlice, {
                    1: mo_array([2], dtype=np.int32),
                    2: mo_array([in_rank], dtype=np.int32),
                    3: mo_array([1], dtype=np.int32)
                }, {
                    'name': node_name + '/ss_0_port',
                    'begin_mask': mo_array([1], dtype=np.int32),
                    'end_mask': mo_array([0], dtype=np.int32),
                    'new_axis_mask': mo_array([0], dtype=np.int32),
                    'shrink_axis_mask': mo_array([0], dtype=np.int32),
                    'ellipsis_mask': mo_array([0], dtype=np.int32)
                })

            shape_src.connect(ss_0.in_port(0))
            ss_0.out_port(0).connect(node.in_port(2))

            # Specification: *padding amount* is deduced from relation of input and output spatial shapes
            del node['pad']

        elif node.has_valid('original_output_spatial_shape'):
            # node had fixed output spatial shape set in original framework, so we restore it here
            const = Const(
                graph, {
                    'value': int64_array(node.original_output_spatial_shape),
                    'name': node_name + '/original_spatial_shape'
                }).create_node()
            node.add_input_port(2, skip_if_exist=True)
            const.out_port(0).connect(node.in_port(2))

            # Specification: *padding amount* is deduced from relation of input and output spatial shapes
            del node['pad']

        group = node.soft_get('group', 1)

        if group != 1:
            assert group > 1

            weights_shape = node.in_port(1).data.get_shape()
            assert weights_shape is not None
            I = node.in_port(0).data.get_shape()[1]
            assert I % group == 0
            assert node.output % group == 0

            new_shape = shape_array(
                [group, I // group, node.output // group, *weights_shape[2:]])

            assert not is_fully_defined(new_shape) or not is_fully_defined(weights_shape) or \
                   np.prod(weights_shape) == np.prod(new_shape), 'Initial weights shape {}, grouped weights shape {}' \
                                                                 ''.format(weights_shape, new_shape)
            reshape = create_op_node_with_second_input(
                graph, Reshape, new_shape, {'override_output_shape': True},
                node.in_port(1).get_source().node)

            node.in_port(1).get_connection().set_source(reshape.out_port(0))

            node['type'] = 'GroupConvolutionBackpropData'
        else:
            node['type'] = 'ConvolutionBackpropData'
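
Two of the shape manipulations above can be checked with plain numpy. The concrete numbers are made up, and the weights are assumed to be in [I, O // group, *spatial] layout, which is what the element-count assert in the code implies:

import numpy as np

# StridedSlice(begin=[2], end=[in_rank]) cuts [N, C, s0, ..., sn] down to the spatial part
full_output_shape = np.array([1, 16, 56, 56])
in_rank = full_output_shape.size
assert list(full_output_shape[2:in_rank]) == [56, 56]

# Grouped weights reshape: [I, O // group, k, k] -> [group, I // group, O // group, k, k]
group, I, O = 2, 8, 16
weights_shape = [I, O // group, 3, 3]
new_shape = [group, I // group, O // group, 3, 3]
assert np.prod(weights_shape) == np.prod(new_shape)
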
Code example #26
    def infer(node: Node):
        num_of_inputs = len(node.in_ports())
        opset = node.get_opset()
        max_num_of_inputs = 6 if opset == 'opset5' else 5
        input_msg_fmt = 'NonMaxSuppression node {} from {} must have from 2 to {} inputs'
        node_name = node.soft_get('name', node.id)
        inputs_msg = input_msg_fmt.format(node_name, opset, max_num_of_inputs)
        assert 2 <= num_of_inputs <= max_num_of_inputs, inputs_msg

        boxes_shape = node.in_port(0).data.get_shape()
        assert boxes_shape is not None, 'The shape of tensor with boxes is not defined'
        scores_shape = node.in_port(1).data.get_shape()
        assert scores_shape is not None, 'The shape of tensor with scores is not defined'
        assert len(boxes_shape) == 3, 'The rank of the tensor with boxes must be equal to 3'
        assert len(scores_shape) == 3, 'The rank of the tensor with scores must be equal to 3'

        # According to the specification of the operation NonMaxSuppression,
        # the input 'max_output_boxes_per_class' (port 2) is optional, with default value 0.
        if num_of_inputs >= 3:
            max_output_boxes_per_class = node.in_port(2).data.get_value()
        else:
            max_output_boxes_per_class = 0

        if not max_output_boxes_per_class:
            log.info(
                'Set default "max_output_boxes_per_class" for node {} to number of boxes'
                .format(node.name))
            max_output_boxes_per_class = boxes_shape[1]

        # convert the np.array value to a scalar to avoid issue with ragged numpy array generation in the shape
        # calculation formulas below
        if isinstance(max_output_boxes_per_class, np.ndarray):
            max_output_boxes_per_class = max_output_boxes_per_class.item()

        num_classes = scores_shape[1]
        num_input_boxes = boxes_shape[1]
        assert scores_shape[2] is dynamic_dimension or scores_shape[2] == num_input_boxes or scores_shape[2] is None \
               or num_input_boxes is None, 'Number of boxes mismatch for operation {}'.format(node_name)

        if node.get_opset() in ['opset4', 'opset5']:
            max_number_of_boxes = min(
                num_input_boxes,
                max_output_boxes_per_class) * boxes_shape[0] * num_classes
        else:
            max_number_of_boxes = min(
                num_input_boxes,
                boxes_shape[0] * max_output_boxes_per_class * num_classes)
        node.out_port(0).data.set_shape(shape_array([max_number_of_boxes, 3]))

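        # for opset5 the first output is dynamic along axis 0, so the static upper
        # bound set above is overridden with a dynamic dimension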
        if opset == 'opset5':
            node.out_port(0).data.set_shape(
                shape_array([dynamic_dimension_value, 3]))
            num_of_outputs = len([
                port for port in node.out_ports().values()
                if not port.disconnected()
            ])
            if num_of_outputs >= 2 and node.has_port('out', 1):
                node.out_port(1).data.set_shape(
                    shape_array([dynamic_dimension_value, 3]))
            if num_of_outputs >= 3 and node.has_port('out', 2):
                node.out_port(2).data.set_shape(shape_array([1]))
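
Plugging example numbers into the opset4/opset5 branch shows how the upper bound on selected boxes is obtained (the values below are made up):

num_batches, num_classes, num_boxes = 2, 3, 1000
max_output_boxes_per_class = 200

# at most min(num_boxes, max_output_boxes_per_class) boxes per (batch, class) pair
max_number_of_boxes = min(num_boxes, max_output_boxes_per_class) * num_batches * num_classes
assert max_number_of_boxes == 1200  # the static output shape would be [1200, 3]
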
Code example #27
    def find_and_replace_pattern(self, graph: Graph):
        # Iterate over all data nodes and find all with >= 1 consumers
        for input_data in list(graph.get_data_nodes()):
            # We don't use constant data nodes
            if input_data.value is not None:
                continue

            if input_data.shape is None:
                continue
            input_shape = shape_array(input_data.shape)

            # Get all unique StridedSlice consumers
            out_nodes = [node for node in input_data.out_nodes() if node.op == 'StridedSlice' and
                         node.in_node(0).id == input_data.id]

            if len(out_nodes) <= 1:
                continue

            valid_for_replacement = True
            for n in out_nodes:
                if any(not isinstance(s, slice) for s in n.slices):
                    # this is a slice with dynamic dimension. Such operation is not valid for replacement
                    valid_for_replacement = False
            if not valid_for_replacement:
                continue

            sorted_out_nodes = sorted(out_nodes, key=lambda n: list(n.slices))
            out_nodes = unique_by(sorted_out_nodes, strided_slices_equality)

            for node in out_nodes:
                if len(node.slices) != len(out_nodes[0].slices):
                    valid_for_replacement = False

            # Detect dimension for splitting
            split_channel_dim = None
            for dim_id, s in enumerate(out_nodes[0].slices):
                l, r, stride = s.start, s.stop, s.step
                # if both l and r are None then the dimension is not sliced
                if (l != 0 or r != input_shape[dim_id]) and (l is not None or r is not None):
                    if split_channel_dim is None:
                        split_channel_dim = dim_id
                    else:
                        valid_for_replacement = False

            if split_channel_dim is None:
                valid_for_replacement = False

            # split_dims contains tuples with split range and output data node
            split_dims = []
            for out_id, node in enumerate(out_nodes):
                # Check that StridedSlice op has stride eq 1 and splits only feature channel
                for id, s in enumerate(node.slices):
                    l, r, stride = s.start, s.stop, s.step
                    # We don't support StridedSlice with stride != 1
                    if stride != 1:
                        valid_for_replacement = False
                    if id == split_channel_dim:
                        split_dims.append((s.start, s.stop, node.out_node()))

            if not valid_for_replacement:
                continue

            # Check feature split intersection
            final_data_nodes_list = []
            sorted_split_dims = sorted(split_dims, key=lambda item: (item[0], item[1]))

            # check if we have similar StridedSlice operations with different outputs
            prev_sd = sorted_split_dims[0]
            to_remove = []
            for i in range(1, len(sorted_split_dims)):
                if sorted_split_dims[i][0] == prev_sd[0] and sorted_split_dims[i][1] == prev_sd[1] and \
                        sorted_split_dims[i][2].name != prev_sd[2].name:
                    cur_node = sorted_split_dims[i][2]
                    for out in cur_node.out_nodes():
                        attrs = deepcopy(graph.get_edge_data(cur_node.id, out.id)[0])
                        graph.remove_edge(cur_node.id, out.id)
                        graph.add_edge(prev_sd[2].id, out.id, **attrs)
                    to_remove.append(i)
                else:
                    # advance the reference element, otherwise only duplicates of the
                    # first range would ever be merged
                    prev_sd = sorted_split_dims[i]

            for ind in reversed(to_remove):
                sorted_split_dims.pop(ind)

            size_splits = []
            prev_r = 0
            for l, r, out in sorted_split_dims:
                # Split dims shouldn't intersect
                if l < prev_r:
                    valid_for_replacement = False
                prev_r = r

            if prev_r > input_shape[split_channel_dim]:
                valid_for_replacement = False

            if not valid_for_replacement:
                continue

            prev_r = 0
            for l, r, out in sorted_split_dims:
                # Save missing tensor part
                if l > prev_r:
                    shape = mo_array(input_shape)
                    size_splits.append(l - prev_r)
                    shape[split_channel_dim] = l - prev_r
                    data_node = Op._create_data_node(graph, 'fake_data_'+out_nodes[0].name, {'shape': shape})
                    add_opoutput(graph, data_node.id, 0, False, keep_output_port=True)
                    final_data_nodes_list.append(data_node)

                prev_r = r
                size_splits.append(r - l)
                final_data_nodes_list.append(out)

            if prev_r < input_shape[split_channel_dim]:
                # Add last part of tensor
                shape = input_shape.copy()
                shape[split_channel_dim] = input_shape[split_channel_dim] - prev_r
                size_splits.append(input_shape[split_channel_dim] - prev_r)
                data_node = Op._create_data_node(graph, 'fake_data_'+out_nodes[0].name, {'shape': shape})
                add_opoutput(graph, data_node.id, 0, False, keep_output_port=True)
                final_data_nodes_list.append(data_node)

            for node in out_nodes:
                if not np.all([x == 0 for x in node.shrink_axis_mask]):
                    out_node = node.out_node()
                    if np.any(node['shrink_axis_mask']):
                        self.add_squeeze_for_shrink(graph, node)
                    if np.any(node['new_axis_mask']):
                        self.add_unsqueeze_for_new(graph, node)

                    for i in range(len(final_data_nodes_list)):
                        if final_data_nodes_list[i].name == out_node.name:
                            final_data_nodes_list[i] = node.out_node()
                            break

            # Insert Split layer and remove old StridedSlice layers
            # 1. Remove connections from input_data to StridedSlice ops
            out_data_nodes = []
            name_for_future_split = out_nodes[0].name
            for node in out_nodes:
                out_data_nodes.append(node.out_node())
                graph.remove_edge(input_data.id, node.id)
                graph.remove_edge(node.id, node.out_node().id)
                graph.remove_node(node.id)
                log.debug("Removed: {}".format(node.id))

            # 2. Create Split layer and reorder outputs
            name = name_for_future_split + "/Split"
            axis_const = Const(graph, {'value': int64_array(split_channel_dim),
                                       'name': name + '/Axis'}).create_node_with_data()
            size_splits_const = Const(graph, {'value': int64_array(size_splits),
                                              'name': name + '/Sizes'}).create_node_with_data()
            split = VariadicSplit(graph, dict(name=name, out_ports_count=len(size_splits)))

            split.create_node_with_data(inputs=[input_data, axis_const, size_splits_const],
                                        data_nodes=final_data_nodes_list)
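
The core of the size_splits computation above, extracted into a standalone sketch. ranges_to_size_splits is a hypothetical helper; gaps between slice ranges become extra split outputs, which the pass routes to fake data nodes:

def ranges_to_size_splits(dim_size, sorted_ranges):
    # sorted_ranges: non-overlapping (start, stop) pairs along the split axis
    size_splits, prev_r = [], 0
    for l, r in sorted_ranges:
        if l > prev_r:
            size_splits.append(l - prev_r)  # uncovered gap before this slice
        size_splits.append(r - l)
        prev_r = r
    if prev_r < dim_size:
        size_splits.append(dim_size - prev_r)  # trailing remainder
    return size_splits

assert ranges_to_size_splits(10, [(0, 3), (5, 10)]) == [3, 2, 5]
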
Code example #28
                  'infer': Const.infer, 'type_infer': Const.type_infer, **kwargs}}
    return res


def valued_data(name, value, shape=None):
    if value is not None:
        shape = int64_array(value.shape)
    elif shape is not None:
        shape = shape_array(shape)
    return {name: {'kind': 'data', 'value': value, 'shape': shape}}


regular_op = lambda name, kwargs: {name: {'kind': 'op', 'type': 'NoType', **kwargs}}

shaped_data = lambda name, shape: {name: {'kind': 'data', 'value': None,
                                          'shape': shape_array(shape) if shape is not None else None}}
empty_data = lambda name: valued_data(name, None)

shaped_parameter = lambda name, shape, kwargs={}: {**regular_op(name, {'op': 'Parameter', 'type': 'Parameter',
                                                                       'shape': shape, 'infer': Parameter.infer,
                                                                       **kwargs}),
                                                   **shaped_data(name + '_d', shape)}

result = lambda name='output': {name: {'kind': 'op', 'type': 'Result', 'op': 'Result', 'infer': lambda x: 0}}

regular_op_with_shaped_data = lambda name, shape, kwargs: {**regular_op(name, kwargs),
                                                           **shaped_data(name + '_d', shape)}
regular_op_with_empty_data = lambda name, kwargs: {**regular_op(name, kwargs), **empty_data(name + '_d')}

fake_const = lambda name, shape, kwargs={}: {name: {'kind': 'op', 'op': 'Const', 'type': 'Const',
                                                    'value': None, 'infer': Const.infer, **kwargs,
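
A usage sketch for these graph-building factories (assuming shape_array and the ops imported as in the other snippets): each helper returns a one-entry dict keyed by the node name, plus a matching '<name>_d' data node where applicable, so node maps for build_graph are assembled by merging them:

nodes = {
    **shaped_parameter('input', shape_array([1, 3, 224, 224])),
    **regular_op_with_empty_data('relu', {'op': 'ReLU', 'type': 'ReLU'}),
    **result('output'),
}
assert nodes['input']['op'] == 'Parameter' and nodes['input_d']['kind'] == 'data'
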
Code example #29
class TestConcatPartialInfer(unittest.TestCase):
    @generate(*[
        ([1, 3, 227, 227], [1, 3, 220, 227], [1, 3, 447, 227], 2),
        ([1, 3, 227, 227], [1, 3, 227, 220], [1, 3, 227, 447], -1),
        ([1, 3, dynamic_dimension_value, 227], [1, dynamic_dimension_value, 227, 220],
         [1, 3, 227, 447], -1),
        ([1, 3, 10, 227], [1, 3, 10, dynamic_dimension_value],
         [1, 3, 10, dynamic_dimension_value], -1),
    ])
    def test_concat_infer(self, shape1, shape2, output_shape, axis):
        graph = build_graph(
            nodes_attributes, [('node_1', 'concat'), ('node_2', 'concat'),
                               ('concat', 'node_3'), ('node_3', 'op_output')],
            {
                'node_3': {
                    'shape': None,
                    'value': None
                },
                'node_1': {
                    'shape': shape_array(shape1)
                },
                'node_2': {
                    'shape': shape_array(shape2)
                },
                'concat': {
                    'axis': axis
                }
            })

        concat_node = Node(graph, 'concat')
        concat_infer(concat_node)
        res_shape = graph.node['node_3']['shape']
        self.assertTrue(strict_compare_tensors(output_shape, res_shape))

    @generate(*[
        (shape_array([1]), shape_array([4]), shape_array([1, 4]), 0),
        (shape_array([dynamic_dimension_value]), shape_array([4]),
         shape_array([dynamic_dimension_value, 4]), -1),
    ])
    def test_concat_value_infer(self, value1, value2, output_value, axis):
        graph = build_graph(
            nodes_attributes, [('node_1', 'concat'), ('node_2', 'concat'),
                               ('concat', 'node_3'), ('node_3', 'op_output')],
            {
                'node_3': {
                    'shape': output_value.shape,
                    'value': output_value
                },
                'node_1': {
                    'shape': value1.shape,
                    'value': value1
                },
                'node_2': {
                    'shape': value2.shape,
                    'value': value2
                },
                'concat': {
                    'axis': axis
                }
            })

        concat_node = Node(graph, 'concat')
        concat_infer(concat_node)
        res_value = graph.node['node_3']['value']
        self.assertTrue(strict_compare_tensors(output_value, res_value))

    def test_concat_infer_not_match(self):
        graph = build_graph(
            nodes_attributes, [('node_1', 'concat'), ('node_2', 'concat'),
                               ('concat', 'node_3'), ('node_3', 'op_output')],
            {
                'node_3': {
                    'shape': None,
                    'value': None
                },
                'node_1': {
                    'shape': np.array([1, 3, 227, 227])
                },
                'node_2': {
                    'shape': np.array([1, 2, 227, 227])
                },
                'concat': {
                    'axis': 2
                }
            })

        concat_node = Node(graph, 'concat')
        with self.assertRaisesRegex(
                Error, "Concat input shapes do not match for node*"):
            concat_infer(concat_node)

    def test_concat_infer_no_shape(self):
        graph = build_graph(
            nodes_attributes, [('node_1', 'concat'), ('node_2', 'concat'),
                               ('concat', 'node_3'), ('node_3', 'op_output')],
            {
                'node_3': {
                    'shape': None
                },
                'node_1': {
                    'shape': np.array([1, 3, 227, 227])
                },
                'node_2': {
                    'shape': None
                },
                'concat': {
                    'axis': 2
                }
            })

        concat_node = Node(graph, 'concat')
        with self.assertRaisesRegex(
                Error, "One of the input shapes is not defined for node *"):
            concat_infer(concat_node)
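
A simplified standalone version of the shape rule these tests exercise (concat_infer_shape is a hypothetical helper; None stands in for dynamic_dimension_value): the axis dimensions are summed, every other dimension must match, and a dynamic dimension is resolved against a static one:

def concat_infer_shape(shapes, axis):
    out = list(shapes[0])
    axis = axis % len(out)  # support negative axes
    for shape in shapes[1:]:
        for i, d in enumerate(shape):
            if i == axis:
                out[i] = None if None in (out[i], d) else out[i] + d
            elif out[i] is None:
                out[i] = d
            elif d is not None and d != out[i]:
                raise ValueError('Concat input shapes do not match')
    return out

assert concat_infer_shape([[1, 3, 227, 227], [1, 3, 220, 227]], 2) == [1, 3, 447, 227]
assert concat_infer_shape([[1, 3, None, 227], [1, None, 227, 220]], -1) == [1, 3, 227, 447]
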
Code example #30
File: squeeze_test.py  Project: yury-intel/openvino
class TestSqueezeInfer(unittest.TestCase):
    @generate(*[
        (None, shape_array([1, 2, 1, 4]), shape_array([2]), None, [1, 2, 4]),
        # squeezing a dynamic dimension is allowed
        (None, shape_array([1, 2, dynamic_dimension_value, 4]), shape_array([2]), None,
         [1, 2, 4]),
        (None, shape_array([1, 2, 1, 4]), shape_array([]), None, [2, 4]),
        (None, shape_array([1, dynamic_dimension_value, 1, 4]), shape_array([]), None,
         shape_array([dynamic_dimension_value, 4])),
        # squeezing a dimension that is not equal to 1 is not allowed
        (None, shape_array([1, 2, 1, 4]), shape_array([1]), None, None),
        # the input shape must not be None
        (None, None, shape_array([1]), None, None),
    ])
    def test_squeeze_squeeze_dims(self, input_value, input_shape, squeeze_dims,
                                  ref_value, ref_shape):
        graph = build_graph(
            nodes_attributes, [('data', 'squeeze'),
                               ('squeeze_dims', 'squeeze_dims_data'),
                               ('squeeze_dims_data', 'squeeze'),
                               ('squeeze', 'data_out')], {
                                   'data': {
                                       'shape': input_shape,
                                       'value': input_value
                                   },
                                   'squeeze_dims': {
                                       'value': squeeze_dims,
                                       'shape': squeeze_dims.shape
                                   },
                                   'squeeze_dims_data': {
                                       'value': squeeze_dims,
                                       'shape': squeeze_dims.shape
                                   },
                               })
        node = Node(graph, 'squeeze')
        if ref_shape is None:  # the test should fail
            with self.assertRaises(Error):
                Squeeze.infer(node)
        else:
            Squeeze.infer(node)
            if ref_value is not None:
                self.assertTrue(
                    strict_compare_tensors(
                        node.out_port(0).data.get_value(), ref_value))
            self.assertTrue(
                strict_compare_tensors(
                    node.out_port(0).data.get_shape(), ref_shape))
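
The rule being tested condenses into a standalone sketch (squeeze_shape is a hypothetical helper; None stands in for dynamic_dimension_value, and negative axes are not handled):

def squeeze_shape(input_shape, squeeze_dims):
    if input_shape is None:
        raise ValueError('input shape is not defined')
    if len(squeeze_dims) == 0:
        # with no axes given, every dimension equal to 1 is removed
        return [d for d in input_shape if d != 1]
    for axis in squeeze_dims:
        if input_shape[axis] not in (1, None):  # dynamic dims may be squeezed
            raise ValueError('cannot squeeze a dimension that is not 1')
    return [d for i, d in enumerate(input_shape) if i not in squeeze_dims]

assert squeeze_shape([1, 2, 1, 4], [2]) == [1, 2, 4]
assert squeeze_shape([1, 2, 1, 4], []) == [2, 4]
assert squeeze_shape([1, None, 1, 4], []) == [None, 4]
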