Example #1: seed both inputs with undefined rank-4 shapes when they are unknown
 def reverse_infer(node: Node):
     input_shape_1 = node.in_port(0).data.get_shape()
     input_shape_2 = node.in_port(1).data.get_shape()
     # assume rank-4 inputs when the shapes are still unknown
     if input_shape_1 is None:
         node.in_port(0).data.set_shape(undefined_shape_of_rank(4))
     if input_shape_2 is None:
         node.in_port(1).data.set_shape(undefined_shape_of_rank(4))
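All of these snippets build placeholder shapes with undefined_shape_of_rank. In the Model Optimizer, a partial shape is a NumPy masked array whose masked entries mark dynamic dimensions; under that assumption, a minimal sketch of the helper (not the verbatim library code) looks like this:

import numpy as np

def undefined_shape_of_rank(rank: int) -> np.ma.MaskedArray:
    # A shape of the given rank in which every dimension is dynamic,
    # represented as a fully masked int64 array.
    return np.ma.masked_all(rank, dtype=np.int64)

For example, undefined_shape_of_rank(4) prints as [-- -- -- --]: the rank is now fixed, while each dimension stays open for later passes to refine.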
Example #2: deduce the rank of input 0 from the size of the 'window' input
 def reverse_infer(node: Node):
     input_shape = node.in_port(0).data.get_shape()
     window_shape = node.in_port(1).data.get_shape()
     # use the value of the 'window' input to determine input tensor rank
     if input_shape is None and window_shape is not None:
         node.in_port(0).data.set_shape(
             undefined_shape_of_rank(window_shape[0]))
Example #3: derive the input rank from the output rank minus the number of unsqueeze axes
 def reverse_infer(node: Node):
     input_shape = node.in_port(0).data.get_shape()
     output_shape = node.out_port(0).data.get_shape()
     unsqueeze_dims = node.in_port(1).data.get_value()
     if input_shape is None and output_shape is not None and unsqueeze_dims is not None:
         num_unsqueeze_dims = 1 if int64_array(unsqueeze_dims).ndim == 0 else len(unsqueeze_dims)
         shape = undefined_shape_of_rank(len(output_shape) - num_unsqueeze_dims)
         node.in_port(0).data.set_shape(shape)
Example #4: seed three fixed-rank input shapes, one parameterized by the node's group_size attribute
    def reverse_infer(node):
        transformation_values_shape = shape_array([
            dynamic_dimension_value, dynamic_dimension_value,
            int(node.group_size),
            int(node.group_size)
        ])

        set_input_shapes(node, undefined_shape_of_rank(4),
                         shape_array([dynamic_dimension_value, 5]),
                         transformation_values_shape)
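set_input_shapes, used here and in several examples below, is a small convenience wrapper. A plausible sketch, assuming it assigns the i-th shape to the i-th input port only when that port's shape is still unknown (an assumption consistent with how the snippets use it, not the verbatim source):

def set_input_shapes(node, *input_shapes):
    # Seed each input port that has no shape yet with the matching shape.
    for port_idx, shape in enumerate(input_shapes):
        if node.in_port(port_idx).data.get_shape() is None:
            node.in_port(port_idx).data.set_shape(shape)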
Example #5: deduce the input rank from the length of the 'dilation', 'stride', or 'pad' attribute
 def reverse_infer(node: Node):
     input_shape = node.in_port(0).data.get_shape()
     if input_shape is None:
         shape = None
         # TODO FIXME: this is an ugly solution based on attributes that may not be set in some cases
         for attr in ['dilation', 'stride', 'pad']:
             if node.has_valid(attr):
                 shape = undefined_shape_of_rank(len(node.soft_get(attr)))
                 break
         if shape is not None:
             node.in_port(0).data.set_shape(shape)
Example #6: seed DetectionOutput input shapes according to the number of input ports
 def reverse_infer(node):
     num_in_ports = len(node.in_ports())
     assert num_in_ports in [
         3, 6
     ], 'incorrect number of input ports for DetectionOutput node {}'.format(
         node.soft_get('name', node.id))
     if num_in_ports == 3:
         set_input_shapes(node, undefined_shape_of_rank(2),
                          undefined_shape_of_rank(2),
                          undefined_shape_of_rank(3))
     elif num_in_ports == 6:
         set_input_shapes(node, undefined_shape_of_rank(2),
                          undefined_shape_of_rank(2),
                          undefined_shape_of_rank(3),
                          undefined_shape_of_rank(2),
                          undefined_shape_of_rank(2))
Example #7: seed three inputs with fixed ranks
 def reverse_infer(node):
     set_input_shapes(node,
                      undefined_shape_of_rank(4),
                      shape_array([dynamic_dimension_value, 4]),
                      undefined_shape_of_rank(1))
Example #8: assume a rank-4 input when its shape is unknown
 def reverse_infer(node):
     if node.in_port(0).data.get_shape() is None:
         node.in_port(0).data.set_shape(undefined_shape_of_rank(4))
Example #9: deduce the input rank from the length of the 'window' attribute
 def reverse_infer(node: Node):
     input_shape = node.in_port(0).data.get_shape()
     window = node.soft_get('window', None)
     if input_shape is None and window is not None:
         node.in_port(0).data.set_shape(undefined_shape_of_rank(
             len(window)))
Example #10: take the input rank from the shape of the third input
 def reverse_infer(node: Node):
     input_shape = node.in_port(0).data.get_shape()
     if input_shape is None and node.is_in_port_connected(2) and node.in_port(2).data.get_shape() is not None:
         shape = undefined_shape_of_rank(node.in_port(2).data.get_shape()[0])
         node.in_port(0).data.set_shape(shape)
Example #11: Broadcast.infer with shape and value propagation for the 'numpy', 'bidirectional', and 'explicit' modes
    def infer(node: Node):
        node_name = node.soft_get('name', node.id)

        input_shape = node.in_port(0).data.get_shape()
        input_value = node.in_port(0).data.get_value()
        target_shape_shape = node.in_port(1).data.get_shape()
        target_shape = node.in_port(1).data.get_value()
        assert node.has_and_set(
            'mode'), 'Broadcasting mode is not defined for node "{}"'.format(
                node_name)
        # When the target shape value is dynamic, the output rank can still be inferred
        # if the target shape's own shape is static and 1D
        if target_shape is None and len(target_shape_shape) == 1 and (
                len(input_shape) <= 1 or node.mode == 'explicit'):
            assert is_fully_defined(target_shape_shape)
            new_shape = undefined_shape_of_rank(target_shape_shape.item(0))
            node.out_port(0).data.set_shape(new_shape)
            return
        assert target_shape is not None, 'Output shape is not defined for node "{}"'.format(
            node_name)

        PermuteInputs().set_input_permutation(node.in_node(1), node,
                                              'output:0', 'shape')

        if input_value is not None and not node.has_and_set('stop_value_propagation') and \
                is_fully_defined(target_shape):
            if node.mode == 'numpy':
                node.out_port(0).data.set_value(
                    uni_directional_broadcasting(input_value, target_shape))
            elif node.mode == 'bidirectional':
                node.out_port(0).data.set_value(
                    bi_directional_broadcasting(input_value, target_shape))
            elif node.mode == 'explicit':
                axes_mapping = node.in_port(2).data.get_value()
                assert axes_mapping is not None, 'Broadcast(mode="explicit") with dynamic axes_mapping input ' \
                                                 'is not supported. Node: `{}`'.format(node_name)
                PermuteInputs().set_input_permutation(node.in_node(2), node,
                                                      'output:0', 'axis')
                axes_mapping = node.in_port(2).data.get_value()
                node.out_port(0).data.set_value(
                    explicit_broadcasting(input_value, target_shape,
                                          axes_mapping))
            else:
                raise Error('The node "{}" has unsupported mode "{}"'.format(
                    node_name, node.mode))
        else:
            if node.mode == 'numpy':
                node.out_port(0).data.set_shape(
                    uni_directional_shape_broadcasting(input_shape,
                                                       target_shape))
            elif node.mode == 'bidirectional':
                node.out_port(0).data.set_shape(
                    bi_directional_shape_broadcasting(input_shape,
                                                      target_shape))
            elif node.mode == 'explicit':
                axes_mapping = node.in_port(2).data.get_value()
                assert axes_mapping is not None, 'Broadcast(mode="explicit") with dynamic axes_mapping input ' \
                                                 'is not supported. Node: `{}`'.format(node_name)
                PermuteInputs().set_input_permutation(node.in_node(2), node,
                                                      'output:0', 'axis')
                axes_mapping = node.in_port(2).data.get_value()
                new_shape, _ = explicit_shape_broadcasting(
                    input_shape, target_shape, axes_mapping)
                node.out_port(0).data.set_shape(new_shape)
            else:
                raise Error('The node "{}" has unsupported mode "{}"'.format(
                    node_name, node.mode))
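Example #11 gates value propagation on is_fully_defined. Under the masked-array representation of partial shapes, a minimal sketch of such a check (an assumption, not the library source):

import numpy as np

def is_fully_defined(value) -> bool:
    # A shape or value is fully defined when it exists and contains no
    # masked (dynamic) entries; np.ma.is_masked is False for plain arrays.
    return value is not None and not np.ma.is_masked(value)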
Example #12: eltwise reverse inference that deduces a missing input shape from the output shape
def eltwise_reverse_infer(node: Node):
    input_1_shape = node.in_port(0).data.get_shape()
    input_2_shape = node.in_port(1).data.get_shape()
    if input_1_shape is not None and input_2_shape is not None:
        return

    output_shape = node.out_port(0).data.get_shape()
    node_name = node.soft_get('name', node.id)

    if node['auto_broadcast'] == 'none':
        # input_1, input_2, and output shapes must match,
        # so an undefined partial shape can be defined exactly from the output shape
        if output_shape is not None:
            most_defined_shape = output_shape

            # if output_shape = [4, dyn] and input_1_shape = [dyn, 13],
            # then the missing shape must be [4, 13]
            if input_1_shape is not None and not compatible_shapes(
                    output_shape, input_1_shape):
                raise Error("shapes are not compatible for node '{}'".format(
                    node_name))
            elif input_1_shape is not None:
                most_defined_shape = find_common_partial_shape(
                    output_shape, input_1_shape)

            if input_2_shape is not None and not compatible_shapes(
                    output_shape, input_2_shape):
                raise Error("shapes are not compatible for node '{}'".format(
                    node_name))
            elif input_2_shape is not None:
                most_defined_shape = find_common_partial_shape(
                    most_defined_shape, input_2_shape)

            if input_1_shape is None:
                node.in_port(0).data.set_shape(most_defined_shape)
            if input_2_shape is None:
                node.in_port(1).data.set_shape(most_defined_shape)
    elif node['auto_broadcast'] == 'numpy':
        if output_shape is not None:
            out_rank = len(output_shape)
            deduced_in_shape = undefined_shape_of_rank(out_rank)

            if input_1_shape is not None and input_2_shape is None and out_rank > len(
                    input_1_shape):
                in_port_to_update = 1
                defined_in_shape = input_1_shape
            elif input_2_shape is not None and input_1_shape is None and out_rank > len(
                    input_2_shape):
                in_port_to_update = 0
                defined_in_shape = input_2_shape
            else:
                return
            defined_in_rank = len(defined_in_shape)

            for i in range(-1, -defined_in_rank - 1, -1):
                assert defined_in_shape[i] == 1 or np.ma.is_masked(defined_in_shape[i]) \
                       or compatible_dims(defined_in_shape[i], output_shape[i]), \
                    "Shapes of Elementwise node '{}' are not compatible for reverse_infer.".format(node_name)

                # if defined_in_shape = [1] and output_shape = [N, 400, 400, 3],
                # partial information about dimension sizes must not be lost
                if defined_in_shape[i] == 1 or output_shape[i] == 1:
                    deduced_in_shape[i] = output_shape[i]
            deduced_in_shape[:-defined_in_rank] = output_shape[:-defined_in_rank]

            node.in_port(in_port_to_update).data.set_shape(deduced_in_shape)
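The 'none' branch above merges the output shape with a partially known input shape via find_common_partial_shape. A self-contained sketch of that merge under the masked-array representation (an assumption about the helper, not its verbatim source):

import numpy as np

def find_common_partial_shape(shape1, shape2):
    # For two compatible shapes of equal rank, keep the static (unmasked)
    # dimension wherever at least one of the two sides defines it.
    assert len(shape1) == len(shape2), 'ranks must match'
    result = np.ma.masked_all(len(shape1), dtype=np.int64)
    for i in range(len(shape1)):
        if not np.ma.is_masked(shape1[i]):
            result[i] = shape1[i]
        elif not np.ma.is_masked(shape2[i]):
            result[i] = shape2[i]
    return result

Merging [4, --] with [--, 13] this way yields [4, 13], exactly the case the comment above describes.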
Example #13: unit tests for Broadcast.infer, including dynamic target shapes
class BroadcastTest(unittest.TestCase):
    @generate(*[
        ([1], [3, 3], None, 'numpy', [[1, 1, 1], [1, 1, 1], [1, 1, 1]]),
        ([1], [3, 3], None, 'numpy'),

        # shape broadcasting
        ([1], [1, 2], [0], 'explicit'),
        ([1], [1, 2], [-2], 'explicit'),
        ([1, 7], [5, 1, 7, 3], [1, 2], 'explicit'),
        ([2, 1, 3], [2, 1, 3, 3], [0, 1, 2], 'explicit'),
        ([2, 1, 3], [5, 2, 1, 3], [1, 2, 3], 'explicit'),

        # value broadcasting
        ([1], [1, 2], [0], 'explicit', [[1, 1]]),

        ([[3, 1]], [2, 1, 2], [1, 2], 'explicit', [[[3, 1]], [[3, 1]]]),  # ref_shape (2, 1, 2)

        ([[3, 1]], [2, 1, 2], [-2, -1], 'explicit', [[[3, 1]], [[3, 1]]]),  # ref_shape (2, 1, 2)

        ([[[9, 5, 7]], [[9, 5, 7]]], [2, 2, 1, 3], [1, 2, 3], 'explicit',  # in_shape (2, 1, 3)
         [[[[9, 5, 7]], [[9, 5, 7]]], [[[9, 5, 7]], [[9, 5, 7]]]]),  # ref_out_shape (2, 2, 1, 3)

        ([[[9, 5, 7]], [[3, 4, 8]]], [2, 1, 3, 3], [0, 1, 2], 'explicit',  # in_shape (2, 1, 3)
         [[[[9, 9, 9], [5, 5, 5], [7, 7, 7]]], [[[3, 3, 3], [4, 4, 4], [8, 8, 8]]]]),  # ref_out_shape (2, 1, 3, 3)

        # negative tests
        ([1], [2, 2], [0], 'explicit', None, True),
        ([1, 7], [5, 2, 7, 3], [1, 2], 'explicit', None, True),
        ([1, 7], [5, 2, 7, 3], [2, 1], 'explicit', None, True),
        ([1, 7], [5, 2, 7, 3], [-3, -2], 'explicit', None, True),
    ])
    def test_broadcast(self, data, target_shape, axes_mapping=None, mode='numpy', ref_out=None, test_raising=False):
        if ref_out is not None:
            input_data = valued_const_with_data('data', int64_array(data))
        else:
            input_data = shaped_data('data', int64_array(data))

        nodes = {
            **input_data,
            **valued_const_with_data('target_shape', int64_array(target_shape)),
            **regular_op_with_empty_data('broadcast', {'op': 'Broadcast', 'mode': mode}),
        }

        edges = [('data', 'broadcast'),
                 ('target_shape', 'broadcast'),
                 ('broadcast', 'broadcast_d')]

        if axes_mapping is not None:
            nodes.update(**valued_const_with_data('axes_mapping', int64_array(axes_mapping)))
            edges.append(('axes_mapping', 'broadcast'))
        graph = build_graph(nodes, edges)

        broadcast_node = Node(graph, 'broadcast')
        if test_raising:
            self.assertRaises(AssertionError, Broadcast.infer, broadcast_node)
            return

        Broadcast.infer(broadcast_node)
        if ref_out is not None:
            self.assertTrue(np.array_equal(broadcast_node.out_node().value, np.array(ref_out)))
        else:
            self.assertTrue(np.array_equal(broadcast_node.out_node().shape, np.array(target_shape)))

    @generate(*[
        ([1], [3], 'numpy', undefined_shape_of_rank(3)),
        ([1], [3], 'explicit', undefined_shape_of_rank(3)),
        ([1, 2], [3], 'numpy', None, True),
    ])
    def test_broadcast_dynamic(self, data, target_shape_shape, mode='numpy', ref_out_shape=None, test_raising=False):
        nodes = {
            **shaped_data('data', int64_array(data)),
            **shaped_data('target_shape', int64_array(target_shape_shape)),
            **regular_op_with_empty_data('broadcast', {'op': 'Broadcast', 'mode': mode}),
        }

        edges = [('data', 'broadcast'),
                 ('target_shape', 'broadcast'),
                 ('broadcast', 'broadcast_d')]

        graph = build_graph(nodes, edges)

        broadcast_node = Node(graph, 'broadcast')
        if test_raising:
            self.assertRaises(AssertionError, Broadcast.infer, broadcast_node)
            return

        Broadcast.infer(broadcast_node)
        self.assertTrue(np.array_equal(broadcast_node.out_node().shape, ref_out_shape))
Example #14: seed two rank-3 input shapes
 def reverse_infer(node):
     set_input_shapes(node, undefined_shape_of_rank(3),
                      undefined_shape_of_rank(3))
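To make the masked-array representation concrete, a short plain-NumPy demo of how a seeded shape is refined (assuming the sketch of undefined_shape_of_rank given after Example #1):

import numpy as np

shape = np.ma.masked_all(4, dtype=np.int64)   # [-- -- -- --]: rank known, dims dynamic
shape[1] = 3                                  # a later inference pass pins one dimension
print(shape)                                  # [-- 3 -- --]
print(np.ma.is_masked(shape))                 # True: the shape is still partially dynamic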