Example #1
def remove_op_node_with_data_node(graph, node_to_remove, input_data_node=None):
    from openvino.tools.mo.graph.graph import Node
    assert node_to_remove.kind == 'op'
    if input_data_node is None:
        input_data_node = node_to_remove.in_node()
    out_node_ids = [v for _, v in graph.out_edges(node_to_remove.id)]
    assert len(out_node_ids) == 1, "Cannot remove node producing two or more output tensors"
    output_node = Node(graph, out_node_ids[0])
    assert output_node.kind == 'data', "The function must be used after partial infer"

    graph.remove_edge(input_data_node.id, node_to_remove.id)
    graph.remove_edge(node_to_remove.id, output_node.id)

    merge_data_nodes(graph, output_node, input_data_node)

    # all output edges of 'input' have been preserved by reconnecting them to 'output', so 'input' can now be deleted
    log.debug('Removing op node: {}'.format(node_to_remove.id))
    graph.remove_nodes_from([node_to_remove.id, input_data_node.id])
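
A minimal usage sketch for the helper above, assuming MO's unit-test utilities are importable (build_graph from unit_tests.utils.graph); the node names are hypothetical:

# Hedged usage sketch; build_graph and the node names are illustrative only.
from unit_tests.utils.graph import build_graph
from openvino.tools.mo.graph.graph import Node

nodes = {
    'in_data': {'kind': 'data', 'shape': None, 'value': None},
    'identity': {'kind': 'op', 'op': 'Identity'},
    'out_data': {'kind': 'data', 'shape': None, 'value': None},
}
graph = build_graph(nodes, [('in_data', 'identity'), ('identity', 'out_data')])

# removes 'identity' and its input data node; 'out_data' inherits the
# producers of 'in_data' via merge_data_nodes
remove_op_node_with_data_node(graph, Node(graph, 'identity'))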
Example #2
    def test_eltwise_infer_none_min_max(self):
        graph = build_graph(
            nodes_attributes, [('node_1', 'eltw_1'), ('node_2', 'eltw_1'),
                               ('eltw_1', 'node_3'), ('node_3', 'op_output')],
            {
                'node_3': {
                    'shape': None
                },
                'node_1': {
                    'shape': np.array([1, 3, 257, 256])
                },
                'node_2': {
                    'shape': np.array([1, 3, 256, 257])
                }
            })
        graph.graph['layout'] = 'NCHW'
        eltwise_node = Node(graph, 'eltw_1')

        with self.assertRaisesRegex(Error, 'Input shapes mismatch'):
            eltwise_infer(eltwise_node)
Example #3
def type_infer(graph: Graph):
    nodes = list(nx.topological_sort(graph))
    for n in nodes:
        node = Node(graph, n)
        if node.kind == 'op':
            node_name = node.soft_get('name')
            node_type_infer(node)
            log.debug('Type infer for node {}: {}'.format(node_name,
                                                          [port.get_data_type() for port in node.out_ports().values()]))
            """
            Save the precision of input ports in the nodes. It is not possible to get the precision after the port
            re-numbering because the port precision is defined for output port only and for input port it is determined
            with the output port producing data to the input port. When output port id is changed it is not possible to
            determine input port precision.
            """
            for out_port in node.out_ports().values():
                for dest_port in out_port.get_destinations():
                    if not dest_port.node.has_valid('_in_port_precision'):
                        dest_port.node['_in_port_precision'] = {}
                    dest_port.node['_in_port_precision'][dest_port.idx] = out_port.get_data_type()
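
The reason for the topological order is that a consumer's input precision comes from its producer's output port, so every producer must be typed before its consumers. A self-contained toy illustration of the same propagation pattern with plain networkx (not MO code):

import networkx as nx

# toy dtype propagation: topological order guarantees every producer
# is typed before any of its consumers is visited
g = nx.DiGraph([('param', 'mul'), ('const', 'mul'), ('mul', 'result')])
g.nodes['param']['dtype'] = 'float32'
g.nodes['const']['dtype'] = 'float32'

for n in nx.topological_sort(g):
    if 'dtype' not in g.nodes[n]:
        preds = list(g.predecessors(n))
        g.nodes[n]['dtype'] = g.nodes[preds[0]]['dtype'] if preds else None

print(g.nodes['result']['dtype'])  # float32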
Example #4
    def test_upsample_with_second_input_infer(self, scales, input_shape, expected_shape):
        nodes_attributes['scales'] = {'kind': 'data', 'value': scales}
        graph = build_graph(nodes_attributes,
                            [('node_1', 'upsample'),
                             ('scales', 'upsample'),
                             ('upsample', 'node_3'),
                             ('node_3', 'op_output')
                             ],
                            {'node_3': {'shape': None, 'value': None},
                             'node_1': {'shape': input_shape},
                             'upsample': {'mode': 'linear',
                                          'height_scale': None,
                                          'width_scale': None}
                             })

        graph.graph['layout'] = 'NCHW'
        upsample_node = Node(graph, 'upsample')
        UpsampleOp.upsample_infer(upsample_node)
        res_shape = graph.node['node_3']['shape']
        self.assertTrue(strict_compare_tensors(expected_shape, res_shape))
Example #5
def tf_native_tf_node_infer(node: Node):
    """
    The infer function should be used to infer shape and data type of the TF operation not supported by IE.
    :param node: node to infer.
    :return: None
    """
    log.info('Called "tf_native_tf_node_infer" for node "{}"'.format(node.id))

    # create a sub-graph used only for inference. The sub-graph contains the desired node and its input
    # neighbourhood of depth 10. The depth 10 is large enough for the determine_data_type function to
    # identify the data types of the input tensors, yet small enough not to pull in the whole graph.
    # The sub-graph also contains the names of the output nodes of the node to perform native inference.
    nodes_to_extract = node_incoming_neighbourhood(node.graph, node.id, 10) + node_outcoming_neighbourhood(node.graph,
                                                                                                           node.id, 1)
    tmp_graph = node.graph.create_sub_graph_copy(nodes_to_extract)

    tmp_node_attrs = tmp_graph.node[node.id]
    tmp_node = Node(tmp_graph, node.id)

    # node attributes that are required by 'infer_subgraph_output_nodes' function
    lists_to_init = ['input_nodes_names', 'output_tensors_names', 'nodes_order', 'internal_output_node_name',
                     'real_input_dims']

    for item in lists_to_init:
        tmp_node_attrs[item] = list()
    tmp_node_attrs['pbs'] = {tmp_node.name: tmp_node.pb}
    tmp_node_attrs['nodes_order'].append(tmp_node.id)
    for ind in range(len(tmp_node.out_edges())):
        tmp_node_attrs['output_tensors_names'].append(tmp_node.id + ":" + str(ind))

    tf_subgraph_infer(tmp_node)
    # the shape and value have been inferred and saved to the tmp_node's out nodes attributes. Copy them back!
    for tmp_out_port, tmp_out_node in tmp_node.out_nodes().items():
        if tmp_out_node.value is not None:
            node.out_node(tmp_out_port).value = np.array(tmp_out_node.value)
        if tmp_out_node.shape is not None:
            node.out_node(tmp_out_port).shape = np.array(tmp_out_node.shape)
        if tmp_out_node.data_type is not None:
            node.out_node(tmp_out_port).data_type = tmp_out_node.data_type
    # clean up the temporary graph
    tmp_graph.clear()
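
The depth-limited neighbourhood extraction above can be pictured with plain networkx; a toy sketch (not the actual node_incoming_neighbourhood helper), where the incoming neighbourhood of depth 2 is the ego graph of the reversed graph:

import networkx as nx

g = nx.DiGraph([('a', 'b'), ('b', 'c'), ('c', 'd'), ('d', 'e')])
# nodes reachable from 'd' against edge direction within 2 hops
incoming = nx.ego_graph(g.reverse(), 'd', radius=2).nodes()
print(sorted(incoming))  # ['b', 'c', 'd']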
Example #6
    def test_attr_getter(self):
        nodes = {
            'input': {
                'kind': 'data'
            },
            'reshape': {
                'type': 'Reshape',
                'kind': 'op'
            },
            'output': {
                'kind': 'data'
            },
            'op_output': {
                'type': 'Result',
                'kind': 'op'
            },
        }
        input_shape = np.array([1, 125, 13, 13])
        params = {'dim': [1, 1, 2, 3], 'max_size': np.array([3, 2, 1, 0])}
        expect_params = {
            'dim': "1,1,2,3",
            'max_size': "3,2,1,0",
        }
        graph = build_graph(
            nodes, [('input', 'reshape'), ('reshape', 'output'),
                    ('output', 'op_output')], {
                        'input': {
                            'shape': input_shape
                        },
                        'reshape': {
                            **params, 'spatial_dims': [2, 3]
                        },
                        'output': {
                            'shape': None
                        }
                    })
        pool_1_node = Node(graph, 'reshape')
        for param in params.keys():
            if type(params[param]) is list:
                self.assertEqual(expect_params[param],
                                 attr_getter(pool_1_node, param))
Example #7
    def test_splitv_dynamic_input(self):
        ref_input_shape = [7, 4, 11]
        axis = 2
        num_splits = 2
        output_shape_1 = [dynamic_dimension, 4, 3]
        output_shape_2 = [7, dynamic_dimension, 3]
        output_shape_3 = [7, dynamic_dimension, 5]

        graph = build_graph(
            TestAttributedVariadicSplitOp.nodes,
            TestAttributedVariadicSplitOp.edges, {
                'split_input_data': {
                    'shape': None
                },
                'split_op': {
                    'axis': np.array(2),
                    'split_lengths': np.array([3, 3, 5]),
                    'out_ports_count': 2
                },
                'split_output_0_data': {
                    'shape': shape_array(output_shape_1),
                    'value': None
                },
                'split_output_1_data': {
                    'shape': shape_array(output_shape_2),
                    'value': None
                },
                'split_output_2_data': {
                    'shape': shape_array(output_shape_3),
                    'value': None
                },
            })
        node = Node(graph, 'split_op')
        for p in range(len(node.out_edges()), node.out_ports_count):
            node.add_output_port(p)

        AttributedVariadicSplit.reverse_infer(node)

        actual_input_shape = node.in_port(0).data.get_shape()
        self.assertTrue(
            strict_compare_tensors(ref_input_shape, actual_input_shape))
Example #8
    def test_axpy(self):
        nodes = {
            'node_1': {'kind': 'op', 'type': 'Identity', 'op': 'Parameter'},
            'node_2': {'kind': 'op', 'type': 'Identity', 'op': 'Parameter'},
            'node_3': {'kind': 'op', 'type': 'Identity', 'op': 'Parameter'},
            'axpy': {'type': 'Axpy', 'kind': 'op', 'op': 'Axpy'},
            'node_4': {'kind': 'op', 'type': 'Identity', 'op': 'Parameter'}}
        edges = [
            ('node_1', 'axpy', {'in': 0, 'out': 0}),
            ('node_2', 'axpy', {'in': 1, 'out': 0}),
            ('node_3', 'axpy', {'in': 2, 'out': 0}),
            ('axpy', 'node_4', {'in': 0, 'out': 0})]
        graph = build_graph_with_edge_attrs(nodes, edges)
        node = Node(graph, 'axpy')
        replacer = AxpyToSSandAdd()
        replacer.replace_op(graph, node)

        scale_node = [node for node, attrs in list(graph.nodes(data=True)) if attrs['type'] == 'ScaleShift']
        self.assertEqual(len(scale_node), 1)
        add_node = [node for node, attrs in list(graph.nodes(data=True)) if attrs['type'] == 'Add']
        self.assertEqual(len(add_node), 1)
Example #9
    def test_grn_infer(self):
        graph = build_graph(
            nodes_attributes, [('node_1', 'grn'), ('grn', 'node_3'),
                               ('node_3', 'op_output')], {
                                   'node_3': {
                                       'shape': None
                                   },
                                   'node_1': {
                                       'shape': np.array([1, 3, 227, 227])
                                   },
                                   'grn': {
                                       'bias': 1
                                   }
                               })

        grn_node = Node(graph, 'grn')
        copy_shape_infer(grn_node)
        exp_shape = np.array([1, 3, 227, 227])
        res_shape = graph.node['node_3']['shape']
        for i in range(0, len(exp_shape)):
            self.assertEqual(exp_shape[i], res_shape[i])
Example #10
    def test_expand_dims_infer_value(self, axis, in_shape, ref_out_shape):
        in_value = np.random.rand(*in_shape)
        graph = build_graph(nodes_attributes, [('data_1', 'expand_dims'),
                                               ('expand_dims', 'data_2')], {
                                                   'data_1': {
                                                       'value': in_value
                                                   },
                                                   'expand_dims': {
                                                       'expand_axis': axis
                                                   }
                                               })
        expand_dims_node = Node(graph, 'expand_dims')

        ExpandDims.infer(expand_dims_node)

        self.assertTrue(
            np.array_equal(expand_dims_node.out_node().shape,
                           np.array(ref_out_shape)))
        self.assertTrue(
            np.array_equal(expand_dims_node.out_node().value,
                           np.array(in_value.reshape(ref_out_shape))))
Example #11
    def test_deconv_infer_one_group(self):
        graph = create_deconv_graph(int64_array([1, 21, 18, 18]),
                                    int64_array([21, 50, 4, 4]),
                                    int64_array([1, 50, 35, 35]))

        Deconvolution.infer(Node(graph, 'deconv_node'))
        res_shape = graph.node['deconv_output']['shape']
        exp_shape = np.array([1, 50, 35, 35])

        res_group = graph.node['deconv_node']['group']
        exp_group = int64_array([1])

        self.assertTrue(
            np.array_equal(exp_shape, res_shape),
            'values do not match expected: {} and computed: {}'.format(
                exp_shape, res_shape))

        self.assertTrue(
            np.array_equal(exp_group, res_group),
            'group number values do not match expected: {} and computed: {}'.format(
                exp_group, res_group))
Example #12
    def test_crop_infer_one_shape(self):
        graph = build_graph(
            nodes_attributes, [('node_1', 'crop_1'), ('crop_1', 'node_3'),
                               ('node_3', 'op_output')], {
                                   'node_3': {
                                       'shape': None
                                   },
                                   'node_1': {
                                       'shape': np.array([1, 2, 500, 500])
                                   },
                                   'crop_1': {
                                       'axis': 2,
                                       'offset': [0],
                                       'dim': None
                                   }
                               })

        crop_node = Node(graph, 'crop_1')

        crop_infer(crop_node)
        self.assertIsNone(graph.node['node_3']['shape'])
Example #13
    def test_slice_axis_infer_layer(self):
        graph = build_graph(
            {'node_1': {'name': 'data', 'type': 'Identity', 'value': None, 'kind': 'op', 'op': 'Parameter'},
             'slice_axis_node': {'name': 'slice_axis_node', 'type': 'sigmoid', 'value': None,
                                 'kind': 'op', 'op': 'slice_axis', },
             'node_3': {'name': 'node_3', 'type': 'Identity', 'value': None, 'kind': 'op'},
             },
            [
                ('node_1', 'slice_axis_node'),
                ('slice_axis_node', 'node_3'),
            ],
            {
                'node_1': {'shape': np.array([1, 1024, 19, 19])},
                'slice_axis_node': {'axis': 1, 'offset': 10, 'dim': 25},
            })

        slice_axis_node = Node(graph, 'slice_axis_node')
        mxnet_slice_axis_infer(slice_axis_node)
        res_shape = [1, 15, 19, 19]
        for i in range(0, len(graph.node['node_3']['shape'])):
            self.assertEqual(graph.node['node_3']['shape'][i], res_shape[i])
Example #14
    def test_partial_infer_just_unique(self):
        edges = [('input', 'unique_node', {
            'in': 0
        }), ('unique_node', 'output_uniques', {
            'out': 0
        })]
        graph = build_graph(nodes_attributes, edges, inputs1)

        unique_node = Node(graph, 'unique_node')
        Unique.infer(unique_node)

        # prepare reference results
        ref_output_uniques_shape = int64_array([20])

        # get resulted shapes
        res_output_uniques_shape = graph.node['output_uniques']['shape']

        self.assertTrue(
            np.array_equal(ref_output_uniques_shape, res_output_uniques_shape),
            'shapes do not match expected: {} and given: {}'.format(
                ref_output_uniques_shape, res_output_uniques_shape))
Example #15
    def test_cumsum_axis(self):
        graph = build_graph(nodes_attributes, [
            *connect('data', '0:cumsum'),
            *connect('axis', '1:cumsum'),
            *connect('cumsum', '0:identity'),
            ('identity', 'identity_d', {
                'out': 0
            }),
            ('identity_d', 'output'),
        ], {'cumsum': {
            'reverse': False,
            'exclusive': False
        }},
                            nodes_with_edges_only=True)

        cumsum_node = Node(graph, 'cumsum')
        CumSum.infer(cumsum_node)
        self.assertTrue(
            np.array_equal(
                cumsum_node.out_port(0).data.get_shape(),
                int64_array([1, 3, 224, 224])))
Example #16
    def test_split(self):
        graph = build_graph(nodes_attributes,
                            [('placeholder_1', 'placeholder_data'), ('placeholder_data', 'split'),
                             ('split', 'split_data_1'), ('split_data_1', 'last'),
                             ('last', 'last_data'), ('last_data', 'res'),
                             ], nodes_with_edges_only=True)
        node = Node(graph, 'split')

        # the extractor normally sets 'out_ports_count'; emulate that here
        node['out_ports_count'] = node.num_splits
        for p in range(len(node.out_edges()), node.out_ports_count):
            node.add_output_port(p)

        replacer = AddFakeOutputsToSplit()
        replacer.find_and_replace_pattern(graph)

        for n in graph.get_op_nodes():
            n['need_shape_inference'] = False
        graph_clean_up(graph)

        self.assertTrue(len(node.out_edges()) == node.num_splits)
Example #17
    def merge_nodes_permutations(graph: Graph):
        # Iterate over all data nodes and check all their permutations for similarity.
        # If the permutations are equal, the permutation is stored as an attribute of the data node;
        # otherwise an exception is raised.
        for node in graph.nodes():
            node = Node(graph, node)
            if node.kind != 'data':
                continue

            permutations = []

            # Get all permutations from in edges
            for in_node in node.in_nodes():
                edge_attrs = node.graph.get_edge_data(in_node.id, node.id)[0]
                if 'permutation' in edge_attrs:
                    permutations.append(edge_attrs['permutation'])

            # Get all permutations from out edges
            for out_node in node.out_nodes():
                edge_attrs = node.graph.get_edge_data(node.id, out_node.id)[0]
                if 'permutation' in edge_attrs:
                    permutations.append(edge_attrs['permutation'])

            final_permutations = []
            for p in permutations:
                if p is not None:
                    final_permutations.append(p.perm)
                else:
                    final_permutations.append(int64_array(np.arange(node.shape.size)))

            if len(final_permutations) == 0:
                continue

            # Check that all permutations are equal
            if not all([np.array_equal(final_permutations[0], perm) for perm in final_permutations]):
                raise Error('Permutations requested for {} data node are not equal! List of permutations: {}'
                            ''.format(node.name, [p.perm for p in permutations]))

            assert not node.has_valid('permutation') or np.array_equal(node.permutation, permutations[0])
            node['permutation'] = permutations[0]
Example #18
    def test_pooling_dynamic_infer(self):
        graph = build_graph(
            nodes_attributes, [('node_1', 'pool'), ('pool', 'node_2'),
                               ('node_2', 'op_output')],
            {
                'node_2': {
                    'shape': None
                },
                'node_1': {
                    'shape':
                    shape_array([
                        1, dynamic_dimension_value, dynamic_dimension_value,
                        256
                    ])
                },
                'pool': {
                    'window': np.array([1, 1, 1, 1]),
                    'stride': np.array([1, 1, 2, 2]),
                    'pad': np.array([[0, 0], [0, 0], [3, 3], [3, 3]]),
                    'pad_spatial_shape': np.array([[3, 3], [3, 3]]),
                    'pool_method': 'avg',
                    'exclude_pad': False,
                    'global_pool': False,
                    'output_spatial_shape': None,
                    'output_shape': None,
                    'kernel_spatial': np.array([3, 3]),
                    'spatial_dims': np.array([2, 3]),
                    'channel_dims': np.array([1]),
                    'batch_dims': np.array([0]),
                    'pooling_convention': 'full'
                }
            })

        pool_node = Node(graph, 'pool')

        Pooling.infer(pool_node)
        exp_shape = shape_array(
            [1, dynamic_dimension_value, dynamic_dimension_value, 131])
        res_shape = graph.node['node_2']['shape']
        self.assertTrue(strict_compare_tensors(exp_shape, res_shape))
Example #19
    def test_variadic_split_value_inference_with_uint32(self):
        axis = int64_array(2)
        # the sum of a Python int and a NumPy np.uint64 gives float64,
        # but np.split accepts only integers and raises an error for floats;
        # therefore the np.split arguments must be explicitly cast to integer.
        # This test covers that case.
        lengths = mo_array([2, 13, 10], dtype=np.uint64)
        input_shape = mo_array([2, 12, 25, 30])
        input_value = np.zeros(input_shape)

        graph = build_graph(
            self.nodes, self.edges, {
                'split_input_data': {
                    'shape': input_shape,
                    'value': input_value
                },
                'split_axis_data': {
                    'value': axis
                },
                'split_lengths_data': {
                    'value': lengths
                },
                'split_op': {
                    'out_ports_count': 4
                },
            })
        node = Node(graph, 'split_op')
        for p in range(len(node.out_edges()), node.out_ports_count):
            node.add_output_port(p)

        VariadicSplit.infer(node)

        out_nodes_count = len(node.out_edges())
        self.assertTrue(out_nodes_count == 3)
        for out in range(out_nodes_count):
            self.assertTrue(
                np.all(
                    node.out_node(out).shape == int64_array(
                        [2, 12, lengths[out], 30])))
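
The promotion pitfall the comment refers to is easy to reproduce; note that the result depends on the NumPy promotion rules in effect (hedged accordingly):

import numpy as np

x = np.uint64(2)
# under legacy (pre-NumPy-2.0 / pre-NEP-50) rules, uint64 has no common
# integer type with a signed Python int, so the sum is promoted to float64
print((x + 1).dtype)  # float64 (uint64 under NumPy 2.x with NEP 50)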
Example #20
    def test_value_propagation(self, a_shape, a_value, b_shape, b_value,
                               elem_type):
        graph = build_graph(nodes_attrs=graph_nodes_attrs,
                            edges=graph_edges,
                            update_attributes={
                                'A': {
                                    'shape': int64_array(a_shape),
                                    'value': a_value.astype(elem_type)
                                },
                                'A_data': {
                                    'shape': int64_array(a_shape),
                                    'value': a_value.astype(elem_type)
                                },
                                'B': {
                                    'shape': int64_array(b_shape),
                                    'value': b_value.astype(elem_type)
                                },
                                'B_data': {
                                    'shape': int64_array(b_shape),
                                    'value': b_value.astype(elem_type)
                                },
                            })
        node = Node(graph, 'div')
        node['infer'] = Div(graph, node.attrs()).create_node().infer
        node.infer(node)
        node_data = node.out_port(0).get_destination().data.get_value()

        def func_for_ref():
            if np.issubdtype(elem_type, np.integer):
                return lambda a, b: a // b
            else:
                return lambda a, b: a / b

        ref_data = func_for_ref()(a_value, b_value)
        node_data_shape = node_data.shape
        ref_data_shape = ref_data.shape
        msg = "Value propagation for 'div' node is not correct."
        self.assertTrue(
            node_data_shape == ref_data_shape
            and np.all(node_data == ref_data), msg)
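
func_for_ref encodes the reference semantics the test expects from Div: floor division for integer element types, true division for floating ones. A quick self-contained check of that distinction (plain NumPy, illustrative only):

import numpy as np

a = np.array([7, -7], dtype=np.int64)
b = np.array([2, 2], dtype=np.int64)
print(a // b)                    # [ 3 -4] -> floor division for integers
print(a.astype(np.float32) / b)  # [ 3.5 -3.5] -> true division for floats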
Example #21
    def test_component_map_loading_offset(self):
        test_map = "input-node name=input dim=16\n" + \
                   "component-node name=lda component=lda input=Offset(input, -3)\n" + \
                   "component-node name=tdnn1.affine component=tdnn1.affine input=Append(Offset(input, -1), Offset(lda, 1))\n" + \
                   "component-node name=tdnn1.relu component=tdnn1.relu input=tdnn1.affine\n" + \
                   "\n"
        graph = Graph(name="test_graph_component_map_loading_offset")

        test_top_map = load_topology_map(io.BytesIO(bytes(test_map, 'ascii')), graph)

        ref_map = {b"lda": ["lda"],
                   b"tdnn1.affine": ["tdnn1.affine"],
                   b"tdnn1.relu": ["tdnn1.relu"]}
        self.assertEqual(test_top_map, ref_map)
        self.assertTrue("input" in graph.nodes())
        self.assertListEqual(list(Node(graph, 'input')['shape']), [1, 16])

        ref_graph = build_graph({'input': {'shape': np.array([1, 16]), 'kind': 'op', 'op': 'Parameter'},
                                 'lda': {'kind': 'op'},
                                 'tdnn1.affine': {'kind': 'op'},
                                 'tdnn1.relu': {'kind': 'op'},
                                 'append_input_lda': {'kind': 'op', 'op': 'Concat'},
                                 'offset_in_input_3': {'kind': 'op', 'op': 'memoryoffset', 't': -3, 'pair_name': 'offset_out_input_3'},
                                 'offset_in_input_1': {'kind': 'op', 'op': 'memoryoffset', 't': -1, 'pair_name': 'offset_out_input_1'},
                                 'offset_in_lda_1': {'kind': 'op', 'op': 'memoryoffset', 't': -1, 'pair_name': 'offset_out_lda_1'},
                                 },
                                [
                                    ('input', 'offset_in_input_3', {'out': 0}),
                                    ('offset_in_input_3', 'lda', {'out': 0}),
                                    ('lda', 'offset_in_lda_1', {'out': 0}),
                                    ('input', 'offset_in_input_1', {'out': 1}),
                                    ('offset_in_lda_1', 'append_input_lda', {'in': 1, 'out': 0}),
                                    ('offset_in_input_1', 'append_input_lda', {'in': 0, 'out': 0}),
                                    ('append_input_lda', 'tdnn1.affine', {'out': 0}),
                                    ('tdnn1.affine', 'tdnn1.relu', {'out': 0}),
                                ]
                                )

        (flag, resp) = compare_graphs(graph, ref_graph, 'tdnn1.relu')
        self.assertTrue(flag, resp)
Example #22
    def test_more_output_ports(self):
        nodes_attributes1 = {
            'input': {
                'shape': None,
                'value': None,
                'kind': 'data'
            },
            'unique_node': {
                'op': 'Unique',
                'kind': 'op'
            },
            'output_uniques': {
                'shape': None,
                'value': None,
                'kind': 'data'
            },
            'output_indices': {
                'shape': None,
                'value': None,
                'kind': 'data'
            },
            'output3': {
                'shape': None,
                'value': None,
                'kind': 'data'
            },
        }
        edges = [('input', 'unique_node', {
            'in': 0
        }), ('unique_node', 'output_uniques', {
            'out': 0
        }), ('unique_node', 'output_indices', {
            'out': 1
        }), ('unique_node', 'output3', {
            'out': 2
        })]
        graph = build_graph(nodes_attributes1, edges, inputs1)

        unique_node = Node(graph, 'unique_node')
        self.assertRaises(AssertionError, Unique.infer, unique_node)
Example #23
    def test_reduce_dynamic(self, shape, axes, keepdims, p):
        false_mask = np.zeros(shape)
        false_mask[0][1][1] = True
        data = np.ma.masked_array(np.ones(shape), mask=false_mask)
        assert not is_fully_defined(data)
        reduced_tensor = np.sum(data, axis=tuple(axes), keepdims=keepdims)
        # create an array of all masked elements which is the expected result of the reduce of the tensor with dynamic
        # values
        fully_undefined = np.ma.masked_array(reduced_tensor,
                                             mask=np.ones(
                                                 reduced_tensor.shape))
        axis = int64_array(axes)
        p = int64_array(p)
        graph = build_graph(nodes_attributes, [
            *connect('data', '0:reduce_lp'), *connect('axis', '1:reduce_lp'),
            *connect('reduce_lp', '0:identity'),
            ('identity', 'identity_d', {
                'out': 0
            }), ('identity_d', 'output')
        ], {
            'data_d': {
                'value': data,
                'shape': data.shape
            },
            'axis_d': {
                'value': axis,
                'shape': axis.shape
            },
            'reduce_lp': {
                'keep_dims': keepdims
            }
        },
                            nodes_with_edges_only=True)

        reduce_node = Node(graph, 'reduce_lp')
        reduce_node.op = reduce_node.type = 'ReduceL' + str(p)
        reduce_infer(reduce_node)
        self.assertTrue(
            strict_compare_tensors(
                reduce_node.out_port(0).data.get_value(), fully_undefined))
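
As the test above shows, dynamic values are modelled with NumPy masked arrays. A tiny self-contained illustration of the masking machinery (illustrative, not MO code):

import numpy as np

data = np.ma.masked_array([1, 2, 3], mask=[False, True, False])
print(np.ma.is_masked(data))  # True: at least one element is dynamic
print(data.sum())             # 4: a plain masked reduction just skips masked
                              # elements, which is why the test masks the whole
                              # expected result instead of relying on np.sum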
Example #24
def common_bfs(start_node: Node, allowed_ops: list, op_name: list, is_backward: bool = True, allowed_all: bool = False,
               attr_to_check='type', follow_multi_consumer_data_nodes=False):
    """
    The purpose of this algorithm is to find layers with 'op_name' located in the given direction.
    In case of branching, the algorithm goes into each branch; if it cannot find the layer in one of
    them, it returns an empty list.

    :param start_node: Start node for BFS algorithm
    :param allowed_ops: List of operations that we can jump over
    :param op_name: The list with names of operations for searching
    :param is_backward: The direction of BFS algorithm
    :param allowed_all: Bool flag meaning we can jump over all operations
    :param attr_to_check: the attribute to check when looking if the node is in "op_name" list
    :param follow_multi_consumer_data_nodes: for backward traversal allow to follow data nodes with multiple consumers
    """
    ret = []
    q = deque([start_node])
    used = []
    while len(q) != 0:
        node = q.popleft()
        if node.id in used:
            log.debug("[BFS:ERROR] Graph contains cycle! BFS starts from {} node".format(start_node.id))
            return []
        used.append(node.id)
        in_nodes_size = len(node.in_nodes()) if is_backward else len(node.out_nodes())
        for port_idx in range(in_nodes_size):  # in_nodes() can return either a list or a dict
            pnode = node.in_node(port_idx) if is_backward else node.out_node(port_idx)
            if pnode.has_valid(attr_to_check):
                if pnode[attr_to_check] in op_name:
                    if pnode.id not in ret:
                        ret.append(pnode.id)
                elif allowed_all or pnode.op in allowed_ops:
                    q.append(pnode)
                else:
                    return []
            elif pnode.kind == 'data' and pnode.value is None:
                # when going backward, do not follow data nodes that have more than one consumer
                if not is_backward or (len(pnode.out_nodes()) == 1 or follow_multi_consumer_data_nodes):
                    q.append(pnode)
    return [Node(start_node.graph, x) for x in ret]
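
The traversal idea is plain BFS that may only "jump over" whitelisted ops and aborts the whole search as soon as any branch hits a disallowed op. A self-contained toy version over an adjacency dict (illustrative, not the MO implementation):

from collections import deque

# predecessors of each node and its operation type (toy data)
preds = {'relu': ['conv'], 'conv': ['pad'], 'pad': ['param']}
op = {'relu': 'ReLU', 'conv': 'Convolution', 'pad': 'Pad', 'param': 'Parameter'}

def bfs_back(start, allowed, targets):
    found, q, seen = [], deque([start]), set()
    while q:
        n = q.popleft()
        if n in seen:
            continue
        seen.add(n)
        for p in preds.get(n, []):
            if op[p] in targets:
                found.append(p)
            elif op[p] in allowed:
                q.append(p)
            else:
                return []  # a branch hit a disallowed op: give up entirely
    return found

print(bfs_back('relu', {'Convolution', 'Pad'}, {'Parameter'}))  # ['param']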
Example #25
    def array_infer(node: Node):
        handle = node.in_node(0)
        value = node.in_node(2)
        flow_in = node.in_node(3)

        ta_node = Node(node.graph, str(handle.value))
        if ta_node.has_valid('element_shape') and len(ta_node.element_shape) > 0:
            assert match_shapes(ta_node['element_shape'], value.shape[1:]), \
                'Shapes are not compatible: {} and {}'.format(ta_node['element_shape'], value.shape[1:])

        # assign element_shape unconditionally, because the original element_shape can contain -1
        ta_node['element_shape'] = value.shape[1:]

        output_value = flow_in.value
        for _, out_node in node.graph.out_edges(node.id):
            node.graph.node[out_node]['shape'] = shape_array(flow_in.shape)
            node.graph.node[out_node][
                'value'] = None if output_value is None else mo_array(
                    output_value)
Example #26
def apply_pattern(graph: Graph,
                  nodes: list,
                  edges: list,
                  action: callable,
                  node_attrs: list = None,
                  edge_attrs: list = None):
    """
    Search for all matches of a given subgraph defined by [nodes, edges] in graph,
    then apply action for each such match.
    """
    if not all_edges_in_nodes([node[0] for node in nodes], edges):
        log.warning(
            "Incorrect pattern attributes: not all nodes referenced in 'edges' are listed in 'nodes'. "
            "Please list every node the pattern needs in the 'nodes' attribute.")

    matches = []
    for match in find_pattern_matches(graph, nodes, edges, node_attrs,
                                      edge_attrs):
        matches.append(match)

    for match in matches:
        match = inverse_dict(match)
        still_valid = True
        for k in match:
            if not graph.has_node(match[k]):
                # Graph changed significantly
                still_valid = False
                log.warning(
                    "The graph has changed significantly during applying pattern:\n"
                    "nodes: {}\n"
                    "edges: {}\n"
                    "node_attrs: {}\n"
                    "edge_attrs: {}".format(nodes, edges, node_attrs,
                                            edge_attrs))
                break
            match[k] = Node(graph, match[k])
        if still_valid:
            action(graph, match)
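
A hedged sketch of how apply_pattern is typically driven: nodes are (name, attrs) pairs matched against node attributes, edges reference those names, and the action receives the match dict with pattern names resolved to Node objects. The ops, the rewrite body, and the pre-built 'graph' below are illustrative only:

def fuse_mul_add(graph, match):
    # 'match' maps pattern names to Node objects of the concrete match
    mul, add = match['mul'], match['add']
    # ... rewrite the matched sub-graph here ...

apply_pattern(
    graph,
    nodes=[('mul', dict(kind='op', op='Mul')),
           ('add', dict(kind='op', op='Add'))],
    edges=[('mul', 'add')],
    action=fuse_mul_add)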
Example #27
    def test_prior_box_infer_ideal(self):
        graph = build_graph(
            nodes_attributes, [('node_1', 'prior_box_1'),
                               ('node_2', 'prior_box_1'),
                               ('prior_box_1', 'node_3')],
            {
                'node_1': {
                    'shape': np.array([1, 1024, 19, 19])
                },
                'node_2': {
                    'shape': np.array([1, 3, 300, 300])
                },
                'prior_box_1': {
                    'aspect_ratio': [1.0, 2.0, 0.5, 3.0, 0.333333333333],
                    'min_size': [0.2, 0.272],
                    'max_size': '',
                    'offset': 0.5,
                    'step': 0.2,
                    'sizes': [0.2, 0.272]
                },
                'node_3': {
                    'shape': np.array([1, 2, 3])
                },
            })

        multi_box_prior_node = Node(graph, 'prior_box_1')

        multi_box_prior_infer_mxnet(multi_box_prior_node)
        exp_shape = np.array([1, 2, 8664])
        res_shape = graph.node['node_3']['shape']
        for i in range(0, len(exp_shape)):
            self.assertEqual(exp_shape[i], res_shape[i])

        self.assertEqual(multi_box_prior_node.min_size, [0.2, 0.272])
        self.assertEqual(multi_box_prior_node.max_size, '')
        self.assertEqual(multi_box_prior_node.aspect_ratio,
                         [1.0, 2.0, 0.5, 3.0, 0.333333333333])
        self.assertEqual(round(multi_box_prior_node.step, 1), 0.2)
        self.assertEqual(round(multi_box_prior_node.offset, 1), 0.5)
Example #28
def reverse_infer(graph: Graph, nodes: list):
    nodes = reversed(nodes)
    debug_logger = log.getLogger().isEnabledFor(log.DEBUG)
    for n in nodes:
        node = Node(graph, n)
        if node.has_and_set('reverse_infer'):
            log.debug("Executed reverse infer for node '{}'".format(
                node.soft_get('name', node.id)))
            node.reverse_infer(node)

            if debug_logger:
                log.debug('-' * 20)
                log.debug('Reverse infer for {}'.format(node.soft_get('name')))
                log.debug('Op: {}'.format(node.soft_get('op')))
                log.debug('Outputs:')
                log_debug_dict(node.out_nodes(), 'outputs')

                log.debug('Inputs:')
                log_debug_dict(node.in_nodes(), 'inputs')

    parameters_with_no_shape = []
    for node in graph.get_op_nodes(op='Parameter'):
        if not node.has_valid('shape'):
            parameters_with_no_shape.append(node)

    if len(parameters_with_no_shape) == 0:
        return

    # the early return above guarantees the list is non-empty here
    parameters_names = ', '.join(
        "'{}'".format(node.soft_get('name', node.id)) for node in parameters_with_no_shape)

    raise Error(
        "Model Optimizer is unable to deduce input shapes for the following Parameter nodes: {}. "
        "Please use the --input or --input_shape command line options to set the model input shape."
        .format(parameters_names))
Example #29
    def replace_sub_graph(self, graph: Graph, match: dict):
        slice_like = match['slice_like']
        const = slice_like.in_nodes()[0]
        crop_shape = slice_like.in_nodes()[1]

        variants_dict = {
            'mul_scalar1x': 0.1,
            'mul_scalar2x': 0.2,
            'mul_scalar1y': 0.1,
            'mul_scalar2y': 0.2
        }
        for matches in find_pattern_matches(graph,
                                            self.variants_pattern['nodes'],
                                            self.variants_pattern['edges'],
                                            None, None):
            for k, v in matches.items():
                if v in variants_dict.keys():
                    variants_dict[v] = Node(graph, k).in_nodes()[1].value[0]

        variants = mo_array([
            variants_dict['mul_scalar1x'], variants_dict['mul_scalar1y'],
            variants_dict['mul_scalar2x'], variants_dict['mul_scalar2y']
        ] * int(const.value.size / 4)).reshape(const.value.shape)
        priorbox_variants = Const(
            graph, dict(value=variants,
                        name=const.id + '/priorbox_variants')).create_node()
        variants_slice_like = SliceLike(
            graph,
            dict(axes=slice_like.axes,
                 name=slice_like.id + '/variants_slice_like')).create_node()
        variants_slice_like.in_port(0).connect(priorbox_variants.out_port(0))
        variants_slice_like.in_port(1).connect(crop_shape.out_port(0))

        concat = match['reshape3'].out_port(0).get_destination().node
        assert concat.op == 'Concat'
        concat_nodes_count = len(concat.in_nodes())
        concat.add_input_port(concat_nodes_count)
        concat.in_port(concat_nodes_count).get_connection().set_source(
            variants_slice_like.out_port(0))
Example #30
    def test_tile_infer_values_const_propagation(self):
        """
        Test that constant values are propagated even though Tile with multiple tile indices is not supported
        """
        input_data = np.arange(-30, 60, 0.25).reshape([2, 4, 3, -1])
        tile_values = np.array([4, 3, 2, 5])
        graph = build_graph(
            nodes_attributes, edges, {
                'data': {
                    'shape': np.array(input_data.shape),
                    'value': input_data
                },
                'tile_values': {
                    'value': tile_values
                }
            })
        tile_node = Node(graph, 'tile')
        Tile.infer(tile_node)
        self.assertTrue(
            np.all(np.tile(input_data, tile_values) == graph.node['tile_out']['value']))