Example #1
    def test_set_value_and_shape_with_force_shape_attribute_in_op(self):
        import numpy as np
        graph = build_graph(
            {
                **valued_const_with_data('const', np.array([1, 2, 3])),
                **result()
            }, [*connect('const', 'output')])

        node = Node(graph, 'const')
        node['force_shape'] = np.array([2, 5, 7], dtype=np.int64)
        node.out_port(0).data.set_value(np.zeros(35))
        self.assertTrue(
            np.array_equal(
                node.out_port(0).data.get_shape(),
                np.array([2, 5, 7], dtype=np.int64)),
            "node.out_port(0).data.get_shape()={} != [2, 5, 7]".format(
                node.out_port(0).data.get_shape()))
Example #2
    def extend_inputs(node: Node, num_insertions: int):
        graph = node.graph
        node_name = node.soft_get('name', node.id)

        for i, input_name in [(1, 'begin'), (2, 'end'), (3, 'strides')]:
            if i == 3 and not node.is_in_port_connected(3):
                continue  # no need to extend strides if they are not connected

            blank_values_arr = np.zeros(num_insertions) if input_name != 'strides' \
                else np.ones(num_insertions)
            blank_values_node = Const(
                graph, {
                    'name': node_name + '/extend_{}_const'.format(input_name),
                    'value': int64_array(blank_values_arr)
                }).create_node()

            if node.in_port(i).get_source().node.soft_get('type') == 'Concat':
                # concat already exists
                concat = node.in_port(i).get_source().node
                last_in_port = max(concat.in_ports().keys())
                assert not concat.in_port(last_in_port).disconnected(), 'The last in_port of Concat node {} ' \
                                                                        'should be connected'. \
                    format(concat.soft_get('name', node.id))

                concat.add_input_port(last_in_port + 1)
                concat.in_port(last_in_port + 1).connect(
                    blank_values_node.out_port(0))
            else:
                # have to create concat
                concat = Concat(
                    graph, {
                        'axis': 0,
                        'name': node_name + '/concat_{}'.format(input_name),
                        'in_ports_count': 2
                    }).create_node()
                node.in_port(i).get_connection().set_destination(
                    concat.in_port(0))
                concat.in_port(1).connect(blank_values_node.out_port(0))
                concat.out_port(0).get_connection().set_destination(
                    node.in_port(i))
Example #3
    def infer(node: Node):
        node_name = node.soft_get('name', node.id)
        input_shape = node.in_port(0).data.get_shape()
        input_value = node.in_port(0).data.get_value()
        if input_shape is None:
            raise Error('Input shape for node "{}" is None'.format(node_name))

        assert len(node.in_nodes()) == 1, 'Wrong number of inputs to the layer {}'.format(node_name)

        if not node.has_valid('expand_axis'):
            raise Error('ExpandDims axis is not defined for node {}'.format(node_name))

        expand_axes = node.expand_axis
        if expand_axes is None:
            raise Error('The "expand_axis" attribute is None for node "{}"'.format(node_name))

        if isinstance(expand_axes, int):
            expand_axes = int64_array([expand_axes])
        elif expand_axes.ndim == 0:
            expand_axes = expand_axes.reshape([1])

        # expand_axis is the position where the new axis is placed, so for a negative axis
        # ExpandDims normalizes against rank + 1 instead of behaving like an insert operation.
        # Note: reassigning the loop variable would not update the collection itself,
        # so build the normalized list explicitly.
        expand_axes = [axis + len(input_shape) + 1 if axis < 0 else axis
                       for axis in expand_axes]

        expand_axes = sorted(expand_axes)
        output_shape = input_shape.copy()
        for expand_axis in expand_axes:
            output_shape = shape_insert(output_shape, expand_axis, 1)

        if input_value is not None and is_fully_defined(output_shape):
            node.out_port(0).data.set_value(input_value.reshape(output_shape))
        else:
            node.out_port(0).data.set_shape(output_shape)
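
A quick sanity check of the negative-axis rule above; a minimal sketch, using np.expand_dims only as a reference for the expected placement:

# Sketch: a negative expand axis is normalized against rank + 1, so for a
# rank-3 input, axis = -1 appends the new dimension at the end
import numpy as np

x = np.zeros((2, 3, 4))
assert np.expand_dims(x, -1).shape == (2, 3, 4, 1)  # -1 + 3 + 1 == 3
assert np.expand_dims(x, 0).shape == (1, 2, 3, 4)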
Example #4
    def replace_op(self, graph: Graph, node: Node):
        out_node = Concat(graph, {
            'axis': node.axis,
            'in_ports_count': len(node.in_ports())
        }).create_node()
        pack_name = node.soft_get('name', node.id)

        for ind in node.in_ports():
            unsqueeze_node = create_op_with_const_inputs(
                graph, Unsqueeze, {1: int64_array([node.axis])},
                {'name': pack_name + '/Unsqueeze'})
            node.in_port(ind).get_connection().set_destination(
                unsqueeze_node.in_port(0))
            unsqueeze_node.out_port(0).connect(out_node.in_port(ind))

        rename_nodes([(node, pack_name + '/TBR'), (out_node, pack_name)])
        return [out_node.id]
Example #5
    def reverse_infer(node: Node):
        out_shape = node.out_port(0).data.get_shape()
        data_shape = node.in_port(0).data.get_shape()
        indices_shape = node.in_port(1).data.get_shape()
        batch_dims = node.batch_dims
        batch_dims = batch_dims + len(indices_shape) if batch_dims < 0 else batch_dims

        axis = node.in_port(2).data.get_value()
        # axis of Gather could be accepted as both scalar and 1D tensor
        if isinstance(axis, np.ndarray):
            axis = axis.item()
        assert axis is not None, 'axis input is undefined'

        # we can deduce data or indices partial shapes from output shape calculation formula
        # out_shape = Concat(data_shape[:axis], indices_shape[batch_dims:batch_dims + indices_rank], data_shape[axis + 1:])

        # data partial shape is unknown
        if out_shape is not None and data_shape is None and indices_shape is not None:
            out_rank = len(out_shape)
            indices_rank = len(indices_shape)

            deduced_data_shape = out_shape.tolist(dynamic_dimension_value)
            for i in range(indices_rank):
                deduced_data_shape.pop(axis)
            deduced_data_shape.insert(axis, dynamic_dimension_value)
            node.in_port(0).data.set_shape(shape_array(deduced_data_shape))

        # indices partial shape is unknown
        if out_shape is not None and indices_shape is None and data_shape is not None:
            out_rank = len(out_shape)
            data_rank = len(data_shape)
            indices_rank = out_rank + 1 - data_rank + batch_dims

            indices_shape = out_shape[axis:axis + indices_rank]
            node.in_port(1).data.set_shape(indices_shape)
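
A worked instance of the deduction formula above; a plain-Python sketch with made-up shapes, independent of the Node machinery:

# Sketch: deduce an unknown data shape from Gather's output-shape formula
# out_shape = data_shape[:axis] + indices_shape[batch_dims:] + data_shape[axis + 1:]
# with out_shape = [2, 5, 7, 4], indices_shape = [5, 7], axis = 1, batch_dims = 0
out_shape, indices_shape, axis = [2, 5, 7, 4], [5, 7], 1
deduced = list(out_shape)
for _ in range(len(indices_shape)):
    deduced.pop(axis)       # drop the dims contributed by indices
deduced.insert(axis, None)  # the gathered axis itself stays unknown (dynamic)
assert deduced == [2, None, 4]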
Example #6
def grouped_convolutions_fusing(graph: Graph):
    while True:
        is_fused = False
        graph.clean_up()
        for node in graph.pseudo_topological_sort():
            if node.kind == 'op' and len(node.out_nodes()) > 1:
                if node.soft_get('can_be_fused') is False:
                    continue

                is_valid_convolutions = True
                last_layer = None

                next_nodes = get_next_operation(node)
                # Check that all operations after this one are Convolutions
                # and that all those convolutions have the same output
                if len(next_nodes) > 1 and \
                        all(_node.soft_get('type') in ['Convolution', 'Deconvolution'] for _node in next_nodes):
                    for conv in next_nodes:
                        conv_outputs = get_next_operation(conv)
                        if conv.soft_get('can_be_fused') is False:
                            is_valid_convolutions = False
                        if len(conv_outputs) != 1:
                            is_valid_convolutions = False
                        if last_layer is None:
                            last_layer = conv_outputs[0].id
                        # TODO: this check is not working for V10 where Biases appears as separate operations
                        elif conv_outputs[0].id != last_layer:
                            is_valid_convolutions = False

                    if is_valid_convolutions:
                        is_fused = concat_convolutions(graph, node,
                                                       Node(graph, last_layer))
                        if is_fused:
                            break

        if not is_fused:
            break
Example #7
    def test_reconnect_middle_case2(self):
        graph = build_graph(nodes, [('input', 'input_data'),
                                    ('input_data', 'Op1', {
                                        'out': 0
                                    }), ('input_data', 'Op1', {
                                        'out': 1
                                    }), ('Op3', 'Op3_data')])
        input_node = Node(graph, 'input')

        input_node_out_port = input_node.out_port(0)
        self.assertTrue(
            input_node_out_port.get_tensor_names() == ['Op1\\,Op2', 'input'])

        op3_node = Node(graph, 'Op3')
        input_node_out_port.get_connection().set_source(
            op3_node.out_port(0), "merge")

        self.assertTrue(input_node_out_port.get_tensor_names() == [])
        self.assertTrue(
            op3_node.out_port(0).get_tensor_names() ==
            ['Op1\\,Op2', 'Op3', 'input'])
Example #8
    def test_pooling_infer_with_dilations(self):
        graph = build_graph(
            nodes_attributes, [('node_1', 'pool'), ('pool', 'node_2'),
                               ('node_2', 'op_output')],
            {
                'node_2': {
                    'shape': None
                },
                'node_1': {
                    'shape': np.array([1, 3, 256, 256])
                },
                'pool': {
                    'window': np.array([1, 1, 2, 2]),
                    'stride': np.array([1, 1, 2, 2]),
                    'pad': np.array([[0, 0], [0, 0], [0, 0], [1, 1]]),
                    'pad_spatial_shape': np.array([[0, 0], [1, 1]]),
                    'pool_method': 'max',
                    'exclude_pad': False,
                    'global_pool': False,
                    'output_spatial_shape': None,
                    'output_shape': None,
                    'kernel_spatial': np.array([2, 2]),
                    'spatial_dims': np.array([2, 3]),
                    'channel_dims': np.array([1]),
                    'batch_dims': np.array([0]),
                    'pooling_convention': 'full',
                    'dilation': np.array([1, 1, 2, 2]),
                    'auto_pad': 'valid'
                }
            })

        pool_node = Node(graph, 'pool')

        Pooling.infer(pool_node)
        exp_shape = np.array([1, 3, 127, 127])
        res_shape = graph.node['node_2']['shape']
        for i in range(0, len(exp_shape)):
            self.assertEqual(exp_shape[i], res_shape[i])
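
The expected [1, 3, 127, 127] follows from the dilated (effective) kernel size; a small arithmetic sketch, assuming auto_pad='valid' drops the explicit pads:

# Sketch: effective kernel with dilation is (k - 1) * d + 1 = (2 - 1) * 2 + 1 = 3;
# per spatial dim: out = floor((in - eff_kernel) / stride) + 1 = floor((256 - 3) / 2) + 1
in_size, k, d, stride = 256, 2, 2, 2
eff_kernel = (k - 1) * d + 1
assert (in_size - eff_kernel) // stride + 1 == 127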
Example #9
def get_dim_from_layout(node: Node, dim: str):
    """
    Gets index of dimension from layout specified for node.
    :param node: node to get dim for.
    :param dim: name of dimension to get index for.
    :return: tuple with the index of the dimension and a bool flag indicating whether the node has a layout specified.
    """
    layout = None
    graph = node.graph
    if 'layout_values' in graph.graph['cmd_params'] and graph.graph['cmd_params'].layout_values:
        layout_values = graph.graph['cmd_params'].layout_values.copy()
        if '' in layout_values:
            in_nodes = graph.get_op_nodes(op='Parameter')
            if len(in_nodes) == 1:
                in_node = in_nodes[0]
                layout_values[in_node.soft_get('name',
                                               in_node.id)] = layout_values['']
                del layout_values['']
        name = node.soft_get('name', node.id)
        if name in layout_values:
            if layout_values[name]['source_layout']:
                layout = layout_values[name]['source_layout']

    if layout:
        from openvino.runtime import Layout  # pylint: disable=no-name-in-module,import-error

        layout_parsed = Layout(layout)
        has_dim = layout_parsed.has_name(dim)
        if has_dim:
            idx = layout_parsed.get_index_by_name(dim)
            if idx < 0:
                idx = len(node.shape) + idx
            return idx, True
        else:
            return None, True
    else:
        return None, False
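
For reference, the Layout calls used above can be exercised directly; a minimal sketch (requires the openvino runtime package):

# Sketch: Layout lookups as used by get_dim_from_layout
from openvino.runtime import Layout  # pylint: disable=no-name-in-module,import-error

layout = Layout("NCHW")
assert layout.has_name("C")
assert layout.get_index_by_name("C") == 1  # a negative index would be wrapped by the node rank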
Example #10
    def test_deconv_infer_ideal(self):
        graph = build_graph(nodes_attributes,
                            [('conv_input', 'conv_node'),
                             ('conv_weights', 'conv_node'),
                             ('conv_node', 'conv_output'),
                             ('conv_output', 'op_output')
                             ],
                            {'conv_output': {'shape': None},
                             'conv_input': {'shape': np.array([1, 21, 16, 16])},
                             'conv_weights': {'shape': np.array([1, 21, 4, 4]),
                                              'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']},
                             'conv_node': {#'spatial_dims': np.array([2, 3]), 'batch_dims': np.array([0]),
                                           'channel_dims': np.array([1]), 'bias_addable': True, 'bias_term': False,
                                           'batch_dims': np.array([0]),
                                           'pad_spatial_shape': np.array([[0, 0], [0, 0]]),
                                           'kernel_spatial': np.array([4, 4]), 'output_spatial_shape': None,
                                           'kernel_spatial_idx': np.array([2, 3]),
                                           'input_feature_channel': 1,
                                           'output_feature_channel': 0,
                                           'output_padding': np.array([0, 0, 1, 1]),
                                           'type': 'Deconvolution', 'output': 21, 'dilation': np.array([1, 1, 1, 1]),
                                           'group': 1, 'stride': np.array([1, 1, 2, 2]), 'output_shape': None}
                             })

        deconv_node = Node(graph, 'conv_node')

        Convolution.infer(deconv_node)
        res_shape = deconv_node['output_shape']
        exp_shape = np.array([1, 21, 35, 35])

        for i in range(0, len(exp_shape)):
            self.assertEqual(exp_shape[i], res_shape[i])

        # Check that shape and pad attributes do not change after a second infer
        Convolution.infer(deconv_node)

        for i in range(0, len(exp_shape)):
            self.assertEqual(exp_shape[i], res_shape[i])
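
The expected [1, 21, 35, 35] can be cross-checked with the usual deconvolution formula; a sketch assuming zero spatial padding, as in the test:

# Sketch: out = stride * (in - 1) + kernel - 2 * pad + output_padding
#             = 2 * (16 - 1) + 4 - 0 + 1 = 35 for both spatial dims
in_size, stride, kernel, pad, out_pad = 16, 2, 4, 0, 1
assert stride * (in_size - 1) + kernel - 2 * pad + out_pad == 35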
Example #11
    def test_component_map_loading_append(self):
        test_map = "input-node name=input dim=16 \n" + \
                   "component-node name=lda component=lda input=input \n" + \
                   "component-node name=tdnn1.affine component=tdnn1.affine input=Append(input, lda) \n" + \
                   "component-node name=tdnn1.relu component=tdnn1.relu input=Append(tdnn1.affine, input, lda) \n" + \
                   "\n"
        graph = Graph(name="test_graph_component_map_loading_append")

        test_top_map = load_topology_map(io.BytesIO(bytes(test_map, 'ascii')), graph)

        ref_map = {b"lda": ["lda"],
                   b"tdnn1.affine": ["tdnn1.affine"],
                   b"tdnn1.relu": ["tdnn1.relu"]}
        self.assertEqual(test_top_map, ref_map)
        self.assertTrue("input" in graph.nodes())
        self.assertListEqual(list(Node(graph, 'input')['shape']), [1, 16])

        ref_graph = build_graph({'input': {'shape': np.array([1, 16]), 'kind': 'op', 'op': 'Parameter'},
                                 'lda': {'kind': 'op'},
                                 'tdnn1.affine': {'kind': 'op'},
                                 'tdnn1.relu': {'kind': 'op'},
                                 'append_input_lda': {'kind': 'op', 'op': 'Concat'},
                                 'append_affine_input_lda': {'kind': 'op', 'op': 'Concat'},
                                 },
                                [
                                    ('input', 'lda', {'out': 0}),
                                    ('lda', 'append_input_lda', {'in': 1, 'out': 0}),
                                    ('input', 'append_input_lda', {'in': 0, 'out': 1}),
                                    ('append_input_lda', 'tdnn1.affine', {'out': 0}),
                                    ('input', 'append_affine_input_lda', {'in': 1, 'out': 2}),
                                    ('lda', 'append_affine_input_lda', {'in': 2, 'out': 1}),
                                    ('tdnn1.affine', 'append_affine_input_lda', {'in': 0, 'out': 0}),
                                    ('append_affine_input_lda', 'tdnn1.relu', {'out': 0}),
                                ]
                                )

        (flag, resp) = compare_graphs(graph, ref_graph, 'tdnn1.relu')
        self.assertTrue(flag, resp)
Example #12
    def infer(node: Node):
        StridedSlice.align_mask_with_slice_rank(
            node,
            node.in_port(1).data.get_shape()[0])

        data_shape = node.in_port(0).data.get_shape()
        data_value = node.in_port(0).data.get_value()
        slices = StridedSlice.get_slices(node, data_shape)

        if data_value is not None and dynamic_dimension_value not in slices and \
                all(not is_dynamic_slice(s) for s in slices):
            node.out_port(0).data.set_value(data_value[tuple(slices)])
        else:
            node.out_port(0).data.set_shape(
                get_shape_from_slice(data_shape, slices))

        node['slices'] = slices
        node['force_precision_in_ports'] = {
            port: 'int64'
            for port in range(1, len(node.in_nodes()))
        }
Example #13
    def test_variadic_split_value_inference_with_uint32(self):
        axis = int64_array(2)
        # the sum of a Python int and a NumPy np.uint64 gives float64,
        # but np.split accepts only integers and raises an error for floats,
        # so np.split arguments must be explicitly cast to integer;
        # this test covers that case
        lengths = mo_array([2, 13, 10], dtype=np.uint64)
        input_shape = mo_array([2, 12, 25, 30])
        input_value = np.zeros(input_shape)

        graph = build_graph(
            self.nodes, self.edges, {
                'split_input_data': {
                    'shape': input_shape,
                    'value': input_value
                },
                'split_axis_data': {
                    'value': axis
                },
                'split_lengths_data': {
                    'value': lengths
                },
                'split_op': {
                    'out_ports_count': 4
                },
            })
        node = Node(graph, 'split_op')
        for p in range(len(node.out_edges()), node.out_ports_count):
            node.add_output_port(p)

        VariadicSplit.infer(node)

        out_nodes_count = len(node.out_edges())
        self.assertTrue(out_nodes_count == 3)
        for out in range(out_nodes_count):
            self.assertTrue(
                np.all(
                    node.out_node(out).shape == int64_array(
                        [2, 12, lengths[out], 30])))
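
The promotion pitfall noted in the comments can be reproduced directly; a minimal sketch of NumPy 1.x value-based promotion (NumPy 2.0 changed these rules):

# Sketch: adding a Python int to np.uint64 promotes to float64 under NumPy 1.x,
# and np.split then rejects float split points
import numpy as np

print((np.uint64(2) + 1).dtype)        # float64 under NumPy 1.x
np.split(np.zeros(25), [2, 15])        # fine: integer split points
# np.split(np.zeros(25), [2.5, 15.5])  # raises: float split points are rejected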
Example #14
    def test_spatial_attr_getter(self):
        input_shape = np.array([1, 125, 13, 13])
        params = {
            'kernel': np.array([1, 1, 1, 2]),
            'pad': np.array([1, 1, 3, 4]),
            'stride': np.array([1, 1, 2, 3]),
        }
        graph = build_graph(nodes_attributes,
                            [('input', 'pool_1'),
                             ('pool_1', 'output'),
                             ('output', 'op_output')
                             ],
                            {'input': {'shape': input_shape},
                             'pool_1': {**params, 'spatial_dims': [2, 3]},
                             'output': {'shape': None}})
        pool_1_node = Node(graph, 'pool_1')
        for param in params.keys():
            if type(params[param]) is np.ndarray:
                port_lambda = lambda x: x
                self.assertEqual(params[param][2],
                                 spatial_attr_getter(pool_1_node, field=param, dim=0, post=port_lambda))
                self.assertEqual(params[param][3],
                                 spatial_attr_getter(pool_1_node, field=param, dim=1, post=port_lambda))
Example #15
    def test_caffe_argmax_no_axis(self):
        graph = build_graph(
            nodes_attributes, [('op_input', 'node_1'), ('node_1', 'argmax'),
                               ('argmax', 'node_3'), ('node_3', 'op_output')],
            {
                'node_3': {
                    'shape': None
                },
                'node_1': {
                    'shape': np.array([1, 3, 1025, 2049])
                },
                'argmax': {
                    'out_max_val': True,
                    'top_k': 100
                }
            })

        argmax_node = Node(graph, 'argmax')
        arg_ops_infer(argmax_node)
        exp_shape = np.array([1, 2, 100, 1])
        res_shape = graph.node['node_3']['shape']
        for i in range(0, len(exp_shape)):
            self.assertEqual(exp_shape[i], res_shape[i])
Example #16
def convert_graph_inputs_to_parameters(internal_graph, internal_graph_proto):
    # create Parameter nodes for the body graph
    body_parameters = []
    body_parameter_names = []
    for idx, pb_node in enumerate(internal_graph_proto['input_arg']):
        param_id = internal_graph.unique_id(pb_node.name)
        internal_graph.add_node(param_id,
                                name=param_id,
                                kind='op',
                                op='Parameter',
                                pb=None,
                                shape=None)
        parameter_node = Node(internal_graph, pb_node.name)
        Parameter.update_node_stat(
            parameter_node, {
                'data_type':
                tf_dtype_extractor(pb_node.type),
                'permute_attrs':
                PermuteAttrs().update_attrs(attrs=[('shape', 'output:0')])
            })
        body_parameters.append(parameter_node)
        body_parameter_names.append(param_id)
    return body_parameters, body_parameter_names
Example #17
    def test_partial_infer1(self):
        graph = build_graph(nodes_attributes, edges1, inputs1)

        unique_node = Node(graph, 'unique_node')
        Unique.infer(unique_node)

        # prepare reference results
        ref_output_uniques_shape = int64_array([20])
        ref_output_indices_shape = int64_array([20])

        # get resulted shapes
        res_output_uniques_shape = graph.node['output_uniques']['shape']
        res_output_indices_shape = graph.node['output_indices']['shape']

        self.assertTrue(
            np.array_equal(ref_output_uniques_shape, res_output_uniques_shape),
            'shapes do not match expected: {} and given: {}'.format(
                ref_output_uniques_shape, res_output_uniques_shape))

        self.assertTrue(
            np.array_equal(ref_output_indices_shape, res_output_indices_shape),
            'shapes do not match expected: {} and given: {}'.format(
                ref_output_indices_shape, res_output_indices_shape))
Example #18
    def replace_op(self, graph: Graph, node: Node):
        ss_node = create_op_with_const_inputs(graph, Split, {1: int64_array(1)}, {'name': 'Split_eltwise_' + node.name,
                                                                                  'num_splits': node['num_inputs']})

        inp = node.get_inputs()
        in_node = inp[0][0]
        edge_attrs = inp[0][1]
        graph.add_edge(in_node, ss_node.id, **edge_attrs)
        if ss_node.num_splits == 2:
            if node['operation'] == 'mul':
                eltwise_node = Mul(graph, attrs={'name': 'Eltwise_' + node.name}).create_node()
            elif node['operation'] == 'sum':
                eltwise_node = Add(graph, attrs={'name': 'Eltwise_' + node.name}).create_node()
            else:
                raise Error('Error on replacing Kaldi eltwise: unknown type ' + node['operation'])
        elif ss_node.num_splits > 2:
            eltwise_node = EltwiseN(graph, attrs={'name': 'Eltwise_' + node.name,
                                                  'operation': node['operation']}).create_node()
        else:
            raise Error('Error on replacing Kaldi eltwise')
        for i in range(ss_node.num_splits):
            ss_node.out_port(i).get_connection().set_destination(eltwise_node.in_port(i))
        return [eltwise_node.id]
Example #19
def strided_slice(op_node: Node, port_info: str, input_port: int):
    """
    StridedSLice must be permuted even if input or output tensors have rank lesser than 4
    e.g. input_shape = (1, 10, 10), out = input[:, 0:10, :, new_axis], input_rank < 4
    input_shape = (1, 10, 10, 3), out = input[:, 0:5, 0:4, 0], output_rank < 4
    in both examples slice_rank is >= 4
    slice_rank is defined by length of begin, end, strides (they all are of the same length)
    """
    permutation_data_node = get_node_with_permutation(op_node, port_info)
    assert permutation_data_node.has_and_set('permutation'), 'Data node "{}" does not have permutation for node {}, ' \
                                                             'port_info "{}".'.format(permutation_data_node.id,
                                                                                      op_node.id, port_info)
    permute_indices_for_gather = permutation_data_node.permutation.perm
    if len(permute_indices_for_gather) == 0:
        return
    from openvino.tools.mo.ops.op import PermuteAttrs

    slice_rank = op_node.in_port(input_port).data.get_shape()[0]  # length of begin, end or strides
    permute_indices_for_gather = PermuteAttrs.get_nhwc_to_nchw_permutation(
        slice_rank).perm
    reorder_inputs_for_shape_or_slice(op_node, input_port,
                                      permute_indices_for_gather)
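
The NHWC-to-NCHW permutation used above keeps batch first and moves the channel (last) axis to position 1; a plain-Python sketch of that index order (an assumption about the PermuteAttrs convention, not a verbatim reimplementation):

# Sketch: NHWC -> NCHW style permutation for an arbitrary slice_rank
def nhwc_to_nchw_perm(rank):
    return [0, rank - 1] + list(range(1, rank - 1))

assert nhwc_to_nchw_perm(4) == [0, 3, 1, 2]
assert nhwc_to_nchw_perm(5) == [0, 4, 1, 2, 3]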
Example #20
    def test_concat_infer_no_shape(self):
        graph = build_graph(
            nodes_attributes, [('node_1', 'concat'), ('node_2', 'concat'),
                               ('concat', 'node_3'), ('node_3', 'op_output')],
            {
                'node_3': {
                    'shape': None
                },
                'node_1': {
                    'shape': np.array([1, 3, 227, 227])
                },
                'node_2': {
                    'shape': None
                },
                'concat': {
                    'axis': 2
                }
            })

        concat_node = Node(graph, 'concat')
        with self.assertRaisesRegex(
                Error, "One of the input shapes is not defined for node *"):
            concat_infer(concat_node)
Example #21
    def replace_op(self, graph: Graph, node: Node):
        name = node.soft_get('name', node.id)

        # create range of axes for MVN based on `start_axis` and rank of input
        rank = Rank(graph, {'name': name + '/Rank'}).create_node()
        rng = create_op_with_const_inputs(graph, Range, {
            0: int64_array(2),
            2: int64_array(1)
        }, {
            'name': name + '/Range',
            'output_type': np.int64
        })
        mvn = MVN(
            graph, {
                'eps': node.epsilon,
                'eps_mode': 'inside_sqrt',
                'normalize_variance': 1,
                'name': name + '/Ins_Norm/MVN_',
            }).create_node()
        node.in_port(0).get_connection().set_destination(mvn.in_port(0))
        rng.out_port(0).connect(mvn.in_port(1))
        mul = Mul(graph, {
            'axis': 1,
            'name': name + '/Ins_Norm/mul_'
        }).create_node()
        mvn.out_port(0).connect(mul.in_port(0))
        node.in_port(1).get_connection().set_destination(mul.in_port(1))
        add = Add(graph, {
            'axis': 1,
            'name': name + '/Ins_Norm/add_'
        }).create_node()
        mul.out_port(0).connect(add.in_port(0))
        node.in_port(2).get_connection().set_destination(add.in_port(1))

        mvn.in_port(0).get_connection().add_destination(rank.in_port(0))
        rng.in_port(1).connect(rank.out_port(0))

        rename_nodes([(node, name + '/TBD'), (add, name)])

        return [add.id]
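
Numerically, the subgraph built above computes scale * MVN(x) + bias over all non-batch, non-channel axes; a NumPy reference sketch (the helper name and eps value are illustrative):

import numpy as np

def instance_norm_reference(x, scale, bias, eps=1e-9):
    # hypothetical reference: normalize over axes [2, rank), matching the
    # Range(2, rank, 1) fed into MVN with eps_mode='inside_sqrt' above
    axes = tuple(range(2, x.ndim))
    mean = x.mean(axis=axes, keepdims=True)
    var = x.var(axis=axes, keepdims=True)
    normed = (x - mean) / np.sqrt(var + eps)
    bshape = [1, -1] + [1] * (x.ndim - 2)  # broadcast scale/bias over N and spatial dims
    return normed * scale.reshape(bshape) + bias.reshape(bshape)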
Example #22
def determined_sort(outputs: list):
    op_order = []
    data_order = []
    stack = list(outputs)
    visited = set()
    while len(stack) != 0:
        node = stack.pop(0)
        node_id = node.id
        visited.add(node_id)
        has_child = False
        in_names = [n for n, d in node.get_inputs()]
        for in_node_name in in_names:
            if in_node_name not in visited:
                stack.insert(0, node)
                stack.insert(0, Node(node.graph, in_node_name))
                has_child = True
                break
        if not has_child:
            if node.kind == 'op':
                op_order.append(node_id)
            if node.kind == 'data':
                data_order.append(node_id)
    return op_order, data_order
Example #23
    def test_proposal_infer_two_outputs(self):
        graph = build_graph(
            nodes_attributes, [('proposal_input', 'proposal'),
                               ('proposal', 'proposal_out_data_1'),
                               ('proposal', 'proposal_out_data_2'),
                               ('proposal_out_data_1', 'op_output'),
                               ('proposal_out_data_2', 'op_output')], {
                                   'proposal_input': {
                                       'shape': int64_array([1, 3, 227, 227])
                                   },
                                   'proposal': {
                                       'post_nms_topn': 2,
                                       **layout_attrs()
                                   }
                               })

        proposal_node = Node(graph, 'proposal')
        ProposalOp.proposal_infer(proposal_node)

        self.assertListEqual(list([1 * 2, 5]),
                             list(graph.node['proposal_out_data_1']['shape']))
        self.assertListEqual(list([1 * 2]),
                             list(graph.node['proposal_out_data_2']['shape']))
Example #24
    def test_backward_bfs_for_op_no_ops_detected(self):
        nodes = {
            **regular_op('input', {'op': 'Parameter'}),
            **regular_op('hsigmoid', {'op': 'HSigmoid'}),
            **result('result'),
        }
        edges = [
            ('input', 'hsigmoid', {
                'out': 0,
                'in': 0
            }),
            ('hsigmoid', 'result', {
                'out': 0,
                'in': 0
            }),
        ]

        graph = build_graph_with_edge_attrs(nodes, edges)
        graph.stage = 'front'

        found_nodes = backward_bfs_for_operation(Node(graph, 'result'),
                                                 ['NonExistingOp'])
        self.assertEqual(len(found_nodes), 0)
Example #25
    def test_onnx_resize11_using_sizes(self, input_shape, output_shape, sizes,
                                       scales):
        np_scales = np.array(scales)
        np_sizes = int64_array(sizes)
        graph = build_graph(nodes_attrs=graph_node_attrs_sizes,
                            edges=graph_edges_sizes,
                            update_attributes={
                                'input_data': {
                                    'shape': int64_array(input_shape)
                                },
                                'scales': {
                                    'shape': int64_array(np_scales.shape),
                                    'value': np_scales
                                },
                                'scales_data': {
                                    'shape': int64_array(np_scales.shape),
                                    'value': np_scales
                                },
                                'sizes': {
                                    'shape': int64_array(np_sizes.shape),
                                    'value': np_sizes
                                },
                                'sizes_data': {
                                    'shape': int64_array(np_sizes.shape),
                                    'value': np_sizes
                                },
                            })
        node = Node(graph, 'onnx_resize11')
        ONNXResize11Op.onnx_resize_infer(node)

        msg = "ONNXResize11 infer failed for case: sizes={}, scales={}, expected_shape={}, actual_shape={}"

        self.assertTrue(
            np.array_equal(graph.node['onnx_resize11_data']['shape'],
                           int64_array(output_shape)),
            msg.format(sizes, scales, output_shape,
                       graph.node['onnx_resize11_data']['shape']))
Example #26
def insert_ExperimentalDetectronROIFeatureExtractor1(graph: Graph, replacement_descriptions: dict):
    if 'ROIFeatureExtractor1_output' not in replacement_descriptions:
        # In case of Faster-RCNN this transformation is not needed and this attribute shouldn't be set
        return
    input_fpn_heads = replacement_descriptions['input_fpn_heads']
    old_output_node = Node(graph, replacement_descriptions['ROIFeatureExtractor1_output'])
    input_fpn_head_nodes = [Node(graph, node_id) for node_id in input_fpn_heads]
    fpn_roi_align = ExperimentalDetectronROIFeatureExtractor(graph, {'name': 'ROIFeatureExtractor_1',
                                                                     'output_size': 14,
                                                                     'pyramid_scales': int64_array(
                                                                         [4, 8, 16, 32, 64]),
                                                                     'sampling_ratio': 2,
                                                                     'in_ports_count': 5}).create_node()
    fpn_roi_align.in_port(0).connect(Node(graph, 'DetectionOutput').out_port(0))
    for ind, fpn_node in enumerate(input_fpn_head_nodes):
        fpn_roi_align.in_port(ind + 1).connect(fpn_node.out_port(0))

    old_output_node.out_port(0).get_connection().set_source(fpn_roi_align.out_port(0))
Example #27
    def infer(node: Node):
        node_name = node.soft_get('name', node.id)
        connected_in_ports = [
            port for port in node.in_ports().values()
            if not port.disconnected()
        ]
        assert len(connected_in_ports) == 3, \
            "Incorrect number of inputs for {} node".format(node_name)

        # check shapes of input tensors
        keys_shape = node.in_port(1).data.get_shape()
        values_shape = node.in_port(2).data.get_shape()
        assert np.array_equal(keys_shape, values_shape), \
            'Shapes of tensors with keys and values must be equal for {} node'.format(node_name)

        # set output shape that must be empty
        # since output is not a tensor
        node.out_port(0).data.set_shape([])
Example #28
def infer_for_opset1(node: Node):
    assert len([p for p in node.in_ports().values() if not p.disconnected()]) == 2
    assert node.has_valid('mode')
    assert node.has_valid('axes')

    src_shape = node.in_port(0).data.get_shape()

    assert src_shape is not None
    dst_shape = node.in_port(1).data.get_value()
    assert dst_shape is not None

    output_shape = src_shape.copy()
    for ind, axis in enumerate(node.axes):
        output_shape[axis] = dst_shape[ind]

    node.out_port(0).data.set_shape(output_shape)

    PermuteAttrs.create_permute_attrs(node, attrs=[('axes', 'input:0')])
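
A worked instance of the shape update above; a plain sketch with illustrative values:

# Sketch: with src_shape = [1, 3, 20, 20], axes = [2, 3] and target sizes [40, 40],
# only the listed axes are overwritten
src_shape, axes, dst_shape = [1, 3, 20, 20], [2, 3], [40, 40]
output_shape = list(src_shape)
for ind, axis in enumerate(axes):
    output_shape[axis] = dst_shape[ind]
assert output_shape == [1, 3, 40, 40]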
Example #29
    def test_split_shape_infer(self):
        #  test configuration
        input_shape = [2, 10]
        input_value = None
        axis = 1
        num_splits = 2
        output_shape = [2, 5]
        output_value = [None, None]

        # action
        graph = build_graph(self.nodes, self.edges,
                            {
                                'split_input_data': {'shape': int64_array(input_shape),
                                                     'value': input_value},
                                'split_op': {'axis': np.array(axis), 'num_splits': np.array(num_splits)},
                            }
                            )

        split_op = Node(graph, 'split_op')
        AttributedSplit.infer(split_op)

        # reference
        graph_ref = build_graph(self.nodes, self.edges,
                                {
                                    'split_input_data': {'shape': int64_array(input_shape),
                                                         'value': input_value},
                                    'split_op': {'axis': np.array(axis), 'num_splits': np.array(num_splits)},
                                    'split_output_0_data': {'shape': int64_array(output_shape),
                                                            'value': output_value[0]},
                                    'split_output_1_data': {'shape': int64_array(output_shape),
                                                            'value': output_value[1]},
                                }
                                )

        # check
        (flag, resp) = compare_graphs(graph, graph_ref, 'split_input_data')
        self.assertTrue(flag, resp)
Example #30
    def test_split_value_infer(self):
        #  test configuration
        input_shape = [2, 10]
        input_value = [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [10, 11, 12, 13, 14, 15, 16, 17, 18, 19]]
        axis = 1
        num_splits = 2
        output_shape = [2, 5]
        output_value = [[[0, 1, 2, 3, 4], [10, 11, 12, 13, 14]], [[5, 6, 7, 8, 9], [15, 16, 17, 18, 19]]]

        # action
        graph = build_graph(self.nodes, self.edges,
                            {
                                'split_input_data': {'shape': int64_array(input_shape),
                                                     'value': int64_array(input_value)},
                                'split_op': {'axis': np.array(axis), 'num_splits': np.array(num_splits)},
                            }
                            )

        split_op = Node(graph, 'split_op')
        AttributedSplit.infer(split_op)

        # reference
        graph_ref = build_graph(self.nodes, self.edges,
                                {
                                    'split_input_data': {'shape': int64_array(input_shape),
                                                         'value': int64_array(input_value)},
                                    'split_op': {'axis': np.array(axis), 'num_splits': np.array(num_splits)},
                                    'split_output_0_data': {'shape': int64_array(output_shape),
                                                            'value': int64_array(output_value[0])},
                                    'split_output_1_data': {'shape': int64_array(output_shape),
                                                            'value': int64_array(output_value[1])},
                                }
                                )

        # check
        (flag, resp) = compare_graphs(graph, graph_ref, 'split_input_data')
        self.assertTrue(flag, resp)
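
The reference values in this test match a plain np.split along the axis; a short sketch for cross-checking:

import numpy as np

# Sketch: np.split reproduces the reference output values used above
data = np.arange(20).reshape(2, 10)
halves = np.split(data, 2, axis=1)
assert halves[0].tolist() == [[0, 1, 2, 3, 4], [10, 11, 12, 13, 14]]
assert halves[1].tolist() == [[5, 6, 7, 8, 9], [15, 16, 17, 18, 19]]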