Code example #1
File: pooling_test.py  Project: pavel-esir/openvino
    def test_pooling_dynamic_infer(self):
        graph = build_graph(nodes_attributes,
                            [('node_1', 'pool'),
                             ('pool', 'node_2'),
                             ('node_2', 'op_output')
                             ],
                            {'node_2': {'shape': None},
                             'node_1': {'shape': shape_array([1, dynamic_dimension_value, dynamic_dimension_value,
                                                              256])},
                             'pool': {'window': np.array([1, 1, 1, 1]), 'stride': np.array([1, 1, 2, 2]),
                                      'pad': np.array([[0, 0], [0, 0], [3, 3], [3, 3]]),
                                      'pad_spatial_shape': np.array([[3, 3], [3, 3]]),
                                      'pool_method': 'avg', 'exclude_pad': False, 'global_pool': False,
                                      'output_spatial_shape': None, 'output_shape': None,
                                      'kernel_spatial': np.array([3, 3]), 'spatial_dims': np.array([2, 3]),
                                      'channel_dims': np.array([1]), 'batch_dims': np.array([0]),
                                      'pooling_convention': 'full'}
                             })

        pool_node = Node(graph, 'pool')

        Pooling.infer(pool_node)
        exp_shape = shape_array([1, dynamic_dimension_value, dynamic_dimension_value, 131])
        res_shape = graph.node['node_2']['shape']
        self.assertTrue(strict_compare_tensors(exp_shape, res_shape))
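
Note: the static 131 in the expected shape follows the MXNet-style 'full' pooling convention, which rounds the spatial quotient up. A minimal sketch of the arithmetic (not the exact MO code path):

import math

in_dim, pad_begin, pad_end, kernel, stride = 256, 3, 3, 3, 2  # axis 3 of the fixture above
out_dim = math.ceil((in_dim + pad_begin + pad_end - kernel) / stride) + 1  # 'full' rounds up
print(out_dim)  # 131; the two dynamic spatial dimensions stay dynamic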
Code example #2
def resolve_convolution_with_group(node: Node, group: int, ir_version: str):
    input_shape = node.in_port(0).data.get_shape()
    assert len(input_shape) in [3, 4, 5]

    weights_shape = node.in_port(1).data.get_shape()
    assert weights_shape is not None
    assert len(weights_shape) in [3, 4, 5]
    assert weights_shape[0] % group == 0

    if ir_version == 'V7':
        if weights_shape[0] == node.output:
            # weights are already in [G*O I X Y] format
            return
        new_shape = shape_array([node.output, -1, *weights_shape[2:]])
    elif ir_version == 'V10':
        # TODO rewrite this transformation to generate a shape-computing sub-graph. Ticket 62076
        I = input_shape[1]
        new_shape = shape_array(
            [group, node.output // group, I // group, *weights_shape[2:]])
        assert is_fully_defined(weights_shape[2:]) and is_fully_defined(I) and \
               np.prod(weights_shape) == np.prod(new_shape), 'Initial weights shape {}, grouped weights shape {}' \
                                                             ''.format(weights_shape, new_shape)
        del node['group']
        node['type'] = 'GroupConvolution'
    else:
        raise Error("Unknown IR version: {}".format(ir_version))

    reshape = create_op_node_with_second_input(node.graph, Reshape,
                                               int64_array(new_shape),
                                               {'override_output_shape': True})

    node.in_port(1).get_connection().insert_node(reshape)
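
To make the V10 branch concrete, here is a small numpy sketch with hypothetical numbers (group=2, node.output=4, I=6; none of these come from the source):

import numpy as np

group, out_channels, in_channels = 2, 4, 6                       # hypothetical values
weights = np.zeros((out_channels, in_channels // group, 3, 3))   # [O, I/G, kH, kW]
new_shape = (group, out_channels // group, in_channels // group, *weights.shape[2:])
assert np.prod(weights.shape) == np.prod(new_shape)              # same element count
print(weights.reshape(new_shape).shape)                          # (2, 2, 3, 3, 3)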
Code example #3
 def test_reshape_infer(self, input_value, input_shape, output_shape,
                        ref_value, ref_shape):
     graph = build_graph(
         nodes_attributes, [('input', 'data'), ('data', 'reshape'),
                            ('output_shape', 'output_shape_data'),
                            ('output_shape_data', 'reshape'),
                            ('reshape', 'reshape_out')], {
                                'data': {
                                    'shape': input_shape,
                                    'value': input_value
                                },
                                'output_shape': {
                                    'value': output_shape,
                                    'shape': output_shape.shape
                                },
                                'output_shape_data': {
                                    'value': output_shape,
                                    'shape': output_shape.shape
                                },
                            })
     node = Node(graph, 'reshape')
     Reshape.infer(node)
     if ref_value is not None:
         self.assertTrue(
             strict_compare_tensors(
                 node.out_port(0).data.get_value(), shape_array(ref_value)))
     self.assertTrue(
         strict_compare_tensors(
             node.out_port(0).data.get_shape(), shape_array(ref_shape)))
Code example #4
    def array_infer(node: Node):
        size = node.in_node(0)
        assert size.value is not None

        # 0 port: handle
        if 0 in node.out_nodes().keys():
            if node.has_valid('element_shape'):
                element_shape = node['element_shape']
            else:
                element_shape = None

            out_node = node.out_node(0).id
            output_value = node.out_node(0).id
            node.graph.node[out_node]['value'] = np.array(output_value)

            output_shape = node.graph.node[out_node]['value'].shape
            node.graph.node[out_node]['shape'] = shape_array(output_shape)

            node.graph.node[out_node]['element_shape'] = shape_array(
                element_shape)
            node.graph.node[out_node]['size'] = size.value
        # 1 port: flow
        if 1 in node.out_nodes().keys():
            output_value = None

            out_node = node.out_node(1).id
            node.graph.node[out_node]['value'] = None if output_value is None else np.array(output_value)
            # note: `output_shape` comes from the port 0 branch above; this assumes
            # the handle output (port 0) is always present and processed first
            node.graph.node[out_node]['shape'] = shape_array(output_shape)
Code example #5
File: regionyolo_test.py  Project: yding10/openvino
 def test_region_infer_dynamic_flatten(self):
     graph = build_graph(
         nodes_attributes, [('node_1', 'region'), ('region', 'node_3'),
                            ('node_3', 'op_output')],
         {
             'node_3': {
                 'shape': None,
                 'value': None
             },
             'node_1': {
                 'shape': shape_array(
                     [1, dynamic_dimension_value, 227, 227])
             },
             'region': {
                 'end_axis': 1,
                 'axis': 0,
                 'do_softmax': 1,
                 **layout_attrs()
             }
         })
     graph.graph['layout'] = 'NCHW'
     reorg_node = Node(graph, 'region')
     RegionYoloOp.regionyolo_infer(reorg_node)
     exp_shape = shape_array([dynamic_dimension_value, 227, 227])
     res_shape = graph.node['node_3']['shape']
     self.assertTrue(strict_compare_tensors(exp_shape, res_shape))
Code example #6
File: einsum.py  Project: yding10/openvino
    def infer(node: Node):
        node_name = node.soft_get('name', node.id)
        connected_in_ports = [port for port in node.in_ports().values() if not port.disconnected()]
        num_inputs = len(connected_in_ports)
        assert node.has_valid('equation'), "Einsum node {} must contain `equation` attribute".format(node_name)
        equation = node.equation

        # parse the equation and extract input and output subscripts
        input_subscripts, output_subscript = Einsum.parse_equation(node_name, equation)

        # check that each operand has the corresponding input subscript
        assert len(input_subscripts) == num_inputs, "The number of input operands of Einsum node {} " \
                                                    "must match the number of input subscripts " \
                                                    "in `equation`".format(node_name)

        # check compatibility of dimension sizes with the same label and generate a dictionary of shapes for labels
        label_to_shape = {}
        for input_ind in range(num_inputs):
            input_shape = node.in_port(input_ind).data.get_shape()
            input_subscript = input_subscripts[input_ind]
            labels = Einsum.extract_subscript_labels(node_name, input_subscript)
            num_dims = len(input_shape)
            num_labels = len(labels)
            num_broadcasted_dims = num_dims - num_labels + 1
            dim_ind = 0
            label_ind = 0
            while label_ind < num_labels and dim_ind < num_dims:
                label = labels[label_ind]
                if label == "...":
                    sub_shape = input_shape[dim_ind:dim_ind + num_broadcasted_dims]
                    if label in label_to_shape.keys():
                        common_shape = bi_directional_shape_broadcasting(sub_shape, label_to_shape[label])
                        assert common_shape is not None, "The dimensions labeled by ellipsis must be broadcastable " \
                                                         "for Einsum node {}".format(node_name)
                        label_to_shape[label] = common_shape
                    else:
                        label_to_shape[label] = sub_shape
                    dim_ind += num_broadcasted_dims
                else:
                    dim_size = input_shape[dim_ind]
                    sub_shape = shape_array([dim_size])
                    assert label not in label_to_shape.keys() or np.array_equal(label_to_shape[label], sub_shape), \
                        "Sizes of dimensions with the same label of Einsum node {} " \
                        "must be compatible".format(node_name)
                    label_to_shape[label] = sub_shape
                    dim_ind += 1
                label_ind += 1

        # generate output shape based on the output subscript
        output_shape = shape_array([])
        labels = Einsum.extract_subscript_labels(node_name, output_subscript)
        for label in labels:
            assert label in label_to_shape.keys(), "The label in the output subscript must appear" \
                                                   " in input subscripts in equation {} " \
                                                   "of Einsum node {}".format(equation, node_name)
            output_shape = np.ma.concatenate((output_shape, label_to_shape[label]))

        node.out_port(0).data.set_shape(output_shape)
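
The label bookkeeping above can be re-enacted in a few lines of plain Python for a simple equation such as 'ij,jk->ik' (a sketch, not the MO implementation; broadcasting and ellipsis handling are omitted):

shapes = [(2, 3), (3, 5)]
subscripts = ['ij', 'jk']
label_to_dim = {}
for shape, sub in zip(shapes, subscripts):
    for label, dim in zip(sub, shape):
        # dimensions sharing a label must agree, mirroring the assert in the loop above
        assert label_to_dim.setdefault(label, dim) == dim
output_shape = tuple(label_to_dim[label] for label in 'ik')
print(output_shape)  # (2, 5), the same as np.einsum('ij,jk->ik', a, b).shape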
Code example #7
File: lstm_sequence.py  Project: yding10/openvino
    def infer(node: Node):
        # there are limitations coming from ONNX LSTM definition and normalization rules
        assert len(node.in_nodes()) >= 3  # X, W and R
        assert len(node.in_nodes()) <= 7
        assert len(node.out_nodes()) <= 3
        assert node.batch_dim <= 1
        assert node.sequence_dim <= 1
        assert node.batch_dim != node.sequence_dim

        assert node.direction in ['forward', 'reverse', 'bidirectional']

        if node.blobs_wrb:
            mark_input_bins(node, ['W', 'R', 'B'])
        else:
            mark_input_bins(node)
        input_shape = node.in_node(0).shape
        assert len(input_shape) == 3

        for port in [2, 3]:
            if port in node.in_nodes() and len(node.in_node(port).in_nodes()) > 0 and \
               'zero_shapes' in node.in_node(port).in_node():
                for i in node.in_node(port).in_node().zero_shapes:
                    if node.in_node(port).shape[i] != input_shape[i]:
                        node.in_node(port).value = np.repeat(
                            node.in_node(port).value, input_shape[i], axis=i)
                        node.in_node(port).shape[i] = input_shape[i]

        out_shape = shape_array([
            input_shape[node.sequence_dim], input_shape[node.batch_dim],
            node.hidden_size
        ])
        assert not node.has_num_directions or node.sequence_dim == 0, \
            'If has_num_directions == True, then node.sequence_dim must be 0, but it is {}'.format(
                node.sequence_dim)
        num_directions = 2 if node.direction in ['bidirectional'] else 1
        num_layers = node.num_layers
        if node.has_num_directions:
            # insert extra dimension to output shape for num_directions
            out_shape = shape_insert(out_shape, 1, np.int64(num_directions))
        node.out_node(0).shape = out_shape
        # extra outputs for hidden/cell states
        state_size = shape_array([input_shape[1], node.hidden_size])
        if node.has_num_directions:
            state_size = shape_insert(state_size, 0,
                                      num_directions * num_layers)
        for i in [1, 2]:
            if i not in node.out_nodes():
                data_node = Op._create_data_node(node.graph,
                                                 name=node.node +
                                                 '/ExtraOutput/' + str(i),
                                                 attrs={'executable': True})
                node.graph.add_edge(node.id, data_node.id, key=0, out=i)
                add_opoutput(node.graph, data_node.id, 0, False)
            else:
                data_node = node.out_node(i)
            data_node.shape = state_size.copy()
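
A quick shape walk-through with hypothetical numbers (seq_len=10, batch=4, hidden_size=128, bidirectional, one layer), following the out_shape/state_size logic above:

seq_len, batch, hidden = 10, 4, 128
num_directions, num_layers = 2, 1          # 'bidirectional'
out_shape = [seq_len, batch, hidden]
out_shape.insert(1, num_directions)        # has_num_directions inserts axis 1
state_size = [num_directions * num_layers, batch, hidden]
print(out_shape, state_size)               # [10, 2, 4, 128] [2, 4, 128]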
Code example #8
 def test_tf_space_to_depth_infer_nchw_dynamic(self):
     graph = build_graph(nodes, edges)
     graph.graph['layout'] = 'NCHW'
     graph.node['in_data_node']['shape'] = shape_array(
         [1, 64, dynamic_dimension_value, 1152])
     std_node = Node(graph, 'StD')
     SpaceToDepth.infer(std_node)
     exp_shape = shape_array([1, 256, dynamic_dimension_value, 576])
     res_shape = graph.node['out_data_node']['shape']
     self.assertTrue(strict_compare_tensors(exp_shape, res_shape))
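
The expected numbers are consistent with block_size=2, which is defined in the nodes/edges fixtures not shown here: SpaceToDepth multiplies channels by block_size**2 and divides each spatial dimension by block_size:

block_size = 2                 # implied by the fixture, an assumption here
channels, width = 64, 1152
print(channels * block_size ** 2, width // block_size)  # 256 576; the dynamic height stays dynamic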
Code example #9
    def shape_alignment(node: Node):
        """
        Specification of MatMul operation allows inputs to be aligned together before matrix multiplication.
        Current method raises an error if input shapes are not valid at any step of alignment process
        :return: aligned copies of both input shapes
        """
        node_name = node.soft_get('name', str(node.id))
        input_shapes = [node.in_port(i).data.get_shape() for i in range(2)]
        transpose_a = node.has_and_set('transpose_a')
        transpose_b = node.has_and_set('transpose_b')

        transformed_shapes = []
        for i, shape in enumerate(input_shapes):
            input_shape = shape.copy()
            # prerequisites check
            assert input_shape is not None, "MatMul has shape=`None` for {} input of `{}` node".format(
                i, node_name)
            assert input_shape.ndim == 1, "MatMul doesn't support scalar inputs. {} input of `{}` node has shape {}" \
                                          "".format(i, node_name, input_shape)
            assert input_shape.size >= 1, "MatMul doesn't support inputs with rank lower than 1. {} input of `{}` " \
                                          "node has shape {}".format(i, node_name, input_shape)
            rank = input_shape.size
            # shape alignment
            if rank != 1 and ((i == 0 and transpose_a) or (i == 1 and transpose_b)):
                input_shape[-2], input_shape[-1] = input_shape[-1], input_shape[-2]
            if rank == 1:
                input_shape = shape_insert(input_shape, int(i == 1), 1)

            max_shape_length = max(input_shapes[0].size, input_shapes[1].size)
            input_shape = shape_insert(input_shape, 0, [1] * (max_shape_length - input_shape.size))
            transformed_shapes.append(input_shape)

        A_shape = shape_array(transformed_shapes[0])
        B_shape = shape_array(transformed_shapes[1])

        assert A_shape.size == B_shape.size, \
            "Shapes were not aligned by length for MatMul `{}`. Shapes: `{}`".format(node_name, transformed_shapes)

        # batch broadcasting
        batch_len = A_shape.size - 2
        for i in range(batch_len):
            if A_shape[i] != B_shape[i]:
                if A_shape[i] == 1:
                    A_shape[i] = B_shape[i]
                if B_shape[i] == 1:
                    B_shape[i] = A_shape[i]

        assert compatible_shapes(A_shape[:-2], B_shape[:-2]), \
            "MatMul input shapes are incorrect. BATCH_DIMs are not equal. Node: {}. Aligned shapes: {}" \
            "".format(node_name, transformed_shapes)

        return A_shape, B_shape
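
As a worked example with hypothetical shapes: A of shape [5, 3, 2] with transpose_a=True and a 1-D B of shape [3]. Transposition gives [5, 2, 3]; B becomes [3, 1] and is padded to [1, 3, 1]; batch broadcasting yields [5, 3, 1]. Plain numpy confirms the resulting matmul shape:

import numpy as np

A = np.zeros((5, 3, 2))                    # aligned to [5, 2, 3] after the transpose
B = np.zeros((3,))                         # aligned to [1, 3, 1], then broadcast to [5, 3, 1]
out = np.matmul(A.transpose(0, 2, 1), B.reshape(3, 1))
print(out.shape)                           # (5, 2, 1)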
Code example #10
    def test_expand_dims_infer(self, axis, ref_out_shape):
        graph = build_graph(nodes_attributes,
                            [('data_1', 'expand_dims'),
                             ('expand_dims', 'data_2')],
                            {'expand_dims': {'expand_axis': axis}})
        Node(graph, 'data_1').shape = shape_array([2, 3, dynamic_dimension_value, 224])
        expand_dims_node = Node(graph, 'expand_dims')

        ExpandDims.infer(expand_dims_node)

        self.assertTrue(strict_compare_tensors(expand_dims_node.out_node().shape, shape_array(ref_out_shape)))
Code example #11
 def test_select_infer_no_broadcast_dynamic_shapes(self):
     flag, msg = self.build_select_graph_and_infer(
         condition_value=None,
         condition_shape=shape_array([100, 100]),
         then_value=None,
         then_shape=shape_array([100, dynamic_dimension_value]),
         else_value=None,
         else_shape=shape_array([dynamic_dimension_value, 100]),
         out_value=None,
         out_shape=shape_array([100, 100]),
         auto_broadcast='none')
     self.assertTrue(flag, msg)
Code example #12
File: split_test.py  Project: yding10/openvino
    def test_split_dynamic_shape_infer(self):
        #  test configuration
        input_shape = [2, dynamic_dimension_value]
        input_value = None
        axis = 1
        num_splits = 2
        output_shape = [2, dynamic_dimension_value]
        output_value = [None, None]

        # action
        graph = build_graph(
            self.nodes, self.edges, {
                'split_input_data': {
                    'shape': shape_array(input_shape),
                    'value': input_value
                },
                'split_op': {
                    'axis': np.array(axis),
                    'num_splits': np.array(num_splits)
                },
            })

        split_op = Node(graph, 'split_op')
        AttributedSplit.infer(split_op)

        # reference
        graph_ref = build_graph(
            self.nodes, self.edges, {
                'split_input_data': {
                    'shape': shape_array(input_shape),
                    'value': input_value
                },
                'split_op': {
                    'axis': np.array(axis),
                    'num_splits': np.array(num_splits)
                },
                'split_output_0_data': {
                    'shape': shape_array(output_shape),
                    'value': output_value[0]
                },
                'split_output_1_data': {
                    'shape': shape_array(output_shape),
                    'value': output_value[1]
                },
            })

        # check
        (flag, resp) = compare_graphs(graph, graph_ref, 'split_input_data')
        self.assertTrue(flag, resp)
        self.assertTrue(
            strict_compare_tensors(
                Node(graph, 'split_output_0_data').shape,
                shape_array(output_shape)))
Code example #13
    def test_do_infer_without_top_k_dynamic_shape(self):
        graph = build_graph(
            nodes_attributes, [('node_1', 'detection_output_1'),
                               ('node_2', 'detection_output_1'),
                               ('node_3', 'detection_output_1'),
                               ('detection_output_1', 'node_4')],
            {
                'node_1': {
                    'shape': np.array([1, 34928])
                },
                'node_2': {
                    'shape': shape_array([dynamic_dimension_value, 183372])
                },
                'node_3': {
                    'shape': np.array([1, 2, 34928])
                },
                'detection_output_1': {
                    "background_label_id": "0",
                    "clip": "1",
                    "code_type": "caffe.PriorBoxParameter.CENTER_SIZE",
                    "confidence_threshold": "0.01",
                    "keep_top_k": -1,
                    "nms_threshold": "0.5",
                    "num_classes": 21,
                    "share_location": "1",
                    "top_k": -1,
                    "variance_encoded_in_target": "0"
                },
                'node_4': {
                    'shape': np.array([1, 1, 69856, 7])
                },
            })

        multi_box_detection_node = Node(graph, 'detection_output_1')

        multi_box_detection_infer(multi_box_detection_node)
        exp_shape = shape_array([1, 1, dynamic_dimension_value, 7])
        res_shape = graph.node['node_4']['shape']
        self.assertTrue(strict_compare_tensors(exp_shape, res_shape))

        self.assertEqual(multi_box_detection_node.background_label_id, '0')
        self.assertEqual(multi_box_detection_node.clip, '1')
        self.assertEqual(multi_box_detection_node.code_type,
                         'caffe.PriorBoxParameter.CENTER_SIZE')
        self.assertEqual(multi_box_detection_node.confidence_threshold, '0.01')
        self.assertEqual(multi_box_detection_node.keep_top_k, 8732)
        self.assertEqual(multi_box_detection_node.nms_threshold, '0.5')
        self.assertEqual(multi_box_detection_node.num_classes, 21)
        self.assertEqual(multi_box_detection_node.share_location, '1')
        self.assertEqual(multi_box_detection_node.top_k, -1)
        self.assertEqual(multi_box_detection_node.variance_encoded_in_target,
                         '0')
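
The inferred keep_top_k of 8732 is the prior-box count, which both inputs of the fixture encode consistently (34928 location values over 4 box coordinates, and 183372 confidences over 21 classes):

loc_len, conf_len, num_classes = 34928, 183372, 21
print(loc_len // 4, conf_len // num_classes)  # 8732 8732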
Code example #14
File: convolution_test.py  Project: yding10/openvino
    def test_deconv_dynamic_infer_ideal(self):
        graph = build_graph(
            nodes_attributes,
            [('conv_input', 'conv_node'), ('conv_weights', 'conv_node'),
             ('conv_node', 'conv_output'), ('conv_output', 'op_output')],
            {
                'conv_output': {
                    'shape': None
                },
                'conv_input': {
                    'shape': shape_array([1, 21, dynamic_dimension_value, 16])
                },
                'conv_weights': {
                    'shape':
                    np.array([1, 21, 4, 4]),
                    'dim_attrs':
                    ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']
                },
                'conv_node':
                {  #'spatial_dims': np.array([2, 3]), 'batch_dims': np.array([0]),
                    'channel_dims': np.array([1]),
                    'bias_addable': True,
                    'bias_term': False,
                    'batch_dims': np.array([0]),
                    'pad_spatial_shape': np.array([[0, 0], [0, 0]]),
                    'kernel_spatial': np.array([4, 4]),
                    'output_spatial_shape': None,
                    'kernel_spatial_idx': np.array([2, 3]),
                    'input_feature_channel': 1,
                    'output_feature_channel': 0,
                    'output_padding': np.array([0, 0, 1, 1]),
                    'type': 'Deconvolution',
                    'output': 21,
                    'dilation': np.array([1, 1, 1, 1]),
                    'group': 1,
                    'stride': np.array([1, 1, 2, 2]),
                    'output_shape': None
                }
            })

        deconv_node = Node(graph, 'conv_node')

        Convolution.infer(deconv_node)
        res_shape = deconv_node['output_shape']
        exp_shape = shape_array([1, 21, dynamic_dimension_value, 35])

        self.assertTrue(strict_compare_tensors(exp_shape, res_shape))

        # Check that shape and pad attrs do not change after a second infer
        Convolution.infer(deconv_node)

        self.assertTrue(strict_compare_tensors(exp_shape, res_shape))
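
The static 35 follows the usual deconvolution shape relation (a sketch of the arithmetic for the last spatial axis, assuming out = stride * (in - 1) + kernel - pads + output_padding):

in_dim, stride, kernel, pad_sum, out_pad = 16, 2, 4, 0, 1  # axis 3 of the fixture
print(stride * (in_dim - 1) + kernel - pad_sum + out_pad)  # 35; the dynamic height stays dynamic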
Code example #15
 def test_select_infer_tf_condition(self):
     flag, msg = self.build_select_graph_and_infer(
         condition_value=None,
         condition_shape=shape_array([100]),
         then_value=None,
         then_shape=shape_array([100, 20]),
         else_value=None,
         else_shape=shape_array([100, 20]),
         out_value=None,
         out_shape=shape_array([100, 20]),
         auto_broadcast='numpy',
         fw_format='tf')
     self.assertTrue(flag, msg)
Code example #16
    def test_nms_infer_i64_opset5_2_outs(self):
        nms_node = Node(self.graph_nms_5_2_outs, 'nms')
        nms_node['version'] = 'opset5'
        nms_node['output_type'] = np.int64
        NonMaxSuppression.infer(nms_node)
        NonMaxSuppression.type_infer(nms_node)

        self.assertTrue(np.array_equal(nms_node.out_port(0).data.get_shape(),
                                       shape_array([dynamic_dimension_value, 3])))
        self.assertTrue(np.array_equal(nms_node.out_port(1).data.get_shape(),
                                       shape_array([dynamic_dimension_value, 3])))
        self.assertTrue(nms_node.out_port(0).get_data_type() == np.int64)
        self.assertTrue(nms_node.out_port(1).get_data_type() == np.float32)
Code example #17
    def permute_data_nodes_attrs(graph: Graph):
        # Iterate over all data nodes and apply permutation if exists
        for node in graph.get_data_nodes():
            if not node.has_valid('permutation') or \
                    all([attrs.get('input_permutation', False) for u, v, attrs in graph.out_edges(node.id, data=True)]):
                continue

            # there are data nodes without an input operation node inside the TensorIterator
            if len(node.in_nodes()) != 0:
                edge_attrs = graph.get_edge_data(node.in_node(0).id, node.id)[0]
                if is_output_data_in_correct_layout(node.in_node(0), edge_attrs['out']):
                    log.debug('Do not permute data node attrs for node "{}" output port "{}"'.format(
                        node.in_node(0).id, edge_attrs['out']))
                    continue

            # Apply permutation for shape and value if exists
            if len(node.permutation.perm) == 0:
                continue
            node.shape = shape_array(node.shape)[node.permutation.perm]
            if node.has_valid('value'):
                assert len(node.value.shape) == len(node.permutation.perm), \
                    'Node {} has shape {} and permutation {} that does not match. Their lengths should be equal' \
                    ''.format(node.name, node.value.shape, node.permutation.perm)
                node.value = np.array(node.value.transpose(node.permutation.perm))
Code example #18
File: loop.py  Project: yding10/openvino
    def updated_body_parameters_shape(loop_node: Node):
        """
        Update shape for Loop body parameters.
        The input shape of the "current_iteration" number input is handled separately because it is not connected with
        the Loop operation.

        :param loop_node: The Loop node
        :return: None
        """
        for record in loop_node.input_port_map:
            body_node = Loop.get_body_node_by_internal_id(
                loop_node, record['internal_layer_id'])
            # the Parameter may be removed because it was not used in the body, for example, the current iteration
            # number input
            if body_node is not None:
                assert body_node.soft_get('type') == 'Parameter'

                # the default is the current-iteration-number input shape (a scalar)
                input_shape = shape_array([])
                loop_port_idx = record['external_port_id']
                if loop_port_idx != -1:
                    input_shape = loop_node.in_port(loop_port_idx).get_connection().get_source().data.get_shape()
                slice_axis = record['axis']
                body_node.shape = input_shape.copy()
                if slice_axis is not None:
                    body_node.shape[slice_axis] = 1
                log.debug(
                    'Updated shape for the body node with internal_id "{}" with value {}'
                    ''.format(record['internal_layer_id'], body_node.shape))
Code example #19
    def cover_body_input_data_nodes_with_parameter_ops(ti: Node):
        body = ti.body

        op_port_map = []
        for record in ti.input_port_map:
            operation_node = get_internal_node_by_layer_id(
                ti, record['internal_layer_id'])
            real_in_port = TensorIterator.special_port_to_real_port(
                operation_node, copy(record['internal_port_id']))
            op_port_map.append((operation_node, real_in_port))

        for operation_node, in_port in op_port_map:
            data_node = operation_node.in_node(in_port)

            attrs = deepcopy(
                body.get_edge_data(data_node.id, operation_node.id)[0])
            body.remove_edge(data_node.id, operation_node.id)

            assert data_node.has_valid('shape'), \
                'Data node should have the `shape` attribute set, but it is not set for node {}'.format(data_node.id)
            shape = data_node['shape'].copy()
            parameter_data_node = Parameter(body, {
                'shape': shape_array(shape)
            }).create_node_with_data()

            body.create_edge(src_node=parameter_data_node,
                             dst_node=operation_node,
                             out_port=0,
                             in_port=in_port,
                             edge_attrs=attrs)
            del body.get_edge_data(parameter_data_node.id,
                                   operation_node.id)[0]['out']
Code example #20
def uni_directional_shape_broadcasting(input_shape: np.array,
                                       target_shape: np.array):
    """
    Uni-directional broadcasting of two shapes following the numpy semantic
    :param input_shape: input shape to broadcast
    :param target_shape: target shape
    :return: broadcasted shape or None if broadcasting cannot be performed
    """
    in_shape = input_shape.copy()

    # in uni-directional broadcasting the target shape rank can be higher than or equal to the input shape rank
    if len(input_shape) > len(target_shape):
        log.debug('The shape "{}" cannot be broadcasted to "{}"'.format(
            input_shape, target_shape))
        return None

    # prepend input shape with 1s
    in_shape, target_shape = make_equal_rank(in_shape, target_shape)
    result_shape = []
    for left, right in zip(in_shape, target_shape):
        if left != right and left != 1 and right is not dynamic_dimension:
            log.debug('The shape "{}" cannot be broadcasted to "{}"'.format(
                input_shape, target_shape))
            return None
        if right is dynamic_dimension and left is not dynamic_dimension and left != 1:
            result_shape.append(left)
        else:
            result_shape.append(right)
    return shape_array(result_shape)
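
Expected behaviour, assuming shape_array and dynamic_dimension_value are imported from MO's front/common/partial_infer utils:

uni_directional_shape_broadcasting(shape_array([3, 1]), shape_array([2, 3, 5]))
# -> [2 3 5]: the input is padded with leading 1s, then 1s stretch to the target
uni_directional_shape_broadcasting(shape_array([3]), shape_array([dynamic_dimension_value]))
# -> [3]: a static dimension is kept in place of a dynamic target dimension
uni_directional_shape_broadcasting(shape_array([4]), shape_array([3]))
# -> None: 4 cannot be broadcast to 3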
Code example #21
File: pad_test.py  Project: yding10/openvino
    def test_two_inputs_dynamic_value_infer(self):
        in_value = shape_array([dynamic_dimension_value, 3]).reshape(
            (1, 1, 1, 2))
        graph = build_graph(
            self.node_attrs,
            self.edge_attrs + [('pads_begin', 'pad'), ('pads_end', 'pad')],
            {'data_in': {
                'value': in_value,
                'shape': in_value.shape
            }},
            nodes_with_edges_only=True,
        )
        out_shape = (1, 1, 5, 8)
        mask = np.zeros(out_shape, dtype=bool)  # np.bool was removed from recent NumPy versions
        mask[0][0][1][2] = True
        ref_value = np.ma.masked_array(np.zeros(out_shape, dtype=np.int64),
                                       mask=mask,
                                       dtype=np.int64)
        ref_value[0][0][1][3] = 3

        pad_node = Node(graph, 'pad')
        Pad.infer(pad_node)
        output_value = Node(graph, 'data_out').value
        self.assertTrue(
            np.array_equal(Node(graph, 'data_out').shape, ref_value.shape))
        self.assertTrue(strict_compare_tensors(output_value, ref_value))
        self.assertTrue(isinstance(output_value, np.ma.masked_array))
        self.assertTrue(output_value[0][0][1][2] is dynamic_dimension)
Code example #22
def bi_directional_shape_broadcasting(input_shape_1: np.array,
                                      input_shape_2: np.array):
    """
    Bi-directional broadcasting of two shapes following numpy semantic
    :param input_shape_1: first shape to broadcast
    :param input_shape_2: second shape to broadcast
    :return: broadcasted shape or None if broadcasting cannot be performed
    """
    shape_1 = input_shape_1.copy()
    shape_2 = input_shape_2.copy()
    shape_1, shape_2 = make_equal_rank(shape_1, shape_2)
    result = list()

    for left, right in zip(shape_1, shape_2):
        if left != right and left != 1 and right != 1 and left is not dynamic_dimension and \
                right is not dynamic_dimension:
            log.debug('The shape "{}" cannot be broadcasted to "{}"'.format(
                input_shape_1, input_shape_2))
            return None
        if left is not dynamic_dimension and right is not dynamic_dimension:
            result.append(max(left, right))
        elif left is not dynamic_dimension and left != 1:
            result.append(left)
        elif right is not dynamic_dimension and right != 1:
            result.append(right)
        else:
            result.append(dynamic_dimension_value)

    return shape_array(result)
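
This is the resolution exercised by code example #11 above: each pair with one dynamic and one static dimension resolves to the static one (assuming the same MO shape helpers are imported):

bi_directional_shape_broadcasting(shape_array([100, dynamic_dimension_value]),
                                  shape_array([dynamic_dimension_value, 100]))
# -> [100 100]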
Code example #23
File: utils_test.py  Project: yding10/openvino
class IsFullyDefinedTest(unittest.TestCase):
    @generate(*[(None, False),
                (int64_array([2, 3, 5, 7]), True),  # int64 array with valid values
                (np.array([2, 3, 5, 7]), True),  # any numpy array with valid values
                (np.array([2, dynamic_dimension_value]), True),  # array with dynamic dimension value is fully defined!
                (shape_array([2, dynamic_dimension_value, 5]), False),  # masked array with at least one masked element
                (shape_array([2, 4, 5]), True),  # masked array with no masked elements is fully defined
                (dynamic_dimension, False),  # dynamic dimension is not fully defined
                (dynamic_dimension_value, True),  # dynamic dimension value is fully defined
                ((dynamic_dimension_value, dynamic_dimension_value), True),  # list with dynamic dimension values is
                # fully defined
                ((dynamic_dimension, 1), False),  # tuple with dynamic dimension is not fully defined
                ([dynamic_dimension, 1], False),  # list with dynamic dimension is not fully defined
                ])
    def test_is_fully_defined(self, data, result):
        self.assertEqual(is_fully_defined(data), result)
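
The asymmetry between the np.array and shape_array cases comes from numpy masked arrays: shape_array masks the dynamic sentinel, while a plain np.array stores it as an ordinary integer. A generic illustration (not the MO helpers themselves):

import numpy as np

masked = np.ma.masked_array([2, 0, 5], mask=[False, True, False])  # like shape_array([2, dynamic_dimension_value, 5])
print(masked.mask.any())                # True -> not fully defined
plain = np.array([2, 3, 5, 7])          # no mask at all
print(np.ma.getmaskarray(plain).any())  # False -> fully defined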
Code example #24
    def test_positive_matmul_infer(self, A_shape, B_shape, C_shape, transpose_a, transpose_b):
        graph = build_graph_with_attrs(nodes_with_attrs=self.nodes, edges_with_attrs=self.edges,
                                       update_nodes_attributes=[
                                           ('A_d', {'shape': shape_array(A_shape)}),
                                           ('B_d', {'shape': shape_array(B_shape)}),
                                           ('mat_mul', {'transpose_a': transpose_a, 'transpose_b': transpose_b}),
                                       ])
        node = Node(graph, 'mat_mul')
        MatMul.infer(node)

        msg = "MatMul infer failed for case: A_shape={}, B_shape={}, transpose_a={}, transpose_b={} " \
              "expected_shape={}, actual_shape={}"

        self.assertTrue(np.array_equal(graph.node['mat_mul_d']['shape'], shape_array(C_shape)),
                        msg.format(A_shape, B_shape, transpose_a, transpose_b, C_shape,
                                   graph.node['mat_mul_d']['shape']))
Code example #25
    def infer(node: Node):
        assert [port.idx for port in node.in_ports().values() if not port.disconnected()] == [0], \
            'Wrong input nodes number for node {} with type ExtractImagePatches'.format(node.soft_get('name', node.id))
        input_shape = node.in_port(0).data.get_shape()
        name = node.soft_get('name', node.id)
        assert input_shape is not None, 'Input shape is not set for node {} with type ExtractImagePatches'.format(
            name)

        assert len(input_shape) == 4, 'ExtractImagePatches operation supports only 4D tensors'

        layout = node.graph.graph['layout']
        N = input_shape[get_batch_dim(layout, 4)]
        C = input_shape[get_features_dim(layout, 4)]

        size_spatial = shape_array(node.sizes)[node.spatial_dims]

        input_spatial_shape = input_shape[node.spatial_dims]
        stride_spatial_shape = node.strides[node.spatial_dims]

        size_extent = node.rates[node.spatial_dims] * (size_spatial - 1) + 1

        pad_spatial_shape, output_spatial_shape = tf_window_op_pad_infer(
            input_spatial_shape, size_extent, stride_spatial_shape,
            node.auto_pad, False)

        out_shape = shape_for_layout(layout,
                                     batch=N,
                                     features=C * np.prod(size_spatial),
                                     height=output_spatial_shape[0],
                                     width=output_spatial_shape[1])

        node.out_port(0).data.set_shape(out_shape)
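
For instance (hypothetical numbers, VALID padding): a 10x10 spatial input with 8 channels, sizes [3, 3], strides [5, 5] and rates [1, 1] gives 2x2 patches of 72 features each:

import math

in_sp, k, stride, rate, channels = 10, 3, 5, 1, 8
extent = rate * (k - 1) + 1                        # dilated patch size
out_sp = math.ceil((in_sp - extent + 1) / stride)  # VALID padding
print(out_sp, channels * k * k)                    # 2 72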
Code example #26
    def merge_infer(node: Node):
        # we infer only through executable input nodes
        inferred_nodes = [
            n for n in node.in_nodes().values() if n['is_partial_inferred']
        ]
        assert len(inferred_nodes) != 0
        tensor = inferred_nodes[0]

        if len(inferred_nodes) < len(node.in_nodes()):
            node['is_not_fully_inferred'] = True
        else:
            node['is_not_fully_inferred'] = False
            # use the builtin all(): np.all() applied to a generator object is always truthy
            assert all(compatible_shapes(n.shape, inferred_nodes[0].shape) for n in inferred_nodes)

            inferred_and_executable = [
                n for n in node.in_nodes().values() if n['is_partial_inferred']
                and 'executable' in n and n['executable']
            ]
            tensor = inferred_and_executable[0]

            if all([
                    tensor.has_valid('value') and n.has_valid('value')
                    and strict_compare_tensors(tensor.value, n.value)
                    for n in inferred_and_executable
            ]):
                node.out_node().value = tensor.value.copy()
            else:
                node.out_node().value = None

        # do not use set_shape(tensor.shape) here because input port shape may be different from the calculated output
        # shape and `set_shape` will raise an error that shape has changed
        node.out_node(0).shape = shape_array(tensor.shape)
Code example #27
    def infer(node: Node):
        input_value = node.in_port(0).data.get_value()
        input_shape = node.in_port(0).data.get_shape()

        starts = node.in_port(1).data.get_value()
        ends = node.in_port(2).data.get_value()
        if starts is None or ends is None:
            # slice boundaries are unknown: every output dimension is dynamic
            node.out_port(0).data.set_shape(
                shape_array([dynamic_dimension_value] * len(input_shape)))
            return

        if node.is_in_port_connected(4):
            steps = node.in_port(4).data.get_value()
        else:
            steps = np.ones(len(starts), dtype=np.int64)

        if node.is_in_port_connected(3):
            axes = node.in_port(3).data.get_value()
        else:
            axes = list(range(len(starts)))

        if steps is None or axes is None:
            # the optional inputs are connected but their values are not yet known
            node.out_port(0).data.set_shape(
                shape_array([dynamic_dimension_value] * len(input_shape)))
            return

        slice_idx = [slice(0, in_shape, 1) for in_shape in input_shape]
        for i in range(len(axes)):
            # Ranged for output value for specified axis
            slice_idx[axes[i]] = slice(starts[i], ends[i], steps[i])
        if input_value is None or any(is_dynamic_slice(s) for s in slice_idx):
            output_shape = get_shape_from_slice(input_shape, slice_idx)
            if np.ma.any(output_shape <= 0):
                raise Error(
                    'Output shape: {} of node "{}" contains non-positive values'
                    .format(output_shape, node.name))
            node.out_port(0).data.set_shape(output_shape)
        else:
            node.out_port(0).data.set_value(input_value[tuple(slice_idx)])
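
The value branch at the end is ordinary numpy slicing; a self-contained re-enactment of the slice_idx construction:

import numpy as np

data = np.arange(10)
starts, ends, steps, axes = [2], [8], [2], [0]
slice_idx = [slice(0, dim, 1) for dim in data.shape]
for i in range(len(axes)):
    slice_idx[axes[i]] = slice(starts[i], ends[i], steps[i])
print(data[tuple(slice_idx)])  # [2 4 6]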
Code example #28
 def test_select_infer_tf_condition_assert_raises(self):
     with self.assertRaisesRegex(
             AssertionError,
             "if 'condition' is a 1D tensor then it's size"):
         self.build_select_graph_and_infer(condition_value=None,
                                           condition_shape=shape_array([42]),
                                           then_value=None,
                                           then_shape=shape_array([100, 20]),
                                           else_value=None,
                                           else_shape=shape_array([100, 20]),
                                           out_value=None,
                                           out_shape=shape_array([100, 20]),
                                           auto_broadcast='numpy',
                                           fw_format='tf')
Code example #29
 def split_helper(node: Node, index: int, direction: str, axis: int = 0):
     return Op._create_data_node(
         node.graph,
         name=node.name + '/SplittedBiLSTM/{}/'.format(direction),
         attrs={
             'value': np.take(node.value, [index], axis),
             'shape': shape_array(np.take(node.value, [index], axis).shape)
         })
Code example #30
File: convolution_test.py  Project: yding10/openvino
    def test_caffe_conv2d_dynamic_input_infer(self):
        graph = build_graph(
            nodes_attributes, [('conv_input', 'conv_node'),
                               ('conv_weights', 'conv_node'),
                               ('conv_node', 'conv_output'),
                               ('conv_output', 'op_output')],
            {
                'conv_output': {
                    'shape': None
                },
                'conv_input': {
                    'shape': shape_array([1, 3, dynamic_dimension_value, 227])
                },
                'conv_weights': {
                    'shape':
                    np.array([64, 3, 3, 3]),
                    'dim_attrs':
                    ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']
                },
                'conv_node': {
                    'pad_spatial_shape': np.array([[0, 0], [0, 0]]),
                    'conv_pad': np.array([[0, 0], [0, 0], [0, 0], [0, 0]]),
                    'dilation': np.array([1, 1, 1, 1]),
                    'bias_addable': True,
                    'bias_term': False,
                    'output_spatial_shape': None,
                    'output_shape': None,
                    'stride': np.array([1, 1, 1, 1]),
                    'group': 1,
                    'kernel_spatial_idx': np.array([2, 3]),
                    'input_feature_channel': 1,
                    'output_feature_channel': 0,
                    'output': 64,
                    'kernel_spatial': np.array([3, 3]),
                    'spatial_dims': np.array([2, 3]),
                    'channel_dims': np.array([1]),
                    'batch_dims': np.array([0])
                }
            })

        conv_node = Node(graph, 'conv_node')
        Convolution.infer(conv_node)
        exp_shape = shape_array([1, 64, dynamic_dimension_value, 225])
        res_shape = graph.node['conv_output']['shape']
        self.assertTrue(strict_compare_tensors(exp_shape, res_shape))
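
The trailing 225 follows the standard convolution shape formula (a sketch of the arithmetic for axis 3; dilated kernel = (kernel - 1) * dilation + 1):

in_dim, pad_sum, kernel, dilation, stride = 227, 0, 3, 1, 1
out_dim = (in_dim + pad_sum - ((kernel - 1) * dilation + 1)) // stride + 1
print(out_dim)  # 225; the output channel count 64 comes from the 'output' attribute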