Code Example #1
def roipooling_infer(node: Node):
    """
    Sets the shape of the output node according to the specified parameters and input blobs.
    The batch size is taken from the second input blob (the ROIs), the number of channels from the
    first one (the feature map); height and width are taken from the node attributes.
    Parameters
    ----------
    node
    """
    shapes = [node.in_node(i).shape for i in range(len(node.in_nodes()))]
    if any(s is None for s in shapes):
        return
    if len(node.in_nodes()) == 4:  # TensorFlow case of CropAndResize operation
        crop_size = node.in_node(3).value
        if crop_size is None:
            log.error('The ROIPooling size is not known for node {}'.format(
                node.soft_get('name')))
            return
        if not isinstance(crop_size, np.ndarray) or len(crop_size) != 2:
            log.error(
                'The ROIPooling size should have 2 elements for node {}'.format(
                    node.soft_get('name')))
            return
        node.pooled_h = crop_size[0]
        node.pooled_w = crop_size[1]
        node.graph.remove_edge(node.in_node(3).id, node.id)
        node.graph.remove_edge(node.in_node(2).id, node.id)

    layout = node.graph.graph['layout']
    assert len(layout) == 4

    node.out_node().shape = shape_for_layout(
        layout,
        batch=shapes[1][get_batch_dim(layout, 4)],
        features=shapes[0][get_features_dim(layout, 4)],
        height=node.pooled_h,
        width=node.pooled_w)
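All of the examples on this page lean on the same small set of layout helpers: get_batch_dim, get_features_dim, get_height_dim, get_width_dim and shape_for_layout. The sketch below is not the OpenVINO implementation; it is a minimal reimplementation inferred from how the helpers are called in these examples and from the tests in examples #27 and #28, included only to make the index arithmetic easier to follow.

import numpy as np


def get_batch_dim(layout: str, rank: int) -> int:
    # Batch is the first axis in both NCHW/NCDHW and NHWC/NDHWC layouts.
    return 0


def get_features_dim(layout: str, rank: int) -> int:
    # Channels follow the batch axis in NCHW, and are the last axis in NHWC.
    return 1 if layout == 'NCHW' else rank - 1


def get_height_dim(layout: str, rank: int) -> int:
    # NCHW/NCDHW: height is the second-to-last axis; NHWC/NDHWC: the third-to-last.
    return rank - 2 if layout == 'NCHW' else rank - 3


def get_width_dim(layout: str, rank: int) -> int:
    # NCHW/NCDHW: width is the last axis; NHWC/NDHWC: the second-to-last.
    return rank - 1 if layout == 'NCHW' else rank - 2


def shape_for_layout(layout: str, *, batch, features, height, width, depth=None):
    # Assembles a 4D (or 5D, when depth is given) shape in the requested layout.
    if layout == 'NCHW':
        dims = [batch, features] + ([depth] if depth is not None else []) + [height, width]
    else:
        dims = [batch] + ([depth] if depth is not None else []) + [height, width, features]
    return np.array(dims, dtype=np.int64)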
Code Example #2
File: depth_to_space.py  Project: yding10/openvino
    def infer(node: Node):
        in_shape = node.in_port(0).data.get_shape()
        if in_shape.size != 4:
            raise Error(
                'TensorFlow DepthToSpace operation is supported for 4D \'NHWC\' input layout only. '
                'Current input shape is \'{}\''.format(in_shape))

        layout = node.graph.graph['layout']

        N = in_shape[get_batch_dim(layout, 4)]
        H = in_shape[get_height_dim(layout, 4)]
        W = in_shape[get_width_dim(layout, 4)]
        C = in_shape[get_features_dim(layout, 4)]

        block_size = node['block_size']
        if C is not dynamic_dimension and C % (block_size**2):
            raise Error(
                'Feature dimensions of input tensor of DepthToSpace operation have to be divisible by square '
                'of DepthToSpace \'block_size\' parameter. Input tensor shape = {}. Feature dimension = {}. '
                'block_size = {}'.format(in_shape, C, block_size))

        out_shape = shape_for_layout(layout,
                                     batch=N,
                                     features=C // (block_size * block_size),
                                     height=H * block_size,
                                     width=W * block_size)

        if is_fully_defined(in_shape) and is_fully_defined(
                out_shape) and np.prod(in_shape) != np.prod(out_shape):
            raise Error(
                'Number of input elements "{}" is not equal to number of output elements "{}" for node "{}"'
                ''.format(in_shape, out_shape, node.soft_get('name', node.id)))
        node.out_port(0).data.set_shape(out_shape)
Code Example #3
File: space_to_depth.py  Project: yding10/openvino
    def infer(node: Node):
        in_shape = node.in_node().shape
        if in_shape.size != 4:
            raise Error(
                'TensorFlow SpaceToDepth operation is supported for 4D \'NHWC\' input layout only. '
                'Current input shape is \'{}\''.format(in_shape))

        layout = node.graph.graph['layout']
        N = in_shape[get_batch_dim(layout, 4)]
        H = in_shape[get_height_dim(layout, 4)]
        W = in_shape[get_width_dim(layout, 4)]
        C = in_shape[get_features_dim(layout, 4)]

        block_size = node['block_size']
        if (H is not dynamic_dimension
                and H % block_size) or (W is not dynamic_dimension
                                        and W % block_size):
            raise Error(
                'Spatial dimensions of input tensor of SpaceToDepth operation have to be divisible by '
                'SpaceToDepth \'block_size\' parameter. Input tensor shape = {}. Spatial dimensions = {},{}. '
                'block_size = {}'.format(in_shape, H, W, block_size))

        out_shape = shape_for_layout(layout,
                                     batch=N,
                                     features=C * (block_size**2),
                                     height=H // block_size,
                                     width=W // block_size)

        node.out_port(0).data.set_shape(out_shape)
Code Example #4
File: upsample.py  Project: zkzt/openvino
    def upsample_infer(node: Node):
        layout = node.graph.graph['layout']
        assert len(layout) == 4

        input_shape = node.in_node(0).shape
        if input_shape is None:
            return

        if len(node.in_nodes()) == 1:
            in_height = input_shape[get_height_dim(layout, 4)]
            in_width = input_shape[get_width_dim(layout, 4)]
            assert node.has('width_scale') and node.has('height_scale')
            out_height = math.floor(in_height * node.height_scale)
            out_width = math.floor(in_width * node.width_scale)
            node.out_node().shape = shape_for_layout(
                layout,
                batch=input_shape[get_batch_dim(layout, 4)],
                features=input_shape[get_features_dim(layout, 4)],
                height=out_height,
                width=out_width)
        else:
            assert node.in_node(1).value is not None
            eps = 1e-5  # makes values that are very close to an integer round to the nearest one instead of down
            # generic output shape calculation to support 5D input shape case
            node.out_node().shape = np.array(
                (input_shape + eps) * node.in_node(1).value).astype(np.int64)
Code Example #5
File: upsample.py  Project: srinivasdasu24/dldt
    def upsample_infer(node: Node):
        layout = node.graph.graph['layout']
        assert len(layout) == 4

        input_shape = node.in_node(0).shape
        if input_shape is None:
            return

        if len(node.in_nodes()) == 1:
            in_height = input_shape[get_height_dim(layout, 4)]
            in_width = input_shape[get_width_dim(layout, 4)]
            assert node.has('width_scale') and node.has('height_scale')
            out_height = math.floor(in_height * node.height_scale)
            out_width = math.floor(in_width * node.width_scale)
            node.out_node().shape = shape_for_layout(
                layout,
                batch=input_shape[get_batch_dim(layout, 4)],
                features=input_shape[get_features_dim(layout, 4)],
                height=out_height,
                width=out_width)
        else:
            assert node.in_node(1).value is not None
            # generic output shape calculation to support 5D input shape case
            node.out_node().shape = input_shape * node.in_node(1).value
Code Example #6
    def find_and_replace_pattern(self, graph: Graph):
        layout = graph.graph['layout']
        for eltwise_op_node in graph.get_op_nodes(is_eltwise=True):
            out_shape = eltwise_op_node.out_port().data.get_shape()
            if 4 <= len(out_shape) <= 5:
                out_features = out_shape[get_features_dim(
                    layout, len(out_shape))]
                for port, node in eltwise_op_node.in_nodes().items():
                    if len(node.shape) != len(out_shape) and len(
                            node.shape) == 1 and out_features == node.shape[0]:
                        new_shape = shape_for_layout(
                            layout,
                            batch=1,
                            features=out_features,
                            height=1,
                            width=1,
                            depth=1 if len(out_shape) == 5 else None)
                        dim_const = Const(graph, {
                            'value': new_shape,
                            'name': node.id + '/Dim'
                        }).create_node()
                        reshape_op = Reshape(graph,
                                             attrs={
                                                 'dim': new_shape,
                                                 'name': node.id + '/Broadcast'
                                             }).create_node()

                        eltwise_op_node.in_port(port).get_source(
                        ).node.out_port(0).get_connection().set_destination(
                            reshape_op.in_port(0))
                        reshape_op.in_port(1).connect(dim_const.out_port(0))

                        reshape_op.out_port(0).connect(
                            eltwise_op_node.in_port(port))
Code Example #7
    def replace_pattern(self, graph: Graph, match: dict):
        bias_add = match['BiasAdd']

        # Replace BiasAdd by Add operation
        new_add = Add(graph, {'name': bias_add.id + '/Add'}).create_node()

        bias_add.in_port(0).get_connection().set_destination(
            new_add.in_port(0))
        bias_add.in_port(1).get_connection().set_destination(
            new_add.in_port(1))
        bias_add.out_port(0).get_connection().set_source(new_add.out_port(0))

        if bias_add.data_format != 'NCHW':
            return

        input_shape = new_add.in_port(0).data.get_shape()
        bias_shape = new_add.in_port(1).data.get_shape()
        assert len(bias_shape) == 1

        unsqueeze_dims = np.arange(len(input_shape))
        channel_dim = get_features_dim('NCHW', len(input_shape))
        unsqueeze_dims = np.delete(unsqueeze_dims, channel_dim, 0)

        unsqueeze_node = Unsqueeze(graph, {
            'name': new_add.id + '/BiasUnsqueeze'
        }).create_node()
        unsqueeze_dims_node = Const(graph, {
            'name': new_add.id + '/Dims',
            'value': unsqueeze_dims
        }).create_node()
        # Reconnecting nodes
        unsqueeze_node.in_port(1).connect(unsqueeze_dims_node.out_port(0))
        unsqueeze_node['override_output_shape'] = True

        new_add.in_port(1).get_connection().insert_node(unsqueeze_node)
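A quick sanity check of the unsqueeze_dims arithmetic above, as a standalone sketch with an assumed 4D NCHW input (the values are illustrative, not taken from the original transformation):

import numpy as np

input_rank = 4
channel_dim = 1  # get_features_dim('NCHW', 4)
unsqueeze_dims = np.delete(np.arange(input_rank), channel_dim, 0)
print(unsqueeze_dims)  # [0 2 3]
# Unsqueezing a bias of shape [C] along these axes gives shape [1, C, 1, 1],
# which broadcasts correctly against the NCHW input of the new Add node.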
Code Example #8
def pad_op_transform(graph: Graph, match: dict):
    op = match['op']
    pad_op = match['pad_op']
    input_data = pad_op.in_node(0)
    pads = pad_op.in_node(1).value if len(
        pad_op.in_nodes()) == 2 else pad_op.pads

    if pad_op.mode != 'constant':
        log.info(
            'The pad node "{}" with pad mode "{}" cannot be fused.'.format(
                pad_op.soft_get('name'), pad_op.mode))
        return

    if pad_op.mode == 'constant' and pad_op.fill_value != 0.0:
        log.info('The pad node "{}" with non-zero fill value cannot be fused.'.
                 format(pad_op.soft_get('name')))
        return

    input_tensor_dims = len(match['pad_output'].shape)
    if np.any(pads[get_features_dim(op.graph.graph['layout'], input_tensor_dims)] != 0) or \
            np.any(pads[get_batch_dim(op.graph.graph['layout'], input_tensor_dims)] != 0):
        log.info(
            'The pad node "{}" with padding over feature/batch dimension cannot be fused.'
            .format(pad_op.soft_get('name')))
        return

    op.pad += pads
    op.pad_spatial_shape = op.pad[op.spatial_dims]
    op['auto_pad'] = None
    if op.type == 'Pooling':
        op['exclude_pad'] = False
    assert (graph[match['pad_output'].node][match['op'].node][0]['in'] == 0)
    edge_attrs = graph.get_edge_data(match['pad_output'].id, match['op'].id)[0]
    graph.remove_edge(match['pad_output'].id, match['op'].id)
    graph.add_edge(input_data.id, match['op'].id, **{'in': 0, **edge_attrs})
Code Example #9
    def infer(node: Node):
        in_shape = node.in_node().shape
        if in_shape.size != 4:
            raise Error(
                'TensorFlow SpaceToDepth operation is supported for 4D \'NHWC\' input layout only. '
                'Current input shape is \'{}\''.format(in_shape))

        layout = node.graph.graph['layout']
        N = in_shape[get_batch_dim(layout, 4)]
        H = in_shape[get_height_dim(layout, 4)]
        W = in_shape[get_width_dim(layout, 4)]
        C = in_shape[get_features_dim(layout, 4)]

        block_size = node['block_size']
        if H % block_size or W % block_size:
            raise Error(
                'Spatial dimensions of input tensor of SpaceToDepth operation have to be divisible by '
                'SpaceToDepth \'block_size\' parameter. Input tensor shape = {}. Spatial dimensions = {},{}. '
                'block_size = {}'.format(in_shape, H, W, block_size))

        out_shape = shape_for_layout(layout,
                                     batch=N,
                                     features=int(C * (block_size**2)),
                                     height=int(H / block_size),
                                     width=int(W / block_size))

        assert np.prod(in_shape) == np.prod(out_shape)
        node.out_node().shape = int64_array(out_shape)
Code Example #10
    def infer(node: Node):
        in_shape = node.in_node().shape
        if in_shape.size != 4:
            raise Error(
                'TensorFlow DepthToSpace operation is supported for 4D \'NHWC\' input layout only. '
                'Current input shape is \'{}\''.format(in_shape))

        layout = node.graph.graph['layout']

        N = in_shape[get_batch_dim(layout, 4)]
        H = in_shape[get_height_dim(layout, 4)]
        W = in_shape[get_width_dim(layout, 4)]
        C = in_shape[get_features_dim(layout, 4)]

        block_size = node['block_size']
        if C % (block_size**2):
            raise Error(
                'Feature dimensions of input tensor of DepthToSpace operation have to be divisible by square '
                'of DepthToSpace \'block_size\' parameter. Input tensor shape = {}. Feature dimension = {}. '
                'block_size = {}'.format(in_shape, C, block_size))

        out_shape = shape_for_layout(layout,
                                     batch=N,
                                     features=int(C / (block_size**2)),
                                     height=int(H * block_size),
                                     width=int(W * block_size))

        assert np.prod(in_shape) == np.prod(out_shape)
        node.out_node().shape = int64_array(out_shape)
Code Example #11
File: roialign.py  Project: zoeysgithub/openvino
    def infer(node):
        layout = node.graph.graph['layout']
        node_name = node.soft_get('name', node.id)

        assert len([port for port in node.in_ports().values() if not port.disconnected()]) == 3, \
            'The node "{}" must 3 inputs'.format(node_name)

        assert node.has_valid('pooled_w'), '"pooled_w" attribute is not set for node "{}"'.format(node_name)
        assert node.has_valid('pooled_h'), '"pooled_h" attribute is not set for node "{}"'.format(node_name)
        assert node.has_valid('mode'), '"mode" attribute is not set for node "{}"'.format(node_name)
        assert node.mode in ['avg', 'max'], \
            '"mode" attribute range of values is ["avg", "max"], got {} for node "{}"'.format(node.mode, node_name)

        input_shape = node.in_port(0).data.get_shape()
        rois_shape = node.in_port(1).data.get_shape()
        indices_shape = node.in_port(2).data.get_shape()
        assert input_shape is not None and rois_shape is not None and indices_shape is not None, \
            'The node "{}" input shape is None'.format(node_name)
        assert rois_shape[0] == indices_shape[0], 'The number of batch indices does not correspond to number of ROIs ' \
                                                  'for node "{}"'.format(node_name)
        assert rois_shape[1] == 4, 'The size of ROI element must be 4 for node "{}"'.format(node_name)
        assert len(input_shape) == 4, 'The rank of port 0 input tensor of node "{}" must be 4.'.format(node_name)

        node.out_port(0).data.set_shape(
            shape_for_layout(layout,
                             batch=rois_shape[0],
                             features=input_shape[get_features_dim(layout, 4)],
                             height=node.pooled_h,
                             width=node.pooled_w)
        )
Code Example #12
    def infer(node: Node):
        input_shape = node.in_node(0).shape
        name = node.soft_get('name', node.id)

        if node.axes is not None and node.across_channels is not None:
            raise Error('Only one of "axes" or "across_channels" can be set for the MVN in node "{}".'.format(name))

        if node.across_channels is None:
            if node.axes is not None:
                # normalizing (replacing -1 with actual index)
                axes_data_value = node.axes
                axes = [axes_data_value.item()] if axes_data_value.size == 1 else axes_data_value
                axes = [get_canonical_axis_index(input_shape, a) for a in axes]
                # deduce across_channels from the axes, i.e. whether the feature axis is included (assuming batch is axis zero)
                feature_dim = get_features_dim(node.graph.graph['layout'], len(input_shape)) \
                    if (4 <= len(input_shape) <= 5) \
                    else 1
                node.across_channels = int(feature_dim in axes)

                if 0 in axes:
                    raise Error('Reduction over the batch dimension in node "{}" '
                                'is not supported by the backend.'.format(name))
                for i in range(2, len(input_shape)):
                    if i not in axes:
                        raise Error(
                            'Reduction over spatial dimensions in node "{}" '
                            'is obligatory for the backend.'.format(name))
            else:
                node.across_channels = 0  # default

        copy_shape_infer(node)
Code Example #13
    def insert_pre_processing(graph: Graph, input_node: Node, node_mean_scale_values: np.array,
                              preprocessing_name: str):
        assert preprocessing_name in ['scale', 'mean']
        if node_mean_scale_values.get(preprocessing_name) is None:
            return
        user_value = node_mean_scale_values[preprocessing_name]
        value = 1 / user_value if preprocessing_name == 'scale' else user_value * (-1)
        optimize_value = int(preprocessing_name == 'scale')
        op = Mul if preprocessing_name == 'scale' else Add

        if all([x == optimize_value for x in value]):
            return
        assert input_node.has_valid('shape')
        features_dim_idx = get_features_dim(graph.graph['layout'], len(input_node.shape))
        assert value.size == input_node.shape[features_dim_idx] or value.size == 1

        shape = np.ones(len(input_node.shape), dtype=np.int64)
        shape[features_dim_idx] = value.size
        value = value.reshape(shape)

        name = input_node.soft_get('name', input_node.id) + '/' + preprocessing_name
        preprocessing = create_op_with_const_inputs(graph, op=op, port_value_dict={1: value}, op_attrs={'name': name})

        for dst in input_node.out_port(0).get_destinations():
            if dst.node.soft_get('type') != 'ShapeOf':
                dst.get_connection().set_source(preprocessing.out_port(0))

        input_node.out_port(0).connect(preprocessing.in_port(0))
Code Example #14
    def infer(node: Node):
        assert [port.idx for port in node.in_ports().values() if not port.disconnected()] == [0], \
            'Wrong input nodes number for node {} with type ExtractImagePatches'.format(node.soft_get('name', node.id))
        input_shape = node.in_port(0).data.get_shape()
        name = node.soft_get('name', node.id)
        assert input_shape is not None, 'Input shape is not set for node {} with type ExtractImagePatches'.format(name)

        assert len(input_shape) == 4, 'ExtractImagePatches operation supports only 4D tensors'

        layout = node.graph.graph['layout']
        N = input_shape[get_batch_dim(layout, 4)]
        C = input_shape[get_features_dim(layout, 4)]

        size_spatial = int64_array(node.sizes)[node.spatial_dims]

        input_spatial_shape = input_shape[node.spatial_dims]
        stride_spatial_shape = node.strides[node.spatial_dims]

        size_extent = node.rates[node.spatial_dims] * (size_spatial - 1) + 1

        pad_spatial_shape, output_spatial_shape = tf_window_op_pad_infer(input_spatial_shape,
                                                                         size_extent,
                                                                         stride_spatial_shape,
                                                                         node.auto_pad,
                                                                         False)

        out_shape = shape_for_layout(layout,
                                     batch=N,
                                     features=C * np.prod(size_spatial),
                                     height=output_spatial_shape[0],
                                     width=output_spatial_shape[1])

        node.out_port(0).data.set_shape(int64_array(out_shape))
Code Example #15
    def infer(node: Node):
        layout = node.graph.graph['layout']

        assert len(layout) == 4
        assert len(
            [p for p in node.in_ports().values() if not p.disconnected()]) == 2
        assert node.has_valid('mode')
        assert node.has_valid('axes')

        src_shape = node.in_port(0).data.get_shape()
        assert src_shape is not None
        dst_shape = node.in_port(1).data.get_value()
        assert dst_shape is not None

        out_height = dst_shape[0]
        out_width = dst_shape[1]

        node.out_node().shape = shape_for_layout(
            layout,
            batch=src_shape[get_batch_dim(layout, 4)],
            features=src_shape[get_features_dim(layout, 4)],
            height=out_height,
            width=out_width)

        PermuteAttrs.create_permute_attrs(node, attrs=[('axes', 'input:0')])
Code Example #16
    def insert_pre_processing(graph: Graph, input_node: Node, node_mean_scale_values: np.array,
                              preprocessing_name: str):
        assert preprocessing_name in ['scale', 'mean']
        if node_mean_scale_values.get(preprocessing_name) is None:
            return
        user_value = node_mean_scale_values[preprocessing_name]
        value = 1 / user_value if preprocessing_name == 'scale' else user_value * (-1)
        optimize_value = int(preprocessing_name == 'scale')
        op = Mul if preprocessing_name == 'scale' else Add

        if all([x == optimize_value for x in value]):
            return
        assert input_node.has_valid('shape')
        features_dim_idx = get_features_dim(graph.graph['layout'], len(input_node.shape))
        assert value.size == input_node.shape[features_dim_idx] or value.size == 1

        shape = np.ones(len(input_node.shape), dtype=np.int64)
        shape[features_dim_idx] = value.size
        value = value.reshape(shape)

        name = input_node.soft_get('name', input_node.id) + '/' + preprocessing_name
        preprocessing = create_op_with_const_inputs(graph, op=op, port_value_dict={1: value}, op_attrs={'name': name})

        for dst in input_node.out_port(0).get_destinations():
            if dst.node.soft_get('type') != 'ShapeOf':
                # After the insertion of additional operations model optimizer
                # should keep the link to the input layer. Parameter node in framework
                # should map to parameter node in IR.
                # For this reason 'fw_tensor_debug_info' should be kept in data node.
                dst.get_connection().set_source(preprocessing.out_port(0), "source")

        input_node.out_port(0).connect(preprocessing.in_port(0))
Code Example #17
File: resample.py  Project: pc2/CustoNN2
    def resample_infer(node: Node):
        layout = node.graph.graph['layout']
        assert len(layout) == 4

        input_shape = node.in_node(0).shape
        if input_shape is None:
            return
        in_height = input_shape[get_height_dim(layout, 4)]
        in_width = input_shape[get_width_dim(layout, 4)]

        if node.has('fw') and node.fw == 'tf':
            dst_shape = node.in_node(1).value
            if dst_shape is None or len(input_shape) != 4 or len(
                    dst_shape) != 2:
                log.error(
                    'Node {} with op {} cannot be converted to Resample layer because there is not enough info about '
                    'src/dst shapes: src_shape = {}, dst_shape = {}'.format(
                        node.name, node.op, input_shape, dst_shape))
                node.type = None  # prevent translation to a valid IE layer
                return
            out_height = dst_shape[0]
            out_width = dst_shape[1]
            node.graph.remove_edge(node.in_node(1).id, node.id)
        else:
            if len(node.in_nodes()) == 1:
                if node.has('width') and node.has('height'):
                    out_height = node.height
                    out_width = node.width
                else:
                    out_height = node.factor * in_height
                    out_width = node.factor * in_width
            else:
                out_height = node.in_node(1).shape[get_height_dim(layout, 4)]
                out_width = node.in_node(1).shape[get_width_dim(layout, 4)]

        node.factor = factor_update(
            node.factor,
            [float(out_height) / in_height,
             float(out_width) / in_width], [in_height, in_width],
            [out_height, out_width], node.soft_get('name'))

        node.out_node().shape = shape_for_layout(
            layout,
            batch=input_shape[get_batch_dim(layout, 4)],
            features=input_shape[get_features_dim(layout, 4)],
            height=out_height,
            width=out_width)
Code Example #18
 def find_and_replace_pattern(self, graph: Graph):
     layout = graph.graph['layout']
     for n in list(graph.nodes()):
         if 'type' in graph.node[n] and graph.node[n]['type'] == 'Eltwise' and get_value_id(Node(graph, n)) is None:
             eltwise_op_node = Node(graph, n)
             out_shape = eltwise_op_node.out_node().shape
             if 4 <= len(out_shape) <= 5:
                 out_features = out_shape[get_features_dim(layout, len(out_shape))]
                 for port, node in eltwise_op_node.in_nodes().items():
                     if len(node.shape) != len(out_shape) and len(node.shape) == 1 and out_features == node.shape[0]:
                         in_atts = deepcopy(graph.get_edge_data(node.id, n)[0])
                         graph.remove_edge(node.id, n)
                         new_shape = shape_for_layout(layout, batch=1, features=out_features, height=1, width=1,
                                                      depth=1 if len(out_shape) == 5 else None)
                         reshape_data_op = Reshape(graph, attrs={'dim': new_shape, 'name': node.id + '/Broadcast'})
                         reshape_data_node = reshape_data_op.create_node_with_data([node])
                         graph.add_edge(reshape_data_node.id, eltwise_op_node.id, **in_atts)
Code Example #19
    def upsample_infer(node: Node):
        node_name = node.soft_get('name', node.id)
        layout = node.graph.graph['layout']
        assert len(
            layout
        ) == 4, 'Input tensor rank must be equal to 4 for node "{}"'.format(
            node_name)

        input_shape = node.in_port(0).data.get_shape()

        if len(node.in_nodes()) == 1:
            in_height = input_shape[get_height_dim(layout, 4)]
            in_width = input_shape[get_width_dim(layout, 4)]
            assert node.has('width_scale') and node.has('height_scale')
            if in_height is not dynamic_dimension:
                out_height = math.floor(in_height * node.height_scale)
            else:
                out_height = dynamic_dimension
            if in_width is not dynamic_dimension:
                out_width = math.floor(in_width * node.width_scale)
            else:
                out_width = dynamic_dimension
            node.out_port(0).data.set_shape(
                shape_for_layout(layout,
                                 batch=input_shape[get_batch_dim(layout, 4)],
                                 features=input_shape[get_features_dim(
                                     layout, 4)],
                                 height=out_height,
                                 width=out_width))
        else:
            scales = node.in_port(1).data.get_value()
            assert scales is not None, 'The input with scales for node "{}" is not constant'.format(
                node_name)
            eps = 1e-5  # makes values that are very close to an integer round to the nearest one instead of down
            # generic output shape calculation to support 5D input shape case
            output_shape = shape_array(
                [dynamic_dimension for _ in range(len(input_shape))])
            for idx in range(len(output_shape)):
                if input_shape[idx] is not dynamic_dimension:
                    output_shape[idx] = int(
                        (input_shape[idx] + eps) * scales[idx])
                else:
                    output_shape[idx] = dynamic_dimension_value
            node.out_port(0).data.set_shape(output_shape)
Code Example #20
File: conv.py  Project: pavel-esir/openvino
def pad_op_transform(graph: Graph, match: dict):
    op = match['op']
    pad_op = match['pad_op']
    input_data = pad_op.in_node(0)

    if pad_op.mode != 'constant':
        log.info(
            'The pad node "{}" with pad mode "{}" cannot be fused.'.format(
                pad_op.soft_get('name'), pad_op.mode))
        return

    if op.type == 'Pooling' and op.pool_method == 'max':
        return

    if pad_op.mode == 'constant':
        fill_value = pad_op.in_port(3).data.get_value()
        if fill_value is None or fill_value != 0.0:
            log.info(
                'The pad node "{}" with non-zero fill value cannot be fused.'.
                format(pad_op.soft_get('name')))
            return

    input_tensor_dims = len(match['pad_output'].shape)
    for in_port in [1, 2]:
        pads = pad_op.in_port(in_port).data.get_value()
        if pads[get_features_dim(op.graph.graph['layout'], input_tensor_dims)] != 0 or \
                pads[get_batch_dim(op.graph.graph['layout'], input_tensor_dims)] != 0:
            log.info(
                'The pad node "{}" with padding over feature/batch dimension cannot be fused.'
                .format(pad_op.soft_get('name')))
            return

    op.pad += np.concatenate([
        pad_op.in_port(1).data.get_value().reshape([-1, 1]),
        pad_op.in_port(2).data.get_value().reshape([-1, 1])
    ],
                             axis=1)
    op.pad_spatial_shape = op.pad[op.spatial_dims]
    op['auto_pad'] = None
    if op.type == 'Pooling':
        op['exclude_pad'] = False
    assert (graph[match['pad_output'].node][match['op'].node][0]['in'] == 0)
    edge_attrs = graph.get_edge_data(match['pad_output'].id, match['op'].id)[0]
    graph.remove_edge(match['pad_output'].id, match['op'].id)
    graph.add_edge(input_data.id, match['op'].id, **{'in': 0, **edge_attrs})
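To illustrate the pad accumulation at the end of this transform, here is a standalone sketch with made-up pad values (in the real pass they come from inputs 1 and 2 of the Pad node):

import numpy as np

pads_begin = np.array([0, 0, 1, 1])  # hypothetical per-axis padding before the data (N, C, H, W)
pads_end = np.array([0, 0, 2, 2])    # hypothetical per-axis padding after the data
pad = np.concatenate([pads_begin.reshape([-1, 1]),
                      pads_end.reshape([-1, 1])], axis=1)
print(pad)
# [[0 0]
#  [0 0]
#  [1 2]
#  [1 2]]
# This [rank, 2] matrix is what gets added to op.pad, so only the spatial rows are non-zero here.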
Code Example #21
    def replace_pattern(graph: Graph, match: dict):
        reshape = match['reshape']
        assert len(reshape.in_nodes()) > 0
        if graph.graph['layout'] == 'NCHW' or reshape.has_and_set('nchw_layout') or\
                reshape.soft_get('correct_data_layout') is True:
            return

        input_node = reshape.in_node()
        output_node = reshape.out_node()
        input_shape = input_node.shape
        output_shape = output_node.shape

        if len(input_shape) >= 4 and len(output_shape) == 3:
            # Check that we will permute some shapes in this Reshape by our permutation pass
            layout = 'NCHW'
            c_idx = get_features_dim(layout, len(input_shape))
            hw_idx = [
                get_width_dim(layout, len(input_shape)),
                get_height_dim(layout, len(input_shape))
            ]
            if input_shape[c_idx] != 1 and np.any(
                    input_shape[hw_idx] != [1, 1]):
                # then nhwc -> nchw permutation can change shapes significantly
                # We need to wrap up node with NCHW -> NHWC permutes and don't touch it later
                permutation = PermuteAttrs.get_nchw_to_nhwc_permutation(
                    len(input_shape))
                permutation_back = PermuteAttrs.get_nchw_to_nhwc_permutation(
                    len(input_shape))

                # 1. Insert input Permute
                #    This Permute will permute input from original input layout to operation layout
                edge_attrs = graph.get_edge_data(input_node.id, reshape.id)[0]
                graph.remove_edge(input_node.id, reshape.id)

                permute_op = Permute(graph, {
                    'order': permutation.perm,
                    'name': reshape.name + '/Permute_'
                })
                permute_data_node = permute_op.create_node_with_data(
                    [input_node])

                graph.add_edge(permute_data_node.id, reshape.id, **edge_attrs)
Code Example #22
    def replace_pattern(self, graph: Graph, match: dict):
        reshape1 = match['reshape1']
        reshape2 = match['reshape2']
        transpose = match['transpose']

        # Check that Reshape->Transpose->Reshape shuffle only feature channel
        input_shape = np.array(reshape1.in_node(0).shape)
        reshape1_shape = np.array(reshape1.out_node().shape)
        output_shape = np.array(reshape2.out_node().shape)

        # Check that input shape is 4D
        if len(input_shape) != 4:
            log.warning(
                'Can\'t convert Reshape->Transpose({})->Reshape sequence because the input shape should be 4D '
                '(instead of {}D)'.format(transpose.name, len(input_shape)))
            return

        # Check that output shape the same as input shape
        if not np.prod(input_shape) == np.prod(output_shape):
            log.warning(
                'Can\'t convert Reshape->Transpose({})->Reshape sequence because the output shape should be equal '
                'to input shape: {} and {}'.format(transpose.name, input_shape,
                                                   output_shape))
            return

        # Input shapes can be either NCHW or NHWC, so in case of a channel split the feature channel can be split as
        # described in the comments below
        # So feature_dims_split list contains possible dims responsible for feature dim
        layout = graph.graph['layout']
        feature_dim = get_features_dim(layout, len(input_shape))
        spatial_dims = [
            get_height_dim(layout, len(input_shape)),
            get_width_dim(layout, len(input_shape))
        ]
        if layout == 'NCHW':
            # NC1C2HW or NC1C2(H*W)
            feature_dims_split = np.array([feature_dim, feature_dim + 1])
        else:
            # NHWC1C2 or N(H*W)C1C2 or (N*H*W)C1C2
            feature_dims_split = np.array(
                [len(reshape1_shape) - 2,
                 len(reshape1_shape) - 1])

        # Check that feature_dims_split suits reshape layer shape
        for dim in feature_dims_split:
            if dim < 0 or dim >= len(reshape1_shape):
                log.warning(
                    'Can\'t convert Reshape({}:{})->Transpose->Reshape sequence. Can\'t detect feature shuffle.'
                    ''.format(reshape1.shape, reshape1_shape))
                return

        if not np.prod(np.delete(reshape1_shape,
                                 feature_dims_split)) == np.prod(
                                     np.delete(input_shape, feature_dim)):
            log.warning(
                'Can\'t convert Reshape->Transpose->Reshape sequence. Can\'t detect feature shuffle. {} '
                'should be equal to {}'.format(
                    np.prod(np.delete(reshape1_shape, feature_dims_split)),
                    np.prod(np.delete(input_shape, feature_dim))))
            return

        # Check transpose order
        if not np.array_equal(feature_dims_split[::-1],
                              transpose.order[feature_dims_split]):
            log.warning(
                'Can\'t convert Reshape->Transpose({})->Reshape sequence. Transpose operation should switch '
                'feature order (given order: {})'.format(
                    transpose.name, transpose.order))
            return

        # Now we are sure that Reshape->Transpose->Reshape shuffle feature dims
        # So, then we change Reshape and Transpose attrs to suite NCHW layout

        # The resulting shape for Reshape1 layer : [N,C1,C2,(H*W)]
        new_reshape1_shape = np.concatenate(
            (np.array([input_shape[0]]),
             np.array(reshape1_shape[feature_dims_split]),
             np.array([np.prod(input_shape[spatial_dims])])))

        new_transpose_order = np.array([0, 2, 1, 3])
        new_transpose_shape = np.array(new_reshape1_shape[new_transpose_order])

        reshape1.out_node().shape = new_reshape1_shape
        reshape1.dim = np.copy(new_reshape1_shape)

        transpose.order = new_transpose_order
        transpose.out_node().shape = new_transpose_shape

        # Preserve layers from conversion to NCHW (in case of NHWC topology layout)
        reshape1['nchw_layout'] = True
        reshape1.out_node()['nchw_layout'] = True
        transpose['nchw_layout'] = True
        transpose.out_node()['nchw_layout'] = True
Code Example #23
    def replace_pattern(self, graph: Graph, match: dict):
        layout = graph.graph['layout']
        if layout != 'NHWC':
            return

        reshape1 = match['reshape1']
        softmax = match['softmax']

        # Check that Reshape->Softmax->Reshape shuffle only feature channel
        input_shape = np.array(reshape1.in_node(0).shape)
        reshape1_shape = np.array(reshape1.out_node().shape)

        # Check that input shape is 4D
        if len(input_shape) != 4:
            log.warning(
                'Can\'t convert Reshape({})->Softmax->Reshape sequence because the input shape should be 4D '
                '(instead of {}D {})'.format(reshape1.name, len(input_shape),
                                             input_shape))
            return

        if len(reshape1_shape) != 2:
            log.warning(
                'This pass expects a 2D output tensor for the first Reshape {} layer (given shape: {})'
                ''.format(reshape1.name, reshape1_shape))
            return

        # Define feature dim
        feature_dim = get_features_dim(layout, len(input_shape))
        spatial_dims = [
            get_height_dim(layout, len(input_shape)),
            get_width_dim(layout, len(input_shape))
        ]

        # Skip transform in case if spatial dims in input shape are equal to [1,1]
        if np.array_equal(input_shape[spatial_dims], np.array([1, 1])):
            log.info('Skip this transformation due to spatial dims are [1,1]')
            return

        # Check that Reshape1 has out dims [-1, feature_dims]
        if not (reshape1_shape[-1] == input_shape[-1] and reshape1_shape[0]
                == np.prod(np.delete(input_shape, feature_dim))):
            log.warning(
                'Output shape for Reshape operation should be [{},{}] instead of {}'
                .format(np.prod(np.delete(input_shape, feature_dim)),
                        input_shape[-1], reshape1_shape))
            return

        # Now we are sure that Reshape->Softmax suits for this transformation

        # The resulting shape for Reshape1 layer : [N,C,(H*W)]
        new_reshape1_shape = np.concatenate(
            (np.array([input_shape[0]]), np.array([reshape1_shape[-1]]),
             np.array([np.prod(input_shape[spatial_dims])])))

        # update 'dim' attribute but preserve batch dimension size which could be -1
        reshape1.dim = int64_array([reshape1.dim[0], *new_reshape1_shape[1:]])

        old_shape = np.array(reshape1.out_node().shape)
        reshape1.out_node().shape = new_reshape1_shape
        softmax.out_node().shape = new_reshape1_shape

        # Preserve layers from conversion to NCHW (in case of NHWC topology layout)
        reshape1['nchw_layout'] = True
        reshape1.out_node()['nchw_layout'] = True
        softmax['nchw_layout'] = True
        softmax.out_node()['nchw_layout'] = True

        # Create final Reshape to keep original shape for softmax output if softmax is not the last node
        softmax_out_data = softmax.out_node()
        if len(softmax_out_data.out_nodes()) != 0:
            next_operation = softmax_out_data.out_node()
            # Save edge attributes & remove edge
            edge_attrs = graph.get_edge_data(softmax_out_data.id,
                                             next_operation.id)[0]
            graph.remove_edge(softmax_out_data.id, next_operation.id)
            reshape_op = Reshape(
                graph,
                dict(name=softmax.id + "/Reshape",
                     dim=np.array(old_shape),
                     nchw_layout=True))
            reshape_out_data = reshape_op.create_node_with_data(
                inputs=[softmax_out_data])
            graph.add_edges_from([(reshape_out_data.id, next_operation.id,
                                   edge_attrs)])
Code Example #24
    def replace_pattern(self, graph: Graph, match: dict):
        y = match['maximum'].in_port(0).data.get_value()
        if y is None:
            y = match['maximum'].in_port(1).data.get_value()

        if y is None or y.shape != ():
            log.debug(
                'The value of the "maximum_y_data" is not defined or is not constant'
            )
            return

        # We need to check axes which performed reduction because IE supports only 2D, 3D, 4D inputs and
        # reduction only along spatial and channel dimensions.
        input_rank = len(match['sum'].in_port(0).data.get_shape())
        if input_rank not in [2, 3, 4]:
            log.debug(
                'IE supports L2 normalization only for 2D, 3D and 4D tensors.')
            return

        axes = match['sum'].in_port(1).data.get_value()
        axes = int64_array(axes)
        if axes.shape == ():
            axes = int64_array([axes])
        axes = int64_array(
            [axis if axis >= 0 else axis + input_rank for axis in axes])
        axes.sort()

        transformation_applicable = False
        # check for case C + all spatial dims. Works for 2D (NC), 3D (NCH) and 4D (NCHW and NHWC)
        if len(axes) + 1 == input_rank and np.array_equal(
                axes, int64_array(np.arange(start=1, stop=input_rank))):
            transformation_applicable = True

        # check for pure C channel normalization
        if len(axes) == 1 and ((input_rank == 4 and get_features_dim(
                graph.graph['layout'], input_rank) == axes[0]) or
                               (input_rank != 4 and axes[0] == 1)):
            transformation_applicable = True

        if not transformation_applicable:
            log.debug(
                'IE doesn\'t support l2 normalization with reduction along axes {}.'
                .format(axes))
            return

        output_name = match['l2_normalize'].soft_get('name',
                                                     match['l2_normalize'].id)
        normalize_node = create_op_node_with_second_input(
            graph, NormalizeL2Op, axes, {
                'name': output_name,
                'eps_mode': 'max',
                'eps': y
            })
        match['square'].in_port(0).get_source().connect(
            normalize_node.in_port(0))

        match['square'].in_port(0).disconnect()
        if match['l2_normalize'].in_port(
                0).get_source().node.id == match['rsqrt'].id:
            match['l2_normalize'].in_port(1).disconnect()
        else:
            match['l2_normalize'].in_port(0).disconnect()

        match['l2_normalize'].out_port(0).get_connection().set_source(
            normalize_node.out_port(0))
        rename_nodes([(match['l2_normalize'], output_name + "/TBR"),
                      (normalize_node, output_name)])
Code Example #25
    def interp_infer(node: Node):
        layout = node.graph.graph['layout']
        assert len(layout) == 4
        if len(node.in_nodes()) == 2:
            src_shape = node.in_node(0).shape
            dst_shape = node.in_node(1).shape

            # Caffe can also have 2 inputs, but the target size should be taken from the shape of the second input
            if node.parse_2nd_input == 'shape':
                dst_shape = [
                    dst_shape[get_height_dim(layout, 4)],
                    dst_shape[get_width_dim(layout, 4)]
                ]
            else:
                # it is TF case
                dst_shape = node.in_node(1).value

            if src_shape is None or dst_shape is None or len(
                    src_shape) != 4 or len(dst_shape) != 2:
                log.error(
                    'Node {} with op {} cannot be converted to Resample layer because there is not enough info about '
                    'src/dst shapes: src_shape = {}, dst_shape = {}'.format(
                        node.name, node.op, src_shape, dst_shape))
                node.type = None  # prevent translation to a valid IE layer
                return
            in_height = src_shape[get_height_dim(layout, 4)]
            in_width = src_shape[get_width_dim(layout, 4)]
            out_height = dst_shape[0]
            out_width = dst_shape[1]

            node.factor = factor_update(
                node.factor,
                [float(out_height) / in_height,
                 float(out_width) / in_width], [in_height, in_width],
                [out_height, out_width], node.soft_get('name'))

            if node.factor is None:
                node['width'] = out_width
                node['height'] = out_height

            node.out_node().shape = shape_for_layout(
                layout,
                batch=src_shape[get_batch_dim(layout, 4)],
                features=src_shape[get_features_dim(layout, 4)],
                height=out_height,
                width=out_width)
            node.graph.remove_edge(node.in_node(1).id, node.id)
        else:
            outn = node.out_node(0)

            in_shape = node.in_node(0)
            num_ = in_shape.shape[get_batch_dim(layout, 4)]
            channels_ = in_shape.shape[get_features_dim(layout, 4)]
            height_in_ = in_shape.shape[get_height_dim(layout, 4)]
            width_in_ = in_shape.shape[get_width_dim(layout, 4)]

            height_out_ = height_in_ + node.pad_beg + node.pad_end
            width_out_ = width_in_ + node.pad_beg + node.pad_end

            if node.shrink_factor != 1 and node.zoom_factor == 1:
                shrink_factor = node.shrink_factor
                if shrink_factor < 1:
                    log.error(
                        'Shrink factor should be positive in node {}'.format(
                            node.id))
                    return None
                height_out_ = (height_out_ - 1) / shrink_factor + 1
                width_out_ = (width_out_ - 1) / shrink_factor + 1
            elif node.shrink_factor == 1 and node.zoom_factor != 1:
                zoom_factor = node.zoom_factor
                if zoom_factor < 1:
                    log.error(
                        'Zoom factor should be positive in node {}'.format(
                            node.id))
                    return None

                node['debug_message'] = 'Interp layer shape inference function may be wrong, please, try to update ' \
                                        'layer shape inference function in the file (extensions/ops/interp.op at the ' \
                                        'line {}).'.format(inspect.currentframe().f_lineno) + refer_to_faq_msg(100)
                # Reshape methods can be different in some cases
                # Commented out section represents reshape that used in deeplab-caffe
                # Uncomment the following lines, if your model was trained with deeplab-caffe
                # or have the same reshape method
                # height_out_ = height_out_ + (height_out_ - 1) * (zoom_factor - 1)
                # width_out_ = width_out_ + (width_out_ - 1) * (zoom_factor - 1)

                # Comment out the following lines if you use the reshape method from previous section
                height_out_ = height_out_ * zoom_factor
                width_out_ = width_out_ * zoom_factor
            elif node.width != 0 and node.height != 0:
                height_out_ = node.height
                width_out_ = node.width
            elif node.shrink_factor != 1 and node.zoom_factor != 1:
                shrink_factor = node.shrink_factor
                zoom_factor = node.zoom_factor
                if shrink_factor < 1:
                    log.error(
                        'Shrink factor should be positive in node {}'.format(
                            node.id))
                    return None
                if zoom_factor < 1:
                    log.error(
                        'Zoom factor should be positive in node {}'.format(
                            node.id))
                    return None
                height_out_ = (height_out_ - 1) / shrink_factor + 1
                width_out_ = (width_out_ - 1) / shrink_factor + 1
                height_out_ = height_out_ + (height_out_ - 1) * (zoom_factor -
                                                                 1)
                width_out_ = width_out_ + (width_out_ - 1) * (zoom_factor - 1)

            outn.shape = shape_for_layout(layout,
                                          batch=num_,
                                          features=channels_,
                                          height=height_out_,
                                          width=width_out_)
Code Example #26
File: Reduce.py  Project: pc2/CustoNN2
    def replace_pattern(self, graph: nx.MultiDiGraph, match: dict):
        node = match['reduce']
        if not node.has_valid('reduce_type') or node.reduce_type.lower() not in self.supported_reduce_types:
            log.error("Reduce type {} is not supported for node {}".format(node.soft_get('reduce_type'), node.id))
            return

        reduce_type = node.reduce_type.lower()
        if reduce_type not in self.pool_method_map:
            log.error("Reduce type {} is not included in pool_method_map. Please update pool_method_map with new key "
                      "{}".format(reduce_type, reduce_type))
            return

        input_data = node.in_node()
        output_data = node.out_node()

        input_shape = node.in_node().shape
        output_shape = node.out_node().shape

        # normalize node.axis to exclude negative indices
        node.axis = [get_canonical_axis_index(input_shape, a) for a in node.axis]

        axis = node.axis

        # Check that values in axis list are consecutive
        for idx in range(1, len(axis)):
            if axis[idx] != (axis[idx - 1] + 1):
                log.error("Reduce with not consecutive axes {} is not supported ".format(axis))
                return

        layout = graph.graph['layout']

        # So now we are sure that we can convert Reduce to appropriate operation

        # 1. Calculate shape that will be used in reduction
        reduction_dim = np.prod([input_shape[idx] for idx in axis])
        begin_dims = np.array([input_shape[idx] for idx in range(axis[0])])
        end_dim = np.prod([input_shape[idx] for idx in range(axis[-1] + 1, len(input_shape))])

        # 2. Create reshape with appropriate shape
        if layout == 'NCHW':
            if len(begin_dims) > 2:
                begin_dims = np.array([np.prod(begin_dims[0:-1]), begin_dims[-1]], dtype=np.int64)
            else:
                # Expand begin_dims to 2
                begin_dims = np.array(np.append(begin_dims, [1] * (2 - len(begin_dims))), dtype=np.int64)
            reshape_shape = np.array([*begin_dims, reduction_dim, end_dim], dtype=np.int64)
            pool_window = np.array([1, 1, reduction_dim, 1], dtype=np.int64)
        elif layout == 'NHWC':
            begin_dims = np.prod(begin_dims)
            reshape_shape = np.array([begin_dims, reduction_dim, 1, end_dim], dtype=np.int64)
            pool_window = np.array([1, reduction_dim, 1, 1], dtype=np.int64)
        else:
            log.error('{} layout currently is not supported'.format(layout))
            return

        # 3. Reduce => Reshape->Pooling->Reshape
        reshape_op = Reshape(graph, {'name': node.id + '/Reshape', 'dim': reshape_shape})
        final_reshape_op = Reshape(graph, {'name': node.id + '/FinalReshape', 'dim': output_shape})
        pooling_op = Pooling(graph,
                             dict(name=node.id + '/Pool',
                                  window=pool_window,
                                  output_spatial_shape=None,
                                  batch_dims=np.array([get_batch_dim(layout, 4)], dtype=np.int64),
                                  channel_dims=np.array([get_features_dim(layout, 4)], dtype=np.int64),
                                  exclude_pad='false', pool_method=self.pool_method_map[reduce_type]))

        graph.remove_edge(input_data.id, node.id)
        graph.remove_edge(node.id, output_data.id)

        final_reshape_op.create_node_with_data(
            inputs=[pooling_op.create_node_with_data(
                inputs=[reshape_op.create_node_with_data(
                    inputs=[input_data]
                )]
            )],
            data_nodes=output_data)

        # 4. If it is reduction with summation, we need to multiply by size of the reduction slice with Mul op
        if reduce_type == 'sum':
            output_data.in_node().insert_node_with_data_after(
                output_data,
                Power,
                {'name': node.name + '/Mul', 'scale': float(reduction_dim)}
            )
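A worked example of the shape arithmetic from steps 1 and 2 above, for an assumed NCHW input (standalone numbers, not taken from the original test suite):

import numpy as np

input_shape = np.array([2, 3, 4, 5])  # NCHW
axis = [2, 3]                         # reduce over H and W

reduction_dim = np.prod([input_shape[idx] for idx in axis])                             # 20
begin_dims = np.array([input_shape[idx] for idx in range(axis[0])])                     # [2 3]
end_dim = np.prod([input_shape[idx] for idx in range(axis[-1] + 1, len(input_shape))])  # 1 (empty product)

# NCHW branch: begin_dims already has length 2, so it is used as-is
reshape_shape = np.array([*begin_dims, reduction_dim, end_dim], dtype=np.int64)  # [ 2  3 20  1]
pool_window = np.array([1, 1, reduction_dim, 1], dtype=np.int64)                 # [ 1  1 20  1]
print(reshape_shape, pool_window)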
Code Example #27
 def test_get_features_dim_NDHWC(self):
     self.assertEqual(get_features_dim('NHWC', 5), 4)
Code Example #28
 def test_get_features_dim_NCDHW(self):
     self.assertEqual(get_features_dim('NCHW', 5), 1)
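Putting the two tests above together with the DepthToSpace inference from example #2, here is a short usage sketch; it reuses the hypothetical helper definitions given after example #1, and the expected output is inferred from the examples rather than taken from library documentation:

layout = 'NHWC'
in_shape = np.array([1, 20, 30, 16], dtype=np.int64)  # N, H, W, C
block_size = 2

out_shape = shape_for_layout(
    layout,
    batch=in_shape[get_batch_dim(layout, 4)],
    features=in_shape[get_features_dim(layout, 4)] // (block_size ** 2),
    height=in_shape[get_height_dim(layout, 4)] * block_size,
    width=in_shape[get_width_dim(layout, 4)] * block_size)

print(out_shape)  # expected: [ 1 40 60  4]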