Example #1
def roipooling_infer(node: Node):
    """
    Sets the shape of the output node according to the specified attributes and input blobs.
    The batch size is taken from the second input blob (ROIs), the number of channels from the first
    input blob (feature map); height and width are given by the 'pooled_h'/'pooled_w' attributes.
    Parameters
    ----------
    node
    """
    shapes = [node.in_node(i).shape for i in range(len(node.in_nodes()))]
    if any(s is None for s in shapes):
        return
    if len(node.in_nodes()) == 4:  # TensorFlow case of CropAndResize operation
        crop_size = node.in_node(3).value
        if crop_size is None:
            log.error('The ROIPooling size is not known for node {}'.format(
                node.soft_get('name')))
            return
        if not isinstance(crop_size, np.ndarray) or len(crop_size) != 2:
            log.error('The ROIPooling size should have 2 elements for node {}'.format(node.soft_get('name')))
            return
        node.pooled_h = crop_size[0]
        node.pooled_w = crop_size[1]
        node.graph.remove_edge(node.in_node(3).id, node.id)
        node.graph.remove_edge(node.in_node(2).id, node.id)

    layout = node.graph.graph['layout']
    assert len(layout) == 4

    node.out_port(0).data.set_shape(
        shape_for_layout(layout,
                         batch=shapes[1][get_batch_dim(layout, 4)],
                         features=shapes[0][get_features_dim(layout, 4)],
                         height=node.pooled_h,
                         width=node.pooled_w))
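
Note: every example on this page builds its output shape with the same small set of layout helpers (get_batch_dim, get_features_dim, get_height_dim, get_width_dim, shape_for_layout). The sketch below shows how shape_for_layout presumably assembles a shape from the named dimensions; it is inferred from the call sites in these examples and is not the actual OpenVINO implementation (dynamic dimensions are ignored for simplicity).

import numpy as np

def shape_for_layout_sketch(layout: str, *, batch, features, height, width, depth=None):
    # Assumed placement of the named dimensions:
    # 'NCHW' -> [N, C, (D), H, W], 'NHWC' -> [N, (D), H, W, C]
    assert layout in ('NCHW', 'NHWC')
    if layout == 'NCHW':
        dims = [batch, features] + ([depth] if depth is not None else []) + [height, width]
    else:
        dims = [batch] + ([depth] if depth is not None else []) + [height, width, features]
    return np.array(dims, dtype=np.int64)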
Example #2
    def insert_pre_processing(graph: Graph, input_node: Node, node_mean_scale_values: np.array,
                              preprocessing_name: str):
        assert preprocessing_name in ['scale', 'mean']
        if node_mean_scale_values.get(preprocessing_name) is None:
            return
        user_value = node_mean_scale_values[preprocessing_name]
        value = 1 / user_value if preprocessing_name == 'scale' else user_value * (-1)
        optimize_value = int(preprocessing_name == 'scale')
        op = Mul if preprocessing_name == 'scale' else Add

        if all([x == optimize_value for x in value]):
            return
        assert input_node.has_valid('shape')
        features_dim_idx = get_features_dim(graph.graph['layout'], len(input_node.shape))
        assert compatible_dims(value.size, input_node.shape[features_dim_idx]) or value.size == 1

        shape = np.ones(len(input_node.shape), dtype=np.int64)
        shape[features_dim_idx] = value.size
        value = value.reshape(shape)

        name = input_node.soft_get('name', input_node.id) + '/' + preprocessing_name
        preprocessing = create_op_with_const_inputs(graph, op=op, port_value_dict={1: value}, op_attrs={'name': name})

        for dst in input_node.out_port(0).get_destinations():
            if dst.node.soft_get('type') != 'ShapeOf':
                # After the insertion of additional operations model optimizer
                # should keep the link to the input layer. Parameter node in framework
                # should map to parameter node in IR.
                # For this reason 'fw_tensor_debug_info' should be kept in data node.
                dst.get_connection().set_source(preprocessing.out_port(0), "source")

        input_node.out_port(0).connect(preprocessing.in_port(0))
Example #3
    def infer(node: Node):
        in_shape = node.in_port(0).data.get_shape()
        if in_shape.size != 4:
            raise Error('TensorFlow DepthToSpace operation is supported for 4D \'NHWC\' input layout only. '
                        'Current input shape is \'{}\''.format(in_shape))

        layout = node.graph.graph['layout']

        N = in_shape[get_batch_dim(layout, 4)]
        H = in_shape[get_height_dim(layout, 4)]
        W = in_shape[get_width_dim(layout, 4)]
        C = in_shape[get_features_dim(layout, 4)]

        block_size = node['block_size']
        if C is not dynamic_dimension and C % (block_size ** 2):
            raise Error('Feature dimensions of input tensor of DepthToSpace operation have to be divisible by square '
                        'of DepthToSpace \'block_size\' parameter. Input tensor shape = {}. Feature dimension = {}. '
                        'block_size = {}'.format(in_shape, C, block_size))

        out_shape = shape_for_layout(layout,
                                     batch=N,
                                     features=C // (block_size * block_size),
                                     height=H * block_size,
                                     width=W * block_size)

        if is_fully_defined(in_shape) and is_fully_defined(out_shape) and np.prod(in_shape) != np.prod(out_shape):
            raise Error('Number of input elements "{}" is not equal to number of output elements "{}" for node "{}"'
                        ''.format(in_shape, out_shape, node.soft_get('name', node.id)))
        node.out_port(0).data.set_shape(out_shape)
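
A quick sanity check of the shape arithmetic above, with hypothetical NHWC input values:

in_shape = [1, 2, 2, 12]                 # hypothetical NHWC input (N, H, W, C)
block_size = 2                           # C = 12 is divisible by block_size**2 = 4
out_shape = [1, 2 * block_size, 2 * block_size, 12 // block_size ** 2]  # -> [1, 4, 4, 3]; 48 elements in, 48 out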
Example #4
    def replace_pattern(self, graph: Graph, match: dict):
        bias_add = match['BiasAdd']

        # Replace BiasAdd by Add operation
        new_add = Add(graph, {'name': bias_add.id + '/Add'}).create_node()

        bias_add.in_port(0).get_connection().set_destination(new_add.in_port(0))
        bias_add.in_port(1).get_connection().set_destination(new_add.in_port(1))
        bias_add.out_port(0).get_connection().set_source(new_add.out_port(0))

        if bias_add.data_format != 'NCHW':
            return

        input_shape = new_add.in_port(0).data.get_shape()
        bias_shape = new_add.in_port(1).data.get_shape()
        assert len(bias_shape) == 1

        unsqueeze_dims = np.arange(len(input_shape))
        channel_dim = get_features_dim('NCHW', len(input_shape))
        unsqueeze_dims = np.delete(unsqueeze_dims, channel_dim, 0)

        unsqueeze_node = Unsqueeze(graph, {'name': new_add.id + '/BiasUnsqueeze'}).create_node()
        unsqueeze_dims_node = Const(graph, {'name': new_add.id + '/Dims',
                                            'value': unsqueeze_dims}).create_node()
        # Reconnecting nodes
        unsqueeze_node.in_port(1).connect(unsqueeze_dims_node.out_port(0))
        unsqueeze_node['override_output_shape'] = True

        new_add.in_port(1).get_connection().insert_node(unsqueeze_node)
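
As a worked illustration of the unsqueeze_dims computation above (assuming a hypothetical 4D NCHW input), the channel axis is removed from the full axis list, so a 1-D bias of shape [C] is unsqueezed into [1, C, 1, 1]:

import numpy as np

input_rank = 4
channel_dim = 1                                   # get_features_dim('NCHW', 4) == 1
unsqueeze_dims = np.delete(np.arange(input_rank), channel_dim, 0)
print(unsqueeze_dims)                             # [0 2 3] -> a [C] bias becomes [1, C, 1, 1]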
Example #5
    def infer(node: Node):
        in_shape = node.in_node().shape
        if in_shape.size != 4:
            raise Error('TensorFlow SpaceToDepth operation is supported for 4D \'NHWC\' input layout only. '
                        'Current input shape is \'{}\''.format(in_shape))

        layout = node.graph.graph['layout']
        N = in_shape[get_batch_dim(layout, 4)]
        H = in_shape[get_height_dim(layout, 4)]
        W = in_shape[get_width_dim(layout, 4)]
        C = in_shape[get_features_dim(layout, 4)]

        block_size = node['block_size']
        if (H is not dynamic_dimension and H % block_size) or (W is not dynamic_dimension and W % block_size):
            raise Error('Spatial dimensions of input tensor of SpaceToDepth operation have to be divisible by '
                        'SpaceToDepth \'block_size\' parameter. Input tensor shape = {}. Spatial dimensions = {},{}. '
                        'block_size = {}'.format(in_shape, H, W, block_size))

        out_shape = shape_for_layout(layout,
                                     batch=N,
                                     features=C * (block_size ** 2),
                                     height=H // block_size,
                                     width=W // block_size)

        node.out_port(0).data.set_shape(out_shape)
Example #6
    def infer(node: Node):
        assert [port.idx for port in node.in_ports().values() if not port.disconnected()] == [0], \
            'Wrong input nodes number for node {} with type ExtractImagePatches'.format(node.soft_get('name', node.id))
        input_shape = node.in_port(0).data.get_shape()
        name = node.soft_get('name', node.id)
        assert input_shape is not None, 'Input shape is not set for node {} with type ExtractImagePatches'.format(
            name)

        assert len(
            input_shape
        ) == 4, 'ExtractImagePatches operation supports only 4D tensors'

        layout = node.graph.graph['layout']
        N = input_shape[get_batch_dim(layout, 4)]
        C = input_shape[get_features_dim(layout, 4)]

        size_spatial = shape_array(node.sizes)[node.spatial_dims]

        input_spatial_shape = input_shape[node.spatial_dims]
        stride_spatial_shape = node.strides[node.spatial_dims]

        size_extent = node.rates[node.spatial_dims] * (size_spatial - 1) + 1

        pad_spatial_shape, output_spatial_shape = tf_window_op_pad_infer(
            input_spatial_shape, size_extent, stride_spatial_shape,
            node.auto_pad, False)

        out_shape = shape_for_layout(layout,
                                     batch=N,
                                     features=C * np.prod(size_spatial),
                                     height=output_spatial_shape[0],
                                     width=output_spatial_shape[1])

        node.out_port(0).data.set_shape(out_shape)
Example #7
    def infer(node):
        layout = node.graph.graph['layout']
        node_name = node.soft_get('name', node.id)

        assert len([port for port in node.in_ports().values() if not port.disconnected()]) == 3, \
            'The node "{}" must have 3 inputs'.format(node_name)

        assert node.has_valid('pooled_w'), '"pooled_w" attribute is not set for node "{}"'.format(node_name)
        assert node.has_valid('pooled_h'), '"pooled_h" attribute is not set for node "{}"'.format(node_name)
        assert node.has_valid('mode'), '"mode" attribute is not set for node "{}"'.format(node_name)
        assert node.mode in ['avg', 'max'], \
            '"mode" attribute range of values is ["avg", "max"], got {} for node "{}"'.format(node.mode, node_name)

        input_shape = node.in_port(0).data.get_shape()
        rois_shape = node.in_port(1).data.get_shape()
        indices_shape = node.in_port(2).data.get_shape()
        assert input_shape is not None and rois_shape is not None and indices_shape is not None, \
            'The node "{}" input shape is None'.format(node_name)
        assert compatible_dims(rois_shape[0], indices_shape[0]), 'The number of batch indices does not correspond ' \
                                                                 'to number of ROIs for node "{}"'.format(node_name)
        assert compatible_dims(rois_shape[1], 4), 'The size of ROI element must be 4 for node "{}"'.format(node_name)
        assert len(input_shape) == 4, 'The rank of port 0 input tensor of node "{}" must be 4.'.format(node_name)

        node.out_port(0).data.set_shape(
            shape_for_layout(layout,
                             batch=rois_shape[0],
                             features=input_shape[get_features_dim(layout, 4)],
                             height=node.pooled_h,
                             width=node.pooled_w)
        )
Example #8
    def insert_pre_processing(graph: Graph, input_node: Node,
                              node_mean_scale_values: np.array,
                              preprocessing_name: str):
        assert preprocessing_name in ['scale', 'mean']
        if node_mean_scale_values.get(preprocessing_name) is None:
            return
        user_value = node_mean_scale_values[preprocessing_name]
        value = 1 / user_value if preprocessing_name == 'scale' else user_value * (
            -1)
        optimize_value = int(preprocessing_name == 'scale')
        op = Mul if preprocessing_name == 'scale' else Add

        if all([x == optimize_value for x in value]):
            return
        assert input_node.has_valid('shape')
        features_dim_idx = get_features_dim(graph.graph['layout'],
                                            len(input_node.shape))
        assert compatible_dims(
            value.size, input_node.shape[features_dim_idx]) or value.size == 1

        shape = np.ones(len(input_node.shape), dtype=np.int64)
        shape[features_dim_idx] = value.size
        value = value.reshape(shape)

        name = input_node.soft_get('name',
                                   input_node.id) + '/' + preprocessing_name
        preprocessing = create_op_with_const_inputs(graph,
                                                    op=op,
                                                    port_value_dict={1: value},
                                                    op_attrs={'name': name})

        if input_node.is_out_port_connected(0) and len(
                input_node.out_port(0).get_destinations()) == 1:
            # There are models with pattern Parameter(uint8) -> Convert(float).
            # Adding mean/scale leads to the following:
            # Parameter(uint8) -> Mean/Scale -> Convert(float) which is incorrect.
            # To fix this mean and scale preprocessing node is inserted after Convert(float) node.
            out_node = input_node.out_port(0).get_destination().node
            convert_type = out_node.soft_get('dst_type')
            if out_node.soft_get('type') == "Convert" and (convert_type in [
                    np.float32, np.float16
            ]):
                input_node = out_node
                if convert_type != value.dtype:
                    new_value = value.astype(convert_type)
                    const_node = preprocessing.in_port(
                        1).get_connection().get_source().node
                    const_node['value'] = new_value

        for dst in input_node.out_port(0).get_destinations():
            if dst.node.soft_get('type') != 'ShapeOf':
                # After the insertion of additional operations model optimizer
                # should keep the link to the input layer. Parameter node in framework
                # should map to parameter node in IR.
                # For this reason 'fw_tensor_debug_info' should be kept in data node.
                dst.get_connection().set_source(preprocessing.out_port(0),
                                                "source")

        input_node.out_port(0).connect(preprocessing.in_port(0))
Example #9
    def find_and_replace_pattern(self, graph: Graph):
        layout = graph.graph['layout']
        for eltwise_op_node in graph.get_op_nodes(is_eltwise=True):
            out_shape = eltwise_op_node.out_port().data.get_shape()
            if 4 <= len(out_shape) <= 5:
                out_features = out_shape[get_features_dim(layout, len(out_shape))]
                for port, node in eltwise_op_node.in_nodes().items():
                    if len(node.shape) != len(out_shape) and len(node.shape) == 1 and out_features == node.shape[0]:
                        new_shape = shape_for_layout(layout, batch=1, features=out_features, height=1, width=1,
                                                     depth=1 if len(out_shape) == 5 else None)
                        dim_const = Const(graph, {'value': new_shape, 'name': node.id + '/Dim'}).create_node()
                        reshape_op = Reshape(graph, attrs={'dim': new_shape, 'name': node.id + '/Broadcast'}).create_node()

                        eltwise_op_node.in_port(port).get_source().node.out_port(0).get_connection().set_destination(reshape_op.in_port(0))
                        reshape_op.in_port(1).connect(dim_const.out_port(0))

                        reshape_op.out_port(0).connect(eltwise_op_node.in_port(port))
Example #10
    def upsample_infer(node: Node):
        node_name = node.soft_get('name', node.id)
        layout = node.graph.graph['layout']
        assert len(
            layout
        ) == 4, 'Input tensor rank must be equal to 4 for node "{}"'.format(
            node_name)

        input_shape = node.in_port(0).data.get_shape()

        if len(node.in_nodes()) == 1:
            in_height = input_shape[get_height_dim(layout, 4)]
            in_width = input_shape[get_width_dim(layout, 4)]
            assert node.has('width_scale') and node.has('height_scale')
            if in_height is not dynamic_dimension:
                out_height = math.floor(in_height * node.height_scale)
            else:
                out_height = dynamic_dimension
            if in_width is not dynamic_dimension:
                out_width = math.floor(in_width * node.width_scale)
            else:
                out_width = dynamic_dimension
            node.out_port(0).data.set_shape(
                shape_for_layout(layout,
                                 batch=input_shape[get_batch_dim(layout, 4)],
                                 features=input_shape[get_features_dim(
                                     layout, 4)],
                                 height=out_height,
                                 width=out_width))
        else:
            scales = node.in_port(1).data.get_value()
            assert scales is not None, 'The input with scales for node "{}" is not constant'.format(
                node_name)
            eps = 1e-5  # This is to make rounding in case of very close number to round to closest instead of down
            # generic output shape calculation to support 5D input shape case
            output_shape = shape_array(
                [dynamic_dimension for _ in range(len(input_shape))])
            for idx in range(len(output_shape)):
                if input_shape[idx] is not dynamic_dimension:
                    output_shape[idx] = int(
                        (input_shape[idx] + eps) * scales[idx])
                else:
                    output_shape[idx] = dynamic_dimension_value
            node.out_port(0).data.set_shape(output_shape)
Example #11
File: conv.py  Project: yeonbok/openvino
def pad_op_transform(graph: Graph, match: dict):
    op = match['op']
    pad_op = match['pad_op']
    input_data = pad_op.in_node(0)

    if pad_op.mode != 'constant':
        log.info(
            'The pad node "{}" with pad mode "{}" cannot be fused.'.format(
                pad_op.soft_get('name'), pad_op.mode))
        return

    if op.type == 'Pooling' and op.pool_method == 'max':
        return

    if pad_op.mode == 'constant':
        fill_value = pad_op.in_port(3).data.get_value()
        if fill_value is None or fill_value != 0.0:
            log.info(
                'The pad node "{}" with non-zero fill value cannot be fused.'.
                format(pad_op.soft_get('name')))
            return

    input_tensor_dims = len(match['pad_output'].shape)
    for in_port in [1, 2]:
        pads = pad_op.in_port(in_port).data.get_value()
        if pads[get_features_dim(op.graph.graph['layout'], input_tensor_dims)] != 0 or \
                pads[get_batch_dim(op.graph.graph['layout'], input_tensor_dims)] != 0:
            log.info(
                'The pad node "{}" with padding over feature/batch dimension cannot be fused.'
                .format(pad_op.soft_get('name')))
            return

    op.pad += np.concatenate([
        pad_op.in_port(1).data.get_value().reshape([-1, 1]),
        pad_op.in_port(2).data.get_value().reshape([-1, 1])
    ],
                             axis=1)
    op.pad_spatial_shape = op.pad[op.spatial_dims]
    op['auto_pad'] = None
    if op.type == 'Pooling':
        op['exclude_pad'] = False
    assert (graph[match['pad_output'].node][match['op'].node][0]['in'] == 0)
    edge_attrs = graph.get_edge_data(match['pad_output'].id, match['op'].id)[0]
    graph.remove_edge(match['pad_output'].id, match['op'].id)
    graph.add_edge(input_data.id, match['op'].id, **{'in': 0, **edge_attrs})
Example #12
 def get_channel_index(node: Node) -> int:
     guessed_layout = 'NCHW'
     if node.has_valid('rt_info'):
         rt_info = node.rt_info
         if rt_info.contains('old_api_map_order'):
             old_api_map_version = rt_info.get_attribute_version(
                 'old_api_map_order')
             old_api_map = rt_info.info['old_api_map_order',
                                        old_api_map_version]
             if 'inverse_order' in old_api_map.info:
                 order = old_api_map.info['inverse_order']
                 assert len(order) == len(guessed_layout)
                 guessed_layout = np.array(list(guessed_layout))[order]
                 guessed_layout = ''.join(guessed_layout)
     idx, has_layout = get_dim_from_layout(node, 'C')
     if has_layout:
         return idx
     else:
         return get_features_dim(guessed_layout, len(node.shape))
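
To illustrate the layout permutation used above with a hypothetical 'inverse_order' value (the actual contents of 'old_api_map_order' depend on the model):

import numpy as np

order = [0, 2, 3, 1]                              # hypothetical inverse_order
guessed_layout = ''.join(np.array(list('NCHW'))[order])
print(guessed_layout)                             # -> 'NHWC'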
Example #13
def pad_op_transform(graph: Graph, match: dict):
    op = match['op']
    pad_op: Node = match['pad_op']

    # to keep reshape-ability if Pad receives pads_begin/pads_end from shape subgraph
    if pad_op.in_port(1).get_source().node.soft_get('can_be_fused') is False:
        return

    if pad_op.mode != 'constant':
        log.info('The pad node "{}" with pad mode "{}" cannot be fused.'.format(pad_op.soft_get('name'), pad_op.mode))
        return

    if op.type == 'Pooling' and op.pool_method == 'max':
        return

    if pad_op.mode == 'constant':
        fill_value = pad_op.in_port(3).data.get_value()
        if fill_value is None or fill_value != 0.0:
            log.info('The pad node "{}" with non-zero fill value cannot be fused.'.format(pad_op.soft_get('name')))
            return

    input_tensor_dims = len(match['pad_output'].shape)
    for in_port in [1, 2]:
        pads = pad_op.in_port(in_port).data.get_value()
        if pads[get_features_dim(op.graph.graph['layout'], input_tensor_dims)] != 0 or \
                pads[get_batch_dim(op.graph.graph['layout'], input_tensor_dims)] != 0:
            log.info('The pad node "{}" with padding over feature/batch dimension cannot be fused.'.format(
                pad_op.soft_get('name')))
            return

    op.pad += np.concatenate([pad_op.in_port(1).data.get_value().reshape([-1, 1]),
                              pad_op.in_port(2).data.get_value().reshape([-1, 1])], axis=1)
    op.pad_spatial_shape = op.pad[op.spatial_dims]
    op['auto_pad'] = None
    if op.type == 'Pooling':
        op['exclude_pad'] = False
    assert (graph[match['pad_output'].node][match['op'].node][0]['in'] == 0)

    match['op'].in_port(0).disconnect()
    pad_op.in_port(0).get_connection().add_destination(match['op'].in_port(0))
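
A small worked example of how the pads are folded into op.pad above, with hypothetical pads_begin/pads_end values taken from the Pad inputs (ports 1 and 2):

import numpy as np

pads_begin = np.array([0, 0, 1, 1])               # hypothetical values
pads_end = np.array([0, 0, 2, 2])
delta = np.concatenate([pads_begin.reshape([-1, 1]),
                        pads_end.reshape([-1, 1])], axis=1)
print(delta)                                      # [[0 0] [0 0] [1 2] [1 2]] -> one [begin, end] row per dimension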
Example #14
    def get_suitable_channel_index(node: Node, shape):
        if len(shape) != 4:
            return None

        guessed_layout = 'NCHW'
        if node.has_valid('rt_info'):
            rt_info = node.rt_info
            if rt_info.contains('old_api_map_order'):
                old_api_map_version = rt_info.get_attribute_version('old_api_map_order')
                old_api_map = rt_info.info['old_api_map_order', old_api_map_version]
                if 'inverse_order' in old_api_map.info:
                    order = old_api_map.info['inverse_order']
                    assert len(order) == len(guessed_layout)
                    guessed_layout = np.array(list(guessed_layout))[order]
                    guessed_layout = ''.join(guessed_layout)
        idx, has_layout = get_dim_from_layout(node, 'C')
        if not has_layout:
            idx = get_features_dim(guessed_layout, len(node.shape))
        if compatible_dims(shape[idx], 3):
            return idx
        else:
            return None
Example #15
    def replace_pattern(self, graph: Graph, match: dict):
        y = match['maximum'].in_port(0).data.get_value()
        if y is None:
            y = match['maximum'].in_port(1).data.get_value()

        if y is None or y.shape != ():
            log.debug(
                'The value of the "maximum_y_data" is not defined or is not constant'
            )
            return

        # We need to check axes which performed reduction because IE supports only 2D, 3D, 4D inputs and
        # reduction only along spatial and channel dimensions.
        input_rank = len(match['sum'].in_port(0).data.get_shape())
        if input_rank not in [2, 3, 4]:
            log.debug(
                'IE supports L2 normalization only for 2D, 3D and 4D tensors.')
            return

        axes = match['sum'].in_port(1).data.get_value()
        axes = int64_array(axes)
        if axes.shape == ():
            axes = int64_array([axes])
        axes = int64_array(
            [axis if axis >= 0 else axis + input_rank for axis in axes])
        axes.sort()

        transformation_applicable = False
        # check for case C + all spatial dims. Works for 2D (NC), 3D (NCH) and 4D (NCHW and NHWC)
        if len(axes) + 1 == input_rank and np.array_equal(
                axes, int64_array(np.arange(start=1, stop=input_rank))):
            transformation_applicable = True

        # check for pure C channel normalization
        if len(axes) == 1 and ((input_rank == 4 and get_features_dim(
                graph.graph['layout'], input_rank) == axes[0]) or
                               (input_rank != 4 and axes[0] == 1)):
            transformation_applicable = True

        if not transformation_applicable:
            log.debug(
                'IE doesn\'t support l2 normalization with reduction along axes {}.'
                .format(axes))
            return

        output_name = match['l2_normalize'].soft_get('name',
                                                     match['l2_normalize'].id)
        normalize_node = create_op_node_with_second_input(
            graph, NormalizeL2Op, axes, {
                'name': output_name,
                'eps_mode': 'max',
                'eps': y
            })
        match['square'].in_port(0).get_source().connect(
            normalize_node.in_port(0))

        match['square'].in_port(0).disconnect()
        if match['l2_normalize'].in_port(
                0).get_source().node.id == match['rsqrt'].id:
            match['l2_normalize'].in_port(1).disconnect()
        else:
            match['l2_normalize'].in_port(0).disconnect()

        match['l2_normalize'].out_port(0).get_connection().set_source(
            normalize_node.out_port(0))
        rename_nodes([(match['l2_normalize'], output_name + "/TBR"),
                      (normalize_node, output_name)])
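
For example, with a hypothetical 4D input and reduction axes given as negative values, the axis normalization above yields the 'C + all spatial dims' case:

import numpy as np

input_rank = 4
axes = sorted(a if a >= 0 else a + input_rank for a in [-3, -2, -1])   # hypothetical axes
print(axes)                                                            # [1, 2, 3]
print(len(axes) + 1 == input_rank and np.array_equal(axes, np.arange(1, input_rank)))  # True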
Example #16
    def interp_infer(node: Node):
        layout = node.graph.graph['layout']
        assert len(layout) == 4
        if len(node.in_nodes()) == 2:
            src_shape = node.in_node(0).shape
            dst_shape = node.in_node(1).shape

            # in Caffe can be 2 inputs too, but shape should be got from shape of the second input
            if node.parse_2nd_input == 'shape':
                dst_shape = [dst_shape[get_height_dim(layout, 4)], dst_shape[get_width_dim(layout, 4)]]
            else:
                # it is TF case
                dst_shape = node.in_node(1).value

            if src_shape is None or dst_shape is None or len(src_shape) != 4 or len(dst_shape) != 2:
                log.error(
                    'Node {} with op {} cannot be converted to Resample layer because there is not enough info about '
                    'src/dst shapes: src_shape = {}, dst_shape = {}'.format(node.name, node.op, src_shape, dst_shape))
                node.type = None  # prevent translation to a valid IE layer
                return
            in_height = src_shape[get_height_dim(layout, 4)]
            in_width = src_shape[get_width_dim(layout, 4)]
            out_height = dst_shape[0]
            out_width = dst_shape[1]

            node.factor = factor_update(
                node.factor,
                [float(out_height) / in_height, float(out_width) / in_width],
                [in_height, in_width],
                [out_height, out_width],
                node.soft_get('name')
            )

            if node.factor is None:
                node['width'] = out_width
                node['height'] = out_height

            node.out_node().shape = shape_for_layout(layout,
                                                     batch=src_shape[get_batch_dim(layout, 4)],
                                                     features=src_shape[get_features_dim(layout, 4)],
                                                     height=out_height,
                                                     width=out_width)
            node.graph.remove_edge(node.in_node(1).id, node.id)
        else:
            outn = node.out_node(0)

            in_shape = node.in_node(0)
            num_ = in_shape.shape[get_batch_dim(layout, 4)]
            channels_ = in_shape.shape[get_features_dim(layout, 4)]
            height_in_ = in_shape.shape[get_height_dim(layout, 4)]
            width_in_ = in_shape.shape[get_width_dim(layout, 4)]

            height_out_ = height_in_ + node.pad_beg + node.pad_end
            width_out_ = width_in_ + node.pad_beg + node.pad_end

            if node.shrink_factor != 1 and node.zoom_factor == 1:
                shrink_factor = node.shrink_factor
                if shrink_factor < 1:
                    log.error('Shrink factor should be positive in node {}'.format(node.id))
                    return None
                height_out_ = (height_out_ - 1) / shrink_factor + 1
                width_out_ = (width_out_ - 1) / shrink_factor + 1
            elif node.shrink_factor == 1 and node.zoom_factor != 1:
                zoom_factor = node.zoom_factor
                if zoom_factor < 1:
                    log.error('Zoom factor should be positive in node {}'.format(node.id))
                    return None

                node['debug_message'] = 'Interp layer shape inference function may be wrong, please, try to update ' \
                                        'layer shape inference function in the file (openvino/tools/mo/ops/interp.op at the ' \
                                        'line {}).'.format(inspect.currentframe().f_lineno) + refer_to_faq_msg(100)
                # Reshape methods can be different in some cases
                # Commented out section represents reshape that used in deeplab-caffe
                # Uncomment the following lines, if your model was trained with deeplab-caffe
                # or have the same reshape method
                # height_out_ = height_out_ + (height_out_ - 1) * (zoom_factor - 1)
                # width_out_ = width_out_ + (width_out_ - 1) * (zoom_factor - 1)

                # Comment out the following lines if you use the reshape method from previous section
                height_out_ = height_out_ * zoom_factor
                width_out_ = width_out_ * zoom_factor
            elif node.width != 0 and node.height != 0:
                height_out_ = node.height
                width_out_ = node.width
            elif node.shrink_factor != 1 and node.zoom_factor != 1:
                shrink_factor = node.shrink_factor
                zoom_factor = node.zoom_factor
                if shrink_factor < 1:
                    log.error('Shrink factor should be positive in node {}'.format(node.id))
                    return None
                if zoom_factor < 1:
                    log.error('Zoom factor should be positive in node {}'.format(node.id))
                    return None
                height_out_ = (height_out_ - 1) / shrink_factor + 1
                width_out_ = (width_out_ - 1) / shrink_factor + 1
                height_out_ = height_out_ + (height_out_ - 1) * (zoom_factor - 1)
                width_out_ = width_out_ + (width_out_ - 1) * (zoom_factor - 1)

            outn.shape = shape_for_layout(layout,
                                          batch=num_,
                                          features=channels_,
                                          height=height_out_,
                                          width=width_out_)
Example #17
 def test_get_features_dim_NCDHW(self):
     self.assertEqual(get_features_dim('NCHW', 5), 1)
Example #18
 def test_get_features_dim_NDHWC(self):
     self.assertEqual(get_features_dim('NHWC', 5), 4)
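
For reference, a minimal sketch of get_features_dim consistent with the two tests above (an assumption, not necessarily the actual OpenVINO implementation) could look like this:

def get_features_dim(layout: str, shape_len: int) -> int:
    # Channels follow the batch dimension in NCHW-like layouts and come last in NHWC-like ones,
    # hence get_features_dim('NCHW', 5) == 1 and get_features_dim('NHWC', 5) == 4.
    assert layout in ('NCHW', 'NHWC')
    return 1 if layout == 'NCHW' else shape_len - 1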