Example 1
    def upsample_infer(node: Node):
        layout = node.graph.graph['layout']
        assert len(layout) == 4

        input_shape = node.in_node(0).shape
        if input_shape is None:
            return
        in_height = input_shape[get_height_dim(layout, 4)]
        in_width = input_shape[get_width_dim(layout, 4)]

        if len(node.in_nodes()) == 1:
            assert node.has('width_scale') and node.has('height_scale')
            out_height_scale = node.height_scale
            out_width_scale = node.width_scale
        else:
            assert node.in_node(1).value is not None
            out_height_scale = node.in_node(1).value[get_height_dim(layout, 4)]
            out_width_scale = node.in_node(1).value[get_width_dim(layout, 4)]
        out_height = math.floor(in_height * out_height_scale)
        out_width = math.floor(in_width * out_width_scale)

        node.out_node().shape = shape_for_layout(
            layout,
            batch=input_shape[get_batch_dim(layout, 4)],
            features=input_shape[get_features_dim(layout, 4)],
            height=out_height,
            width=out_width)
Example 2
    def infer(node: Node):
        in_shape = node.in_port(0).data.get_shape()
        if in_shape.size != 4:
            raise Error(
                'TensorFlow DepthToSpace operation is supported for 4D \'NHWC\' input layout only. '
                'Current input shape is \'{}\''.format(in_shape))

        layout = node.graph.graph['layout']

        N = in_shape[get_batch_dim(layout, 4)]
        H = in_shape[get_height_dim(layout, 4)]
        W = in_shape[get_width_dim(layout, 4)]
        C = in_shape[get_features_dim(layout, 4)]

        block_size = node['block_size']
        if C is not dynamic_dimension and C % (block_size**2):
            raise Error(
                'Feature dimensions of input tensor of DepthToSpace operation have to be divisible by square '
                'of DepthToSpace \'block_size\' parameter. Input tensor shape = {}. Feature dimension = {}. '
                'block_size = {}'.format(in_shape, C, block_size))

        out_shape = shape_for_layout(layout,
                                     batch=N,
                                     features=C // (block_size * block_size),
                                     height=H * block_size,
                                     width=W * block_size)

        if is_fully_defined(in_shape) and is_fully_defined(
                out_shape) and np.prod(in_shape) != np.prod(out_shape):
            raise Error(
                'Number of input elements "{}" is not equal to number of output elements "{}" for node "{}"'
                ''.format(in_shape, out_shape, node.soft_get('name', node.id)))
        node.out_port(0).data.set_shape(out_shape)
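A quick numeric check of the shape math above, using a hypothetical NHWC input and block_size (values assumed purely for illustration):

    # Hypothetical NHWC input; block_size assumed to be 2.
    block_size = 2
    N, H, W, C = 1, 10, 10, 16
    assert C % (block_size ** 2) == 0          # 16 is divisible by 4, so the Error branch is not taken
    out_shape = (N, H * block_size, W * block_size, C // (block_size ** 2))
    print(out_shape)                           # (1, 20, 20, 4) -- same total element count as the input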
Example 3
    def upsample_infer(node: Node):
        layout = node.graph.graph['layout']
        assert len(layout) == 4

        input_shape = node.in_node(0).shape
        if input_shape is None:
            return

        if len(node.in_nodes()) == 1:
            in_height = input_shape[get_height_dim(layout, 4)]
            in_width = input_shape[get_width_dim(layout, 4)]
            assert node.has('width_scale') and node.has('height_scale')
            out_height = math.floor(in_height * node.height_scale)
            out_width = math.floor(in_width * node.width_scale)
            node.out_node().shape = shape_for_layout(
                layout,
                batch=input_shape[get_batch_dim(layout, 4)],
                features=input_shape[get_features_dim(layout, 4)],
                height=out_height,
                width=out_width)
        else:
            assert node.in_node(1).value is not None
            # generic output shape calculation to support 5D input shape case
            node.out_node().shape = np.array(
                input_shape * node.in_node(1).value).astype(np.int64)
Example 4
    def priorbox_infer(node: Node):
        layout = node.graph.graph['layout']
        data_shape = node.in_node(0).shape

        # calculate all different aspect_ratios (the first one is always 1)
        # in aspect_ratio 1/x values will be added for all except 1 if flip is True
        ar_seen = [1.0]
        ar_seen.extend(node.aspect_ratio.copy())
        if node.flip:
            for s in node.aspect_ratio:
                ar_seen.append(1.0 / s)

        ar_seen = np.unique(np.array(ar_seen).round(decimals=6))

        num_ratios = 0
        if len(node.min_size) > 0:
            num_ratios = len(ar_seen) * len(node.min_size)

        if node.has_valid('fixed_size') and len(node.fixed_size) > 0:
            num_ratios = len(ar_seen) * len(node.fixed_size)

        if node.has_valid('density') and len(node.density) > 0:
            for d in node.density:
                if node.has_valid('fixed_ratio') and len(node.fixed_ratio) > 0:
                    num_ratios = num_ratios + len(
                        node.fixed_ratio) * (pow(d, 2) - 1)
                else:
                    num_ratios = num_ratios + len(ar_seen) * (pow(d, 2) - 1)

        num_ratios = num_ratios + len(node.max_size)

        res_prod = data_shape[get_height_dim(
            layout, 4)] * data_shape[get_width_dim(layout, 4)] * num_ratios * 4
        node.out_node(0).shape = np.array([1, 2, res_prod], dtype=np.int64)
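To make the ratio counting above concrete, here is a rough trace with assumed, SSD-like attributes (aspect_ratio = [2.0], flip = True, a single min_size and max_size, no fixed_size or density):

    import numpy as np

    # Assumed SSD-like PriorBox attributes.
    aspect_ratio, flip = [2.0], True
    ar_seen = [1.0] + aspect_ratio + ([1.0 / a for a in aspect_ratio] if flip else [])
    ar_seen = np.unique(np.round(ar_seen, 6))   # [0.5, 1.0, 2.0]
    num_ratios = len(ar_seen) * 1 + 1           # 3 ratios * len(min_size) + len(max_size) = 4
    print([1, 2, 38 * 38 * num_ratios * 4])     # for an assumed 38x38 feature map -> [1, 2, 23104]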
Example 5
    def infer(node: Node):
        in_shape = node.in_node().shape
        if in_shape.size != 4:
            raise Error(
                'TensorFlow DepthToSpace operation is supported for 4D \'NHWC\' input layout only. '
                'Current input shape is \'{}\''.format(in_shape))

        layout = node.graph.graph['layout']

        N = in_shape[get_batch_dim(layout, 4)]
        H = in_shape[get_height_dim(layout, 4)]
        W = in_shape[get_width_dim(layout, 4)]
        C = in_shape[get_features_dim(layout, 4)]

        block_size = node['block_size']
        if C % (block_size**2):
            raise Error(
                'Feature dimensions of input tensor of DepthToSpace operation have to be divisible by square '
                'of DepthToSpace \'block_size\' parameter. Input tensor shape = {}. Feature dimension = {}. '
                'block_size = {}'.format(in_shape, C, block_size))

        out_shape = shape_for_layout(layout,
                                     batch=N,
                                     features=int(C / (block_size**2)),
                                     height=int(H * block_size),
                                     width=int(W * block_size))

        assert np.prod(in_shape) == np.prod(out_shape)
        node.out_node().shape = int64_array(out_shape)
Example 6
    def tf_resize_infer(node: Node):
        input_shape = node.in_port(0).data.get_shape()
        if input_shape is None:
            return

        attrs_msg = "If half_pixel_centers attribute of the node {} with op {} is True, " \
                    "the attribute align_corners must be False"
        node_name = node.soft_get('name', node.id)
        assert not node.half_pixel_centers or (node.half_pixel_centers and not node.align_corners), \
            attrs_msg.format(node_name, node.op)

        connected_in_ports = [port for port in node.in_ports().values() if not port.disconnected()]
        assert len(connected_in_ports) == 2, \
            "Node {} with op {} number of inputs must be equal to 2.".format(node_name, node.op)

        new_sizes_value = node.in_port(1).data.get_value()
        assert new_sizes_value is not None, "Node {} with op {} has no value in input port 1".format(node_name, node.op)

        input_rank = len(input_shape)
        assert input_rank == 4, \
            "Resized input data of the node {} with op {} must be 4D tensor".format(node_name, node.op)

        len_msg = "Op {} with name {} supports only resize with respect to height and width dimension simultaneously"
        assert len(new_sizes_value) == 2, len_msg.format(node.op, node_name)

        output_shape = int64_array(input_shape.copy())

        layout = node.graph.graph['layout']
        output_shape[get_height_dim(layout, input_rank)] = new_sizes_value[0]
        output_shape[get_width_dim(layout, input_rank)] = new_sizes_value[1]

        node.out_port(0).data.set_shape(output_shape)
Example 7
    def upsample_infer(node: Node):
        layout = node.graph.graph['layout']
        assert len(layout) == 4

        input_shape = node.in_node(0).shape
        if input_shape is None:
            return

        if len(node.in_nodes()) == 1:
            in_height = input_shape[get_height_dim(layout, 4)]
            in_width = input_shape[get_width_dim(layout, 4)]
            assert node.has('width_scale') and node.has('height_scale')
            out_height = math.floor(in_height * node.height_scale)
            out_width = math.floor(in_width * node.width_scale)
            node.out_node().shape = shape_for_layout(
                layout,
                batch=input_shape[get_batch_dim(layout, 4)],
                features=input_shape[get_features_dim(layout, 4)],
                height=out_height,
                width=out_width)
        else:
            assert node.in_node(1).value is not None
            eps = 1e-5  # This is to make rounding in case of very close number to round to closest instead of down
            # generic output shape calculation to support 5D input shape case
            node.out_node().shape = np.array(
                (input_shape + eps) * node.in_node(1).value).astype(np.int64)
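The eps term deserves a short illustration (values assumed for demonstration): the product of a dimension and a scale can land just below the intended integer, and plain truncation would then lose a pixel.

    # Why eps is added before truncation: the product can land just below the intended integer.
    eps = 1e-5
    in_dim, scale = 100, 0.29                  # hypothetical dimension and scale factor
    print(int(in_dim * scale))                 # 28 -- 100 * 0.29 == 28.999999999999996
    print(int((in_dim + eps) * scale))         # 29 -- eps nudges the product past the boundary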
Example 8
    def infer(node: Node):
        in_shape = node.in_node().shape
        if in_shape.size != 4:
            raise Error(
                'TensorFlow SpaceToDepth operation is supported for 4D \'NHWC\' input layout only. '
                'Current input shape is \'{}\''.format(in_shape))

        layout = node.graph.graph['layout']
        N = in_shape[get_batch_dim(layout, 4)]
        H = in_shape[get_height_dim(layout, 4)]
        W = in_shape[get_width_dim(layout, 4)]
        C = in_shape[get_features_dim(layout, 4)]

        block_size = node['block_size']
        if (H is not dynamic_dimension
                and H % block_size) or (W is not dynamic_dimension
                                        and W % block_size):
            raise Error(
                'Spatial dimensions of input tensor of SpaceToDepth operation have to be divisible by '
                'SpaceToDepth \'block_size\' parameter. Input tensor shape = {}. Spatial dimensions = {},{}. '
                'block_size = {}'.format(in_shape, H, W, block_size))

        out_shape = shape_for_layout(layout,
                                     batch=N,
                                     features=C * (block_size**2),
                                     height=H // block_size,
                                     width=W // block_size)

        node.out_port(0).data.set_shape(out_shape)
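Mirroring the DepthToSpace check earlier, a rough numeric trace of the SpaceToDepth shape math with assumed NHWC values:

    # Hypothetical NHWC input; block_size assumed to be 2.
    block_size = 2
    N, H, W, C = 1, 20, 20, 4
    assert H % block_size == 0 and W % block_size == 0
    out_shape = (N, H // block_size, W // block_size, C * block_size ** 2)
    print(out_shape)                           # (1, 10, 10, 16) -- the inverse of the DepthToSpace example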
Example 9
    def infer(node: Node):
        in_shape = node.in_node().shape
        if in_shape.size != 4:
            raise Error(
                'TensorFlow SpaceToDepth operation is supported for 4D \'NHWC\' input layout only. '
                'Current input shape is \'{}\''.format(in_shape))

        layout = node.graph.graph['layout']
        N = in_shape[get_batch_dim(layout, 4)]
        H = in_shape[get_height_dim(layout, 4)]
        W = in_shape[get_width_dim(layout, 4)]
        C = in_shape[get_features_dim(layout, 4)]

        block_size = node['block_size']
        if H % block_size or W % block_size:
            raise Error(
                'Spatial dimensions of input tensor of SpaceToDepth operation have to be divisible by '
                'SpaceToDepth \'block_size\' parameter. Input tensor shape = {}. Spatial dimensions = {},{}. '
                'block_size = {}'.format(in_shape, H, W, block_size))

        out_shape = shape_for_layout(layout,
                                     batch=N,
                                     features=int(C * (block_size**2)),
                                     height=int(H / block_size),
                                     width=int(W / block_size))

        assert np.prod(in_shape) == np.prod(out_shape)
        node.out_node().shape = int64_array(out_shape)
Example 10
def is_spatial_squeeze(layout: str, input_shape: np.ndarray, squeeze_dims: np.ndarray):
    """
    Checks that the squeeze operation removes all spatial dimensions.
    :param layout: graph layout.
    :param input_shape: numpy array with input shape.
    :param squeeze_dims: numpy array with dims to squeeze.
    :return: result of the check.
    """
    if len(input_shape) < 4 or len(input_shape) > 5:
        return False
    spatial_dims = [get_height_dim(layout, len(input_shape)), get_width_dim(layout, len(input_shape))]
    if len(input_shape) == 5:
        spatial_dims.append(get_depth_dim(layout, len(input_shape)))
    for dim in spatial_dims:
        if input_shape[dim] != 1:
            log.debug('The reshape from "{}" with squeezed dims "{}" is not a spatial squeeze'.format(input_shape,
                                                                                                      squeeze_dims))
            return False
    if len(squeeze_dims) != len(spatial_dims):
        log.debug('The reshape from "{}" with squeezed dims "{}" is not a spatial squeeze'.format(input_shape,
                                                                                                  squeeze_dims))
        return False
    log.debug('The reshape from "{}" with squeezed dims "{}" is a spatial squeeze'.format(input_shape,
                                                                                          squeeze_dims))
    return True
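A couple of hypothetical calls showing what the check above accepts and rejects (4D shapes assumed):

    import numpy as np

    print(is_spatial_squeeze('NHWC', np.array([1, 1, 1, 256]), np.array([1, 2])))  # True: H and W are 1 and both are squeezed
    print(is_spatial_squeeze('NCHW', np.array([1, 256, 7, 7]), np.array([2, 3])))  # False: spatial dims are not 1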
Example 11
    def resample_infer(node: Node):
        layout = node.graph.graph['layout']
        assert len(layout) == 4

        input_shape = node.in_node(0).shape
        if input_shape is None:
            return
        in_height = input_shape[get_height_dim(layout, 4)]
        in_width = input_shape[get_width_dim(layout, 4)]

        if node.has('fw') and node.fw == 'tf':
            dst_shape = node.in_node(1).value
            if dst_shape is None or len(input_shape) != 4 or len(
                    dst_shape) != 2:
                log.error(
                    'Node {} with op {} cannot be converted to Resample layer because there is not enough info about '
                    'src/dst shapes: src_shape = {}, dst_shape = {}'.format(
                        node.name, node.op, input_shape, dst_shape))
                node.type = None  # prevent translation to a valid IE layer
                return
            out_height = dst_shape[0]
            out_width = dst_shape[1]
            node.graph.remove_edge(node.in_node(1).id, node.id)
        else:
            if len(node.in_nodes()) == 1:
                if node.has('width') and node.has('height'):
                    out_height = node.height
                    out_width = node.width
                else:
                    out_height = node.factor * in_height
                    out_width = node.factor * in_width
            else:
                out_height = node.in_node(1).shape[get_height_dim(layout, 4)]
                out_width = node.in_node(1).shape[get_width_dim(layout, 4)]

        node.factor = factor_update(
            node.factor,
            [float(out_height) / in_height,
             float(out_width) / in_width], [in_height, in_width],
            [out_height, out_width], node.soft_get('name'))

        node.out_node().shape = shape_for_layout(
            layout,
            batch=input_shape[get_batch_dim(layout, 4)],
            features=input_shape[get_features_dim(layout, 4)],
            height=out_height,
            width=out_width)
Example 12
    def priorbox_clustered_infer(node: Node):
        layout = node.graph.graph['layout']
        data_shape = node.in_node(0).shape
        num_ratios = len(node.width)

        res_prod = data_shape[get_height_dim(
            layout, 4)] * data_shape[get_width_dim(layout, 4)] * num_ratios * 4
        node.out_node(0).shape = np.array([1, 2, res_prod], dtype=np.int64)
Example 13
    def priorbox_clustered_infer(node: Node):
        layout = node.graph.graph['layout']
        data_shape = node.in_node(0).shape
        num_ratios = len(node.width)

        if node.has_and_set('V10_infer'):
            assert node.in_node(0).value is not None
            node.out_node(0).shape = np.array([2, np.prod(node.in_node(0).value) * num_ratios * 4], dtype=np.int64)
        else:
            res_prod = data_shape[get_height_dim(layout, 4)] * data_shape[get_width_dim(layout, 4)] * num_ratios * 4
            node.out_node(0).shape = np.array([1, 2, res_prod], dtype=np.int64)
Example 14
    def upsample_infer(node: Node):
        node_name = node.soft_get('name', node.id)
        layout = node.graph.graph['layout']
        assert len(
            layout
        ) == 4, 'Input tensor rank must be equal to 4 for node "{}"'.format(
            node_name)

        input_shape = node.in_port(0).data.get_shape()

        if len(node.in_nodes()) == 1:
            in_height = input_shape[get_height_dim(layout, 4)]
            in_width = input_shape[get_width_dim(layout, 4)]
            assert node.has('width_scale') and node.has('height_scale')
            if in_height is not dynamic_dimension:
                out_height = math.floor(in_height * node.height_scale)
            else:
                out_height = dynamic_dimension
            if in_width is not dynamic_dimension:
                out_width = math.floor(in_width * node.width_scale)
            else:
                out_width = dynamic_dimension
            node.out_port(0).data.set_shape(
                shape_for_layout(layout,
                                 batch=input_shape[get_batch_dim(layout, 4)],
                                 features=input_shape[get_features_dim(
                                     layout, 4)],
                                 height=out_height,
                                 width=out_width))
        else:
            scales = node.in_port(1).data.get_value()
            assert scales is not None, 'The input with scales for node "{}" is not constant'.format(
                node_name)
            eps = 1e-5  # This is to make rounding in case of very close number to round to closest instead of down
            # generic output shape calculation to support 5D input shape case
            output_shape = shape_array(
                [dynamic_dimension for _ in range(len(input_shape))])
            for idx in range(len(output_shape)):
                if input_shape[idx] is not dynamic_dimension:
                    output_shape[idx] = int(
                        (input_shape[idx] + eps) * scales[idx])
                else:
                    output_shape[idx] = dynamic_dimension_value
            node.out_port(0).data.set_shape(output_shape)
Example 15
    def replace_pattern(graph: Graph, match: dict):
        reshape = match['reshape']
        assert len(reshape.in_nodes()) > 0
        if graph.graph['layout'] == 'NCHW' or reshape.has_and_set('nchw_layout') or\
                reshape.soft_get('correct_data_layout') is True:
            return

        input_node = reshape.in_node()
        output_node = reshape.out_node()
        input_shape = input_node.shape
        output_shape = output_node.shape

        if len(input_shape) >= 4 and len(output_shape) == 3:
            # Check that we will permute some shapes in this Reshape by our permutation pass
            layout = 'NCHW'
            c_idx = get_features_dim(layout, len(input_shape))
            hw_idx = [
                get_width_dim(layout, len(input_shape)),
                get_height_dim(layout, len(input_shape))
            ]
            if input_shape[c_idx] != 1 and np.any(
                    input_shape[hw_idx] != [1, 1]):
                # then nhwc -> nchw permutation can change shapes significantly
                # We need to wrap up node with NCHW -> NHWC permutes and don't touch it later
                permutation = PermuteAttrs.get_nchw_to_nhwc_permutation(
                    len(input_shape))
                permutation_back = PermuteAttrs.get_nchw_to_nhwc_permutation(
                    len(input_shape))

                # 1. Insert input Permute
                #    This Permute will permute input from original input layout to operation layout
                edge_attrs = graph.get_edge_data(input_node.id, reshape.id)[0]
                graph.remove_edge(input_node.id, reshape.id)

                permute_op = Permute(graph, {
                    'order': permutation.perm,
                    'name': reshape.name + '/Permute_'
                })
                permute_data_node = permute_op.create_node_with_data(
                    [input_node])

                graph.add_edge(permute_data_node.id, reshape.id, **edge_attrs)
Example 16
    def regionyolo_infer(node: Node):
        input_shape = node.in_node(0).shape
        if input_shape is None:
            return
        axis = get_canonical_axis_index(input_shape, node.axis)
        end_axis = get_canonical_axis_index(input_shape, node.end_axis)
        node.axis = axis
        node.end_axis = end_axis
        if node.do_softmax:
            flat_dim = np.prod(input_shape[axis: end_axis + 1])
            node.out_node().shape = np.array([*input_shape[:axis], flat_dim, *input_shape[end_axis + 1:]])
        else:
            layout = node.graph.graph['layout']
            assert len(layout) == 4

            node.out_node().shape = shape_for_layout(layout,
                                                     batch=input_shape[get_batch_dim(layout, 4)],
                                                     features=(node.classes + node.coords + 1) * len(node.mask),
                                                     height=input_shape[get_height_dim(layout, 4)],
                                                     width=input_shape[get_width_dim(layout, 4)])
Example 17
    def regionyolo_infer(node: Node):
        input_shape = node.in_port(0).data.get_shape()
        axis = get_canonical_axis_index(input_shape, node.axis)
        end_axis = get_canonical_axis_index(input_shape, node.end_axis)
        node.axis = axis
        node.end_axis = end_axis
        if node.do_softmax:
            dims_to_flatten = input_shape[axis: end_axis + 1]
            if is_fully_defined(dims_to_flatten):
                flat_dim = np.ma.prod(dims_to_flatten)
            else:
                flat_dim = dynamic_dimension_value
            node.out_port(0).data.set_shape([*input_shape[:axis], flat_dim, *input_shape[end_axis + 1:]])
        else:
            layout = node.graph.graph['layout']
            assert len(layout) == 4

            node.out_port(0).data.set_shape(shape_for_layout(layout,
                                                             batch=input_shape[get_batch_dim(layout, 4)],
                                                             features=(node.classes + node.coords + 1) * len(node.mask),
                                                             height=input_shape[get_height_dim(layout, 4)],
                                                             width=input_shape[get_width_dim(layout, 4)]))
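For the do_softmax == False branch, the feature count follows the usual YOLO layout; with assumed YOLOv3-like attributes it works out as below, while batch, height and width are carried over from the input.

    # Assumed YOLOv3-like attributes.
    classes, coords, mask = 80, 4, [0, 1, 2]
    print((classes + coords + 1) * len(mask))  # 255 output feature channels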
Example 18
    def replace_pattern(self, graph: Graph, match: dict):
        layout = graph.graph['layout']
        if layout != 'NHWC':
            return

        reshape1 = match['reshape1']
        softmax = match['softmax']

        # Check that Reshape->Softmax->Reshape shuffle only feature channel
        input_shape = np.array(reshape1.in_node(0).shape)
        reshape1_shape = np.array(reshape1.out_node().shape)

        # Check that input shape is 4D
        if len(input_shape) != 4:
            log.warning(
                'Can\'t convert Reshape({})->Softmax->Reshape sequence because the input shape should be 4D '
                '(instead of {}D {})'.format(reshape1.name, len(input_shape),
                                             input_shape))
            return

        if len(reshape1_shape) != 2:
            log.warning(
                'This pass expects a 2D output tensor for the first Reshape {} layer (given shape: {})'
                ''.format(reshape1.name, reshape1_shape))
            return

        # Define feature dim
        feature_dim = get_features_dim(layout, len(input_shape))
        spatial_dims = [
            get_height_dim(layout, len(input_shape)),
            get_width_dim(layout, len(input_shape))
        ]

        # Skip transform in case if spatial dims in input shape are equal to [1,1]
        if np.array_equal(input_shape[spatial_dims], np.array([1, 1])):
            log.info('Skip this transformation due to spatial dims are [1,1]')
            return

        # Check that Reshape1 has out dims [-1, feature_dims]
        if not (reshape1_shape[-1] == input_shape[-1] and reshape1_shape[0]
                == np.prod(np.delete(input_shape, feature_dim))):
            log.warning(
                'Output shape for Reshape operation should be [{},{}] instead of {}'
                .format(np.prod(np.delete(input_shape, feature_dim)),
                        input_shape[-1], reshape1_shape))
            return

        # Now we are sure that Reshape->Softmax suits for this transformation

        # The resulting shape for Reshape1 layer : [N,C,(H*W)]
        new_reshape1_shape = np.concatenate(
            (np.array([input_shape[0]]), np.array([reshape1_shape[-1]]),
             np.array([np.prod(input_shape[spatial_dims])])))

        # update 'dim' attribute but preserve batch dimension size which could be -1
        reshape1.dim = int64_array([reshape1.dim[0], *new_reshape1_shape[1:]])

        old_shape = np.array(reshape1.out_node().shape)
        reshape1.out_node().shape = new_reshape1_shape
        softmax.out_node().shape = new_reshape1_shape

        # Preserve layers from conversion to NCHW (in case of NHWC topology layout)
        reshape1['nchw_layout'] = True
        reshape1.out_node()['nchw_layout'] = True
        softmax['nchw_layout'] = True
        softmax.out_node()['nchw_layout'] = True

        # Create final Reshape to keep original shape for softmax output if softmax is not the last node
        softmax_out_data = softmax.out_node()
        if len(softmax_out_data.out_nodes()) != 0:
            next_operation = softmax_out_data.out_node()
            # Save edge attributes & remove edge
            edge_attrs = graph.get_edge_data(softmax_out_data.id,
                                             next_operation.id)[0]
            graph.remove_edge(softmax_out_data.id, next_operation.id)
            reshape_op = Reshape(
                graph,
                dict(name=softmax.id + "/Reshape",
                     dim=np.array(old_shape),
                     nchw_layout=True))
            reshape_out_data = reshape_op.create_node_with_data(
                inputs=[softmax_out_data])
            graph.add_edges_from([(reshape_out_data.id, next_operation.id,
                                   edge_attrs)])
def replace_resize(graph: Graph, resize: Node):
    log.debug("Converting of ONNX Resize-11 to Interpolate-4 "
              "is triggered for node {}.".format(
                  resize.soft_get('name', resize.id)))

    input_shape = resize.in_port(0).data.get_shape()
    input_rank = len(input_shape)
    resize_name = resize.soft_get('name', resize.id)
    if input_rank not in {4, 5}:
        log.warning(
            'The input shape is not 4D or 5D for op with name {}'.format(
                resize_name))
        return

    num_of_inputs = len([
        port for port in resize.in_ports().values() if not port.disconnected()
    ])
    assert num_of_inputs in {3, 4}, \
        "Number of inputs of ONNXResize (with name {}) should be equal to 3 or 4".format(resize_name)

    assert resize.soft_get('coordinate_transformation_mode') != 'tf_crop_and_resize', \
        'Mode tf_crop_and_resize is not supported for op {} with name {}'.format(resize.op, resize_name)

    layout = graph.graph['layout']

    if input_rank == 4:
        begin_dim = get_height_dim(layout, input_rank)
        end_dim = get_width_dim(layout, input_rank) + 1
    else:
        begin_dim = get_depth_dim(layout, input_rank)
        end_dim = get_width_dim(layout, input_rank) + 1

    sizes_ss = create_op_with_const_inputs(
        graph, StridedSlice, {
            1: int64_array([begin_dim]),
            2: int64_array([end_dim]),
            3: int64_array([1])
        }, {
            'name': resize_name + '/StridedSlice_sizes',
            'begin_mask': int64_array([1]),
            'end_mask': int64_array([1]),
            'new_axis_mask': int64_array([0]),
            'shrink_axis_mask': int64_array([0]),
            'ellipsis_mask': int64_array([0])
        })
    scales_ss = create_op_with_const_inputs(
        graph, StridedSlice, {
            1: int64_array([begin_dim]),
            2: int64_array([end_dim]),
            3: int64_array([1])
        }, {
            'name': resize_name + '/StridedSlice_scales',
            'begin_mask': int64_array([1]),
            'end_mask': int64_array([1]),
            'new_axis_mask': int64_array([0]),
            'shrink_axis_mask': int64_array([0]),
            'ellipsis_mask': int64_array([0])
        })
    axes_node = Const(
        graph, {
            'name': resize_name + '/axis',
            'value': int64_array(np.arange(begin_dim, end_dim))
        }).create_node()

    shape_calculation_mode = 'scales' if num_of_inputs == 3 else 'sizes'

    interpolate_node = Interpolate(
        graph, {
            'version': 'opset4',
            'mode': convert_mode(resize.mode),
            'coordinate_transformation_mode':
            resize.coordinate_transformation_mode,
            'cube_coeff': resize.cube_coeff,
            'nearest_mode': resize.nearest_mode,
            'pads_begin': int64_array([0]),
            'pads_end': int64_array([0]),
            'antialias': 0,
            'shape_calculation_mode': shape_calculation_mode,
            'in_ports_count': 4
        }).create_node()

    axes_node.out_port(0).connect(interpolate_node.in_port(3))
    shape_of = Shape(graph, {'name': resize_name + '/ShapeOf'}).create_node()

    add_node = create_op_with_const_inputs(graph, Add,
                                           {1: float_array([1.0e-5])},
                                           {'name': resize_name + '/Add'})

    input_data_type = data_type_str_to_np(graph.graph['cmd_params'].data_type)

    if num_of_inputs == 3:
        cast_shape_to_float = Cast(graph, {
            'dst_type': input_data_type
        }).create_node()
        mul_node = Mul(graph, {'name': resize_name + '/Mul'}).create_node()
        shape_of.out_port(0).connect(cast_shape_to_float.in_port(0))
        cast_shape_to_float.out_port(0).connect(mul_node.in_port(0))
        cast_add_result_to_int = Cast(graph, {
            'dst_type': np.int64
        }).create_node()
        floor_node = Floor(graph, {
            'name': resize_name + '/Floor'
        }).create_node()
        mul_node.out_port(0).connect(add_node.in_port(0))
        add_node.out_port(0).connect(floor_node.in_port(0))
        floor_node.out_port(0).connect(cast_add_result_to_int.in_port(0))
        cast_add_result_to_int.out_port(0).connect(sizes_ss.in_port(0))
        sizes_ss.out_port(0).connect(interpolate_node.in_port(1))
        scales_ss.out_port(0).connect(interpolate_node.in_port(2))

        connection_of_resize_input = resize.in_port(0).get_connection()
        connection_of_resize_input.set_destination(interpolate_node.in_port(0))

        connection_of_scales = resize.in_port(2).get_connection()
        connection_of_scales.set_destination(scales_ss.in_port(0))

        connection_of_resize_input.get_source().connect(shape_of.in_port(0))
        connection_of_scales.get_source().connect(mul_node.in_port(1))
    else:
        cast_shape_to_float = Cast(graph, {
            'dst_type': input_data_type
        }).create_node()
        cast_sizes_to_float = Cast(graph, {
            'dst_type': input_data_type
        }).create_node()
        div_node = Div(graph, {'name': resize_name + '/Div'}).create_node()
        cast_sizes_to_float.out_port(0).connect(div_node.in_port(0))
        cast_shape_to_float.out_port(0).connect(div_node.in_port(1))
        shape_of.out_port(0).connect(cast_shape_to_float.in_port(0))
        div_node.out_port(0).connect(add_node.in_port(0))
        add_node.out_port(0).connect(scales_ss.in_port(0))
        scales_ss.out_port(0).connect(interpolate_node.in_port(2))
        sizes_ss.out_port(0).connect(interpolate_node.in_port(1))

        connection_of_resize_input = resize.in_port(0).get_connection()
        connection_of_resize_input.set_destination(interpolate_node.in_port(0))

        connection_of_sizes = resize.in_port(3).get_connection()
        connection_of_sizes.set_destination(sizes_ss.in_port(0))

        connection_of_resize_input.get_source().connect(shape_of.in_port(0))
        connection_of_sizes.get_source().connect(
            cast_sizes_to_float.in_port(0))

    rename_nodes([(resize, resize_name + '/delete'),
                  (interpolate_node, resize_name)])
    resize.out_port(0).get_connection().set_source(
        interpolate_node.out_port(0))
    def test_get_width_dim_NDHWC(self):
        self.assertEqual(get_width_dim('NHWC', 5), 3)

    def test_get_width_dim_NCDHW(self):
        self.assertEqual(get_width_dim('NCHW', 5), 4)

    def test_get_width_dim_NHWC(self):
        self.assertEqual(get_width_dim('NHWC', 4), 2)
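The dim getters used throughout these snippets come from the Model Optimizer layout utilities; below is a minimal sketch of their behaviour that is consistent with the unit tests above (an illustrative reimplementation, not the original source):

    def get_batch_dim_sketch(layout: str, rank: int) -> int:
        # Batch is the first axis in both the NHWC and NCHW families.
        return 0

    def get_features_dim_sketch(layout: str, rank: int) -> int:
        # Channels are last for NHWC/NDHWC and second for NCHW/NCDHW.
        return rank - 1 if layout == 'NHWC' else 1

    def get_width_dim_sketch(layout: str, rank: int) -> int:
        # Width is the last spatial axis: rank - 2 for NHWC/NDHWC, rank - 1 for NCHW/NCDHW.
        return rank - 2 if layout == 'NHWC' else rank - 1

    assert get_width_dim_sketch('NHWC', 5) == 3   # matches test_get_width_dim_NDHWC
    assert get_width_dim_sketch('NCHW', 5) == 4   # matches test_get_width_dim_NCDHW
    assert get_width_dim_sketch('NHWC', 4) == 2   # matches test_get_width_dim_NHWC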
Example 23
    def replace_pattern(self, graph: Graph, match: Dict[str, Node]):
        log.debug('UpsampleToResample is triggered')
        upsample = match['upsample']
        upsample_name = upsample.soft_get('name', upsample.id)
        input_shape = upsample.in_port(0).data.get_shape()
        input_shape_rank = len(input_shape)
        if input_shape_rank not in [4, 5]:
            log.warning('The input shape is not 4D or 5D for op {}'.format(
                upsample.soft_get('name')))
            return

        depth_scale = None
        if len(upsample.in_nodes()) == 2:
            if upsample.in_node(1).value is None:
                return
            scales = upsample.in_node(1).value
            assert len(scales) in (
                4, 5
            ), 'Supported scales rank is 4 or 5, but it is {} for node {}'.format(
                len(scales), upsample_name)
            if not (math.isclose(scales[0], 1, rel_tol=1e-5)
                    and math.isclose(scales[1], 1, rel_tol=1e-5)):
                return
            height_scale = scales[2]
            width_scale = scales[3]
            if len(scales) == 5:
                depth_scale = scales[4]
        else:
            height_scale = upsample['height_scale']
            width_scale = upsample['width_scale']

        if not math.isclose(height_scale, width_scale, rel_tol=1e-5):
            log.debug(
                'Width and height scales are not equal: {} vs {} for node {}'.
                format(width_scale, height_scale, upsample_name))
            return
        if depth_scale is not None and not math.isclose(
                height_scale, depth_scale, rel_tol=1e-5):
            log.debug(
                'Depth and height scales are not equal: {} vs {} for node {}'.
                format(depth_scale, height_scale, upsample_name))
            return

        if 1 in upsample.in_ports() and not upsample.in_port(1).disconnected():
            upsample.in_port(1).disconnect()

        shape = Shape(graph, {'name': upsample_name + '/0_port'}).create_node()

        layout = graph.graph['layout']

        if input_shape_rank == 4:
            begin_value = int64_array(
                [get_height_dim(layout, input_shape_rank)])
            factor_value = np.array([height_scale, width_scale])
        else:
            begin_value = int64_array(
                [get_depth_dim(layout, input_shape_rank)])
            factor_value = np.array([depth_scale, height_scale, width_scale])

        ss = create_op_with_const_inputs(
            graph, StridedSlice, {
                1: begin_value,
                2: int64_array([get_width_dim(layout, input_shape_rank) + 1]),
                3: int64_array([1])
            }, {
                'name': upsample_name + '/ss_0_port',
                'begin_mask': int64_array([1]),
                'end_mask': int64_array([1]),
                'new_axis_mask': int64_array([0]),
                'shrink_axis_mask': int64_array([0]),
                'ellipsis_mask': int64_array([0])
            })

        mul = create_op_node_with_second_input(
            graph, Mul, factor_value, {'name': upsample_name + '/factor_mul_'})

        source = upsample.in_port(0).get_connection().get_source()
        source.connect(shape.in_port(0))
        shape.out_port(0).connect(ss.in_port(0))

        ss.out_port(0).connect(mul.in_port(0))

        # Create Interpolate operation
        if input_shape_rank == 4:
            axes = int64_array([
                get_height_dim(layout, input_shape_rank),
                get_width_dim(layout, input_shape_rank)
            ])
        else:
            axes = int64_array([
                get_depth_dim(layout, input_shape_rank),
                get_height_dim(layout, input_shape_rank),
                get_width_dim(layout, input_shape_rank)
            ])

        resample_op = Interpolate(
            graph,
            dict(name=upsample_name + '/Interpolate',
                 axes=axes,
                 mode=upsample.attrs()['mode'],
                 antialias=0,
                 convert_to_resample=True)).create_node()

        upsample.add_input_port(1, skip_if_exist=True)
        assert upsample.in_port(1).disconnected()
        mul.out_port(0).connect(resample_op.in_port(1))

        upsample.in_port(0).get_connection().set_destination(
            resample_op.in_port(0))
        upsample.out_port(0).get_connection().set_source(
            resample_op.out_port(0))

        convert_to_float = Cast(graph, dict(dst_type=np.float32)).create_node()
        convert_to_int = Cast(graph, dict(dst_type=np.int64)).create_node()

        mul.in_port(0).get_connection().insert_node(convert_to_float)
        mul.out_port(0).get_connection().insert_node(convert_to_int)
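What the ShapeOf -> StridedSlice -> Mul subgraph built above computes at runtime, traced on assumed NCHW data with scale factors of 2:

    import numpy as np

    input_shape = np.array([1, 3, 32, 32])     # assumed NCHW input
    begin, end = 2, 4                          # get_height_dim .. get_width_dim + 1 for rank 4
    spatial = input_shape[begin:end]           # [32, 32]
    factor = np.array([2.0, 2.0], dtype=np.float32)
    sizes = (spatial.astype(np.float32) * factor).astype(np.int64)
    print(sizes)                               # [64 64] -- the target sizes fed into Interpolate's port 1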
Example 24
    def interp_infer(node: Node):
        layout = node.graph.graph['layout']
        assert len(layout) == 4
        if len(node.in_nodes()) == 2:
            src_shape = node.in_node(0).shape
            dst_shape = node.in_node(1).shape

            # in Caffe there can also be 2 inputs, but the shape should be taken from the shape of the second input
            if node.parse_2nd_input == 'shape':
                dst_shape = [
                    dst_shape[get_height_dim(layout, 4)],
                    dst_shape[get_width_dim(layout, 4)]
                ]
            else:
                # it is TF case
                dst_shape = node.in_node(1).value

            if src_shape is None or dst_shape is None or len(
                    src_shape) != 4 or len(dst_shape) != 2:
                log.error(
                    'Node {} with op {} cannot be converted to Resample layer because there is not enough info about '
                    'src/dst shapes: src_shape = {}, dst_shape = {}'.format(
                        node.name, node.op, src_shape, dst_shape))
                node.type = None  # prevent translation to a valid IE layer
                return
            in_height = src_shape[get_height_dim(layout, 4)]
            in_width = src_shape[get_width_dim(layout, 4)]
            out_height = dst_shape[0]
            out_width = dst_shape[1]

            node.factor = factor_update(
                node.factor,
                [float(out_height) / in_height,
                 float(out_width) / in_width], [in_height, in_width],
                [out_height, out_width], node.soft_get('name'))

            if node.factor is None:
                node['width'] = out_width
                node['height'] = out_height

            node.out_node().shape = shape_for_layout(
                layout,
                batch=src_shape[get_batch_dim(layout, 4)],
                features=src_shape[get_features_dim(layout, 4)],
                height=out_height,
                width=out_width)
            node.graph.remove_edge(node.in_node(1).id, node.id)
        else:
            outn = node.out_node(0)

            in_shape = node.in_node(0)
            num_ = in_shape.shape[get_batch_dim(layout, 4)]
            channels_ = in_shape.shape[get_features_dim(layout, 4)]
            height_in_ = in_shape.shape[get_height_dim(layout, 4)]
            width_in_ = in_shape.shape[get_width_dim(layout, 4)]

            height_out_ = height_in_ + node.pad_beg + node.pad_end
            width_out_ = width_in_ + node.pad_beg + node.pad_end

            if node.shrink_factor != 1 and node.zoom_factor == 1:
                shrink_factor = node.shrink_factor
                if shrink_factor < 1:
                    log.error(
                        'Shrink factor should be positive in node {}'.format(
                            node.id))
                    return None
                height_out_ = (height_out_ - 1) / shrink_factor + 1
                width_out_ = (width_out_ - 1) / shrink_factor + 1
            elif node.shrink_factor == 1 and node.zoom_factor != 1:
                zoom_factor = node.zoom_factor
                if zoom_factor < 1:
                    log.error(
                        'Zoom factor should be positive in node {}'.format(
                            node.id))
                    return None

                node['debug_message'] = 'Interp layer shape inference function may be wrong, please, try to update ' \
                                        'layer shape inference function in the file (extensions/ops/interp.op at the ' \
                                        'line {}).'.format(inspect.currentframe().f_lineno) + refer_to_faq_msg(100)
                # Reshape methods can be different in some cases
                # Commented out section represents reshape that used in deeplab-caffe
                # Uncomment the following lines, if your model was trained with deeplab-caffe
                # or have the same reshape method
                # height_out_ = height_out_ + (height_out_ - 1) * (zoom_factor - 1)
                # width_out_ = width_out_ + (width_out_ - 1) * (zoom_factor - 1)

                # Comment out the following lines if you use the reshape method from previous section
                height_out_ = height_out_ * zoom_factor
                width_out_ = width_out_ * zoom_factor
            elif node.width != 0 and node.height != 0:
                height_out_ = node.height
                width_out_ = node.width
            elif node.shrink_factor != 1 and node.zoom_factor != 1:
                shrink_factor = node.shrink_factor
                zoom_factor = node.zoom_factor
                if shrink_factor < 1:
                    log.error(
                        'Shrink factor should be positive in node {}'.format(
                            node.id))
                    return None
                if zoom_factor < 1:
                    log.error(
                        'Zoom factor should be positive in node {}'.format(
                            node.id))
                    return None
                height_out_ = (height_out_ - 1) / shrink_factor + 1
                width_out_ = (width_out_ - 1) / shrink_factor + 1
                height_out_ = height_out_ + (height_out_ - 1) * (zoom_factor -
                                                                 1)
                width_out_ = width_out_ + (width_out_ - 1) * (zoom_factor - 1)

            outn.shape = shape_for_layout(layout,
                                          batch=num_,
                                          features=channels_,
                                          height=height_out_,
                                          width=width_out_)
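The two zoom_factor reshape variants mentioned in the comments differ by one pixel per zoom step; a tiny check with assumed values (no padding, height_out_ = 10, zoom_factor = 2):

    height_out = 10
    zoom_factor = 2
    print(height_out + (height_out - 1) * (zoom_factor - 1))  # 19 -- the deeplab-caffe style reshape
    print(height_out * zoom_factor)                           # 20 -- the variant used above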
Example 25
    def replace_pattern(self, graph: Graph, match: Dict[str, Node]):
        log.debug('UpsampleToResample is triggered')
        upsample = match['upsample']
        input_shape = upsample.in_port(0).data.get_shape()
        input_shape_rank = len(input_shape)
        if input_shape_rank not in [4, 5]:
            log.warning('The input shape is not 4D or 5D for op {}'.format(
                upsample.soft_get('name')))
            return

        if len(upsample.in_nodes()) == 2:
            if upsample.in_node(1).value is None:
                return
            scales = upsample.in_node(1).value
            assert scales.shape == (4, )
            if not (math.isclose(scales[0], 1, rel_tol=1e-5)
                    and math.isclose(scales[1], 1, rel_tol=1e-5)):
                return
            height_scale = scales[2]
            width_scale = scales[3]
        else:
            height_scale = upsample['height_scale']
            width_scale = upsample['width_scale']

        if 1 in upsample.in_ports() and not upsample.in_port(1).disconnected():
            upsample.in_port(1).disconnect()

        factor = Const(graph, {
            'value': np.array([height_scale, width_scale])
        }).create_node()

        shape = Shape(graph, {'name': upsample.name + '/0_port'}).create_node()

        layout = graph.graph['layout']
        if input_shape_rank == 4:
            begin = Const(graph, {
                'value':
                int64_array([get_height_dim(layout, input_shape_rank)])
            }).create_node()
        else:
            begin = Const(graph, {
                'value':
                int64_array([get_depth_dim(layout, input_shape_rank)])
            }).create_node()
        end = Const(graph, {
            'value':
            int64_array([get_width_dim(layout, input_shape_rank) + 1])
        }).create_node()

        stride = Const(graph, {'value': int64_array([1])}).create_node()
        ss = StridedSlice(
            graph, {
                'name': upsample.name + '/ss_0_port',
                'begin_mask': np.array([1]),
                'end_mask': np.array([0]),
                'new_axis_mask': np.array([0]),
                'shrink_axis_mask': int64_array([0]),
                'ellipsis_mask': int64_array([0])
            }).create_node()

        mul = Mul(graph, {
            'name': upsample.name + '/factor_mul_'
        }).create_node()

        source = upsample.in_port(0).get_connection().get_source()
        source.connect(shape.in_port(0))
        shape.out_port(0).connect(ss.in_port(0))
        begin.out_port(0).connect(ss.in_port(1))
        end.out_port(0).connect(ss.in_port(2))
        stride.out_port(0).connect(ss.in_port(3))
        ss.out_port(0).connect(mul.in_port(0))
        factor.out_port(0).connect(mul.in_port(1))

        # Create Interpolate operation
        if input_shape_rank == 4:
            axes = int64_array([
                get_height_dim(layout, input_shape_rank),
                get_width_dim(layout, input_shape_rank)
            ])
        else:
            axes = int64_array([
                get_depth_dim(layout, input_shape_rank),
                get_height_dim(layout, input_shape_rank),
                get_width_dim(layout, input_shape_rank)
            ])

        resample_op = Interpolate(
            graph,
            dict(name='Interpolate/{}'.format(upsample.name),
                 axes=axes,
                 mode=upsample.attrs()['mode'],
                 antialias=0,
                 convert_to_resample=True)).create_node()

        upsample.add_input_port(1, skip_if_exist=True)
        assert upsample.in_port(1).disconnected()
        mul.out_port(0).connect(resample_op.in_port(1))

        upsample.in_port(0).get_connection().set_destination(
            resample_op.in_port(0))
        upsample.out_port(0).get_connection().set_source(
            resample_op.out_port(0))
Example 26
def replace_tf_resize(graph: Graph, resize: Node, interpolation_mode: str):
    resize_name = resize.soft_get('name', resize.id)
    log.debug(
        "Converting of {} to Interpolate-4 is triggered for node {}.".format(
            resize.op, resize_name))

    num_of_inputs = len([
        port for port in resize.in_ports().values() if not port.disconnected()
    ])
    assert num_of_inputs == 2, \
        "Number of inputs of {} (with name {}) should be equal to 2".format(resize.op, resize_name)

    attrs_msg = "If half_pixel_centers attribute of the node {} with op {} is True, " \
                "the attribute align_corners must be False"
    assert not resize.half_pixel_centers or (resize.half_pixel_centers and not resize.align_corners), \
        attrs_msg.format(resize_name, resize.op)

    shape = Shape(graph, {'name': resize_name + '/shapeof'}).create_node()

    layout = graph.graph['layout']
    height_dim = get_height_dim(layout, 4)
    width_dim = get_width_dim(layout, 4)

    ss = create_op_with_const_inputs(
        graph, StridedSlice, {
            1: int64_array([height_dim]),
            2: int64_array([width_dim + 1]),
            3: int64_array([1])
        }, {
            'name': resize_name + '/StridedSlice',
            'begin_mask': int64_array([1]),
            'end_mask': int64_array([1]),
            'new_axis_mask': int64_array([0]),
            'shrink_axis_mask': int64_array([0]),
            'ellipsis_mask': int64_array([0])
        })

    div_node = Div(graph, {'name': resize_name + '/Div'}).create_node()

    shape_to_float = Cast(graph, dict(dst_type=np.float32)).create_node()
    size_to_float = Cast(graph, dict(dst_type=np.float32)).create_node()

    size_to_float.out_port(0).connect(div_node.in_port(0))
    shape_to_float.out_port(0).connect(div_node.in_port(1))
    ss.out_port(0).connect(shape_to_float.in_port(0))
    shape.out_port(0).connect(ss.in_port(0))

    align_corners = resize.align_corners
    half_pixel_centers = resize.half_pixel_centers

    nearest_mode = 'floor' if interpolation_mode == 'nearest' else 'round_prefer_floor'
    if align_corners:
        coordinate_transformation_mode = 'align_corners'
        if interpolation_mode == 'nearest':
            nearest_mode = 'round_prefer_ceil'
    elif half_pixel_centers:
        coordinate_transformation_mode = 'tf_half_pixel_for_nn' if interpolation_mode == 'nearest' else 'half_pixel'
    else:
        coordinate_transformation_mode = 'asymmetric'

    interpolate4 = create_op_with_const_inputs(
        graph, Interpolate, {3: int64_array([height_dim, width_dim])}, {
            'name': resize_name + '/interpolate_4',
            'mode': interpolation_mode,
            'antialias': False,
            'coordinate_transformation_mode': coordinate_transformation_mode,
            'pads_begin': int64_array([0]),
            'pads_end': int64_array([0]),
            'nearest_mode': nearest_mode,
            'cube_coeff': -0.75,
            'shape_calculation_mode': 'sizes',
            'version': 'opset4',
            'in_ports_count': 4,
        })

    resize_input_connection = resize.in_port(0).get_connection()
    resize_input_connection.set_destination(interpolate4.in_port(0))
    resize_input_connection.get_source().connect(shape.in_port(0))

    div_node.out_port(0).connect(interpolate4.in_port(2))

    sizes_connection = resize.in_port(1).get_connection()
    sizes_connection.set_destination(interpolate4.in_port(1))
    sizes_connection.get_source().connect(size_to_float.in_port(0))

    resize.out_port(0).get_connection().set_source(interpolate4.out_port(0))
    rename_nodes([(resize, resize_name + '/delete'),
                  (interpolate4, resize_name)])
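The flag-to-mode selection above can be isolated into a small helper for quick inspection; a sketch only, with a helper name of our own rather than anything from the Model Optimizer API:

    def pick_modes(interpolation_mode: str, align_corners: bool, half_pixel_centers: bool):
        # Mirrors the selection logic in replace_tf_resize above.
        nearest_mode = 'floor' if interpolation_mode == 'nearest' else 'round_prefer_floor'
        if align_corners:
            coordinate_transformation_mode = 'align_corners'
            if interpolation_mode == 'nearest':
                nearest_mode = 'round_prefer_ceil'
        elif half_pixel_centers:
            coordinate_transformation_mode = 'tf_half_pixel_for_nn' if interpolation_mode == 'nearest' else 'half_pixel'
        else:
            coordinate_transformation_mode = 'asymmetric'
        return coordinate_transformation_mode, nearest_mode

    print(pick_modes('nearest', False, True))   # ('tf_half_pixel_for_nn', 'floor')
    print(pick_modes('linear', True, False))    # ('align_corners', 'round_prefer_floor')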
Example 27
    def replace_pattern(self, graph: Graph, match: dict):
        reshape1 = match['reshape1']
        reshape2 = match['reshape2']
        transpose = match['transpose']

        # Check that Reshape->Transpose->Reshape shuffle only feature channel
        input_shape = np.array(reshape1.in_node(0).shape)
        reshape1_shape = np.array(reshape1.out_node().shape)
        output_shape = np.array(reshape2.out_node().shape)

        # Check that input shape is 4D
        if len(input_shape) != 4:
            log.warning(
                'Can\'t convert Reshape->Transpose({})->Reshape sequence because the input shape should be 4D '
                '(instead of {}D)'.format(transpose.name, len(input_shape)))
            return

        # Check that output shape the same as input shape
        if not np.prod(input_shape) == np.prod(output_shape):
            log.warning(
                'Can\'t convert Reshape->Transpose({})->Reshape sequence because the output shape should be equal '
                'to input shape: {} and {}'.format(transpose.name, input_shape,
                                                   output_shape))
            return

        # Input shapes can be either NCHW or NHWC, so in case of channel split, the feature channel can be split as
        # follows in comments below
        # So feature_dims_split list contains possible dims responsible for feature dim
        layout = graph.graph['layout']
        feature_dim = get_features_dim(layout, len(input_shape))
        spatial_dims = [
            get_height_dim(layout, len(input_shape)),
            get_width_dim(layout, len(input_shape))
        ]
        if layout == 'NCHW':
            # NC1C2HW or NC1C2(H*W)
            feature_dims_split = np.array([feature_dim, feature_dim + 1])
        else:
            # NHWC1C2 or N(H*W)C1C2 or (N*H*W)C1C2
            feature_dims_split = np.array(
                [len(reshape1_shape) - 2,
                 len(reshape1_shape) - 1])

        # Check that feature_dims_split suits reshape layer shape
        for dim in feature_dims_split:
            if dim < 0 or dim >= len(reshape1_shape):
                log.warning(
                    'Can\'t convert Reshape({}:{})->Transpose->Reshape sequence. Can\'t detect feature shuffle.'
                    ''.format(reshape1.shape, reshape1_shape))
                return

        if not np.prod(np.delete(reshape1_shape,
                                 feature_dims_split)) == np.prod(
                                     np.delete(input_shape, feature_dim)):
            log.warning(
                'Can\'t convert Reshape->Transpose->Reshape sequence. Can\'t detect feature shuffle. {} '
                'should be equal to {}'.format(
                    np.prod(np.delete(reshape1_shape, feature_dims_split)),
                    np.prod(np.delete(input_shape, feature_dim))))
            return

        # Check transpose order
        if not np.array_equal(feature_dims_split[::-1],
                              transpose.order[feature_dims_split]):
            log.warning(
                'Can\'t convert Reshape->Transpose({})->Reshape sequence. Transpose operation should switch '
                'feature order (given order: {})'.format(
                    transpose.name, transpose.order))
            return

        # Now we are sure that Reshape->Transpose->Reshape shuffle feature dims
        # So, then we change Reshape and Transpose attrs to suite NCHW layout

        # The resulting shape for Reshape1 layer : [N,C1,C2,(H*W)]
        new_reshape1_shape = np.concatenate(
            (np.array([input_shape[0]]),
             np.array(reshape1_shape[feature_dims_split]),
             np.array([np.prod(input_shape[spatial_dims])])))

        new_transpose_order = np.array([0, 2, 1, 3])
        new_transpose_shape = np.array(new_reshape1_shape[new_transpose_order])

        reshape1.out_node().shape = new_reshape1_shape
        reshape1.dim = np.copy(new_reshape1_shape)

        transpose.order = new_transpose_order
        transpose.out_node().shape = new_transpose_shape

        # Preserve layers from conversion to NCHW (in case of NHWC topology layout)
        reshape1['nchw_layout'] = True
        reshape1.out_node()['nchw_layout'] = True
        transpose['nchw_layout'] = True
        transpose.out_node()['nchw_layout'] = True
Example 28
    def replace_pattern(self, graph: Graph, match: Dict[str, Node]):
        log.debug('UpsampleToResample is triggered')
        upsample = match['upsample']
        upsample_name = upsample.soft_get('name', upsample.id)
        input_shape = upsample.in_port(0).data.get_shape()
        input_shape_rank = len(input_shape)
        if input_shape_rank not in [4, 5]:
            log.warning('The input shape is not 4D or 5D for op {}'.format(
                upsample.soft_get('name')))
            return

        depth_scale = None
        layout = graph.graph['layout']

        if len(upsample.in_nodes()) == 2:
            if upsample.in_node(1).value is None:
                return
            scales = upsample.in_node(1).value
            assert len(scales) in (
                4, 5
            ), 'Supported scales rank is 4 or 5, but it is {} for node {}'.format(
                len(scales), upsample_name)
            if not (math.isclose(scales[0], 1, rel_tol=1e-5)
                    and math.isclose(scales[1], 1, rel_tol=1e-5)):
                return
            height_scale = scales[get_height_dim(layout, input_shape_rank)]
            width_scale = scales[get_width_dim(layout, input_shape_rank)]
            if len(scales) == 5:
                depth_scale = scales[get_depth_dim(layout, input_shape_rank)]
        else:
            height_scale = upsample['height_scale']
            width_scale = upsample['width_scale']

        if 1 in upsample.in_ports() and not upsample.in_port(1).disconnected():
            upsample.in_port(1).disconnect()

        upsample_name = upsample.soft_get('name', upsample.id)
        shape = Shape(graph, {'name': upsample_name + '/0_port'}).create_node()

        layout = graph.graph['layout']

        if input_shape_rank == 4:
            begin_value = int64_array(
                [get_height_dim(layout, input_shape_rank)])
            factor_value = float32_array([height_scale, width_scale])
        else:
            begin_value = int64_array(
                [get_depth_dim(layout, input_shape_rank)])
            factor_value = float32_array(
                [depth_scale, height_scale, width_scale])

        ss = create_op_with_const_inputs(
            graph, StridedSlice, {
                1: begin_value,
                2: int64_array([get_width_dim(layout, input_shape_rank) + 1]),
                3: int64_array([1])
            }, {
                'name': upsample_name + '/ss_0_port',
                'begin_mask': int64_array([1]),
                'end_mask': int64_array([1]),
                'new_axis_mask': int64_array([0]),
                'shrink_axis_mask': int64_array([0]),
                'ellipsis_mask': int64_array([0])
            })

        mul = create_op_node_with_second_input(
            graph, Mul, factor_value, {'name': upsample_name + '/factor_mul'})

        source = upsample.in_port(0).get_connection().get_source()
        source.connect(shape.in_port(0))
        shape.out_port(0).connect(ss.in_port(0))

        ss.out_port(0).connect(mul.in_port(0))

        # Create Interpolate operation
        if input_shape_rank == 4:
            axes = int64_array([
                get_height_dim(layout, input_shape_rank),
                get_width_dim(layout, input_shape_rank)
            ])
        else:
            axes = int64_array([
                get_depth_dim(layout, input_shape_rank),
                get_height_dim(layout, input_shape_rank),
                get_width_dim(layout, input_shape_rank)
            ])

        axes_node = Const(graph, {
            'name': upsample_name + '/axis',
            'value': axes
        }).create_node()

        interpolate = Interpolate(
            graph, {
                'mode': upsample.attrs()['mode'],
                'antialias': 0,
                'pads_begin': int64_array([0]),
                'pads_end': int64_array([0]),
                'coordinate_transformation_mode': 'half_pixel',
                'nearest_mode': 'round_prefer_floor',
                'cube_coeff': -0.75,
                'shape_calculation_mode': 'scales',
                'version': 'opset4',
                'in_ports_count': 4
            }).create_node()

        upsample.add_input_port(1, skip_if_exist=True)
        assert upsample.in_port(1).disconnected()
        mul.out_port(0).connect(interpolate.in_port(1))
        axes_node.out_port(0).connect(interpolate.in_port(3))

        scales_node = Const(graph, {
            'name': upsample_name + '/scales',
            'value': factor_value
        }).create_node()
        scales_node.out_port(0).connect(interpolate.in_port(2))

        upsample.in_port(0).get_connection().set_destination(
            interpolate.in_port(0))
        upsample.out_port(0).get_connection().set_source(
            interpolate.out_port(0))

        rename_nodes([(upsample, upsample_name + '/delete'),
                      (interpolate, upsample_name)])

        convert_to_float = Cast(graph, dict(dst_type=np.float32)).create_node()
        convert_to_int = Cast(graph, dict(dst_type=np.int64)).create_node()

        mul.in_port(0).get_connection().insert_node(convert_to_float)
        mul.out_port(0).get_connection().insert_node(convert_to_int)