def make_interpolate_reshapeable(interpolate, concat):
        assert interpolate.soft_get('type') == 'Interpolate'
        assert concat.soft_get('type') == 'Concat'

        output_shape = interpolate.out_port(0).data.get_shape()

        interp_axes = [get_canonical_axis_index(output_shape, axis) for axis in Interpolate.get_axes(interpolate)]
        concat_axis = get_canonical_axis_index(output_shape, concat.axis)
        if concat_axis in interp_axes:
            return

        concat_srcs = [port.get_source() for port in concat.in_ports().values() if not port.disconnected()]
        non_interp_concat_srcs = [src for src in concat_srcs if src.node.soft_get('type') != 'Interpolate']
        if len(non_interp_concat_srcs) == 0:
            return

        graph = interpolate.graph
        src = non_interp_concat_srcs[0]

        shape = Shape(graph, {'name': src.node.soft_get('name', src.node.id) + '/Shape'}).create_node()
        shape.in_port(0).connect(src)
        gather = create_op_with_const_inputs(graph, Gather,
                                             {1: np.array(interp_axes, dtype=np.int32), 2: int64_array(0)},
                                             {'name': shape.name + '/Gathered'}, shape)
        interpolate.in_port(1).get_connection().set_source(gather.out_port(0))
Example 2
    def infer(node: Node):
        real_squeeze_dims = int64_array([])
        input_shape = node.in_port(0).data.get_shape()
        node_name = node.soft_get('name', node.id)
        if input_shape is None:
            raise Error(
                'Input shape is not defined for node {}'.format(node_name))

        output_shape = input_shape.copy()
        assert len(node.in_nodes()) == 2, 'The Squeeze node {} must have 2 inputs'.format(node_name)

        # TODO remove the following 'if' statement when IE starts supporting 0D tensors
        squeeze_dims = node.in_port(1).data.get_value()
        if squeeze_dims.ndim == 0:
            squeeze_dims = squeeze_dims.reshape([1])

        for dim in squeeze_dims:
            if output_shape[dim] == 1 or output_shape[dim] is dynamic_dimension:
                real_squeeze_dims = np.ma.append(
                    real_squeeze_dims,
                    get_canonical_axis_index(output_shape, dim))
            else:
                raise Error(
                    'Trying to squeeze dimension not equal to 1 for node "{}"'.
                    format(node_name))

        # if squeeze_dims is empty then all dimensions of size 1 should be removed (TF specification of the Squeeze op)
        if squeeze_dims.size == 0:
            for i in range(output_shape.size):
                if output_shape[i] == 1:
                    real_squeeze_dims = np.ma.append(
                        real_squeeze_dims,
                        get_canonical_axis_index(output_shape, i))

        assert is_fully_defined(
            real_squeeze_dims
        ), 'Squeeze dimension(s) is not defined for op "{}"'.format(node_name)
        output_shape = shape_delete(output_shape, real_squeeze_dims)
        node.out_port(0).data.set_shape(output_shape)

        # make dimensions positive to correctly translate from NHWC to NCHW layout
        if node.in_port(1).get_source().node.op == 'Const':
            node.in_port(1).data.set_value(real_squeeze_dims)

        if node.in_port(0).data.get_value() is not None:
            node.out_port(0).data.set_value(
                node.in_port(0).data.get_value().reshape(output_shape))

        # the squeeze_dim attribute will be converted to the second input at the end of the Middle phase
        PermuteInputs().set_input_permutation(node.in_node(1), node, 'input:0',
                                              'axis')
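
A minimal NumPy analogue (not part of the Model Optimizer code above) of what this Squeeze shape inference computes: only dimensions of size 1 may be removed, otherwise an error is raised.

import numpy as np

x = np.zeros((1, 3, 1, 5))
# squeezing axes 0 and 2 (both of size 1) mirrors the happy path of the inference above
print(np.squeeze(x, axis=(0, 2)).shape)  # (3, 5)
# np.squeeze(x, axis=1) would raise ValueError, mirroring the Error raised above for dims != 1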
Example 3
def mxnet_slice_axis_infer(node):
    in_shape = node.in_node(0).shape
    node.axis = get_canonical_axis_index(in_shape, node.axis)
    slice_axis = node.axis

    new_shape = np.array(in_shape, dtype=np.int64)
    new_shape[slice_axis] = new_shape[slice_axis] / len(node.out_nodes())

    axis_size = in_shape[slice_axis]
    if node.offset < 0:
        node.offset += axis_size

    if not node.dim:
        node.dim = axis_size
    elif node.dim < 0:
        node.dim += axis_size

    input_dim = in_shape.size
    node.dim = (node.dim - node.offset)
    if node.dim > in_shape[slice_axis]:
        raise Error(
            '{0} node dimension value is bigger than the corresponding value in the input shape {1}. ' +
            '\nIn particular {2} is bigger than {3}. The Model Optimizer does not support this case. ' +
            '\nTo overcome, try to edit the original model "end" property of the {0} layer.',
            node.name, ','.join(str(i) for i in in_shape), str(node.dim), str(in_shape[slice_axis])
        )

    for i in range(0, input_dim):
        if i == slice_axis:
            new_shape[i] = node.dim
        else:
            new_shape[i] = in_shape[i]

    for i in range(0, len(node.out_nodes())):
        node.out_node(i)['shape'] = new_shape
Example 4
    def replace_pattern(self, graph: Graph, match: dict):
        matmul = match['matmul']
        reshape = match['reshape']
        other_input_port_idx = 0 if match['matmul'].in_port(0).get_source().node.id == match['other_input'].id else 1
        shape_source = match['matmul'].in_port(other_input_port_idx).get_source()
        initial_reshape_pattern = reshape.in_port(1).data.get_value()
        if len(initial_reshape_pattern) != 2:
            return

        reshape_is_A_input = matmul.in_port(0).get_source().node.id == reshape.id
        if reshape_is_A_input:
            idx = -1 if matmul.transpose_b else -2
        else:
            idx = -2 if matmul.transpose_a else -1
        idx = get_canonical_axis_index(initial_reshape_pattern, idx)

        shape_name = shape_source.node.soft_get('name', shape_source.node.id)
        shape = Shape(graph, {'name': shape_name + '/Shape'}).create_node()
        shape.in_port(0).connect(shape_source)
        C = node_to_get_shape_value_of_indices(shape, [idx])
        N = Const(graph, {'name': shape_name + '/MinusOne', 'value': int64_array([-1])}).create_node()

        # initial_reshape_pattern is guaranteed to have length 2 by the early return above
        if reshape_is_A_input:
            reshape_pattern = [C, N] if matmul.transpose_a else [N, C]
        else:
            reshape_pattern = [N, C] if matmul.transpose_b else [C, N]
        new_reshape_pattern = new_shape_node_from_shape_nodes(reshape_pattern)
        reshape.in_port(1).get_connection().set_source(new_reshape_pattern.out_port(0))
Example 5
    def argmax_infer(node: Node):
        shape = node.in_node(0).shape
        if shape is None:
            return

        # there are two inputs in TensorFlow. The second input is the axis for ArgMax
        if len(node.in_nodes()) == 2:
            if node.in_node(1).value is None:
                log.debug('The second argument to ArgMax is None')
                return
            node.axis = node.in_node(1).value.item()
            # remove the unnecessary input
            node.graph.remove_edge(node.in_node(1).id, node.id)

        num_top_axes = shape.size
        if num_top_axes < 3:
            num_top_axes = 3

        out_shape = np.ones(num_top_axes, dtype=int)

        if node.has_valid('axis'):
            axis = get_canonical_axis_index(shape, node.axis)
            node.axis = axis
            out_shape = np.array(shape)
            out_shape[axis] = node.top_k
            PermuteAttrs.create_permute_attrs(node,
                                              attrs=[('axis', 'input:0')])
        else:
            out_shape[0] = shape[0]
            out_shape[2] = node.top_k
            if node.out_max_val:
                out_shape[1] = 2

        node.out_node().shape = out_shape
Example 6
def tf_squeeze_infer(node):
    if node.squeeze_dims is None:
        # TODO: implement; there is no implementation now because there is no test
        return

    real_squeeze_dims = []
    input_shape = node.in_node().shape
    if input_shape is None:
        return
    # UGLY
    output_shape = input_shape.copy()
    for n in node.squeeze_dims:
        if output_shape[n] == 1:
            real_squeeze_dims.append(get_canonical_axis_index(output_shape, n))
        else:
            raise Error('Trying to squeeze dimension not equal to 1 for node "{}"'.format(node.soft_get('name')))

    output_shape = np.delete(output_shape, real_squeeze_dims)
    node.out_node().shape = output_shape

    if is_spatial_squeeze(node.graph.graph['layout'], input_shape, output_shape):
        output_shape = int64_array([0, -1])
    node['dim'] = output_shape
    if node.in_node().value is not None:
        node.out_node().value = np.array(np.reshape(node.in_node().value, output_shape))

    PermuteAttrs.create_permute_attrs(node, attrs=[('dim', 'output:0')])
Example 7
    def infer(node: Node):
        input_shape = node.in_node(0).shape
        name = node.soft_get('name', node.id)

        if node.axes is not None and node.across_channels is not None:
            raise Error('Either axes or across_channels can be set for the MVN in node "{}".'.format(name))

        if node.across_channels is None:
            if node.axes is not None:
                # normalizing (replacing -1 with actual index)
                axes_data_value = node.axes
                axes = [axes_data_value.item()] if axes_data_value.size == 1 else axes_data_value
                axes = [get_canonical_axis_index(input_shape, a) for a in axes]
                # deduce across_channels from the axes, e.g. if the first axis is included (assuming batch is zero axis)
                feature_dim = get_features_dim(node.graph.graph['layout'], len(input_shape)) \
                    if (4 <= len(input_shape) <= 5) \
                    else 1
                node.across_channels = int(feature_dim in axes)

                if 0 in axes:
                    raise Error('Reduction over the batch dimension in node "{}" '
                                'is not supported by the backend.'.format(name))
                for i in range(2, len(input_shape)):
                    if i not in axes:
                        raise Error(
                            'Reduction over spatial dimensions in node "{}" '
                            'is obligatory for the backend.'.format(name))
            else:
                node.across_channels = 0  # default

        copy_shape_infer(node)
Example 8
    def infer(node: Node):
        assert len(node.in_nodes()) == 2 or len(node.in_nodes()) == 3

        # There may be three inputs in TensorFlow. The third input is axis
        if len(node.in_nodes()) == 3:
            if node.in_node(2).value is None:
                log.error("Gather is supported only with constant axis value")
                return
            node.axis = node.in_node(2).value.item()
            node.graph.remove_edge(node.in_node(2).id, node.id)

        axis = node.axis
        data = node.in_node(0)
        indices = node.in_node(1)

        # both inputs are constant
        if data.value is not None and indices.value is not None:
            indices.value = np.array(indices.value, dtype=np.int64)
            node.out_node(0).value = np.array(np.take(data.value,
                                                      indices.value, axis),
                                              dtype=data.value.dtype)
            node.out_node(0).shape = np.array(node.out_node(0).value.shape,
                                              dtype=np.int64)
            return

        # Convert negative axis
        axis = get_canonical_axis_index(data.shape, axis)
        node.axis = axis

        shape = np.concatenate((data.shape[:axis], indices.shape))
        if axis < len(data.shape) - 1:
            shape = np.concatenate((shape, data.shape[axis + 1:]))

        node.out_node(0).shape = np.array(shape, dtype=np.int64)
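
A small NumPy-only illustration (hypothetical tensors, not Model Optimizer code) of the Gather shape rule implemented above: the output shape is data_shape[:axis] + indices_shape + data_shape[axis + 1:].

import numpy as np

data = np.arange(2 * 3 * 4).reshape(2, 3, 4)  # shape (2, 3, 4)
indices = np.array([[0, 2], [1, 1]])          # shape (2, 2)
axis = 1

out = np.take(data, indices, axis=axis)
print(out.shape)  # (2, 2, 2, 4) == data.shape[:1] + indices.shape + data.shape[2:]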
Example 9
def crop_infer(node):
    """
    Crops the shape of the output blob according to input ones be specified params.
    Node should have 2 input blobs - 1st blob is getting cropped by specified axis according
    to the the 2nd (reference) blob.
    The result blob is written to output node shape, and reference blob is removed from graph.
    In order to save the reference dims, it is written to dims parameter.

    Parameters
    ----------
    node


    """
    N = len(node.in_nodes())
    if N < 2:
        log.debug('Wrong number of bottom blobs in ' + node.node)
        return

    shapes = [node.in_node(i).shape for i in range(N)]
    if any(s is None for s in shapes):
        return

    input_shape = np.array(shapes[0])
    start_axis = get_canonical_axis_index(input_shape, node.axis)
    node.axis = start_axis

    reference_shape = np.array(shapes[1])
    input_dim = input_shape.size

    # set new shape to current shape
    new_shape = input_shape.copy()
    ir_axis = []
    ir_offset = []
    dim = []

    for i in range(0, input_dim):
        if i < start_axis:
            new_shape[i] = input_shape[i]
            continue

        crop_offset = 0
        if len(node.offset) == 1:
            crop_offset = node.offset[0]
        elif len(node.offset) > 1:
            crop_offset = node.offset[i - start_axis]

        if input_shape[i] - crop_offset < reference_shape[i]:
            log.error('The crop for dimension is out of bounds in ' + node.node)
            return

        dim.append(reference_shape[i])
        ir_axis.append(i)
        ir_offset.append(crop_offset)
        new_shape[i] = reference_shape[i]

    node.axis = ir_axis
    node.offset = ir_offset
    node.dim = dim
    node.out_node().shape = new_shape
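
The sketch below uses hypothetical shapes and plain NumPy to illustrate the shape rule from the docstring above: dimensions before the start axis are kept from the first input, dimensions from the start axis onward are taken from the reference input, provided the offset leaves enough room.

import numpy as np

input_shape = np.array([1, 3, 224, 224])
reference_shape = np.array([1, 3, 200, 200])
axis, offset = 2, 10

new_shape = input_shape.copy()
for i in range(axis, input_shape.size):
    assert input_shape[i] - offset >= reference_shape[i], 'the crop is out of bounds'
    new_shape[i] = reference_shape[i]

print(new_shape)  # [1 3 200 200]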
Example 10
    def infer(node: Node):
        name = node.soft_get('name', node.id)

        connected_in_ports = {idx: port for idx, port in node.in_ports().items() if not port.disconnected()}
        assert len(connected_in_ports) == 3 and 0 in connected_in_ports and 1 in connected_in_ports and \
               2 in connected_in_ports, "Gather should have 3 connected input ports, but it doesn't for " \
                                        "node: `{}`. Ports: {}".format(name, connected_in_ports)

        data_shape = node.in_port(0).data.get_shape()
        assert data_shape is not None
        indices_shape = node.in_port(1).data.get_shape()
        assert indices_shape is not None
        axis = node.in_port(2).data.get_value()
        assert axis is not None
        axis = get_canonical_axis_index(data_shape, axis)

        # we import PermuteInputs locally because it uses Gather inside and we have recursive imports
        from mo.graph.perm_inputs import PermuteInputs
        PermuteInputs().set_input_permutation(node.in_node(1), node, 'input:0', 'axis')

        data_value = node.in_port(0).data.get_value()
        indices_value = node.in_port(1).data.get_value()
        if data_value is not None and indices_value is not None:
            node.out_port(0).data.set_value(np.array(np.take(data_value, int64_array(indices_value), axis),
                                                     dtype=data_value.dtype))
            return

        shape = np.concatenate((data_shape[:axis], indices_shape))
        if axis < len(data_shape) - 1:
            shape = np.concatenate((shape, data_shape[axis + 1:]))

        node.out_port(0).data.set_shape(int64_array(shape))
Example 11
    def infer(node: Node):
        name = node.soft_get('name', node.id)

        connected_in_ports = {idx: port for idx, port in node.in_ports().items() if not port.disconnected()}
        assert len(connected_in_ports) == 2 and 0 in connected_in_ports and 1 in connected_in_ports, \
            "AttributedGather should have 2 connected input ports, but it doesn't for node: `{}`. Ports: {}" \
            "".format(name, connected_in_ports)

        axis = node.soft_get('axis', None)
        assert axis is not None

        data_shape = node.in_port(0).data.get_shape()
        assert data_shape is not None
        indices_shape = node.in_port(1).data.get_shape()
        assert indices_shape is not None

        # Convert negative axis
        axis = get_canonical_axis_index(data_shape, axis)
        node.axis = axis

        PermuteAttrs.create_permute_attrs(node, attrs=[('axis', 'input:0')])

        data_value = node.in_port(0).data.get_value()
        indices_value = node.in_port(1).data.get_value()
        if data_value is not None and indices_value is not None:
            node.out_port(0).data.set_value(np.array(np.take(data_value, indices_value, axis), dtype=data_value.dtype))
            return

        shape = np.concatenate((data_shape[:axis], indices_shape))
        if axis < len(data_shape) - 1:
            shape = np.concatenate((shape, data_shape[axis + 1:]))

        node.out_port(0).data.set_shape(int64_array(shape))
Example 12
    def infer(node):
        input_shape = node.in_port(0).data.get_shape()
        shape_like = node.in_port(1).data.get_shape()

        new_shape = np.copy(input_shape)
        if node.axes is not None:
            node.axes = sorted(
                [get_canonical_axis_index(input_shape, i) for i in node.axes])
            for i in node.axes:
                new_shape[i] = shape_like[i]
        else:
            assert input_shape.size == shape_like.size,\
                'Input shape ranks are inconsistent: {} and {}'.format(input_shape.size, shape_like.size)
            node.axes = int64_array(range(shape_like.size))
            new_shape = np.copy(shape_like)
        node.out_port(0).data.set_shape(new_shape)

        if node.in_port(0).get_connection().data.get_value() is not None:
            out_value = np.copy(node.in_port(0).data.get_value())

            slice_indexes = []
            for s in out_value.shape:
                slice_indexes.append(slice(0, s))

            for axis in node.axes:
                slice_indexes[axis] = slice(0, new_shape[axis])
                out_value = out_value[tuple(slice_indexes)]
            node.out_port(0).data.set_value(out_value)
Example 13
    def infer(node):
        input_shape = node.in_node(0).shape
        if input_shape is None:
            log.debug(
                'The input shape for the layer "{}" is not defined'.format(
                    node.soft_get('name')))
            return

        axis = get_canonical_axis_index(input_shape, node.axis)
        end_axis = node.end_axis if node.has('end_axis') else -1
        end_axis = get_canonical_axis_index(input_shape, end_axis)
        prod_axes = np.prod(input_shape[axis:end_axis + 1])
        node.out_node(0).shape = int64_array(
            [*input_shape[0:axis], prod_axes, *input_shape[end_axis + 1:]])

        if node.in_node().has_valid('value'):
            node.out_node().value = node.in_node().value.copy().reshape(
                node.out_node(0).shape)
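
A short NumPy-only sketch (hypothetical shapes) of the Flatten shape rule above: dimensions from axis through end_axis are collapsed into their product.

import numpy as np

input_shape = np.array([2, 3, 4, 5])
axis, end_axis = 1, 2

flat_dim = np.prod(input_shape[axis:end_axis + 1])  # 3 * 4 = 12
out_shape = np.array([*input_shape[:axis], flat_dim, *input_shape[end_axis + 1:]])
print(out_shape)  # [ 2 12  5]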
Example 14
    def regionyolo_infer(node: Node):
        input_shape = node.in_node(0).shape
        if input_shape is None:
            return
        axis = get_canonical_axis_index(input_shape, node.axis)
        end_axis = get_canonical_axis_index(input_shape, node.end_axis)
        node.axis = axis
        node.end_axis = end_axis
        if node.do_softmax:
            flat_dim = np.prod(input_shape[axis: end_axis + 1])
            node.out_node().shape = np.array([*input_shape[:axis], flat_dim, *input_shape[end_axis + 1:]])
        else:
            layout = node.graph.graph['layout']
            assert len(layout) == 4

            node.out_node().shape = shape_for_layout(layout,
                                                     batch=input_shape[get_batch_dim(layout, 4)],
                                                     features=(node.classes + node.coords + 1) * len(node.mask),
                                                     height=input_shape[get_height_dim(layout, 4)],
                                                     width=input_shape[get_width_dim(layout, 4)])
Example 15
File: crop.py Project: pc2/CustoNN2
    def _two_inputs_infer(node: Node):
        N = len(node.in_nodes())

        shapes = [node.in_node(i).shape for i in range(N)]
        if any(s is None for s in shapes):
            log.error('Not all input shapes were defined for {} node'.format(node.name))
            return

        if not node.has_valid('axis'):
            log.error('axis attribute is missing for {} node. It should be set in the crop extractor'.format(node.name))
            return

        if not node.has_valid('offset'):
            log.error('offset attribute is missing for {} node. It should be set in the crop extractor'.format(node.name))
            return

        input_shape = np.array(shapes[0])
        start_axis = get_canonical_axis_index(input_shape, node.axis)
        node.axis = start_axis

        reference_shape = np.array(shapes[1])
        input_dim = input_shape.size

        # set new shape to current shape
        new_shape = input_shape.copy()
        ir_axis = []
        ir_offset = []
        dim = []

        for i in range(0, input_dim):
            if i < start_axis:
                new_shape[i] = input_shape[i]
                continue

            crop_offset = 0
            if len(node.offset) == 1:
                crop_offset = node.offset[0]
            elif len(node.offset) > 1:
                crop_offset = node.offset[i - start_axis]

            if input_shape[i] - crop_offset < reference_shape[i]:
                log.error('The crop for dimension is out of bounds in ' + node.node)
                return

            dim.append(reference_shape[i])
            ir_axis.append(i)
            ir_offset.append(crop_offset)
            new_shape[i] = reference_shape[i]

        node.axis = ir_axis
        node.offset = ir_offset
        node['dim'] = dim
        node.out_node().shape = new_shape
        PermuteAttrs.create_permute_attrs(node, attrs=[('axis', 'input:0')])
Example 16
    def regionyolo_infer(node: Node):
        input_shape = node.in_port(0).data.get_shape()
        axis = get_canonical_axis_index(input_shape, node.axis)
        end_axis = get_canonical_axis_index(input_shape, node.end_axis)
        node.axis = axis
        node.end_axis = end_axis
        if node.do_softmax:
            dims_to_flatten = input_shape[axis: end_axis + 1]
            if is_fully_defined(dims_to_flatten):
                flat_dim = np.ma.prod(dims_to_flatten)
            else:
                flat_dim = dynamic_dimension_value
            node.out_port(0).data.set_shape([*input_shape[:axis], flat_dim, *input_shape[end_axis + 1:]])
        else:
            layout = node.graph.graph['layout']
            assert len(layout) == 4

            node.out_port(0).data.set_shape(shape_for_layout(layout,
                                                             batch=input_shape[get_batch_dim(layout, 4)],
                                                             features=(node.classes + node.coords + 1) * len(node.mask),
                                                             height=input_shape[get_height_dim(layout, 4)],
                                                             width=input_shape[get_width_dim(layout, 4)]))
Example 17
def concat_infer(node):
    if not node.has('axis'):
        N = node.N
        axis_input = node.in_node(N)
        if axis_input.has_valid('value') and axis_input.value.size == 1:
            node['axis'] = axis_input.value.item()
            node.graph.remove_edge(
                axis_input.node,
                node.node)  # TODO add skip attribute instead of deleting
        else:
            return
    else:
        N = len(node.in_nodes())

    shapes = [node.in_node(i).shape for i in range(N)]
    if any(s is None for s in shapes):
        return

    shape = np.array(shapes[0])

    axis = get_canonical_axis_index(shape, node.axis)
    node.axis = axis

    mask = np.zeros_like(shape, dtype=bool)
    mask[axis] = True  # pylint: disable=unsupported-assignment-operation
    not_mask = np.logical_not(mask)  # pylint: disable=assignment-from-no-return
    for s in shapes[1:]:
        s = int64_array(s)
        if np.all(shape[not_mask] ==
                  s[not_mask]):  # TODO handle -1 in a special way
            shape[mask] += s[mask]
        else:
            log.error('Concat input shapes do not match')
            return

    node.out_node(0).shape = shape
    if len(shape) != 4:
        # exclude it from NHWC to NCHW conversion
        if 'axis' in node.dim_attrs:
            node.dim_attrs.remove('axis')

    PermuteAttrs.create_permute_attrs(node, attrs=[('axis', 'input:0')])

    values = [node.in_node(i).value for i in range(N)]
    if any(v is None for v in values):
        return

    node.out_node(0).value = np.concatenate(values, axis=node.axis).astype(
        values[0].dtype, copy=False)
    node.out_node(0).shape = np.array(node.out_node(0).value.shape,
                                      dtype=np.int64)
Example 18
    def find_and_replace_pattern(self, graph: Graph):
        for node in graph.get_op_nodes(op='Slice'):
            node_name = node.soft_get('name', node.id)

            input_shape = node.in_port(0).data.get_shape()
            if node.is_in_port_connected(3):
                axes = node.in_port(3).data.get_value().copy()
                assert axes is not None, 'The input with axes is not constant for node {}'.format(node_name)
                for i, val in enumerate(axes):
                    axes[i] = get_canonical_axis_index(input_shape, val)
            else:
                axes = int64_array(range(len(input_shape)))

            ss_begin = create_ss_interval_border(graph, node.in_port(1).get_source(), input_shape, axes, node_name)
            ss_end = create_ss_interval_border(graph, node.in_port(2).get_source(), input_shape, axes, node_name)
            node.in_port(1).disconnect()
            node.in_port(2).disconnect()
            rename_nodes([(ss_begin, node_name + '/Begin'), (ss_end, node_name + '/End')])

            if node.is_in_port_connected(4):
                steps = node.in_port(4).data.get_value()
                assert steps is not None, 'The input with steps is not constant for node {}'.format(node_name)
            else:
                steps = np.ones([axes.size])

            ss_begin_mask = np.zeros(len(input_shape), dtype=np.int64)
            ss_end_mask = np.zeros(len(input_shape), dtype=np.int64)
            ss_step = np.ones(len(input_shape), dtype=np.int64)

            for i, axis in enumerate(axes):
                ss_begin_mask[axis] = 1
                ss_end_mask[axis] = 1
                ss_step[axis] = steps[i]

            ss_strides = Const(graph, dict(name=node_name + '/Strides', value=ss_step)).create_node()

            ss = StridedSlice(graph, dict(name='ss', new_axis_mask=np.zeros(len(input_shape), dtype=np.int64),
                                          shrink_axis_mask=np.zeros(len(input_shape), dtype=np.int64),
                                          ellipsis_mask=np.zeros(len(input_shape), dtype=np.int64),
                                          begin_mask=ss_begin_mask,
                                          end_mask=ss_end_mask)).create_node()

            node.in_port(0).get_connection().set_destination(ss.in_port(0))
            ss.in_port(1).connect(ss_begin.out_port(0))
            ss.in_port(2).connect(ss_end.out_port(0))
            ss.in_port(3).connect(ss_strides.out_port(0))
            node.out_port(0).get_connection().set_source(ss.out_port(0))

            rename_nodes([(node, node_name + '/ShouldBeDeleted'), (ss, node_name)])
Example 19
    def infer(node):
        input_shape = node.in_port(0).data.get_shape()
        shape_like = node.in_port(1).data.get_shape()

        new_shape = np.copy(input_shape)
        if node.axes is not None:
            node.axes = sorted(
                [get_canonical_axis_index(input_shape, i) for i in node.axes])
            for i in node.axes:
                new_shape[i] = shape_like[i]
        else:
            assert input_shape.size == shape_like.size,\
                'Input shape ranks are inconsistent: {} and {}'.format(input_shape.size, shape_like.size)
            node.axes = int64_array(range(shape_like.size))
            new_shape = np.copy(shape_like)

        node.out_port(0).data.set_shape(new_shape)
Example 20
    def infer(node: Node):
        real_squeeze_dims = int64_array([])
        input_shape = node.in_node().shape
        if input_shape is None:
            return

        output_shape = input_shape.copy()
        assert len(node.in_nodes()) == 2, \
            'The Squeeze node {} must have 2 inputs'.format(node.soft_get('name'))

        # TODO remove the following 'if' statement when IE starts supporting 0D tensors
        squeeze_dims = node.in_port(1).data.get_value()
        if squeeze_dims.ndim == 0:
            squeeze_dims = squeeze_dims.reshape([1])

        for dim in squeeze_dims:
            if output_shape[dim] == 1:
                real_squeeze_dims = np.append(
                    real_squeeze_dims,
                    get_canonical_axis_index(output_shape, dim))
            else:
                raise Error(
                    'Trying to squeeze dimension not equal to 1 for node "{}"'.
                    format(node.soft_get('name')))

        output_shape = np.delete(output_shape, real_squeeze_dims)
        node.out_node().shape = output_shape

        # make dimensions positive to correctly translate from NHWC to NCHW layout
        if node.in_port(1).get_source().node.op == 'Const':
            node.in_port(1).data.set_value(real_squeeze_dims)

        if node.in_port(0).data.get_value() is not None:
            node.out_port(0).data.set_value(
                node.in_port(0).data.get_value().reshape(output_shape))

        # the squeeze_dim attribute will be converted to the second input at the end of the Middle phase
        PermuteInputs().set_input_permutation(node.in_node(1), node, 'input:0',
                                              'axis')
Example 21
def arg_ops_infer(node: Node):
    shape = node.in_port(0).data.get_shape()
    node_name = node.soft_get('name', node.id)
    assert shape is not None, "Input shape for the node {} is None".format(
        node_name)

    # there are two inputs in TensorFlow. The second input is the axis for ArgMax
    connected_in_ports = [
        port for port in node.in_ports().values() if not port.disconnected()
    ]
    if len(connected_in_ports) == 2:
        axis = node.in_port(1).data.get_value()
        if axis is None:
            log.debug('The second argument to {} is None'.format(
                node.soft_get('name', node.id)))
            return
        node.axis = axis
        # remove the unnecessary input
        node.in_port(1).disconnect()

    num_top_axes = shape.size
    if num_top_axes < 3:
        num_top_axes = 3

    out_shape = np.ones(num_top_axes, dtype=np.int64)

    if node.has_valid('axis'):
        axis = get_canonical_axis_index(shape, node.axis)
        node.axis = axis
        out_shape = shape.copy()
        out_shape[axis] = node.top_k
        PermuteAttrs.create_permute_attrs(node, attrs=[('axis', 'input:0')])
    else:
        out_shape[0] = shape[0]
        out_shape[2] = node.top_k
        if node.has_and_set('out_max_val'):
            out_shape[1] = 2

    node.out_port(0).data.set_shape(out_shape)
Example 22
def concat_infer(node):
    node_name = node.soft_get('name', node.id)
    if not node.has('axis'):
        N = node.N
        axis_input = node.in_node(N)
        if axis_input.has_valid('value') and axis_input.value.size == 1:
            node['axis'] = axis_input.value.item()
            node.graph.remove_edge(
                axis_input.node,
                node.node)  # TODO add skip attribute instead of deleting
        else:
            raise Error(
                'Input with value is not specified for node "{}"'.format(
                    node_name))
    else:
        N = len(node.in_nodes())

    shapes = [node.in_node(i).shape for i in range(N)]
    if any(s is None for s in shapes):
        raise Error(
            'One of the input shapes is not defined for node "{}"'.format(
                node_name))

    shape = shape_array(shapes[0])

    axis = get_canonical_axis_index(shape, node.axis)
    node.axis = axis

    mask = np.zeros_like(shape, dtype=bool)
    mask[axis] = True  # pylint: disable=unsupported-assignment-operation
    not_mask = np.logical_not(mask)  # pylint: disable=assignment-from-no-return
    for s in shapes[1:]:
        s = shape_array(s)
        if np.ma.allequal(shape[not_mask], s[not_mask]):
            shape[mask] += s[mask]
        else:
            raise Error(
                'Concat input shapes do not match for node "{}" with axis {}'.
                format(node_name, axis))

    #  dynamic dimensions in the output (except the concat axis) can be deduced from input shape
    for pos in range(len(shape)):
        if shape[pos] is dynamic_dimension and pos != axis:
            for in_shape in shapes:
                if in_shape[pos] is not dynamic_dimension:
                    shape[pos] = in_shape[pos]

    node.out_port(0).data.set_shape(shape)
    PermuteAttrs.create_permute_attrs(node, attrs=[('axis', 'input:0')])

    values = [node.in_node(i).value for i in range(N)]
    if any([v is None for v in values]):
        return

    # if one of the input values is dynamic, the output tensor type is inferred from one of the fully defined inputs
    output_dtype = np.int64
    for input_value in values:
        if is_fully_defined(input_value):
            output_dtype = input_value.dtype

    if any(not is_fully_defined(v) for v in values):
        node.out_port(0).data.set_value(
            np.ma.concatenate(values, axis=node.axis).astype(output_dtype))
    else:  # there is a serious performance benefit to use concatenation as it is implemented below
        node.out_node(0).value = np.concatenate(values, axis=node.axis).astype(
            values[0].dtype, copy=False)
        node.out_node(0).shape = shape_array(node.out_node(0).value.shape)
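
A minimal NumPy illustration (hypothetical inputs, not Model Optimizer code) of the Concat shape rule enforced above: every dimension except the concat axis must match, and the axis dimension is the sum over the inputs.

import numpy as np

a = np.zeros((2, 3, 4))
b = np.zeros((2, 5, 4))
print(np.concatenate((a, b), axis=1).shape)  # (2, 8, 4)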
Example 23
    def infer(node: Node):
        shape = np.copy(node.in_node().shape)
        node.axis = get_canonical_axis_index(shape, node.axis)
        rep = node.repeats
        shape[node.axis] = shape[node.axis] * rep
        node.out_node().shape = shape
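
A tiny NumPy analogue (hypothetical values) of the Tile shape inference above: the size of the tiled axis is multiplied by repeats.

import numpy as np

x = np.zeros((2, 3))
print(np.tile(x, (1, 4)).shape)  # (2, 12): axis 1 grew from 3 to 3 * 4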
Example 24
    def replace_pattern(self, graph: nx.MultiDiGraph, match: dict):
        node = match['reduce']
        if not node.has_valid('reduce_type') or node.reduce_type.lower() not in self.supported_reduce_types:
            log.error("Reduce type {} is not supported for node {}".format(node.soft_get('reduce_type'), node.id))
            return

        reduce_type = node.reduce_type.lower()
        if reduce_type not in self.pool_method_map:
            log.error("Reduce type {} is not included in pool_method_map. Please update pool_method_map with new key "
                      "{}".format(reduce_type, reduce_type))
            return

        input_data = node.in_node()
        output_data = node.out_node()

        input_shape = node.in_node().shape
        output_shape = node.out_node().shape

        # normalize node.axis to exclude negative indices
        node.axis = [get_canonical_axis_index(input_shape, a) for a in node.axis]

        axis = node.axis

        # Check that values in axis list are consecutive
        for idx in range(1, len(axis)):
            if axis[idx] != (axis[idx - 1] + 1):
                log.error("Reduce with non-consecutive axes {} is not supported".format(axis))
                return

        layout = graph.graph['layout']

        # So now we are sure that we can convert Reduce to appropriate operation

        # 1. Calculate shape that will be used in reduction
        reduction_dim = np.prod([input_shape[idx] for idx in axis])
        begin_dims = np.array([input_shape[idx] for idx in range(axis[0])])
        end_dim = np.prod([input_shape[idx] for idx in range(axis[-1] + 1, len(input_shape))])

        # 2. Create reshape with appropriate shape
        if layout == 'NCHW':
            if len(begin_dims) > 2:
                begin_dims = np.array([np.prod(begin_dims[0:-1]), begin_dims[-1]], dtype=np.int64)
            else:
                # Expand begin_dims to 2
                begin_dims = np.array(np.append(begin_dims, [1] * (2 - len(begin_dims))), dtype=np.int64)
            reshape_shape = np.array([*begin_dims, reduction_dim, end_dim], dtype=np.int64)
            pool_window = np.array([1, 1, reduction_dim, 1], dtype=np.int64)
        elif layout == 'NHWC':
            begin_dims = np.prod(begin_dims)
            reshape_shape = np.array([begin_dims, reduction_dim, 1, end_dim], dtype=np.int64)
            pool_window = np.array([1, reduction_dim, 1, 1], dtype=np.int64)
        else:
            log.error('{} layout currently is not supported'.format(layout))
            return

        # 3. Reduce => Reshape->Pooling->Reshape
        reshape_op = Reshape(graph, {'name': node.id + '/Reshape', 'dim': reshape_shape})
        final_reshape_op = Reshape(graph, {'name': node.id + '/FinalReshape', 'dim': output_shape})
        pooling_op = Pooling(graph,
                             dict(name=node.id + '/Pool',
                                  window=pool_window,
                                  output_spatial_shape=None,
                                  batch_dims=np.array([get_batch_dim(layout, 4)], dtype=np.int64),
                                  channel_dims=np.array([get_features_dim(layout, 4)], dtype=np.int64),
                                  exclude_pad='false', pool_method=self.pool_method_map[reduce_type]))

        graph.remove_edge(input_data.id, node.id)
        graph.remove_edge(node.id, output_data.id)

        final_reshape_op.create_node_with_data(
            inputs=[pooling_op.create_node_with_data(
                inputs=[reshape_op.create_node_with_data(
                    inputs=[input_data]
                )]
            )],
            data_nodes=output_data)

        # 4. If it is reduction with summation, we need to multiply by size of the reduction slice with Mul op
        if reduce_type == 'sum':
            output_data.in_node().insert_node_with_data_after(
                output_data,
                Power,
                {'name': node.name + '/Mul', 'scale': float(reduction_dim)}
            )
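
The arithmetic from step 1 above, spelled out with hypothetical NCHW shapes in plain NumPy (for illustration only): consecutive reduction axes are collapsed into a single reduction_dim, flanked by the leading dimensions and the product of the trailing ones.

import numpy as np

input_shape = np.array([1, 64, 7, 7])
axes = [2, 3]  # reduce over the spatial dimensions

reduction_dim = np.prod([input_shape[i] for i in axes])                             # 49
begin_dims = np.array([input_shape[i] for i in range(axes[0])])                     # [1, 64]
end_dim = np.prod([input_shape[i] for i in range(axes[-1] + 1, len(input_shape))])  # 1 (empty product)

reshape_shape = np.array([*begin_dims, reduction_dim, end_dim], dtype=np.int64)
print(reshape_shape)  # [ 1 64 49  1], to be pooled with a window of [1, 1, 49, 1]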
Example 25
    def replace_pattern(self, graph: Graph, match: dict):
        node = match['reduce']

        if node.out_port(0).data.get_value() is not None:
            # We leave Reduce* operations located in constant sub-graph as is
            # to keep model reshapable with --keep_shape_ops cli key
            return

        reduce_type = node.type
        if reduce_type not in self.pool_method_map:
            log.error(
                "Reduce type {} is not included in pool_method_map. Please update pool_method_map with new key "
                "{}".format(reduce_type, reduce_type))
            return

        input_data = node.in_node()
        output_data = node.out_node()

        input_shape = node.in_port(0).data.get_shape()
        output_shape = node.out_port(0).data.get_shape()

        # normalize node axes to exclude negative indices
        axes_data_value = node.in_port(1).data.get_value()
        axes = int64_array([
            axes_data_value.item()
        ]) if axes_data_value.size == 1 else axes_data_value
        axes = [get_canonical_axis_index(input_shape, a) for a in axes]
        axes = sorted(axes)

        # Check that values in axes list are consecutive
        for idx in range(1, len(axes)):
            if axes[idx] != (axes[idx - 1] + 1):
                log.error(
                    "Reduce with non-consecutive axes {} is not supported".
                    format(axes))
                return
        # So now we are sure that we can convert Reduce to appropriate operation

        # 1. Calculate shape that will be used in reduction
        reduction_dim = np.prod([input_shape[idx] for idx in axes])
        begin_dims = np.array([input_shape[idx] for idx in range(axes[0])])
        end_dim = np.prod([
            input_shape[idx] for idx in range(axes[-1] + 1, len(input_shape))
        ])

        # 2. Create reshape with appropriate shape
        if len(begin_dims) > 2:
            if 0 not in axes:
                begin_dims = int64_array(
                    [begin_dims[0], np.prod(begin_dims[1:])])
            else:
                begin_dims = int64_array(
                    [np.prod(begin_dims[0:-1]), begin_dims[-1]])
        else:
            # Expand begin_dims to 2
            begin_dims = int64_array(
                np.append(begin_dims, [1] * (2 - len(begin_dims))))

        reshape_shape = int64_array([*begin_dims, reduction_dim, end_dim])
        pool_window = int64_array([1, 1, reduction_dim, 1])

        if end_dim == 1:
            new_window = ReduceReplacer.initial_reshape_dim_normalizer(
                reduction_dim)
            reshape_shape = int64_array([*begin_dims, *new_window])
            pool_window = int64_array([1, 1, *new_window])

        # 3. Reduce => Reshape->Pooling->Reshape
        reshape_op = Reshape(graph, {'name': node.id + '/Reshape'})
        reshape_dim_const_data = Const(graph, {
            'name': node.id + '/Reshape/Dim',
            'value': reshape_shape
        }).create_node_with_data()

        final_reshape_op = Reshape(graph, {'name': node.id + '/FinalReshape'})
        final_reshape_dim_const_data = Const(graph, {
            'name': node.id + '/FinalReshape/Dim',
            'value': output_shape
        }).create_node_with_data()
        pooling_op = Pooling(
            graph,
            dict(name=node.id + '/Pool',
                 window=pool_window,
                 output_spatial_shape=None,
                 batch_dims=int64_array([0]),
                 channel_dims=int64_array([1]),
                 exclude_pad='false',
                 pool_method=self.pool_method_map[reduce_type]))

        graph.remove_edge(input_data.id, node.id)
        graph.remove_edge(node.id, output_data.id)

        if np.array_equal(input_shape, reshape_shape):
            input_to_pooling = input_data
        else:
            input_to_pooling = reshape_op.create_node_with_data(
                inputs=[input_data, reshape_dim_const_data])
        pooling = pooling_op.create_node_with_data(inputs=[input_to_pooling])
        final_reshape_op.create_node_with_data(
            inputs=[pooling, final_reshape_dim_const_data],
            data_nodes=output_data)

        # convert batch dimension to 0 to produce reshape-able IR over the batch dimension
        if 0 not in axes:
            reshape_dim_const_data.in_node(0).value[0] = 0
            final_reshape_dim_const_data.in_node(0).value[0] = 0

        # 4. If it is reduction with summation, we need to multiply by size of the reduction slice with Mul op
        if reduce_type == 'ReduceSum':
            output_data.in_node().insert_node_with_data_after(
                output_data, AttributedPower, {
                    'name': node.name + '/Mul',
                    'scale': float(reduction_dim)
                })
Example 26
    def test_positive_index(self):
        shape = [1, 2, 3, 4]
        inds = [0, 1, 2, 3]
        expected_inds = [0, 1, 2, 3]
        for i in range(len(inds)):
            assert get_canonical_axis_index(shape, inds[i]) == expected_inds[i]
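
A companion sketch (not from the original test suite) assuming the behaviour that every example above relies on: get_canonical_axis_index counts negative axes from the end of the shape, i.e. it returns axis + len(shape) for axis < 0.

    def test_negative_index(self):
        # assumes get_canonical_axis_index is in scope exactly as in the test above
        shape = [1, 2, 3, 4]
        inds = [-4, -3, -2, -1]
        expected_inds = [0, 1, 2, 3]
        for i in range(len(inds)):
            assert get_canonical_axis_index(shape, inds[i]) == expected_inds[i]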
Example 27
    def _two_inputs_infer(node: Node):
        N = len(node.in_nodes())

        shapes = [node.in_node(i).shape for i in range(N)]
        if any(s is None for s in shapes):
            log.error('Not all input shapes were defined for {} node'.format(
                node.name))
            return

        if not node.has_valid('axis'):
            log.error(
                'axis attribute is missing for {} node. It should be set in the crop extractor'
                .format(node.name))
            return

        if not node.has_valid('offset'):
            log.error(
                'offset attribute is missing for {} node. It should be set in the crop extractor'
                .format(node.name))
            return

        input_shape = np.array(shapes[0])
        start_axis = get_canonical_axis_index(input_shape, node.axis)
        node.axis = start_axis

        reference_shape = np.array(shapes[1])
        if node.has_valid('axes'):
            '''
            The axes parameter contains shape indices of the second input and
            shows which of those indices to use for the dim attribute.
            '''
            input_dim = node.axes
            node.in_port(1).disconnect()
        else:
            input_dim = list(range(0, input_shape.size))

        # set new shape to current shape
        new_shape = input_shape.copy()
        ir_axis = []
        ir_offset = []
        dim = []

        for i in input_dim:
            if i < start_axis:
                new_shape[i] = input_shape[i]
                continue

            crop_offset = 0
            if len(node.offset) == 1:
                crop_offset = node.offset[0]
            elif len(node.offset) > 1:
                crop_offset = node.offset[i - start_axis]

            if input_shape[i] - crop_offset < reference_shape[i]:
                log.error('The crop for dimension is out of bounds in ' +
                          node.node)
                return

            dim.append(reference_shape[i])
            ir_axis.append(i)
            ir_offset.append(crop_offset)
            new_shape[i] = reference_shape[i]

        node.axis = ir_axis
        node.offset = ir_offset
        node['dim'] = dim
        node.out_node().shape = new_shape

        if node.in_node(0).has_valid('value') and not node.graph.graph[
                'cmd_params'].enable_ssd_gluoncv:
            out_value = np.copy(node.in_node(0).value)

            slice_indexes = []
            for s in out_value.shape:
                slice_indexes.append(slice(0, s))

            for axis in input_dim:
                slice_indexes[axis] = slice(0, new_shape[axis])
                out_value = out_value[tuple(slice_indexes)]
            node.out_node().value = out_value

        PermuteAttrs.create_permute_attrs(node, attrs=[('axis', 'input:0')])