Example 1
    def infer(node):
        name = node.soft_get('name', node.id)

        connected_in_ports = {
            idx: port
            for idx, port in node.in_ports().items()
            if not port.disconnected()
        }
        assert len(connected_in_ports) == 1 and 0 in connected_in_ports, \
            "AttributedTile should have 1 connected input port, but it doesn't for node: `{}`. Ports: {}" \
            "".format(name, connected_in_ports)

        shape = node.in_port(0).data.get_shape()
        assert shape is not None, "Undefined input shape for AttributedTile node '{}'.".format(
            name)
        axis = node.soft_get('axis', None)
        assert axis is not None, "Undefined `axis` attribute of AttributedTile node '{}'".format(name)
        tiles = node.soft_get('tiles', None)
        assert tiles is not None, "Undefined `tiles` attribute of AttributedTile node '{}'".format(name)

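        # Build the repetition vector: 1 for every dimension except `axis`, which is repeated `tiles` times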
        tile_array = int64_array(np.ones(shape.size))
        tile_array[axis] = tiles

        node.out_port(0).data.set_shape(shape * tile_array)
        if node.in_port(0).data.get_value() is not None:
            node.out_port(0).data.set_value(
                np.tile(node.in_port(0).data.get_value(), tile_array))

        PermuteAttrs.create_permute_attrs(node, attrs=[('axis', 'input:0')])
Example 2
    def _one_input_infer(node: Node):
        input_shape = node.in_port(0).data.get_shape()
        node_name = node.soft_get('name', node.id)
        if input_shape is None:
            raise Error('Input shape is None for node {}'.format(node_name))

        if not node.has_valid('axis'):
            raise Error('The `axis` attribute is missing for node {}; it should be set in the Crop extractor'.format(node_name))

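        # Two modes: `dim` sets the new size along each axis directly; `crop_begin`/`crop_end` trim from both ends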
        output_shape = input_shape.copy()
        if node.has_valid('dim'):
            if len(node.dim) != len(node.axis):
                raise Error('Number of axes "{}" should match number of dims "{}" for node "{}"'
                            ''.format(node.axis, node.dim, node_name))
            output_shape[node.axis] = node.dim
        elif node.has_valid('crop_begin') and node.has_valid('crop_end'):
            if len(node.crop_begin) != len(node.axis) or len(node.crop_end) != len(node.axis):
                raise Error('Number of crop_begin ({}) / crop_end ({}) values should match number of axes "{}" for node "{}"'
                            ''.format(node.crop_begin, node.crop_end, node.axis, node_name))
            if isinstance(node.axis, (list, tuple)):
                for i in range(len(node.axis)):
                    output_shape[node.axis[i]] = output_shape[node.axis[i]] - node.crop_begin[i] - node.crop_end[i]
            else:
                output_shape[node.axis] = output_shape[node.axis] - node.crop_begin - node.crop_end
        else:
            raise Error('Crop node {} should have either the `dim` attribute or both `crop_begin` and `crop_end` attributes'.format(node_name))

        node.out_port(0).data.set_shape(output_shape)
        PermuteAttrs.create_permute_attrs(node, attrs=[('axis', 'input:0')])
Example 3
    def find_and_replace_pattern(self, graph: Graph):
        for node in graph.get_op_nodes(type='StridedSlice'):
            StridedSliceNormalizer.normalize_strided_slice(graph, node)
            PermuteAttrs.create_permute_attrs(
                node,
                attrs=[
                    ('begin_mask', 'input:0'),  # in fact this depends on slice_rank
                    ('end_mask', 'input:0'),
                    ('new_axis_mask', 'input:0'),
                    ('shrink_axis_mask', 'input:0'),
                    ('ellipsis_mask', 'input:0')
                ])

            # StridedSliceNormalizer inserted nodes that changed the original begin, end, and strides data nodes,
            # so the correct permutations could not be set any earlier than this point
            PermuteInputs().set_input_permutation(node.in_node(1), node,
                                                  'input:1', 'slice',
                                                  'dim_size')
            PermuteInputs().set_input_permutation(node.in_node(2), node,
                                                  'input:2', 'slice',
                                                  'dim_size')
            if node.is_in_port_connected(3):
                PermuteInputs().set_input_permutation(node.in_node(3), node,
                                                      'input:3', 'slice',
                                                      'dim_size')

            # If there are new_axis_mask or shrink_axis_mask then StridedSlice should be performed in the
            # original layout, same as for Squeeze, Unsqueeze, Reshape, Gather
            if np.count_nonzero(node['new_axis_mask']) > 0 or np.count_nonzero(
                    node['shrink_axis_mask']) > 0:
                node['reinterp_shape'] = True
                node['nchw_layout'] = True
Example 4
def arg_ops_infer(node: Node):
    shape = node.in_port(0).data.get_shape()
    node_name = node.soft_get('name', node.id)
    assert shape is not None, "Input shape for the node {} is None".format(node_name)

    # there are two inputs in TensorFlow. The second input is the axis for ArgMax
    connected_in_ports = [port for port in node.in_ports().values() if not port.disconnected()]
    if len(connected_in_ports) == 2:
        axis = node.in_port(1).data.get_value()
        if axis is None:
            log.debug('The second argument to {} is None'.format(node.soft_get('name', node.id)))
            return
        node.axis = axis
        # remove the unnecessary input
        node.in_port(1).disconnect()

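    # Without an explicit axis, the Caffe-style output layout is used: at least rank 3, [N, 1 or 2, top_k] (see the else branch below)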
    num_top_axes = max(shape.size, 3)

    out_shape = np.ones(num_top_axes, dtype=np.int64)

    if node.has_valid('axis'):
        axis = get_canonical_axis_index(shape, node.axis)
        node.axis = axis
        out_shape = shape.copy()
        out_shape[axis] = node.top_k
        PermuteAttrs.create_permute_attrs(node, attrs=[('axis', 'input:0')])
    else:
        out_shape[0] = shape[0]
        out_shape[2] = node.top_k
        if node.has_and_set('out_max_val'):
            out_shape[1] = 2

    node.out_port(0).data.set_shape(out_shape)
Example 5
    def infer(node):
        in_ports = node.in_ports()
        connected_ports = [
            port for port in in_ports.values() if not port.disconnected()
        ]
        assert len(connected_ports) == 2, \
            'The number of inputs to the TopK layer "{}" must be equal to 2.'.format(node.soft_get('name'))

        k = node.in_port(1).data.get_value()
        if k is None:
            raise Error(
                'The value defining the number of output elements for layer "{}" is not defined'
                ''.format(node.soft_get('name')))
        assert node.has_valid('axis'), \
            'The "axis" attribute is not defined for node {}'.format(node.name)

        input_shape = node.in_port(0).data.get_shape()
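        # Normalize a negative axis to a positive index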
        node.axis = len(input_shape) + node.axis if node.axis < 0 else node.axis
        output_shape = input_shape.copy()
        output_shape[node.axis] = k

        PermuteAttrs.create_permute_attrs(node, attrs=[('axis', 'input:0')])

        # setting shape and value if applicable
        if not node.out_port(0).disconnected():
            node.out_port(0).data.set_shape(output_shape)
        if not node.out_port(1).disconnected():
            node.out_port(1).data.set_shape(output_shape)
        if node.in_port(0).data.get_value() is not None:
            # TODO implement value propagation
            pass
Example 6
    def infer(node):
        name = node.soft_get('name', node.id)

        op = node.soft_get('op', None)
        assert op is not None and op in ['Split', 'AttributedSplit'], \
            'Unexpected `op`={} attribute for Split-like node {}'.format(op, name)

        num_in_ports = 1 if op == 'AttributedSplit' else 2 if op == 'Split' else None
        assert num_in_ports in [1, 2], \
            'SplitBase supports AttributedSplit with 1 input and Split with 2 inputs, but it is {} for {} node {}' \
            ''.format(num_in_ports, op, name)

        connected_inputs = {
            idx: port
            for idx, port in node.in_ports().items()
            if not port.disconnected()
        }
        assert len(connected_inputs) == num_in_ports and all([i in connected_inputs for i in range(num_in_ports)]), \
            "{} should have {} connected input ports, but it doesn't for node: `{}`. Ports: {}" \
            "".format(op, num_in_ports, name, connected_inputs)

        input_shape = node.in_port(0).data.get_shape()
        assert input_shape is not None, 'Input shape is unknown for node {}'.format(
            name)
        assert node.has_valid(
            'num_splits'
        ), 'Parameter `num_splits` is unknown for node {}'.format(name)
        num_splits = node.num_splits

        axis = node.in_port(1).data.get_value() if op == 'Split' else node.soft_get('axis', None)
        assert axis is not None, '{} `axis` is unknown for node {}'.format(op, name)
        assert axis.ndim == 0, '{} `axis` should be a scalar, but it is not for node {}'.format(op, name)

        assert not is_fully_defined(input_shape[axis]) or input_shape[axis] % num_splits == 0, \
            'Input shape is not evenly divided by `num_splits` of {} node {}. `input_shape`={}, `axis`={}, ' \
            '`num_splits`={}'.format(op, name, input_shape, axis, num_splits)

        out_shape = input_shape.copy()
        out_shape[axis] = input_shape[axis] // num_splits

        input_value = node.in_port(0).data.get_value()
        output_value = np.split(input_value.copy(), axis=axis, indices_or_sections=num_splits) \
            if input_value is not None else None

        for idx, port in node.out_ports().items():
            if idx in node.out_nodes():
                port.data.set_shape(out_shape)
                if output_value is not None:
                    port.data.set_value(output_value[idx])

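        # The axis value must follow the layout permutation of input 0, whether it comes
        # from input port 1 (Split) or from a node attribute (AttributedSplit)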
        if op == 'Split':
            PermuteInputs().set_input_permutation(node.in_node(1), node,
                                                  'input:0', 'axis')
        elif op == 'AttributedSplit':
            PermuteAttrs.create_permute_attrs(node,
                                              attrs=[('axis', 'input:0')])
Example 7
    def infer(node):
        name = node.soft_get('name', node.id)
        assert node.has_valid('shape'), \
            'Parameter node {} should have the `shape` attribute. Please use command line options to set the model input shape' \
            ''.format(name)
        node.out_port(0).data.set_shape(node.shape)

        PermuteAttrs.create_permute_attrs(node, attrs=[('shape', 'output:0')])
Example 8
 def infer(node: Node):
     assert len([port for port in node.in_ports().values() if not port.disconnected()]) == 1,\
         'LogSoftmax node with id {} must have exactly one connected input port'.format(node.id)
     if node.axis < 0:
         node.axis = len(node.in_port(0).data.get_shape()) + node.axis
     assert 0 <= node.axis < len(node.in_port(0).data.get_shape()),\
         'LogSoftmax node with id {} has an out-of-range axis attribute'.format(node.id)
     copy_shape_infer(node)
     PermuteAttrs.create_permute_attrs(node, attrs=[('axis', 'input:0')])
Example 9
    def infer(node: Node):
        node['order'] = list(range(node.in_node().shape.size))
        node.order[node.dim2], node.order[node.dim1] = node.order[node.dim1], node.order[node.dim2]

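        # `order` is now a full transpose permutation; apply it to the shape and, if present, the value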
        input_shape = node.in_port(0).data.get_shape().copy()
        node.out_port(0).data.set_shape(input_shape[node.order])
        if node.in_port(0).data.get_value() is not None:
            node.out_port(0).data.set_value(
                np.transpose(node.in_port(0).data.get_value(),
                             axes=node.order))

        PermuteAttrs.create_permute_attrs(node, attrs=[('order', 'input:0')])
Example 10
    def infer(node):
        input_data_shape = node.in_port(0).data.get_shape()
        assert input_data_shape is not None
        assert node.has_valid('seq_axis')
        assert node.has_valid('batch_axis')

        assert len(node.out_nodes()) == 1
        node.out_port(0).data.set_shape(input_data_shape)

        PermuteAttrs.create_permute_attrs(node, attrs=[('seq_axis', 'input:0'),
                                                       ('batch_axis', 'input:0')])
Example 11
    def reorgyolo_infer(node: Node):
        input_shape = node.in_node(0).shape
        if input_shape is None:
            raise Error('Input shape for operation "{}" is None'.format(node.soft_get('name', node.id)))

        stride = node.stride

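        # ReorgYolo repacks stride x stride spatial blocks into channels:
        # C grows by stride^2 while H and W shrink by stride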
        output_shape = input_shape.copy()
        output_shape[node.batch_dims] = input_shape[node.batch_dims]  # pylint: disable=unsupported-assignment-operation
        output_shape[node.channel_dims] = input_shape[node.channel_dims] * stride ** 2  # pylint: disable=unsupported-assignment-operation
        # Round as in Caffe
        output_shape[node.spatial_dims] = np.ma.round(input_shape[node.spatial_dims] / stride)  # pylint: disable=unsupported-assignment-operation

        node.out_port(0).data.set_shape(output_shape)
        PermuteAttrs.create_permute_attrs(node, attrs=[('channel_dims', 'input:0'), ('spatial_dims', 'input:0')])
Example 12
def infer_for_opset1(node: Node):
    assert len([p for p in node.in_ports().values() if not p.disconnected()]) == 2
    assert node.has_valid('mode')
    assert node.has_valid('axes')

    src_shape = node.in_port(0).data.get_shape()

    assert src_shape is not None
    dst_shape = node.in_port(1).data.get_value()
    assert dst_shape is not None

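    # Only axes listed in the `axes` attribute are resized; target sizes are taken positionally from the second input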
    output_shape = src_shape.copy()
    for ind, axis in enumerate(node.axes):
        output_shape[axis] = dst_shape[ind]

    node.out_port(0).data.set_shape(output_shape)

    PermuteAttrs.create_permute_attrs(node, attrs=[('axes', 'input:0')])
Example 13
    def infer(node: Node):
        data_shape = node.in_port(0).data.get_shape()
        indices_shape = node.in_port(1).data.get_shape()
        axis = node.axis
        data_rank = len(data_shape)

        assert data_rank >= 1, 'data_rank must be >= 1'
        assert data_rank == len(indices_shape), 'data and indices inputs for node {} must be of the ' \
                                                'same rank. Instead got {} and {}'. \
            format(node.name, data_rank, len(indices_shape))
        assert -data_rank <= axis < data_rank, 'axis for node {0} must be within interval ' \
                                               '[-{1},  {1} - 1]. Instead got: axis={2}'. \
            format(node.name, data_rank, axis)
        if axis < 0:
            axis += data_rank
        out_shape = indices_shape.copy()
        for idx, (data_sz, ind_sz) in enumerate(zip(data_shape, indices_shape)):
            out_shape[idx] = ind_sz if ind_sz is not dynamic_dimension or idx == axis else data_sz
            if idx != axis and data_sz != ind_sz:
                raise Error(
                    'Sizes along axis {} for node {} do not match. data and indices must have '
                    'equal size along all axes except for axis {}'.format(
                        idx, node.name, axis))

        data = node.in_port(0).data.get_value()
        indices = node.in_port(1).data.get_value()

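        # Value propagation: for every output index, replace the coordinate along `axis`
        # with the corresponding entry of `indices`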
        if data is not None and indices is not None:
            out_value = np.empty(indices_shape, dtype=data.dtype)
            for idx in np.ndindex(*indices_shape):
                data_idx = list(idx)
                data_idx[axis] = indices[idx]
                out_value[idx] = data[tuple(data_idx)]
            node.out_port(0).data.set_value(out_value)
        else:
            node.out_port(0).data.set_shape(out_shape)

        PermuteAttrs.create_permute_attrs(node, attrs=[('axis', 'input:0')])
Example 14
    def infer(node: Node):
        name = node.soft_get('name', node.id)

        connected_in_ports = {
            idx: port
            for idx, port in node.in_ports().items()
            if not port.disconnected()
        }
        assert len(connected_in_ports) == 2 and 0 in connected_in_ports and 1 in connected_in_ports, \
            "AttributedGather should have 2 connected input ports, but it doesn't for node: `{}`. Ports: {}" \
            "".format(name, connected_in_ports)

        axis = node.soft_get('axis', None)
        assert axis is not None, 'The `axis` attribute is not set for AttributedGather node {}'.format(name)

        data_shape = node.in_port(0).data.get_shape()
        assert data_shape is not None
        indices_shape = node.in_port(1).data.get_shape()
        assert indices_shape is not None

        # Convert negative axis
        axis = get_canonical_axis_index(data_shape, axis)
        node.axis = axis

        PermuteAttrs.create_permute_attrs(node, attrs=[('axis', 'input:0')])

        data_value = node.in_port(0).data.get_value()
        indices_value = node.in_port(1).data.get_value()
        if data_value is not None and indices_value is not None:
            node.out_port(0).data.set_value(
                mo_array(np.take(data_value, indices_value, axis),
                         dtype=data_value.dtype))
            return

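        # Output shape is data_shape[:axis] + indices_shape + data_shape[axis + 1:]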
        shape = np.concatenate((data_shape[:axis], indices_shape))
        if axis < len(data_shape) - 1:
            shape = np.concatenate((shape, data_shape[axis + 1:]))

        node.out_port(0).data.set_shape(int64_array(shape))
Example 15
    def infer(node: Node):
        """
        Infers shape of convolution node as it is done in ONNX.
        It is very similar to the one Caffe uses, but slightly different.
        This is a complete fork of that function because the two versions are
        maintained independently.
        Args:
            node: graph convolution node
        """
        input_shape = node.in_port(0).data.get_shape()
        if input_shape is None:
            raise Error('Input data shape is None for node {}'.format(
                node.soft_get('name', node.id)))

        # bias_term cannot be deduced earlier for frameworks that represent
        # convolution weights/biases as regular inputs, so the number of inputs
        # is checked here to restore the correct value of bias_term and keep
        # the rest of the code unchanged. It will be used after we merge
        # several per-framework convolution infer functions into a single one.
        if not node.has_valid('bias_term'):
            node['bias_term'] = len(node.in_nodes()) == 3

        weights_index = node.weights_index if node.has_valid('weights_index') else 1
        # Reshape weights kernel to original shape
        # In case of caffe or MXNet framework, values for weights have no structured shape like OIHW
        # so we have to reshape weights to normal shape
        # For this case, Convolution node should have attribute reshape_kernel = True
        if node.has_valid('reshape_kernel') and node.reshape_kernel:
            if not (node.has_valid('output') and node.has_valid('channel_dims')
                    and node.has_valid('group')
                    and node.has_valid('kernel_spatial')):
                log.error(
                    'Cannot reshape kernel: not all required attributes are set for node {}'
                    .format(node.id))
                return
            # layout for Convolution weights is OIHW
            kernel_shape = shape_array([
                node.output,
                input_shape[node.channel_dims].item() // node.group, *[
                    node.kernel_spatial[i]
                    for i in range(len(node.kernel_spatial))
                ]
            ])
            if node.type == 'Deconvolution':  # layout for Deconvolution weights is IOHW
                kernel_shape[[0, 1]] = kernel_shape[[1, 0]]

            if is_fully_defined(
                    kernel_shape) and np.prod(kernel_shape) != np.prod(
                        node.in_node(weights_index).value.shape):
                log.error(
                    "Size of weights {} does not match kernel shape: {}\n"
                    "".format(np.prod(node.in_node(weights_index).value.shape),
                              kernel_shape) +
                    "    Possible reason is wrong channel number in input shape\n"
                )
                raise Error("Cannot reshape weights to kernel shape")

            if not is_fully_defined(kernel_shape):
                num_undefined = np.count_nonzero(kernel_shape.mask)  # pylint: disable=no-member
                if num_undefined > 1:
                    raise Error(
                        'Too many undefined dimensions of the kernel shape for node {}. Use --input_shape '
                        'command line parameter to specify model input shapes'.
                        format(node.soft_get('name', node.id)))
                kernel_size = np.prod(node.in_node(weights_index).value.shape)
                # calculate undefined dimension using fully defined shape of the weights input and known kernel_shape
                # dimensions
                kernel_shape[np.where(kernel_shape == np.ma.masked)[0][0]] = kernel_size // np.prod(kernel_shape)

            node.in_node(weights_index).shape = shape_array(kernel_shape)
            node.in_node(weights_index).value = np.reshape(
                node.in_node(weights_index).value, kernel_shape)
            node.reshape_kernel = False

        # Pass weights shape to node attribute kernel_shape
        kernel_shape = node.in_node(weights_index).shape
        node['kernel_shape'] = kernel_shape
        # Calculate kernel_spatial_idx and spatial_dims if they are not specified.
        # This is necessary for ONNX because a convolution can be 1D/2D/3D.
        if not node.has_valid('kernel_spatial_idx'):
            node['kernel_spatial_idx'] = np.delete(
                [x for x in range(len(kernel_shape))],
                (node.input_feature_channel, node.output_feature_channel))

        if not node.has_valid('spatial_dims'):
            node['spatial_dims'] = np.delete(
                [x for x in range(len(input_shape))],
                (node.channel_dims[0], node.batch_dims[0]))

        node['kernel_spatial'] = kernel_shape[node.kernel_spatial_idx]

        if not node.has_valid('output'):
            # restore the number of output feature maps from the second argument that is weights
            if node.type in [
                    'Convolution', 'Deconvolution', 'DeformableConvolution',
                    'BinaryConvolution'
            ]:
                node['output'] = kernel_shape[node.output_feature_channel]
            else:
                raise Error(
                    'Convolution infer function was called for a node {} with unsupported type {}'.format(
                        node.soft_get('name'), node.type))

        # Set default values for dilation, strides and pads if not set
        if not node.has_valid('dilation'):
            node['dilation'] = np.full([len(input_shape)], 1, dtype=np.int64)
        if not node.has_valid('stride'):
            node['stride'] = np.full([len(input_shape)], 1, dtype=np.int64)
        if not node.has_valid('pad'):
            node['pad'] = int64_array([[0, 0]] * len(input_shape))
        node['pad_spatial_shape'] = node.pad[node.spatial_dims]

        if not node.has_valid('output_padding'):
            node['output_padding'] = np.full([len(input_shape)],
                                             0,
                                             dtype=np.int64)

        if node.has_valid('output_padding') and len(input_shape) > len(
                node['output_padding']):
            output_padding = np.zeros(len(input_shape), dtype=np.int64)
            for i in range(len(node['output_padding'])):
                output_padding[i] = node['output_padding'][i]
            node['output_padding'] = output_padding

        input_spatial_shape = input_shape[node.spatial_dims]
        stride_spatial_shape = node.stride[node.spatial_dims]

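        # Effective (dilated) kernel size: dilation * (kernel - 1) + 1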
        kernel_extent = node.dilation[node.spatial_dims] * (
            node.kernel_spatial - 1) + 1
        # TensorFlow always has the auto_pad attribute, which can be either valid or same_upper
        # In ONNX the auto_pad attribute is deprecated but appears in some models (could be valid, same_upper or same_lower)
        # Caffe does not use the auto_pad attribute
        if node.has_valid('auto_pad') and node.auto_pad != 'explicit' and not node.has_valid('output_spatial_shape'):
            node['pad_spatial_shape'], node['output_spatial_shape'] = tf_window_op_pad_infer(
                input_spatial_shape, kernel_extent, stride_spatial_shape,
                node.auto_pad, node.type == 'Deconvolution')

            pad = np.zeros((len(input_shape), 2), dtype=np.int64)
            pad[node.spatial_dims] = node.pad_spatial_shape
            node.pad = pad
        else:
            pad_spatial_shape = np.add.reduce(node.pad_spatial_shape, axis=1)
            if node.type in ('Convolution', 'BinaryConvolution'):
                float_spatial = Convolution.calc_convolution(
                    input_spatial_shape, stride_spatial_shape,
                    pad_spatial_shape, kernel_extent)
                node['output_spatial_shape'] = shape_array(float_spatial)
            elif node.type == 'Deconvolution':
                # In case of given output_spatial_shape we calculate pads spatial
                if node.has_valid('output_spatial_shape'):
                    if node.has_valid('get_pad'):
                        node['pad'] = node.get_pad(node, input_shape,
                                                   kernel_shape)
                    else:
                        log.debug(
                            'Can\'t calculate paddings due to missing lambda get_pad in {} node'
                            .format(node.id))
                        return
                else:
                    output_padding = node.output_padding[
                        node.spatial_dims] if node.has_valid(
                            'output_padding') else None
                    if output_padding is not None and any(output_padding):
                        pad_spatial_shape -= output_padding
                        for dim in range(len(pad_spatial_shape)):
                            node.pad_spatial_shape[dim][
                                1] -= pad_spatial_shape[dim]

                    float_spatial = Convolution.calc_deconvolution(
                        node, input_spatial_shape, pad_spatial_shape,
                        kernel_extent)
                    node['output_spatial_shape'] = shape_array(float_spatial)
            elif node.type == 'DeformableConvolution':
                # get the output spatial shape from the second input with offsets
                node['output_spatial_shape'] = int64_array(
                    [node.in_node(1).shape[2:4]])
            else:
                raise Error('Unsupported layer type "{}"'.format(node.type))

        # For cases when group attribute wasn't set in extractor we should specify get_group attribute
        # this attribute should store lambda node: ... (check tf convolution extractor)
        if node.has_valid('get_group'):
            node['group'] = node.get_group(node)
        output_shape = shape_array(
            [dynamic_dimension_value for _ in range(len(input_shape))])
        output_shape[node.batch_dims] = input_shape[node.batch_dims]  # pylint: disable=unsupported-assignment-operation
        output_shape[node.spatial_dims] = node.output_spatial_shape  # pylint: disable=unsupported-assignment-operation

        # For cases when output attribute wasn't set in extractor we should specify get_output_feature_dim attribute
        # this attribute should store lambda node: ... (check tf convolution extractor)
        if node.has_valid('get_output_feature_dim'):
            node['output'] = node.get_output_feature_dim(node)
        output_shape[node.channel_dims] = node.output  # pylint: disable=unsupported-assignment-operation
        node['output_shape'] = output_shape

        node.out_port(0).data.set_shape(output_shape)

        # bin attribute is used for pre-processing, but it will be deleted in BlobNormalizer transformation
        # and the blobs (weights, biases) will be represented as inputs to the node
        mark_input_bins(
            node, start_port=1 if node.type != 'DeformableConvolution' else 2)
        assign_dims_to_weights(node.in_node(weights_index),
                               node.kernel_spatial_idx,
                               node.input_feature_channel,
                               node.output_feature_channel, len(kernel_shape))

        PermuteAttrs.create_permute_attrs(
            node,
            attrs=[
                ('pad', 'input:0'),
                ('stride', 'input:0'),
                ('dilation', 'input:0'),
                ('output_shape', 'input:0'),
                ('batch_dims', 'input:0'),
                ('channel_dims', 'input:0'),
                ('spatial_dims', 'input:0'),
                ('kernel_shape', 'input:{}'.format(weights_index)),
                ('kernel_spatial_idx', 'input:{}'.format(weights_index)),
                ('input_feature_channel', 'input:{}'.format(weights_index)),
                ('output_feature_channel', 'input:{}'.format(weights_index)),
            ])

        # Needed to permute Conv weights from the original TF [H, W, C_IN, C_OUT] layout into IE [C_OUT, C_IN, H, W],
        # but for other nodes in the weights subgraph permutations must be turned off
        # by marking them with MarkSubGraphsWithCorrectLayout, even if the graph layout is NCHW.
        PermuteAttrs.set_permutation(
            node.in_node(weights_index), node,
            node.soft_get('get_weights_permute', None))
        PermuteInputs().set_input_permutation(node.in_node(weights_index),
                                              node,
                                              'input:{}'.format(weights_index),
                                              'transpose')
Example 16
 def infer(node: Node):
     copy_shape_infer(node)
     PermuteAttrs.create_permute_attrs(node, attrs=[('axis', 'input:0')])
Example 17
    def infer(node: Node):
        """
        Deconvolution has an input argument that explicitly determines the output shape, so in contrast
        to the forward Conv2d we shouldn't infer the output shape. We just use this output shape as
        an input shape and pass it to the utilities that compute numeric values for padding.
        They also deliver an output shape that is interpreted here as the input shape for convolution.
        We need to check that the real input shape matches the shape inferred by those utility functions.
        """
        output_shape = shape_array(node.in_node(2).value)
        kernel_shape = node.in_port(1).data.get_shape()
        node['kernel_shape'] = kernel_shape
        if output_shape is None or kernel_shape is None or node.spatial_dims is None or node.stride is None:
            return
        # the batch dimension comes from the data input, not from the explicit output-shape input
        output_shape[0] = node.in_port(0).data.get_shape()[0]

        if not node.has_valid('kernel_spatial_idx'):
            node['kernel_spatial_idx'] = np.delete(
                [x for x in range(len(kernel_shape))],
                (node.input_feature_channel, node.output_feature_channel))

        if not node.has_valid('dilation'):
            node['dilation'] = np.full([len(output_shape)], 1, dtype=np.int64)

        if node.has_valid('get_group'):
            node['group'] = node.get_group(node)

        spatial_dims = node.spatial_dims
        output_spatial = shape_array(output_shape[spatial_dims])
        stride_spatial = shape_array(node.stride[spatial_dims])
        node['kernel_spatial'] = shape_array(
            kernel_shape[node.kernel_spatial_idx])
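        # Infer padding from the requested *output* spatial shape; the helper also
        # returns the input shape it implies, which is validated just below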
        node.pad_spatial_shape, input_spatial_for_check = tf_window_op_pad_infer(
            output_spatial, node.kernel_spatial, stride_spatial, node.auto_pad)

        assert compatible_shapes(input_spatial_for_check,
                                 node.in_node(0).shape[spatial_dims])

        pad = np.zeros((len(output_shape), 2), dtype=np.int64)
        pad[spatial_dims] = node.pad_spatial_shape
        node.pad = pad

        node.output = output_shape[node.channel_dims][0]
        node.output_shape = output_shape
        node.out_port(0).data.set_shape(output_shape)

        mark_input_bins(node, ['weights'], 1)
        assign_dims_to_weights(node.in_node(1), node.kernel_spatial_idx,
                               node.input_feature_channel,
                               node.output_feature_channel, len(kernel_shape))

        # OK, now we are sure this is a supported Deconvolution layer
        node.type = 'Deconvolution'
        node.op = 'Deconv2D'

        # Add permute_attrs
        PermuteAttrs.create_permute_attrs(
            node,
            attrs=[
                ('pad', 'input:0'),
                ('stride', 'input:0'),
                ('output_shape', 'input:0'),
                ('batch_dims', 'input:0'),
                ('channel_dims', 'input:0'),
                ('spatial_dims', 'input:0'),
                ('kernel_shape', 'input:1'),
                ('kernel_spatial_idx', 'input:1'),
                ('input_feature_channel', 'input:1'),
                ('output_feature_channel', 'input:1'),
            ])

        # Needed to permute Deconv weights from the original TF [H, W, C_OUT, C_IN] layout into IE [C_IN, C_OUT, H, W],
        # but for other nodes in the weights subgraph permutations must be turned off
        # by marking them with MarkSubGraphsWithCorrectLayout, even if the graph layout is NCHW.
        PermuteAttrs.set_permutation(
            node.in_node(1), node, node.soft_get('get_weights_permute', None))
        PermuteInputs().set_input_permutation(node.in_node(1), node, 'input:1',
                                              'transpose')
        PermuteInputs().set_input_permutation(node.in_node(2), node, 'input:0',
                                              'shape')

        node['force_precision_in_ports'] = {2: 'int64'}
Example 18
    def pool_infer(node: Node):
        input_shape = node.in_node(0).shape
        if input_shape is None:
            return

        if not node.has_valid('spatial_dims'):
            node['spatial_dims'] = np.delete(
                [x for x in range(len(input_shape))],
                [node.batch_dims[0], node.channel_dims[0]])

        input_spatial_shape = input_shape[node.spatial_dims]

        # Set default pad and stride attrs in case none are specified
        if not node.has_valid('pad'):
            node['pad'] = int64_array([[0, 0]
                                       for x in range(len(input_shape))])
        if not node.has_valid('pad_spatial_shape'):
            node['pad_spatial_shape'] = node.pad[node.spatial_dims]

        if not node.has_valid('stride'):
            node['stride'] = int64_array([1 for x in range(len(input_shape))])

        if node.has_and_set('global_pool'):
            node['window'] = np.zeros(len(input_shape), dtype=np.int64)
            node.window[node.spatial_dims] = input_spatial_shape

        if not node.has_valid('dilation'):
            node['dilation'] = np.ones(len(input_shape), dtype=np.float32)

        if not node.has_valid('axis'):
            node['axis'] = 0

        if not node.has_valid('index_element_type'):
            node['index_element_type'] = np.int64

        window_spatial_shape = node.window[node.spatial_dims]
        stride_spatial = node.stride[node.spatial_dims]
        dilation_spatial = node.dilation[node.spatial_dims]
        assert all(stride_spatial), 'Stride can not be zero in node {}'.format(
            node.id)

        if node.has_valid('auto_pad') and node.auto_pad != 'explicit':
            node.pad_spatial_shape, node.output_spatial_shape = tf_window_op_pad_infer(
                input=input_spatial_shape,
                window=window_spatial_shape,
                stride=stride_spatial,
                auto_pad=node.auto_pad,
                dilation=dilation_spatial)
            pad = np.zeros((len(input_shape), 2), dtype=np.int64)
            pad[node.spatial_dims] = node.pad_spatial_shape
            node.pad = pad
        else:

            pad_spatial_shape = np.add.reduce(node.pad_spatial_shape, axis=1)

            rounding = np.floor
            if node.soft_get('pooling_convention') == 'full' or node.soft_get(
                    'rounding_type') == 'ceil':
                rounding = np.ceil

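            # Room left for window placement after padding; the effective (dilated) window size is (w - 1) * d + 1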
            padded_spatial_shape = input_spatial_shape + pad_spatial_shape - (
                (window_spatial_shape - 1) * dilation_spatial + 1)
            if np.any(padded_spatial_shape < 0):
                raise Error("Data after padding has dimension less than window size. "
                            "Possible reason of error is incorrectly specified model input shape(s).")

            output_spatial_shape = shape_array([
                dynamic_dimension_value
                for _ in range(len(padded_spatial_shape))
            ])
            for idx in range(len(padded_spatial_shape)):
                if padded_spatial_shape[idx] is not dynamic_dimension and stride_spatial[idx] is not dynamic_dimension:
                    output_spatial_shape[idx] = int(rounding(padded_spatial_shape[idx] / stride_spatial[idx])) + 1

            original_pads = mo_array([i[1] for i in node.pad_spatial_shape])

            for i in range(len(input_spatial_shape)):
                if original_pads[i] and (output_spatial_shape[i] - 1) * stride_spatial[i] >= \
                        input_spatial_shape[i] + original_pads[i]:
                    output_spatial_shape[i] -= 1

            node['output_spatial_shape'] = output_spatial_shape

        output_shape = input_shape.copy()
        output_shape[node.spatial_dims] = node.output_spatial_shape
        node.out_port(0).data.set_shape(output_shape)

        if len(node.out_ports()) == 2 and not node.out_port(1).disconnected():
            node.out_port(1).data.set_shape(output_shape)

        if node.has_and_set('pool_method') and node['pool_method'] == 'max':
            node['remove_values_output'] = True

        # Add permute_attrs
        PermuteAttrs.create_permute_attrs(node,
                                          attrs=[('pad', 'input:0'),
                                                 ('stride', 'input:0'),
                                                 ('window', 'input:0'),
                                                 ('spatial_dims', 'input:0'),
                                                 ('dilation', 'input:0')])
Example 19
    def _two_inputs_infer(node: Node):
        N = len(node.in_nodes())
        node_name = node.soft_get('name', node.id)

        shapes = [node.in_port(i).data.get_shape() for i in range(N)]
        if any(s is None for s in shapes):
            raise Error('Not all input shapes were defined for {} node'.format(node_name))

        if not node.has_valid('axis'):
            raise Error('The `axis` attribute is missing for node {}; it should be set in the Crop extractor'.format(node_name))

        if not node.has_valid('offset'):
            raise Error('The `offset` attribute is missing for node {}; it should be set in the Crop extractor'.format(node_name))

        input_shape = shapes[0].copy()
        start_axis = get_canonical_axis_index(input_shape, node.axis)
        node.axis = start_axis

        reference_shape = shapes[1].copy()
        if node.has_valid('axes'):
            # The axes parameter contains shape indexes of the second input and shows which
            # of them should be used for the dim attribute.
            input_dim = node.axes
            node.in_port(1).disconnect()
        else:
            input_dim = list(range(0, input_shape.size))

        # set new shape to current shape
        new_shape = input_shape.copy()
        ir_axis = []
        ir_offset = []
        dim = []

        for i in input_dim:
            if i < start_axis:
                new_shape[i] = input_shape[i]
                continue

            crop_offset = 0
            if len(node.offset) == 1:
                crop_offset = node.offset[0]
            elif len(node.offset) > 1:
                crop_offset = node.offset[i - start_axis]

            if input_shape[i] - crop_offset < reference_shape[i]:
                raise Error('The crop for dimension {} is out of bounds in node {}'.format(i, node_name))

            dim.append(reference_shape[i])
            ir_axis.append(i)
            ir_offset.append(crop_offset)
            new_shape[i] = reference_shape[i]

        node.axis = ir_axis
        node.offset = ir_offset
        node['dim'] = dim
        node.out_port(0).data.set_shape(new_shape)

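        # Value propagation: slice the constant input from the start of each cropped axis up to its new size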
        if node.in_node(0).has_valid('value') and \
                not getattr(node.graph.graph['cmd_params'], 'enable_ssd_gluoncv', False):
            out_value = np.copy(node.in_node(0).value)

            slice_indexes = []
            for s in out_value.shape:
                slice_indexes.append(slice(0, s))

            for axis in input_dim:
                slice_indexes[axis] = slice(0, new_shape[axis])
                out_value = out_value[tuple(slice_indexes)]
            node.out_port(0).data.set_value(out_value)

        PermuteAttrs.create_permute_attrs(node, attrs=[('axis', 'input:0')])
Example 20
    def infer(node):
        input_shape = node.in_port(0).data.get_shape()
        assert input_shape is not None
        node.out_port(0).data.set_shape(input_shape)

        PermuteAttrs.create_permute_attrs(node, attrs=[('axis', 'input:0')])
Example 21
    def infer(node):
        name = node.soft_get('name', node.id)

        op = node.soft_get('op', None)
        assert op is not None and op in ['VariadicSplit', 'AttributedVariadicSplit'], \
            'Unexpected `op`={} attribute for Split-like node {}'.format(op, name)

        num_in_ports = 1 if op == 'AttributedVariadicSplit' else 3 if op == 'VariadicSplit' else None
        assert num_in_ports in [1, 3], \
            'VariadicSplitBase supports AttributedVariadicSplit with 1 input and VariadicSplit with 3 inputs, ' \
            'but it is {} for {} node {}'.format(num_in_ports, op, name)

        connected_inputs = {
            idx: port
            for idx, port in node.in_ports().items()
            if not port.disconnected()
        }
        assert len(connected_inputs) == num_in_ports and all([i in connected_inputs for i in range(num_in_ports)]), \
            "{} should have {} connected input ports, but it doesn't for node: `{}`. Ports: {}" \
            "".format(op, num_in_ports, name, connected_inputs)

        input_shape = node.in_port(0).data.get_shape()
        assert input_shape is not None

        axis = node.in_port(1).data.get_value() if op == 'VariadicSplit' else node.soft_get('axis', None)
        assert axis is not None, '{} `axis` is unknown for node {}'.format(
            op, name)
        assert axis.ndim == 0 or (axis.ndim == 1 and axis.shape[0] == 1), \
            '{} `axis` should be a scalar or a tensor with shape [1], but it is not for node {}'.format(op, name)

        split_lengths = node.in_port(2).data.get_value() if op == 'VariadicSplit' else node.soft_get('split_lengths', None)
        assert split_lengths is not None, '{} `split_lengths` is unknown for node {}'.format(
            op, name)

        undefined_elements = np.argwhere(split_lengths == -1).flatten()
        assert undefined_elements.size <= 1, \
            '{} split_lengths=`{}` is a list with output sizes, only one of which could be -1. Node: {}' \
            ''.format(op, split_lengths, name)

        input_elements = input_shape[axis]
        assert undefined_elements.size != 0 or input_elements is dynamic_dimension or \
               input_elements == np.sum(split_lengths), 'The sum of split_lengths=`{}` must match data.shape[axis]=' \
                                                        '`{}`. Node: {}'.format(split_lengths, input_elements, name)

        assert len(split_lengths) >= len([port for i, port in node.out_ports().items() if not port.disconnected()]), \
            'Number of split_lengths=`{}` is less than connected output ports. Node: {}'.format(split_lengths, name)

        # in split_lengths some value can be 0, in this case we will ignore it:
        #     * remove according branch
        #     * remove 0 from split_lengths
        for i in reversed(range(len(split_lengths))):
            if split_lengths[i] == 0:
                if node.out_port(i).disconnected():
                    split_lengths = shape_delete(split_lengths, i)
                    if op == 'VariadicSplit':
                        node.in_port(2).data.set_value(split_lengths)
                    else:
                        node['split_lengths'] = split_lengths
                    delete_out_port(i, node)
                else:
                    log.error(
                        "Zero dimension on {} branch after Split node {}".
                        format(i, node.id))
                    return

        # shape propagation
        idxs, curr_pos = [], 0
        for i, piece in enumerate(split_lengths):
            assert piece >= -1, 'VariadicSplit split_lengths=`{}` must contain only non-negative values or a single -1'.format(
                split_lengths)
            out_shape = input_shape.copy()

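            # A -1 entry takes whatever remains along the axis; np.sum below still includes the -1 itself, hence the +1 correction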
            split_length = piece if piece > -1 else input_elements - (
                np.sum(split_lengths) + 1)
            out_shape[axis] = split_length
            curr_pos = curr_pos + split_length
            idxs.append(curr_pos)

            if not node.out_port(i).disconnected():
                node.out_port(i).data.set_shape(out_shape)

        # value propagation
        input_value = node.in_port(0).data.get_value()
        if input_value is not None:
            split = np.split(input_value, idxs[:-1], axis)
            for i, port in node.out_ports().items():
                if not port.disconnected():
                    port.data.set_value(split[i])

        if op == 'VariadicSplit':
            PermuteInputs().set_input_permutation(node.in_node(1), node,
                                                  'input:0', 'axis')
        elif op == 'AttributedVariadicSplit':
            PermuteAttrs.create_permute_attrs(node,
                                              attrs=[('axis', 'input:0')])
Example 22
def concat_infer(node):
    node_name = node.soft_get('name', node.id)
    if not node.has('axis'):
        N = node.N
        axis_input = node.in_node(N)
        if axis_input.has_valid('value') and axis_input.value.size == 1:
            node['axis'] = axis_input.value.item()
            node.graph.remove_edge(
                axis_input.node,
                node.node)  # TODO add skip attribute instead of deleting
        else:
            raise Error(
                'The axis input does not have a constant value for node "{}"'.format(
                    node_name))
    else:
        N = len(node.in_nodes())

    shapes = [node.in_node(i).shape for i in range(N)]
    if any(s is None for s in shapes):
        raise Error(
            'One of the input shapes is not defined for node "{}"'.format(
                node_name))

    shape = shape_array(shapes[0])

    axis = get_canonical_axis_index(shape, node.axis)
    node.axis = axis

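    # Boolean mask selecting the concat axis; all other dimensions must match across inputs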
    mask = np.zeros_like(shape, dtype=bool)
    mask[axis] = True  # pylint: disable=unsupported-assignment-operation
    not_mask = np.logical_not(mask)  # pylint: disable=assignment-from-no-return
    for s in shapes[1:]:
        s = shape_array(s)
        if np.ma.allequal(shape[not_mask], s[not_mask]):
            shape[mask] += s[mask]
        else:
            raise Error(
                'Concat input shapes do not match for node "{}" with axis {}'.
                format(node_name, axis))

    #  dynamic dimensions in the output (except the concat axis) can be deduced from input shape
    for pos in range(len(shape)):
        if shape[pos] is dynamic_dimension and pos != axis:
            for in_shape in shapes:
                if in_shape[pos] is not dynamic_dimension:
                    shape[pos] = in_shape[pos]

    node.out_port(0).data.set_shape(shape)
    PermuteAttrs.create_permute_attrs(node, attrs=[('axis', 'input:0')])

    values = [node.in_node(i).value for i in range(N)]
    if any([v is None for v in values]):
        return

    # if one of the input values is dynamic, the output tensor type is inferred from one of the fully defined inputs
    output_dtype = np.int64
    for value in values:
        if is_fully_defined(value):
            output_dtype = value.dtype

    if any(not is_fully_defined(v) for v in values):
        node.out_port(0).data.set_value(
            np.ma.concatenate(values, axis=node.axis).astype(output_dtype))
    else:  # when all values are fully defined, the plain np.concatenate below is significantly faster
        node.out_node(0).value = np.concatenate(values, axis=node.axis).astype(
            values[0].dtype, copy=False)
        node.out_node(0).shape = shape_array(node.out_node(0).value.shape)