Example #1
def propagate_conv_transpose(op):
    # type: (ONNXOperation)->typing.Tuple[typing.List[typing.List[int]], typing.List[str]]

    input, filter = op.inputs[:2]

    if 'output_shape' in op.attribs:
        return [infer.copy(op.attribs['output_shape'])], [input.dtype]

    filter_size = filter.shape[2:]
    stride = op.attribs.get('strides', [1] * len(filter_size))
    dilation = op.attribs.get('dilations', [1] * len(filter_size))
    padding = get_concrete_padding(auto_padding=op.attribs.get('auto_pad'),
                                   custom_padding=op.attribs.get('pads'),
                                   upscaled_shape=input.shape[2:],
                                   filter_shape=filter_size,
                                   stride=stride,
                                   dilation=dilation)
    groups = op.attribs.get('group', 1)
    output_padding = op.attribs.get('output_padding', [0] * len(filter_size))

    return [
        infer.conv(input=input.shape,
                   filter=filter_size,
                   padding=padding,
                   stride=stride,
                   dilation=dilation,
                   groups=groups,
                   output_channels=filter.shape[1] * groups,
                   format=infer.Format.NCHW,
                   output_padding=list(
                       zip([0] * len(filter_size), output_padding)),
                   deconv=True)
    ], [input.dtype]
Example #2
def propagate_conv(op):
    # type: (ONNXOperation)->typing.Tuple[typing.List[typing.List[int]], typing.List[str]]

    input, filter = op.inputs[:2]

    filter_size = filter.shape[2:]
    stride = op.attribs.get('strides', [1] * len(filter_size))
    dilation = op.attribs.get('dilations', [1] * len(filter_size))
    padding = get_concrete_padding(auto_padding=op.attribs.get('auto_pad'),
                                   custom_padding=op.attribs.get('pads'),
                                   upscaled_shape=input.shape[2:],
                                   filter_shape=filter_size,
                                   stride=stride,
                                   dilation=dilation)

    return [
        infer.conv(input=input.shape,
                   filter=filter_size,
                   padding=padding,
                   stride=stride,
                   dilation=dilation,
                   groups=op.attribs.get('group', 1),
                   output_channels=filter.shape[0],
                   format=infer.Format.NCHW)
    ], [input.dtype]
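
The different `output_channels` computations in Examples 1 and 2 follow from the ONNX weight layouts: `Conv` stores its weight as (M, C/group, kH, kW), while `ConvTranspose` stores it as (C, M/group, kH, kW). A minimal illustration of that mapping (shapes are made up for the example):

# Illustrative ONNX weight shapes: group = 1, 3 input channels, 16 output channels.
conv_weight_shape = (16, 3, 3, 3)       # Conv: (M, C/group, kH, kW)
deconv_weight_shape = (3, 16, 3, 3)     # ConvTranspose: (C, M/group, kH, kW)
group = 1
assert conv_weight_shape[0] == 16                 # Example 2: filter.shape[0]
assert deconv_weight_shape[1] * group == 16       # Example 1: filter.shape[1] * groups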
Example #3
def convolution_shape(op, transposed):
    return shapes.conv(input=op.inputs[0].shape,
                       filter=op.attribs['kernel_size'],
                       padding=pairs(op.attribs['pad']),
                       stride=op.attribs['stride'],
                       dilation=op.attribs['dilation'],
                       groups=op.attribs['group'],
                       output_channels=op.attribs['num_output'],
                       deconv=transposed,
                       format=shapes.Format.NCHW)
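
Example 3 relies on a `pairs` helper that is not shown. A plausible sketch, assuming Caffe's `pad` attribute holds one symmetric value per spatial dimension while the shape helpers expect (begin, end) tuples:

def pairs(values):
    # Hypothetical helper: symmetric padding [p1, p2] -> [(p1, p1), (p2, p2)]
    return [(v, v) for v in values]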
Example #4
def conv_shape(op):
    # type: (Caffe2Operation)->ShapeResult
    is_nhwc = op.attribs.get('order', 'NCHW').upper() == 'NHWC'
    return infer.conv(
        input=op.inputs[0].shape,
        filter=op.inputs[1].shape[1:-1] if is_nhwc else op.inputs[1].shape[2:],
        padding=caffe2_pads_to_nnef_padding(op.attribs['pads']),
        stride=op.attribs['strides'],
        dilation=op.attribs['dilations'],
        groups=op.attribs['group'],
        format=(infer.Format.NHWC if is_nhwc else infer.Format.NCHW),
        output_channels=op.inputs[1].shape[0]), op.inputs[0].dtype
Example #5
def conv_transpose_shape(op):
    # type: (Caffe2Operation)->ShapeResult
    is_nhwc = op.attribs.get('order', 'NCHW').upper() == 'NHWC'
    return infer.conv(
        input=op.inputs[0].shape,
        filter=op.inputs[1].shape[1:-1] if is_nhwc else op.inputs[1].shape[2:],
        padding=caffe2_pads_to_nnef_padding(op.attribs['pads']),
        stride=op.attribs['strides'],
        dilation=[1] * (op.inputs[0].rank - 2),
        groups=op.attribs['group'],
        format=(infer.Format.NHWC if is_nhwc else infer.Format.NCHW),
        output_channels=op.inputs[1].shape[-1 if is_nhwc else 1] * op.attribs['group'],
        output_padding=[(0, a) for a in op.attribs['adjs']],
        deconv=True), op.inputs[0].dtype
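
Examples 4 and 5 use a `caffe2_pads_to_nnef_padding` helper that is not shown. A plausible sketch, assuming the Caffe2 `pads` attribute lists all leading pads followed by all trailing pads, while the shape-inference helpers expect per-dimension (begin, end) pairs:

def caffe2_pads_to_nnef_padding(pads):
    # Hypothetical sketch: e.g. [pad_t, pad_l, pad_b, pad_r] -> [(pad_t, pad_b), (pad_l, pad_r)]
    assert len(pads) % 2 == 0
    half = len(pads) // 2
    return list(zip(pads[:half], pads[half:]))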
Example #6
def propagate_conv(op, const_value_by_tensor, depthwise):
    # type: (TFOperation, _ConstValueByTensorT, bool)->typing.Tuple[typing.List[typing.List[int]], typing.List[str]]

    input, filter = op.inputs
    format = infer.Format.NCHW if op.attribs["data_format"][1].upper() == "C" else infer.Format.NHWC
    return [infer.conv(
        input=input.shape,
        filter=filter.shape[:-2],
        padding=infer.Padding.SAME_UPPER if op.attribs["padding"].upper() == 'SAME' else infer.Padding.VALID,
        stride=infer.spatial(op.attribs["strides"], format),
        dilation=(infer.spatial(op.attribs["dilations"], format)
                  if 'dilations' in op.attribs
                  else infer.singleton(input.rank - 2)),
        groups=0 if depthwise else 1,
        output_channels=filter.shape[-2] * filter.shape[-1] if depthwise else filter.shape[-1],
        format=format,
    )], [op.attribs['T']]
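
The filter indexing in Example 6 follows the TensorFlow weight layouts: regular convolutions use [kH, kW, in_channels, out_channels] and depthwise convolutions use [kH, kW, in_channels, channel_multiplier], so `filter.shape[:-2]` is the spatial size in both cases. A small check with illustrative shapes:

regular_filter = (3, 3, 8, 16)     # [kH, kW, in_channels, out_channels]
depthwise_filter = (3, 3, 8, 2)    # [kH, kW, in_channels, channel_multiplier]
assert regular_filter[:-2] == depthwise_filter[:-2] == (3, 3)    # spatial filter size
assert regular_filter[-1] == 16                                  # regular: out_channels
assert depthwise_filter[-2] * depthwise_filter[-1] == 16         # depthwise: in_channels * multiplier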
Example #7
def unify_deconv(op):
    # type: (NNEFOperation)->None
    input, filter, bias = op.inputs

    if not op.attribs['stride']:
        op.attribs['stride'] = [1] * (input.rank - 2)
    if not op.attribs['dilation']:
        op.attribs['dilation'] = [1] * (input.rank - 2)

    if not op.attribs['groups']:
        if op.attribs['output_shape']:
            op.attribs['groups'] = op.attribs['output_shape'][1]
        else:
            print("Warning: Planewise deconvolution without output_size, "
                  "assuming that num(input channels) == num(output channels).")
            op.attribs['groups'] = filter.shape[0]

    output_channels = filter.shape[1] * op.attribs['groups']
    if op.attribs['output_shape']:
        assert op.attribs['output_shape'][1] == output_channels

    if not op.attribs['padding']:
        calculated_output_size = [
            i * s for i, s in zip(input.shape[2:], op.attribs['stride'])
        ]
        op.attribs['padding'] = infer.same_padding(
            upscaled_input=calculated_output_size,
            filter=filter.shape[2:],
            stride=op.attribs['stride'],
            dilation=op.attribs['dilation'])
    else:
        calculated_output_size = infer.conv(input=list(input.shape),
                                            filter=filter.shape[2:],
                                            padding=op.attribs['padding'],
                                            stride=op.attribs['stride'],
                                            dilation=op.attribs['dilation'],
                                            groups=op.attribs['groups'],
                                            output_channels=output_channels,
                                            format=infer.Format.NCHW,
                                            deconv=True)[2:]

    if not op.attribs['output_shape']:
        op.attribs['output_shape'] = [input.shape[0], output_channels] + calculated_output_size
Example #8
def nnef_deconv(
        input,  # type: torch.Tensor
        filter,  # type: torch.Tensor
        bias,  # type: torch.Tensor
        border='constant',  # type: str
        padding=None,  # type: Optional[List[Tuple[int, int]]]
        stride=None,  # type: Optional[List[int]]
        dilation=None,  # type: Optional[List[int]]
        output_shape=None,  # type: Optional[List[int]]
        groups=1,  # type: int
):
    # type: (...)->torch.Tensor

    if border != 'constant':
        raise utils.NNEFToolsException(
            "Deconv: '{}' border unsupported.".format(border))

    if output_shape and output_shape[0] != input.shape[0]:
        output_shape = list(output_shape)
        output_shape[0] = input.shape[0]

    rank = len(input.shape)
    if rank not in (3, 4, 5):
        raise utils.NNEFToolsException(
            "Deconvolution is only implemented for 3D, 4D, 5D tensors, given: {}D."
            .format(len(input.shape)))

    spatial_dims = len(input.shape[2:])
    stride = [1] * spatial_dims if not stride else stride
    dilation = [1] * spatial_dims if not dilation else dilation

    if groups == 0:
        if output_shape:
            groups = output_shape[1]
        else:
            # Planewise deconvolution without output_size, assuming that #(input channels) = #(output channels)
            groups = filter.shape[0]

    output_channels = filter.shape[1] * groups
    if output_shape:
        assert output_shape[1] == output_channels

    if not padding:
        output_size = output_shape[2:] if output_shape else [
            i * s for i, s in zip(input.shape[2:], stride)
        ]
        padding = shape_inference.same_padding(upscaled_input=output_size,
                                               filter=filter.shape[2:],
                                               stride=stride,
                                               dilation=dilation)
    else:
        output_size = output_shape[2:] if output_shape else shape_inference.conv(
            input=list(input.shape),
            filter=filter.shape[2:],
            padding=padding,
            stride=stride,
            dilation=dilation,
            groups=groups,
            output_channels=output_channels,
            format=shape_inference.Format.NCHW,
            deconv=True)[2:]

    uncropped_output_size = shape_inference.conv(
        input=list(input.shape),
        filter=filter.shape[2:],
        padding=shape_inference.Padding.VALID,
        stride=stride,
        dilation=dilation,
        groups=groups,
        output_channels=output_channels,
        format=shape_inference.Format.NCHW,
        deconv=True)[2:]

    crop_before = [p for p, _q in padding]
    crop_after = [
        uncropped - out - before for uncropped, out, before in zip(
            uncropped_output_size, output_size, crop_before)
    ]

    if utils.product(bias.size()) == 1:
        bias = bias.reshape(1, 1).expand((1, output_channels))

    deconv = {
        1: F.conv_transpose1d,
        2: F.conv_transpose2d,
        3: F.conv_transpose3d
    }[spatial_dims](input=input,
                    weight=filter,
                    bias=bias.squeeze(dim=0).contiguous(),
                    stride=tuple(stride),
                    padding=0,
                    output_padding=0,
                    groups=groups,
                    dilation=tuple(dilation))

    return nnef_pad(deconv,
                    padding=[(0, 0), (0, 0)] +
                    [(-cb, -ca) for cb, ca in zip(crop_before, crop_after)])
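
A usage sketch for the `nnef_deconv` above, assuming it is run inside its own module (so `nnef_pad`, `shape_inference`, `utils` and `F` = `torch.nn.functional` are in scope); the shapes are illustrative:

import torch

x = torch.randn(1, 8, 16, 16)     # NCHW input
w = torch.randn(8, 4, 3, 3)       # (in_channels, out_channels / groups, kH, kW)
b = torch.zeros(4)

# With no explicit padding, SAME-style auto-padding is derived, so the spatial
# size is simply multiplied by the stride: 16 -> 32.
y = nnef_deconv(x, w, b, stride=[2, 2])
assert tuple(y.shape) == (1, 4, 32, 32)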
Example #9
def partial_convert_conv_deconv(converter, nnef_op, onnx_graph, is_deconv,
                                input, filter, bias, output):
    # type: (Converter, NNEFOperation, ONNXGraph, bool, ONNXTensor, ONNXTensor, ONNXTensor, ONNXTensor)->None

    strides = list(nnef_op.attribs['stride'])
    if not strides:
        strides = [1] * (input.rank - 2)

    dilations = nnef_op.attribs['dilation']
    if not dilations:
        dilations = [1] * (input.rank - 2)

    groups = nnef_op.attribs.get(
        'groups', 1)  # default needed because box does not have groups
    if groups == 0:
        groups = input.shape[1]

    assert nnef_op.attribs['border'] == 'constant'

    if is_deconv:
        pads = nnef_op.attribs['padding']
        if not pads:  # auto pad
            calc_output_size = [i * s for i, s in zip(input.shape[2:], strides)]
            pads = infer.same_padding(upscaled_input=calc_output_size,
                                      filter=filter.shape[2:],
                                      stride=strides,
                                      dilation=dilations)
        else:
            calc_output_size = infer.conv(input=input.shape,
                                          filter=filter.shape[2:],
                                          padding=pads,
                                          stride=strides,
                                          dilation=dilations,
                                          groups=groups,
                                          output_channels=output.shape[1],
                                          format=infer.Format.NCHW,
                                          deconv=True)[2:]
        output_size = output.shape[2:]
        output_padding = [o - c for o, c in zip(output_size, calc_output_size)]
    else:
        pads = nnef_op.attribs['padding']
        if not pads:
            pads = infer.same_padding(upscaled_input=input.shape[2:],
                                      filter=filter.shape[2:],
                                      stride=strides,
                                      dilation=dilations)
        output_padding = [0] * len(pads)

    pads = converter.onnx_pads(pads)

    if bias.is_constant and bias.data == [0.0]:
        inputs = (input, filter)
    else:
        if bias.rank == 2:
            assert bias.shape[0] == 1
            bias = converter.add_squeeze(onnx_graph=onnx_graph,
                                         onnx_tensor=bias,
                                         axes=[0])
        inputs = (input, filter, bias)

    op = ONNXOperation(
        graph=onnx_graph,
        name='ConvTranspose' if is_deconv else 'Conv',
        inputs=inputs,
        attribs=dict(
            kernel_shape=filter.shape[2:],  # Not mandatory, but Caffe2 fails without this
            strides=strides,
            dilations=dilations,
            pads=pads,
            group=groups),
        outputs=output)

    if is_deconv:
        op.attribs['output_padding'] = output_padding
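
`converter.onnx_pads` in Example 9 is not shown. A plausible sketch, assuming it flattens NNEF-style (begin, end) pairs into the ONNX `pads` layout [x1_begin, x2_begin, ..., x1_end, x2_end, ...]:

def onnx_pads(nnef_padding):
    # Hypothetical sketch: [(b1, e1), (b2, e2)] -> [b1, b2, e1, e2]
    return [b for b, _ in nnef_padding] + [e for _, e in nnef_padding]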
Example #10
    def test_conv(self):
        self.assertEqual([10, 30, 30, 16], infer.conv(input=[10, 32, 32, 3],
                                                      filter=[3, 3],
                                                      padding=infer.Padding.VALID,
                                                      stride=[1, 1],
                                                      dilation=[1, 1],
                                                      groups=1,
                                                      spatial_begin=infer.spatial_begin(infer.Format.NHWC),
                                                      channel_axis=infer.channel_axis(infer.Format.NHWC),
                                                      output_channels=16))

        self.assertEqual([10, 16, 30, 30], infer.conv(input=[10, 3, 32, 32],
                                                      filter=[3, 3],
                                                      padding=[(0, 0), (0, 0)],
                                                      stride=[1, 1],
                                                      dilation=[1, 1],
                                                      groups=1,
                                                      spatial_begin=infer.spatial_begin(infer.Format.NCHW),
                                                      channel_axis=infer.channel_axis(infer.Format.NCHW),
                                                      output_channels=16))

        self.assertEqual([10, 3, 32, 32], infer.conv(input=[10, 16, 30, 30],
                                                     filter=[3, 3],
                                                     padding=[(0, 0), (0, 0)],
                                                     stride=[1, 1],
                                                     dilation=[1, 1],
                                                     groups=1,
                                                     format=infer.Format.NCHW,
                                                     output_channels=3,
                                                     deconv=True))

        self.assertEqual([10, 3, 32, 32], infer.conv(input=[10, 16, 32, 32],
                                                     filter=[3, 3],
                                                     padding=infer.Padding.SAME_UPPER,
                                                     stride=[1, 1],
                                                     dilation=[1, 1],
                                                     groups=1,
                                                     format=infer.Format.NCHW,
                                                     output_channels=3))

        self.assertEqual([10, 6, 32, 32], infer.conv(input=[10, 3, 32, 32],
                                                     filter=[3, 3],
                                                     padding=infer.Padding.SAME_LOWER,
                                                     stride=[1, 1],
                                                     dilation=[1, 1],
                                                     groups=0,
                                                     format=infer.Format.NCHW,
                                                     output_channels=6))

        self.assertEqual([10, 16, 32, 32], infer.conv(input=[10, 3, 32, 32],
                                                      filter=[3, 3],
                                                      padding=infer.Padding.SAME_UPPER,
                                                      stride=[1, 1],
                                                      dilation=[1, 1],
                                                      groups=1,
                                                      format=infer.Format.NCHW,
                                                      output_channels=16,
                                                      deconv=True))

        self.assertEqual([10, 16, 64, 64], infer.conv(input=[10, 3, 32, 32],
                                                      filter=[3, 3],
                                                      padding=infer.Padding.SAME_UPPER,
                                                      stride=[2, 2],
                                                      dilation=[1, 1],
                                                      groups=1,
                                                      format=infer.Format.NCHW,
                                                      output_channels=16,
                                                      deconv=True))

        self.assertEqual([10, 16, 65, 65], infer.conv(input=[10, 3, 32, 32],
                                                      filter=[3, 3],
                                                      padding=infer.Padding.SAME_UPPER,
                                                      stride=[2, 2],
                                                      dilation=[1, 1],
                                                      groups=1,
                                                      format=infer.Format.NCHW,
                                                      output_channels=16,
                                                      output_padding=[(0, 1), (0, 1)],
                                                      deconv=True))
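
The expected shapes in the test above follow standard convolution arithmetic. A minimal single-dimension sketch (hypothetical helper names, not part of `infer`):

def conv_out_size(in_size, filter_size, stride, dilation, pad_total):
    # Downscaling convolution output size for one spatial dimension.
    effective_filter = dilation * (filter_size - 1) + 1
    return (in_size + pad_total - effective_filter) // stride + 1

def deconv_out_size(in_size, filter_size, stride, dilation, pad_total, out_pad=0):
    # Transposed convolution inverts the formula above and adds output_padding.
    effective_filter = dilation * (filter_size - 1) + 1
    return (in_size - 1) * stride + effective_filter - pad_total + out_pad

assert conv_out_size(32, 3, 1, 1, pad_total=0) == 30                # VALID cases above
assert deconv_out_size(32, 3, 2, 1, pad_total=1) == 64              # SAME_UPPER, stride 2
assert deconv_out_size(32, 3, 2, 1, pad_total=1, out_pad=1) == 65   # with output_padding (0, 1)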