Example #1
    def test_same_padding(self):
        self.assertEqual([(0, 1)],
                         infer.same_padding(upscaled_input=[32],
                                            filter=[3],
                                            stride=[2],
                                            dilation=[1]))

        self.assertEqual([(1, 0)],
                         infer.same_padding(upscaled_input=[32],
                                            filter=[3],
                                            stride=[2],
                                            dilation=[1],
                                            left_bigger=True))

        self.assertEqual([(2, 3)],
                         infer.same_padding(upscaled_input=[32],
                                            filter=[3],
                                            stride=[2],
                                            dilation=[3]))

        self.assertEqual([(0, 0)],
                         infer.same_padding(upscaled_input=[2],
                                            filter=[1],
                                            stride=[2],
                                            dilation=[1]))
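
The expected values follow the usual SAME-padding rule: the output length is ceil(input / stride), and the total padding is max(0, (output - 1) * stride + (filter - 1) * dilation + 1 - input), split with the larger half on the right unless left_bigger is set. A minimal single-axis sketch that reproduces the values asserted above (the helper name same_padding_1d is hypothetical, not part of infer):

def same_padding_1d(input, filter, stride, dilation, left_bigger=False):
    # Effective filter extent once dilation is applied.
    dilated_filter = (filter - 1) * dilation + 1
    # SAME padding keeps output = ceil(input / stride).
    output = (input + stride - 1) // stride
    total = max(0, (output - 1) * stride + dilated_filter - input)
    smaller, bigger = total // 2, total - total // 2
    return (bigger, smaller) if left_bigger else (smaller, bigger)

assert same_padding_1d(32, 3, 2, 1) == (0, 1)
assert same_padding_1d(32, 3, 2, 1, left_bigger=True) == (1, 0)
assert same_padding_1d(32, 3, 2, 3) == (2, 3)
assert same_padding_1d(2, 1, 2, 1) == (0, 0)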
Example #2
def unify_debox(op):
    # type: (NNEFOperation)->None
    input = op.inputs[0]
    if not op.attribs['stride']:
        op.attribs['stride'] = [1] * input.rank
    if not op.attribs['dilation']:
        op.attribs['dilation'] = [1] * input.rank

    if not op.attribs['padding']:
        calculated_output_shape = [
            i * s for i, s in zip(input.shape, op.attribs['stride'])
        ]
        op.attribs['padding'] = infer.same_padding(
            upscaled_input=calculated_output_shape,
            filter=op.attribs['size'],
            stride=op.attribs['stride'],
            dilation=op.attribs['dilation'])
    else:
        calculated_output_shape = infer.sliding_window(
            input=input.shape,
            filter=op.attribs['size'],
            padding=op.attribs['padding'],
            stride=op.attribs['stride'],
            dilation=op.attribs['dilation'],
            upscale=True)
    if not op.attribs['output_shape']:
        op.attribs['output_shape'] = calculated_output_shape
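
When explicit padding is given, sliding_window with upscale=True presumably inverts the pooling formula, i.e. output = (input - 1) * stride + (filter - 1) * dilation + 1 - total_padding; for input 16, stride 2, filter 3, dilation 1 and padding (0, 1) this again yields 32, consistent with the i * s shape used in the auto-padding branch.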
Example #3
def partial_convert_pool(converter,
                         nnef_op,
                         onnx_graph,
                         target_name,
                         input,
                         outputs,
                         force_constant=False):
    # type: (Converter, NNEFOperation, ONNXGraph, str, ONNXTensor, typing.Tuple[ONNXTensor, ...], bool)->None
    if nnef_op.name == 'argmax_pool':
        outputs = (ONNXTensor(graph=onnx_graph,
                              shape=list(outputs[0].shape),
                              dtype=input.dtype), ) + tuple(outputs)

    strides = list(nnef_op.attribs['stride'])
    if not strides:
        strides = [1] * input.rank
    dilations = nnef_op.attribs['dilation']
    if not dilations:
        dilations = [1] * input.rank

    assert nnef_op.attribs['border'] in ['constant', 'ignore']

    pads = nnef_op.attribs['padding']
    if not pads:
        pads = infer.same_padding(upscaled_input=input.shape[2:],
                                  filter=nnef_op.attribs['size'][2:],
                                  stride=strides[2:],
                                  dilation=dilations[2:])

    assert pads[:2] == [
        (0, 0), (0, 0)
    ], "Padding in batch and channel dimensions is not supported in ONNX"
    pads = pads[2:]
    pads = converter.onnx_pads(pads)

    assert nnef_op.attribs['size'][:2] == strides[:2] == dilations[:2] == [1, 1], \
        'Pooling in batch and channel dimensions is not supported in ONNX'
    strides = strides[2:]
    dilations = dilations[2:]

    assert all(
        d == 1
        for d in dilations), 'Dilation is not supported for pooling in ONNX'

    onnx_op = ONNXOperation(graph=onnx_graph,
                            name=target_name,
                            inputs=input,
                            attribs=dict(
                                kernel_shape=nnef_op.attribs['size'][2:],
                                pads=pads,
                                strides=strides),
                            outputs=outputs)

    if target_name == 'AveragePool':
        onnx_op.attribs['count_include_pad'] = 1 if (
            nnef_op.attribs['border'] == 'constant' or force_constant) else 0
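
converter.onnx_pads reorders NNEF's per-axis (begin, end) pairs into ONNX's flat pads layout, which lists all begin values first and then all end values. A hedged sketch of that conversion (illustrative only, not the converter's actual implementation):

def onnx_pads_from_nnef(nnef_padding):
    # NNEF: [(b0, e0), (b1, e1), ...]  ->  ONNX: [b0, b1, ..., e0, e1, ...]
    return [b for b, _e in nnef_padding] + [e for _b, e in nnef_padding]

assert onnx_pads_from_nnef([(0, 1), (2, 3)]) == [0, 2, 1, 3]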
Example #4
def _evaluate_max_pool_or_box_params(input_shape, size, padding, stride,
                                     dilation):
    rank = len(input_shape)
    stride = [1] * rank if not stride else stride
    dilation = [1] * rank if not dilation else dilation
    padding = shape_inference.same_padding(
        upscaled_input=input_shape,
        filter=size,
        stride=stride,
        dilation=dilation) if not padding else padding
    return padding, stride, dilation
Example #5
def unify_box_and_pool(op):
    # type: (NNEFOperation)->None
    input = op.inputs[0]
    if not op.attribs['stride']:
        op.attribs['stride'] = [1] * input.rank
    if not op.attribs['dilation']:
        op.attribs['dilation'] = [1] * input.rank
    if not op.attribs['padding']:
        op.attribs['padding'] = infer.same_padding(
            upscaled_input=input.shape,
            filter=op.attribs['size'],
            stride=op.attribs['stride'],
            dilation=op.attribs['dilation'])
Example #6
def get_concrete_padding(auto_padding, custom_padding, upscaled_shape,
                         filter_shape, stride, dilation):
    if auto_padding in [None, '', 'NOTSET']:
        if custom_padding is None:
            return [(0, 0)] * len(upscaled_shape)
        return to_nnef_padding(custom_padding)
    else:
        assert custom_padding is None
        if auto_padding == 'SAME_UPPER':
            return infer.same_padding(upscaled_input=upscaled_shape,
                                      filter=filter_shape,
                                      stride=stride,
                                      dilation=dilation,
                                      left_bigger=False)
        elif auto_padding == 'SAME_LOWER':
            return infer.same_padding(upscaled_input=upscaled_shape,
                                      filter=filter_shape,
                                      stride=stride,
                                      dilation=dilation,
                                      left_bigger=True)
        elif auto_padding == 'VALID':
            return infer.valid_padding(rank=len(upscaled_shape))
        else:
            assert False, "Unexpected padding type: {}".format(auto_padding)
Example #7
def unify_conv(op):
    # type: (NNEFOperation)->None
    input, filter, bias = op.inputs

    if not op.attribs['stride']:
        op.attribs['stride'] = [1] * (input.rank - 2)
    if not op.attribs['dilation']:
        op.attribs['dilation'] = [1] * (input.rank - 2)
    if not op.attribs['padding']:
        op.attribs['padding'] = infer.same_padding(
            upscaled_input=input.shape[2:],
            filter=filter.shape[2:],
            stride=op.attribs["stride"],
            dilation=op.attribs["dilation"])
    if not op.attribs['groups']:
        op.attribs['groups'] = input.shape[1]
Example #8
def convert_desample(converter, nnef_op, onnx_graph):
    # type: (Converter, NNEFOperation, ONNXGraph)->None

    input, indices = converter.converted_tensors(nnef_op.inputs)
    output = converter.converted_tensor(nnef_op.output)

    strides = list(nnef_op.attribs['stride'])
    if not strides:
        strides = [1] * input.rank
    dilations = nnef_op.attribs['dilation']
    if not dilations:
        dilations = [1] * input.rank

    assert nnef_op.attribs['border'] in ['constant', 'ignore']

    pads = nnef_op.attribs['padding']
    if not pads:  # auto pad
        # Upscaled size = input * stride over all dims (NNEF pooling attribs also cover N and C).
        calc_output_size = [i * s for i, s in zip(input.shape, strides)]
        pads = infer.same_padding(upscaled_input=calc_output_size,
                                  filter=nnef_op.attribs['size'],
                                  stride=strides,
                                  dilation=dilations)

    assert pads[:2] == [
        (0, 0), (0, 0)
    ], "Padding in batch and channel dimensions is not supported in ONNX"
    pads = pads[2:]
    pads = converter.onnx_pads(pads)

    assert nnef_op.attribs['size'][:2] == strides[:2] == dilations[:2] == [1, 1], \
        'Pooling in batch and channel dimensions is not supported in ONNX'
    strides = strides[2:]
    dilations = dilations[2:]

    assert all(
        d == 1
        for d in dilations), 'Dilation is not supported for pooling in ONNX'

    ONNXOperation(graph=onnx_graph,
                  name='MaxUnpool',
                  inputs=(input, indices),
                  attribs=dict(kernel_shape=nnef_op.attribs['size'][2:],
                               pads=pads,
                               strides=strides),
                  outputs=output)
Example #9
def nnef_conv(
        input,  # type: torch.Tensor
        filter,  # type: torch.Tensor
        bias,  # type: torch.Tensor
        border='constant',  # type: str
        padding=None,  # type: Optional[List[Tuple[int, int]]]
        stride=None,  # type: Optional[List[int]]
        dilation=None,  # type: Optional[List[int]]
        groups=1,  # type: int
):
    # type: (...)->torch.Tensor

    if len(input.shape) not in (3, 4, 5):
        raise utils.NNEFToolsException(
            "Convolution is only implemented for 3D, 4D, 5D tensors, given: {}D."
            .format(len(input.shape)))

    bias = bias.reshape(1, 1).expand(
        (1, filter.shape[0])) if utils.product(bias.size()) == 1 else bias

    spatial_dims = len(input.shape[2:])
    groups = input.shape[1] if groups == 0 else groups
    stride = [1] * spatial_dims if not stride else stride
    dilation = [1] * spatial_dims if not dilation else dilation
    padding = shape_inference.same_padding(
        upscaled_input=input.shape[2:],
        filter=filter.shape[2:],
        stride=stride,
        dilation=dilation) if not padding else padding

    pad = nnef_pad(input=input, padding=[(0, 0)] * 2 + padding, border=border)
    conv = {
        1: F.conv1d,
        2: F.conv2d,
        3: F.conv3d
    }[spatial_dims](input=pad,
                    weight=filter,
                    bias=bias.squeeze(dim=0).contiguous(),
                    stride=tuple(stride),
                    padding=0,
                    dilation=tuple(dilation),
                    groups=groups)

    return conv
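
A hedged usage sketch for nnef_conv (assumes torch is installed and that nnef_pad and utils from the same module are in scope; shapes are NCHW):

import torch

x = torch.randn(1, 3, 32, 32)   # N=1, C=3, 32x32 input
w = torch.randn(8, 3, 3, 3)     # 8 output channels, 3x3 kernel
b = torch.zeros(1, 8)
y = nnef_conv(x, w, b, stride=[2, 2])  # padding=None, so SAME padding is inferred
print(y.shape)                  # expected: torch.Size([1, 8, 16, 16])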
Example #10
def unify_deconv(op):
    # type: (NNEFOperation)->None
    input, filter, bias = op.inputs

    if not op.attribs['stride']:
        op.attribs['stride'] = [1] * (input.rank - 2)
    if not op.attribs['dilation']:
        op.attribs['dilation'] = [1] * (input.rank - 2)

    if not op.attribs['groups']:
        if op.attribs['output_shape']:
            op.attribs['groups'] = op.attribs['output_shape'][1]
        else:
            print("Warning: Planewise deconvolution without output_size, "
                  "assuming that num(input channels) == num(output channels).")
            op.attribs['groups'] = filter.shape[0]

    output_channels = filter.shape[1] * op.attribs['groups']
    if op.attribs['output_shape']:
        assert op.attribs['output_shape'][1] == output_channels

    if not op.attribs['padding']:
        calculated_output_size = [
            i * s for i, s in zip(input.shape[2:], op.attribs['stride'])
        ]
        op.attribs['padding'] = infer.same_padding(
            upscaled_input=calculated_output_size,
            filter=filter.shape[2:],
            stride=op.attribs['stride'],
            dilation=op.attribs['dilation'])
    else:
        calculated_output_size = infer.conv(input=list(input.shape),
                                            filter=filter.shape[2:],
                                            padding=op.attribs['padding'],
                                            stride=op.attribs['stride'],
                                            dilation=op.attribs['dilation'],
                                            groups=op.attribs['groups'],
                                            output_channels=output_channels,
                                            format=infer.Format.NCHW,
                                            deconv=True)[2:]

    if not op.attribs['output_shape']:
        op.attribs['output_shape'] = [input.shape[0], output_channels
                                      ] + calculated_output_size
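
For deconvolution the spatial output defaults to input * stride when padding is omitted, and same_padding is applied to that upscaled size. For example, a spatial input of 16 with stride 2 and a 3-wide filter gives a calculated output size of 32 and an inferred padding of (0, 1) per axis, the same values exercised by the test in Example #1.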
Example #11
def nnef_desample(
        input,  # type: torch.Tensor
        index,  # type: torch.Tensor
        size,  # type: List[int]
        border='constant',  # type: str
        padding=None,  # type: Optional[List[Tuple[int, int]]]
        stride=None,  # type: Optional[List[int]]
        dilation=None,  # type: Optional[List[int]]
        output_shape=None,  # type: Optional[List[int]]
):
    # type: (...)->torch.Tensor

    if output_shape and output_shape[0] != input.shape[0]:
        output_shape = list(output_shape)
        output_shape[0] = input.shape[0]

    input_shape = list(input.shape)
    rank = len(input_shape)
    spatial_dims = len(input_shape[2:])

    if len(input_shape) not in (3, 4, 5):
        raise utils.NNEFToolsException(
            "Desample is only implemented for 3D, 4D, 5D tensors, given: {}D.".
            format(len(input_shape)))

    if size and size[:2] != [1, 1]:
        raise utils.NNEFToolsException(
            "Desample is only implemented for size = 1 in N and C dimensions.")
    if padding and padding[:2] != [(0, 0), (0, 0)]:
        raise utils.NNEFToolsException(
            "Desample is only implemented for padding = (0, 0) in N and C dimensions."
        )
    if stride and stride[:2] != [1, 1]:
        raise utils.NNEFToolsException(
            "Desample is only implemented for stride = 1 in N and C dimensions."
        )
    if dilation and not all(d == 1 for d in dilation):
        raise utils.NNEFToolsException(
            "Desample is only implemented for dilation = 1.")

    stride = [1] * rank if not stride else stride
    dilation = [1] * rank if not dilation else dilation

    if not padding:
        calculated_output_shape = [i * s for i, s in zip(input_shape, stride)]
        padding = shape_inference.same_padding(
            upscaled_input=calculated_output_shape,
            filter=size,
            stride=stride,
            dilation=dilation)
    else:
        calculated_output_shape = shape_inference.sliding_window(
            input=input_shape,
            filter=size,
            padding=padding,
            stride=stride,
            dilation=dilation,
            upscale=True)

    output_shape = output_shape if output_shape else calculated_output_shape
    padded_output_shape = [
        s + p + q for s, (p, q) in zip(output_shape, padding)
    ]
    unpooled = {
        1: F.max_unpool1d,
        2: F.max_unpool2d,
        3: F.max_unpool3d
    }[spatial_dims](input=input,
                    indices=index,
                    kernel_size=size[2:],
                    stride=stride[2:],
                    padding=0,
                    output_size=padded_output_shape)
    return nnef_slice(unpooled,
                      axes=list(range(rank)),
                      begin=[p for p, _q in padding],
                      end=[p + s for (p, _q), s in zip(padding, output_shape)])
Example #12
def nnef_deconv(
        input,  # type: torch.Tensor
        filter,  # type: torch.Tensor
        bias,  # type: torch.Tensor
        border='constant',  # type: str
        padding=None,  # type: Optional[List[Tuple[int, int]]]
        stride=None,  # type: Optional[List[int]]
        dilation=None,  # type: Optional[List[int]]
        output_shape=None,  # type: Optional[List[int]]
        groups=1,  # type: int
):
    # type: (...)->torch.Tensor

    if border != 'constant':
        raise utils.NNEFToolsException(
            "Deconv: '{}' border unsupported.".format(border))

    if output_shape and output_shape[0] != input.shape[0]:
        output_shape = list(output_shape)
        output_shape[0] = input.shape[0]

    rank = len(input.shape)
    if rank not in (3, 4, 5):
        raise utils.NNEFToolsException(
            "Deconvolution is only implemented for 3D, 4D, 5D tensors, given: {}D."
            .format(len(input.shape)))

    spatial_dims = len(input.shape[2:])
    stride = [1] * spatial_dims if not stride else stride
    dilation = [1] * spatial_dims if not dilation else dilation

    if groups == 0:
        if output_shape:
            groups = output_shape[1]
        else:
            # Planewise deconvolution without output_size, assuming that #(input channels) = #(output channels)
            groups = filter.shape[0]

    output_channels = filter.shape[1] * groups
    if output_shape:
        assert output_shape[1] == output_channels

    if not padding:
        output_size = output_shape[2:] if output_shape else [
            i * s for i, s in zip(input.shape[2:], stride)
        ]
        padding = shape_inference.same_padding(upscaled_input=output_size,
                                               filter=filter.shape[2:],
                                               stride=stride,
                                               dilation=dilation)
    else:
        output_size = output_shape[
            2:] if output_shape else shape_inference.conv(
                input=list(input.shape),
                filter=filter.shape[2:],
                padding=padding,
                stride=stride,
                dilation=dilation,
                groups=groups,
                output_channels=output_channels,
                format=shape_inference.Format.NCHW,
                deconv=True)[2:]

    uncropped_output_size = shape_inference.conv(
        input=list(input.shape),
        filter=filter.shape[2:],
        padding=shape_inference.Padding.VALID,
        stride=stride,
        dilation=dilation,
        groups=groups,
        output_channels=output_channels,
        format=shape_inference.Format.NCHW,
        deconv=True)[2:]

    crop_before = [p for p, _q in padding]
    crop_after = [
        uncropped - out - before for uncropped, out, before in zip(
            uncropped_output_size, output_size, crop_before)
    ]

    bias = bias.reshape(1, 1).expand(
        (1, output_channels)) if utils.product(bias.size()) == 1 else bias

    deconv = {
        1: F.conv_transpose1d,
        2: F.conv_transpose2d,
        3: F.conv_transpose3d
    }[spatial_dims](input=input,
                    weight=filter,
                    bias=bias.squeeze(dim=0).contiguous(),
                    stride=tuple(stride),
                    padding=0,
                    output_padding=0,
                    groups=groups,
                    dilation=tuple(dilation))

    return nnef_pad(deconv,
                    padding=[(0, 0), (0, 0)] +
                    [(-cb, -ca) for cb, ca in zip(crop_before, crop_after)])
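
The final nnef_pad with negative amounts crops the VALID-sized transposed-convolution result down to the requested output. Worked through for a spatial input of 16 with stride 2, filter 3 and dilation 1: the uncropped VALID output is (16 - 1) * 2 + 3 = 33, the SAME output is 16 * 2 = 32, same_padding on that upscaled size gives (0, 1), so crop_before = 0 and crop_after = 33 - 32 - 0 = 1, i.e. one trailing row/column is removed per axis.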
Example #13
def partial_convert_conv_deconv(converter, nnef_op, onnx_graph, is_deconv,
                                input, filter, bias, output):
    # type: (Converter, NNEFOperation, ONNXGraph, bool, ONNXTensor, ONNXTensor, ONNXTensor, ONNXTensor)->None

    strides = list(nnef_op.attribs['stride'])
    if not strides:
        strides = [1] * (input.rank - 2)

    dilations = nnef_op.attribs['dilation']
    if not dilations:
        dilations = [1] * (input.rank - 2)

    groups = nnef_op.attribs.get(
        'groups', 1)  # default needed because box does not have groups
    if groups == 0:
        groups = input.shape[1]

    assert nnef_op.attribs['border'] == 'constant'

    if is_deconv:
        pads = nnef_op.attribs['padding']
        if not pads:  # auto pad
            calc_output_size = [i * s for i, s in zip(input.shape[2:], strides)]
            pads = infer.same_padding(upscaled_input=calc_output_size,
                                      filter=filter.shape[2:],
                                      stride=strides,
                                      dilation=dilations)
        else:
            calc_output_size = infer.conv(input=input.shape,
                                          filter=filter.shape[2:],
                                          padding=pads,
                                          stride=strides,
                                          dilation=dilations,
                                          groups=groups,
                                          output_channels=output.shape[1],
                                          format=infer.Format.NCHW,
                                          deconv=True)[2:]
        output_size = output.shape[2:]
        output_padding = [o - c for o, c in zip(output_size, calc_output_size)]
    else:
        pads = nnef_op.attribs['padding']
        if not pads:
            pads = infer.same_padding(upscaled_input=input.shape[2:],
                                      filter=filter.shape[2:],
                                      stride=strides,
                                      dilation=dilations)
        output_padding = [0] * len(pads)

    pads = converter.onnx_pads(pads)

    if bias.is_constant and bias.data == [0.0]:
        inputs = (input, filter)
    else:
        if bias.rank == 2:
            assert bias.shape[0] == 1
            bias = converter.add_squeeze(onnx_graph=onnx_graph,
                                         onnx_tensor=bias,
                                         axes=[0])
        inputs = (input, filter, bias)

    op = ONNXOperation(
        graph=onnx_graph,
        name='ConvTranspose' if is_deconv else 'Conv',
        inputs=inputs,
        attribs=dict(
            kernel_shape=filter.shape[2:],  # Not mandatory, but Caffe2 fails without this
            strides=strides,
            dilations=dilations,
            pads=pads,
            group=groups),
        outputs=output)

    if is_deconv:
        op.attribs['output_padding'] = output_padding