Example #1
def convert_box(converter, nnef_op, onnx_graph):
    # type: (Converter, NNEFOperation, ONNXGraph)->None
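    # Converts the NNEF 'box' operation (sliding-window sum) to ONNX. These examples
    # assume numpy is imported as np and that ONNXOperation, ONNXTensor, and the
    # shape-inference helper 'infer' from the surrounding converter module are in scope.
    #   - a window of size 1 in every dimension degenerates to padding only -> 'Pad'
    #   - normalize=True is exactly an 'AveragePool'
    #   - otherwise emit 'AveragePool' (counting padded zeros) and multiply the
    #     result by the window volume to recover the un-normalized sum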

    input, output = converter.converted_tensors((nnef_op.input, nnef_op.output))

    if nnef_op.attribs['size'] == [1] * input.rank:
        onnx_op = ONNXOperation(graph=onnx_graph,
                                name='Pad',
                                inputs=input,
                                attribs=dict(mode=converter.onnx_pad_mode(nnef_op.attribs['border']),
                                             pads=converter.onnx_pads(nnef_op.attribs['padding'])),
                                outputs=output)

        if onnx_op.attribs['mode'] == 'constant':
            onnx_op.attribs['value'] = 0.0
        return

    if nnef_op.attribs['normalize']:
        partial_convert_pool(converter, nnef_op, onnx_graph,
                             target_name='AveragePool', input=input, outputs=(output,))
    else:
        temporary = ONNXTensor(graph=onnx_graph, shape=list(output.shape), dtype=output.dtype)
        partial_convert_pool(converter, nnef_op, onnx_graph,
                             target_name='AveragePool', input=input, outputs=(temporary,), force_constant=True)
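        # Undo the averaging: multiply by the window volume so the result is the
        # plain (un-normalized) sliding-window sum that 'box' specifies.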
        ONNXOperation(
            graph=onnx_graph,
            name='Mul',
            inputs=(temporary,
                    converter.constant_0d_tensor(onnx_graph, float(np.prod(nnef_op.attribs['size'])), 'FLOAT')),
            outputs=output)
Example #2
def partial_convert_pool(converter,
                         nnef_op,
                         onnx_graph,
                         target_name,
                         input,
                         outputs,
                         force_constant=False):
    # type: (Converter, NNEFOperation, ONNXGraph, str, ONNXTensor, typing.Tuple[ONNXTensor, ...], bool)->None
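    # Shared lowering of NNEF pooling-style ops to a single ONNX pooling node
    # (target_name selects the ONNX op). For 'argmax_pool', ONNX max pooling emits
    # values before indices, so a throw-away values tensor is prepended to the
    # outputs. force_constant makes AveragePool count padded zeros even when the
    # NNEF border mode is 'ignore' (used by convert_box above).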
    if nnef_op.name == 'argmax_pool':
        outputs = (ONNXTensor(graph=onnx_graph,
                              shape=list(outputs[0].shape),
                              dtype=input.dtype), ) + tuple(outputs)

    strides = list(nnef_op.attribs['stride'])
    if not strides:
        strides = [1] * input.rank
    dilations = nnef_op.attribs['dilation']
    if not dilations:
        dilations = [1] * input.rank

    assert nnef_op.attribs['border'] in ['constant', 'ignore']

    pads = nnef_op.attribs['padding']
    if not pads:
        # Infer padding over all dimensions so pads has the same full-rank layout
        # as an explicit 'padding' attribute (batch/channel entries come out as (0, 0)).
        pads = infer.same_padding(upscaled_input=input.shape,
                                  filter=nnef_op.attribs['size'],
                                  stride=strides,
                                  dilation=dilations)

    assert pads[:2] == [
        (0, 0), (0, 0)
    ], "Padding in batch and channel dimensions is not supported in ONNX"
    pads = pads[2:]
    pads = converter.onnx_pads(pads)

    assert nnef_op.attribs['size'][:2] == strides[:2] == dilations[:2] == [1, 1], \
        'Pooling in batch and channel dimensions is not supported in ONNX'
    strides = strides[2:]
    dilations = dilations[2:]

    assert all(d == 1 for d in dilations), \
        'Dilation is not supported for pooling in ONNX'

    onnx_op = ONNXOperation(graph=onnx_graph,
                            name=target_name,
                            inputs=input,
                            attribs=dict(
                                kernel_shape=nnef_op.attribs['size'][2:],
                                pads=pads,
                                strides=strides),
                            outputs=outputs)

    if target_name == 'AveragePool':
        onnx_op.attribs['count_include_pad'] = 1 if (
            nnef_op.attribs['border'] == 'constant' or force_constant) else 0
Example #3
def convert_pad(converter, nnef_op, onnx_graph):
    # type: (Converter, NNEFOperation, ONNXGraph)->None
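    # Converts NNEF 'pad' to ONNX 'Pad', mapping the NNEF border mode to the ONNX
    # pad mode and translating the per-dimension padding via converter.onnx_pads.
    # ONNX 'constant' mode takes an explicit fill value, set to 0.0 below.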

    input, output = converter.converted_tensors((nnef_op.input, nnef_op.output))

    onnx_op = ONNXOperation(graph=onnx_graph,
                            name='Pad',
                            inputs=input,
                            attribs=dict(mode=converter.onnx_pad_mode(nnef_op.attribs['border']),
                                         pads=converter.onnx_pads(nnef_op.attribs['padding'])),
                            outputs=output)

    if onnx_op.attribs['mode'] == 'constant':
        onnx_op.attribs['value'] = 0.0
Example #4
def convert_desample(converter, nnef_op, onnx_graph):
    # type: (Converter, NNEFOperation, ONNXGraph)->None
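    # Converts NNEF 'desample' (the inverse of max pooling, driven by the indices
    # produced by the pooling op) to ONNX 'MaxUnpool', re-deriving the pooling
    # geometry (kernel, strides, pads) from the op's attributes. Only spatial
    # pooling without dilation is representable in ONNX, hence the assertions.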

    input, indices = converter.converted_tensors(nnef_op.inputs)
    output = converter.converted_tensor(nnef_op.output)

    strides = list(nnef_op.attribs['stride'])
    if not strides:
        strides = [1] * input.rank
    dilations = nnef_op.attribs['dilation']
    if not dilations:
        dilations = [1] * input.rank

    assert nnef_op.attribs['border'] in ['constant', 'ignore']

    pads = nnef_op.attribs['padding']
    if not pads:  # auto pad
        calc_output_size = [i * s for i, s in zip(input.shape, strides)]
        pads = infer.same_padding(upscaled_input=calc_output_size,
                                  filter=nnef_op.attribs['size'],
                                  stride=strides,
                                  dilation=dilations)

    assert pads[:2] == [
        (0, 0), (0, 0)
    ], "Padding in batch and channel dimensions is not supported in ONNX"
    pads = pads[2:]
    pads = converter.onnx_pads(pads)

    assert nnef_op.attribs['size'][:2] == strides[:2] == dilations[:2] == [1, 1], \
        'Pooling in batch and channel dimensions is not supported in ONNX'
    strides = strides[2:]
    dilations = dilations[2:]

    assert all(d == 1 for d in dilations), \
        'Dilation is not supported for pooling in ONNX'

    ONNXOperation(graph=onnx_graph,
                  name='MaxUnpool',
                  inputs=(input, indices),
                  attribs=dict(kernel_shape=nnef_op.attribs['size'][2:],
                               pads=pads,
                               strides=strides),
                  outputs=output)
Example #5
def partial_convert_conv_deconv(converter, nnef_op, onnx_graph, is_deconv,
                                input, filter, bias, output):
    # type: (Converter, NNEFOperation, ONNXGraph, bool, ONNXTensor, ONNXTensor, ONNXTensor, ONNXTensor)->None
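    # Shared lowering of NNEF conv/deconv to ONNX 'Conv'/'ConvTranspose'. The NNEF
    # stride/dilation/padding attributes here cover spatial dimensions only. For
    # deconv, output_padding is derived so ConvTranspose reproduces the exact NNEF
    # output shape; a constant zero bias is dropped, and a [1, C]-shaped bias is
    # squeezed to the 1-D bias tensor ONNX expects.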

    strides = list(nnef_op.attribs['stride'])
    if not strides:
        strides = [1] * (input.rank - 2)

    dilations = nnef_op.attribs['dilation']
    if not dilations:
        dilations = [1] * (input.rank - 2)

    groups = nnef_op.attribs.get(
        'groups', 1)  # default needed because box does not have groups
    if groups == 0:
        groups = input.shape[1]

    assert nnef_op.attribs['border'] == 'constant'

    if is_deconv:
        pads = nnef_op.attribs['padding']
        if not pads:  # auto pad
            calc_output_size = [i * s for i, s in zip(input.shape[2:], strides)]
            pads = infer.same_padding(upscaled_input=calc_output_size,
                                      filter=filter.shape[2:],
                                      stride=strides,
                                      dilation=dilations)
        else:
            calc_output_size = infer.conv(input=input.shape,
                                          filter=filter.shape[2:],
                                          padding=pads,
                                          stride=strides,
                                          dilation=dilations,
                                          groups=groups,
                                          output_channels=output.shape[1],
                                          format=infer.Format.NCHW,
                                          deconv=True)[2:]
        output_size = output.shape[2:]
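        # output_padding compensates for any difference between the requested
        # output size and the size ConvTranspose would produce on its own.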
        output_padding = [o - c for o, c in zip(output_size, calc_output_size)]
    else:
        pads = nnef_op.attribs['padding']
        if not pads:
            pads = infer.same_padding(upscaled_input=input.shape[2:],
                                      filter=filter.shape[2:],
                                      stride=strides,
                                      dilation=dilations)
        output_padding = [0] * len(pads)

    pads = converter.onnx_pads(pads)

    if bias.is_constant and bias.data == [0.0]:
        inputs = (input, filter)
    else:
        if bias.rank == 2:
            assert bias.shape[0] == 1
            bias = converter.add_squeeze(onnx_graph=onnx_graph,
                                         onnx_tensor=bias,
                                         axes=[0])
        inputs = (input, filter, bias)

    op = ONNXOperation(
        graph=onnx_graph,
        name='ConvTranspose' if is_deconv else 'Conv',
        inputs=inputs,
        attribs=dict(
            kernel_shape=filter.shape[2:],  # Not mandatory, but Caffe2 fails without this
            strides=strides,
            dilations=dilations,
            pads=pads,
            group=groups),
        outputs=output)

    if is_deconv:
        op.attribs['output_padding'] = output_padding