Code example #1
def fc_shape(op, transposed=False):
    # type: (Caffe2Operation, bool)->ShapeResult
    X, W, b = op.inputs
    axis = op.attribs.get('axis', 1)
    axis_w = op.attribs.get('axis_w', 1)

    if not transposed:
        shape = X.shape[:axis] + [utils.product(W.shape[:axis_w])]
    else:
        shape = X.shape[:axis] + [utils.product(W.shape[axis_w:])]

    return shape, op.inputs[0].dtype
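For reference, a minimal worked sketch of the non-transposed branch above, assuming that utils.product simply multiplies the entries of a list (the product helper below is a hypothetical stand-in for it):

from functools import reduce

def product(values):
    # hypothetical stand-in for utils.product: multiply all entries of a list
    return reduce(lambda a, b: a * b, values, 1)

X_shape = [8, 3, 4]   # example input shape, axis = 1
W_shape = [10, 12]    # Caffe2 FC weight [M, K] with axis_w = 1; K = 3 * 4
out_shape = X_shape[:1] + [product(W_shape[:1])]  # X.shape[:axis] + [product(W.shape[:axis_w])]
assert out_shape == [8, 10]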
Code example #2
def propagate_gemm(op):
    # type: (ONNXOperation)->typing.Tuple[typing.List[typing.List[int]], typing.List[str]]
    A, B = op.inputs[:2]

    assert A.rank >= 2 and B.rank >= 2
    A_shape = [A.shape[0], utils.product(A.shape[1:])]
    B_shape = [B.shape[0], utils.product(B.shape[1:])]
    return [
        infer.matmul(a=A_shape,
                     b=B_shape,
                     transpose_a=bool(op.attribs.get('transA', False)),
                     transpose_b=bool(op.attribs.get('transB', False)))
    ], [A.dtype]
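As a worked example (assuming infer.matmul returns the usual matrix-product shape): with A of shape [2, 3, 4] and B of shape [12, 5], the inputs are flattened to [2, 12] and [12, 5], so with transA and transB left at their defaults the inferred output shape is [2, 5].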
Code example #3
def topk_shape(op):
    # type: (Caffe2Operation)->ShapeResult
    shape = list(op.input.shape)
    shape[-1] = int(op.attribs['k'])

    shapes = (shape, shape, [utils.product(shape)])
    dtypes = (op.input.dtype, DTYPE_INT64, DTYPE_INT64)
    return shapes[:len(op.outputs)], dtypes[:len(op.outputs)]
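As a worked example: for an input of shape [2, 5] with k = 3, shape becomes [2, 3], so the candidate outputs are [2, 3] (values), [2, 3] (indices) and [6] (a flattened shape of utils.product([2, 3]) = 6 elements); only as many shape/dtype pairs as the operation actually has outputs are returned.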
Code example #4
File: operations.py  Project: stjordanis/NNEF-Tools
def _box_impl(
        input,  # type: torch.Tensor
        size,  # type: List[int]
        border,  # type: str
        padding,  # type: List[Tuple[int, int]]
        stride,  # type: List[int]
        dilation,  # type: List[int]
        normalize,  # type: bool
):
    # type: (...)->torch.Tensor

    assert 3 <= len(input.shape) <= 5
    assert len(input.shape) == len(size) == len(padding) == len(stride) == len(
        dilation)
    assert padding[:2] == [(0, 0), (0, 0)]
    assert size[:2] == stride[:2] == dilation[:2]

    if dilation and any(d != 1 for d in dilation):
        raise utils.NNEFToolsException(
            "Box (avg or sum pooling) is only implemented for dilation = 1.")

    spatial_dims = len(input.shape) - 2

    pad = nnef_pad(input=input,
                   padding=padding,
                   border='constant' if border == 'ignore' else border)

    avg_pool = {
        1: F.avg_pool1d,
        2: F.avg_pool2d,
        3: F.avg_pool3d
    }[spatial_dims](input=pad,
                    kernel_size=size[2:],
                    stride=stride[2:],
                    padding=0)

    if border == 'ignore' and normalize:
        ones = torch.ones_like(input)
        padded_ones = nnef_pad(input=ones, padding=padding, border='constant')
        avg_pool_ones = {
            1: F.avg_pool1d,
            2: F.avg_pool2d,
            3: F.avg_pool3d
        }[spatial_dims](input=padded_ones,
                        kernel_size=size[2:],
                        stride=stride[2:],
                        padding=0)
        # If the padding is large, averages of zero can occur on the border; avoid dividing by zero there
        avg_pool_ones = nnef_select(avg_pool_ones > 0, avg_pool_ones,
                                    torch.ones_like(avg_pool_ones))
        avg_pool /= avg_pool_ones

    if normalize:
        return avg_pool
    else:
        return avg_pool * utils.product(size)
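Note that F.avg_pool1d/2d/3d already divides by the kernel size, so when normalize is False the result is multiplied back by utils.product(size) (with size[:2] == [1, 1] this equals the spatial kernel volume), turning the average into a sum. When normalize is True and border is 'ignore', the average is instead divided by the same pooling applied to a tensor of ones, so that padded positions do not contribute to the denominator.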
Code example #5
File: operations.py  Project: stjordanis/NNEF-Tools
def nnef_debox(
        input,  # type: torch.Tensor
        size,  # type: List[int]
        border='constant',  # type: str
        padding=None,  # type: Optional[List[Tuple[int, int]]]
        stride=None,  # type: Optional[List[int]]
        dilation=None,  # type: Optional[List[int]]
        output_shape=None,  # type: Optional[List[int]]
        normalize=False,  # type: bool
):
    if border not in ('constant', 'ignore'):
        raise utils.NNEFToolsException(
            "Debox: '{}' border unsupported.".format(border))

    if len(size) not in (3, 4, 5):
        raise utils.NNEFToolsException(
            "Debox is only implemented for 3D, 4D, 5D tensors, given: {}D.".
            format(len(size)))

    if size[:2] != [1, 1]:
        raise utils.NNEFToolsException(
            "Debox is only implemented for size = 1 in N and C dimensions.")

    if padding and padding[:2] != [(0, 0), (0, 0)]:
        raise utils.NNEFToolsException(
            "Debox is only implemented for padding = (0, 0) in N and C dimensions."
        )
    if stride and stride[:2] != [1, 1]:
        raise utils.NNEFToolsException(
            "Debox is only implemented for stride = 1 in N and C dimensions.")
    if dilation and dilation[:2] != [1, 1]:
        raise utils.NNEFToolsException(
            "Debox is only implemented for dilation = 1 in N and C dimensions."
        )

    filter = torch.full(size=[input.shape[1], 1] + list(size)[2:],
                        fill_value=(1.0 /
                                    utils.product(size) if normalize else 1.0),
                        device=input.device,
                        dtype=input.dtype)
    bias = torch.zeros(size=tuple(), device=input.device, dtype=input.dtype)

    return nnef_deconv(input=input,
                       filter=filter,
                       bias=bias,
                       border='constant',
                       padding=padding[2:] if padding else padding,
                       stride=stride[2:] if stride else stride,
                       dilation=dilation[2:] if dilation else dilation,
                       output_shape=output_shape,
                       groups=input.shape[1])
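In other words, debox is realized here as a grouped transposed convolution with one group per input channel: the filter is a constant tensor whose value is 1 / utils.product(size) when normalize is set and 1 otherwise, and the bias is zero.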
Code example #6
File: __init__.py  Project: stjordanis/NNEF-Tools
def _calculate_stat_tensor(torch_tensor):
    # type: (torch.Tensor)->torch.Tensor
    count = utils.product(torch_tensor.shape)
    if count == 0:  # Avoid nans
        return torch.zeros([4],
                           dtype=torch_tensor.dtype,
                           device=torch_tensor.device)
    elif count == 1:  # Avoid nans
        elem = torch.min(torch_tensor)
        return torch.stack((elem, elem, elem, 0.0 * elem))
    else:
        return torch.stack(
            (torch.min(torch_tensor), torch.max(torch_tensor),
             torch.mean(torch_tensor),
             torch.std(torch_tensor, unbiased=True)))
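The returned tensor stacks the statistics as [min, max, mean, std] (unbiased standard deviation). The count == 1 branch uses 0.0 * elem for the standard deviation so that dtype and device are preserved while avoiding the NaN that torch.std would produce for a single element, and the count == 0 branch simply returns four zeros.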
Code example #7
File: operations.py  Project: stjordanis/NNEF-Tools
def nnef_conv(
        input,  # type: torch.Tensor
        filter,  # type: torch.Tensor
        bias,  # type: torch.Tensor
        border='constant',  # type: str
        padding=None,  # type: Optional[List[Tuple[int, int]]]
        stride=None,  # type: Optional[List[int]]
        dilation=None,  # type: Optional[List[int]]
        groups=1,  # type: int
):
    # type: (...)->torch.Tensor

    if len(input.shape) not in (3, 4, 5):
        raise utils.NNEFToolsException(
            "Convolution is only implemented for 3D, 4D, 5D tensors, given: {}D."
            .format(len(input.shape)))

    bias = bias.reshape(1, 1).expand(
        (1, filter.shape[0])) if utils.product(bias.size()) == 1 else bias

    spatial_dims = len(input.shape[2:])
    groups = input.shape[1] if groups == 0 else groups
    stride = [1] * spatial_dims if not stride else stride
    dilation = [1] * spatial_dims if not dilation else dilation
    padding = shape_inference.same_padding(
        upscaled_input=input.shape[2:],
        filter=filter.shape[2:],
        stride=stride,
        dilation=dilation) if not padding else padding

    pad = nnef_pad(input=input, padding=[(0, 0)] * 2 + padding, border=border)
    conv = {
        1: F.conv1d,
        2: F.conv2d,
        3: F.conv3d
    }[spatial_dims](input=pad,
                    weight=filter,
                    bias=bias.squeeze(dim=0).contiguous(),
                    stride=tuple(stride),
                    padding=0,
                    dilation=tuple(dilation),
                    groups=groups)

    return conv
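When padding is not supplied, it is derived with shape_inference.same_padding from the input's spatial shape, the filter size, the stride and the dilation. Either way the padding is applied explicitly through nnef_pad with the requested border mode, so the underlying torch convolution always runs with padding=0.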
Code example #8
File: operations.py  Project: stjordanis/NNEF-Tools
def _box_or_max_pool(
        input,  # type: torch.Tensor
        size,  # type: List[int]
        border='constant',  # type: str
        padding=None,  # type: Optional[List[Tuple[int, int]]]
        stride=None,  # type: Optional[List[int]]
        dilation=None,  # type: Optional[List[int]]
        normalize=False,  # type: bool
        is_max_pool=False,  # type: bool
):
    assert not (normalize and is_max_pool)

    rank = len(input.shape)
    padding, stride, dilation = _evaluate_max_pool_or_box_params(
        input_shape=list(input.shape),
        size=size,
        padding=padding,
        stride=stride,
        dilation=dilation)
    active = [
        size_ != 1 or padding_ != (0, 0) or stride_ != 1
        or dilation_ != 1 for size_, padding_, stride_, dilation_ in zip(
            size, padding, stride, dilation)
    ]

    if sum(active) == 0:
        return input

    if rank < 3:
        perm, perm_inv, inactive_shape, active_shape = None, None, None, None
    else:
        perm, perm_inv, inactive_shape, active_shape = _get_transform_for_box_or_max_pool(
            list(input.shape), active)

    if rank < 3:
        input = input.unsqueeze(0).unsqueeze(0)
        size = [1, 1] + size
        padding = [(0, 0), (0, 0)] + padding
        stride = [1, 1] + stride
        dilation = [1, 1] + dilation
    elif perm is not None:
        input = input.permute(*perm)
        size = utils.apply_permutation(size, perm)
        padding = utils.apply_permutation(padding, perm)
        stride = utils.apply_permutation(stride, perm)
        dilation = utils.apply_permutation(dilation, perm)

        active_rank = len(active_shape)
        input = input.reshape(*[utils.product(inactive_shape), 1] +
                              active_shape)
        size = [1, 1] + size[-active_rank:]
        padding = [(0, 0), (0, 0)] + padding[-active_rank:]
        stride = [1, 1] + stride[-active_rank:]
        dilation = [1, 1] + dilation[-active_rank:]

    if is_max_pool:
        output = _max_pool_impl(input=input,
                                size=size,
                                border=border,
                                padding=padding,
                                stride=stride,
                                dilation=dilation,
                                with_index=False)
    else:
        output = _box_impl(input=input,
                           size=size,
                           border=border,
                           padding=padding,
                           stride=stride,
                           dilation=dilation,
                           normalize=normalize)

    if rank < 3:
        output = output.squeeze(0).squeeze(0)
    elif perm is not None:
        active_rank = len(active_shape)
        output = output.reshape(inactive_shape +
                                list(output.shape)[-active_rank:])
        output = output.permute(*perm_inv)

    return output
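A dimension counts as 'active' only if its size, padding, stride or dilation differs from the identity values. Inputs of rank below 3 are temporarily wrapped in two leading singleton dimensions; for higher ranks, the tensor is permuted and reshaped so that all inactive dimensions are collapsed into a single leading axis before the 1D/2D/3D pooling primitive is applied, and the inverse reshape and permutation restore the original layout afterwards.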
Code example #9
File: operations.py  Project: stjordanis/NNEF-Tools
def nnef_deconv(
        input,  # type: torch.Tensor
        filter,  # type: torch.Tensor
        bias,  # type: torch.Tensor
        border='constant',  # type: str
        padding=None,  # type: Optional[List[Tuple[int, int]]]
        stride=None,  # type: Optional[List[int]]
        dilation=None,  # type: Optional[List[int]]
        output_shape=None,  # type: Optional[List[int]]
        groups=1,  # type: int
):
    # type: (...)->torch.Tensor

    if border != 'constant':
        raise utils.NNEFToolsException(
            "Deconv: '{}' border unsupported.".format(border))

    if output_shape and output_shape[0] != input.shape[0]:
        output_shape = list(output_shape)
        output_shape[0] = input.shape[0]

    rank = len(input.shape)
    if rank not in (3, 4, 5):
        raise utils.NNEFToolsException(
            "Deconvolution is only implemented for 3D, 4D, 5D tensors, given: {}D."
            .format(len(input.shape)))

    spatial_dims = len(input.shape[2:])
    stride = [1] * spatial_dims if not stride else stride
    dilation = [1] * spatial_dims if not dilation else dilation

    if groups == 0:
        if output_shape:
            groups = output_shape[1]
        else:
            # Planewise deconvolution without output_size, assuming that #(input channels) = #(output channels)
            groups = filter.shape[0]

    output_channels = filter.shape[1] * groups
    if output_shape:
        assert output_shape[1] == output_channels

    if not padding:
        output_size = output_shape[2:] if output_shape else [
            i * s for i, s in zip(input.shape[2:], stride)
        ]
        padding = shape_inference.same_padding(upscaled_input=output_size,
                                               filter=filter.shape[2:],
                                               stride=stride,
                                               dilation=dilation)
    else:
        output_size = output_shape[
            2:] if output_shape else shape_inference.conv(
                input=list(input.shape),
                filter=filter.shape[2:],
                padding=padding,
                stride=stride,
                dilation=dilation,
                groups=groups,
                output_channels=output_channels,
                format=shape_inference.Format.NCHW,
                deconv=True)[2:]

    uncropped_output_size = shape_inference.conv(
        input=list(input.shape),
        filter=filter.shape[2:],
        padding=shape_inference.Padding.VALID,
        stride=stride,
        dilation=dilation,
        groups=groups,
        output_channels=output_channels,
        format=shape_inference.Format.NCHW,
        deconv=True)[2:]

    crop_before = [p for p, _q in padding]
    crop_after = [
        uncropped - out - before for uncropped, out, before in zip(
            uncropped_output_size, output_size, crop_before)
    ]

    bias = bias.reshape(1, 1).expand(
        (1, output_channels)) if utils.product(bias.size()) == 1 else bias

    deconv = {
        1: F.conv_transpose1d,
        2: F.conv_transpose2d,
        3: F.conv_transpose3d
    }[spatial_dims](input=input,
                    weight=filter,
                    bias=bias.squeeze(dim=0).contiguous(),
                    stride=tuple(stride),
                    padding=0,
                    output_padding=0,
                    groups=groups,
                    dilation=tuple(dilation))

    return nnef_pad(deconv,
                    padding=[(0, 0), (0, 0)] +
                    [(-cb, -ca) for cb, ca in zip(crop_before, crop_after)])
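The transposed convolution itself always runs with zero padding, producing the uncropped ('VALID') output size; the requested padding is then realized by cropping, i.e. by calling nnef_pad with negative padding amounts computed from the difference between the uncropped and the desired output size.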
Code example #10
def convert_inner_product(converter, caffe_op, nnef_graph):
    # type: (Converter, CaffeOperation, NNEFGraph)->None

    input, weight = converter.converted_tensors(caffe_op.inputs[:2])
    output = converter.converted_tensor(caffe_op.output)

    transpose_a = False
    transpose_b = not caffe_op.attribs['transpose']

    axis = converter.nnef_axis(caffe_op.attribs["axis"], input.rank)

    if weight.rank == 4 and weight.shape[:2] == [1, 1]:
        weight.shape = weight.shape[2:]
        weight.data = weight.data.reshape(weight.shape)

    if axis > 1:
        weight.shape = [1] * (axis - 1) + weight.shape
        weight.data = weight.data.reshape(weight.shape)

    if axis != input.rank - 1:
        reshape_output = NNEFTensor(graph=nnef_graph,
                                    shape=[1] * int(axis == 0) +
                                    input.shape[:axis] +
                                    [utils.product(input.shape[axis:])],
                                    dtype=input.dtype)
        NNEFOperation(graph=nnef_graph,
                      name="reshape",
                      inputs=input,
                      outputs=reshape_output,
                      attribs=dict(shape=[1] * int(axis == 0) +
                                   input.shape[:axis] + [-1]))

        input = reshape_output

    if caffe_op.attribs["bias_term"] and transpose_b and axis == 1:
        bias = converter.converted_tensor(caffe_op.inputs[2])
        assert bias.rank == 1 or (bias.rank == 4
                                  and bias.shape[:3] == [1, 1, 1])
        bias.shape = [1, bias.shape[-1]]
        bias.data = bias.data.reshape(bias.shape)
        if axis > 1:
            bias.shape = [1] * (axis - 1) + bias.shape
            bias.data = bias.data.reshape(bias.shape)

        NNEFOperation(graph=nnef_graph,
                      name="linear",
                      inputs=(input, weight, bias),
                      outputs=output)
    elif caffe_op.attribs["bias_term"]:
        matmul_output = NNEFTensor(graph=nnef_graph,
                                   shape=infer.matmul(a=input.shape,
                                                      b=weight.shape,
                                                      transpose_a=transpose_a,
                                                      transpose_b=transpose_b),
                                   dtype=input.dtype)

        add_output = NNEFTensor(graph=nnef_graph,
                                shape=list(matmul_output.shape),
                                dtype=input.dtype) if axis == 0 else output

        NNEFOperation(graph=nnef_graph,
                      name="matmul",
                      inputs=(input, weight),
                      outputs=matmul_output,
                      attribs=dict(transposeA=transpose_a,
                                   transposeB=transpose_b))

        bias = converter.converted_tensor(caffe_op.inputs[2])
        assert bias.rank == 1 or (bias.rank == 4
                                  and bias.shape[:3] == [1, 1, 1])
        bias.shape = [1, bias.shape[-1]]
        bias.data = bias.data.reshape(bias.shape)
        if axis > 1:
            bias.shape = [1] * (axis - 1) + bias.shape
            bias.data = bias.data.reshape(bias.shape)

        NNEFOperation(graph=nnef_graph,
                      name="add",
                      inputs=(matmul_output, bias),
                      outputs=add_output)
        if axis == 0:
            NNEFOperation(graph=nnef_graph,
                          name="unsqueeze",
                          inputs=add_output,
                          outputs=output,
                          attribs=dict(axes=[0]))
    else:
        matmul_output = NNEFTensor(graph=nnef_graph,
                                   shape=infer.matmul(a=input.shape,
                                                      b=weight.shape,
                                                      transpose_a=transpose_a,
                                                      transpose_b=transpose_b),
                                   dtype=input.dtype) if axis == 0 else output

        NNEFOperation(graph=nnef_graph,
                      name="matmul",
                      inputs=(input, weight),
                      outputs=matmul_output,
                      attribs=dict(transposeA=transpose_a,
                                   transposeB=transpose_b))

        if axis == 0:
            NNEFOperation(graph=nnef_graph,
                          name="unsqueeze",
                          inputs=matmul_output,
                          outputs=output,
                          attribs=dict(axes=[0]))
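The converter distinguishes three cases: with a bias, axis == 1 and a non-transposed Caffe weight, the op maps directly to an NNEF linear; with a bias and any other axis it becomes matmul followed by add (plus an unsqueeze when axis == 0); without a bias it is a plain matmul. Inputs whose axis is not the last dimension are first reshaped so that all dimensions from axis onward are flattened into one.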
Code example #11
def flatten_to_2d(shape, axis):
    return [utils.product(shape[:axis]), utils.product(shape[axis:])]
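For example, flatten_to_2d([2, 3, 4, 5], axis=2) yields [6, 20]: the leading dimensions collapse to utils.product([2, 3]) and the trailing ones to utils.product([4, 5]).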
Code example #12
def flatten_to_vec_shape(op):
    # type: (Caffe2Operation)->ShapeResult
    return [utils.product(op.inputs[0].shape)], op.inputs[0].dtype