Example #1
def ref_deconvolution_2d(x,
                         w,
                         b,
                         base_axis,
                         pad,
                         stride,
                         dilation,
                         group,
                         channel_last=False,
                         output_padding=(0, 0)):
    if channel_last:
        transpose_x = refs.ChannelLastToFirstTranspose(x.ndim, len(pad))
        transpose_w = refs.ChannelLastToFirstTranspose(w.ndim, len(pad))
        return transpose_x.inv(
            ref_deconvolution_2d(transpose_x(x), transpose_w(w), b, base_axis,
                                 pad, stride, dilation, group, False,
                                 output_padding))

    y = []
    for xx in x.reshape((-1, ) + x.shape[base_axis:]):
        y += [
            refs.deconvolution_2d(xx,
                                  w,
                                  b,
                                  pad,
                                  stride,
                                  dilation,
                                  group,
                                  output_padding=output_padding)[np.newaxis]
        ]
    y = np.vstack(y)
    return y.reshape(x.shape[:base_axis] + y.shape[1:])
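Several of these examples route channel-last inputs through refs.ChannelLastToFirstTranspose, run the channel-first reference, and transpose the result back. Below is a minimal NumPy sketch of that round trip, assuming a 4-D NHWC tensor with two spatial dimensions; the class here is an illustrative stand-in, not nnabla's actual helper.

import numpy as np

class ChannelLastToFirst:
    def __init__(self, ndim, n_sdim):
        # Move the trailing channel axis in front of the n_sdim spatial axes:
        # ndim=4, n_sdim=2 gives axes (0, 3, 1, 2), i.e. NHWC -> NCHW.
        self.axes = tuple(range(ndim - n_sdim - 1)) + (ndim - 1,) + \
            tuple(range(ndim - n_sdim - 1, ndim - 1))
        # inv_axes undoes the permutation (NCHW -> NHWC).
        self.inv_axes = tuple(np.argsort(self.axes))

    def __call__(self, x):
        return x.transpose(self.axes)

    def inv(self, y):
        return y.transpose(self.inv_axes)

x_nhwc = np.arange(2 * 4 * 5 * 3).reshape(2, 4, 5, 3)
t = ChannelLastToFirst(x_nhwc.ndim, 2)
assert t(x_nhwc).shape == (2, 3, 4, 5)           # NCHW
assert np.array_equal(t.inv(t(x_nhwc)), x_nhwc)  # round trip
# The tests also use inv_axes to turn a channel-first shape into its
# channel-last counterpart, as in `tuple(inshape[i] for i in t.inv_axes)`:
assert tuple((2, 3, 4, 5)[i] for i in t.inv_axes) == (2, 4, 5, 3)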
Example #2
def test_deconvolution_2d_forward_backward(inshape, kernel, outmaps, pad,
                                           stride, dilation, group, with_bias,
                                           channel_last, output_padding,
                                           seed, ctx, func_name):
    from nbla_test_utils import function_tester

    if channel_last and not func_name.endswith('Cudnn'):
        pytest.skip('channel_last=True is only supported in CUDNN backend.')
    base_axis = len(inshape) - len(kernel) - 1
    inmaps = inshape[base_axis]
    if channel_last:
        t = refs.ChannelLastToFirstTranspose(len(inshape), len(kernel))
        inshape = tuple(inshape[i] for i in t.inv_axes)
    rng = np.random.RandomState(seed)
    i = rng.randn(*inshape).astype(np.float32)
    kshape = (inmaps,) + (outmaps // group,) + kernel
    if channel_last:
        t = refs.ChannelLastToFirstTranspose(len(kshape), len(kernel))
        kshape = tuple(kshape[i] for i in t.inv_axes)
    k = rng.randn(*kshape).astype(np.float32)
    base_axis = len(inshape) - 3
    b = None
    if with_bias:
        b = rng.randn(outmaps).astype(np.float32)
    inputs = [i, k, b]
    func_args = [base_axis, pad, stride, dilation, group, channel_last,
                 output_padding]
    function_tester(rng, F.deconvolution, ref_deconvolution_2d, inputs,
                    func_args=func_args, func_name=func_name, ctx=ctx,
                    atol_f=1e-4, atol_b=1e-2, atol_accum=1e-5, dstep=1e-2)
Example #3
def core_test_convolution_forward_backward(inshape, kernel, outmaps, pad,
                                           stride, dilation, group,
                                           channel_last, with_bias, seed, ctx,
                                           func_name):
    from nbla_test_utils import function_tester
    if func_name == 'ConvolutionCuda':
        pytest.skip(
            'CUDA Convolution N-D is only supported in CUDNN extension')
    if channel_last and not func_name.endswith('Cudnn'):
        pytest.skip(
            'channel_last=True is only supported in CUDNN backend so far.')
    if channel_last and func_name.endswith('Cudnn') and (
            np.any(np.asarray(dilation) > 1) or group > 1):
        import nnabla_ext.cuda as nc
        major, minor, revision = map(int, nc.__cudnn_version__.split('.'))
        version = major * 1000 + minor * 100
        if version < 7200:
            pytest.skip(
                'channel_last dilated convolution does not work in CUDNN {}.'.
                format(version))

    base_axis = len(inshape) - len(kernel) - 1
    inmaps = inshape[base_axis]
    if channel_last:
        t = refs.ChannelLastToFirstTranspose(len(inshape), len(kernel))
        inshape = tuple(inshape[i] for i in t.inv_axes)
    rng = np.random.RandomState(seed)
    i = rng.randn(*inshape).astype(np.float32)
    kshape = (outmaps, ) + (inmaps // group, ) + kernel
    if channel_last:
        t = refs.ChannelLastToFirstTranspose(len(kshape), len(kernel))
        kshape = tuple(kshape[i] for i in t.inv_axes)
    k = rng.randn(*kshape).astype(np.float32)
    b = None
    if with_bias:
        b = rng.randn(outmaps).astype(np.float32)
    inputs = [i, k, b]
    atol_half = 1.0 if inmaps > 64 else 1e-1
    function_tester(
        rng,
        F.convolution,
        ref_convolution,
        inputs,
        func_args=[base_axis, pad, stride, dilation, group, channel_last],
        atol_f=1e-4,
        atol_b=1e-2,
        atol_accum=1e-5,
        dstep=1e-2,
        ctx=ctx,
        func_name=func_name,
        atol_half=atol_half)
Example #4
def ref_convolution(x, w, b, base_axis, pad, stride, dilation, group, channel_last):
    if channel_last:
        t = refs.ChannelLastToFirstTranspose(x.ndim, len(pad))
        x = t(x)
        tw = refs.ChannelLastToFirstTranspose(w.ndim, len(pad))
        w = tw(w)
        y = ref_convolution(x, w, b, base_axis, pad,
                            stride, dilation, group, False)
        return t.inv(y)
    y = []
    for xx in x.reshape((-1,) + x.shape[base_axis:]):
        y += [refs.convolution_nd(xx, w, b, pad, stride,
                                  dilation, group)[np.newaxis]]
    y = np.vstack(y)
    return y.reshape(x.shape[:base_axis] + y.shape[1:])
Example #5
def test_sum_pooling_3d_double_backward(seed, inshape, kernel, stride, pad, ignore_border, channel_last,
                                        ctx, func_name):
    from nbla_test_utils import backward_function_tester, grad_function_forward_function_output
    from nnabla.backward_function.sum_pooling import SumPoolingDataGrad
    if channel_last and not func_name.endswith('Cudnn'):
        pytest.skip('Channel last is only supported in Cudnn so far')
    if channel_last:
        t = refs.ChannelLastToFirstTranspose(len(inshape), len(kernel))
        inshape = tuple(inshape[i] for i in t.inv_axes)
    if not ignore_border and func_name.endswith('Cudnn'):
        pytest.skip('ignore_border=False in Cudnn is not supported.')
    rng = np.random.RandomState(seed)
    inputs = [rng.randn(*inshape).astype(np.float32)]
    func_args = [kernel, stride, ignore_border, pad, channel_last]
    # 2nd-order
    backward_function_tester(rng, F.sum_pooling, inputs=inputs,
                             func_args=func_args, ctx=ctx)
    # 3rd-order
    df, y = grad_function_forward_function_output(SumPoolingDataGrad,
                                                  F.sum_pooling,
                                                  ctx, inputs,
                                                  *func_args)
    df.xshape = inputs[0].shape
    ginputs = [rng.randn(*y.shape)]
    backward_function_tester(rng, df,
                             inputs=ginputs, ctx=ctx, atol_accum=3e-2, non_accum_check=True)
Example #6
def test_max_pooling_3d_double_backward(seed, inshape, kernel, stride, pad,
                                        ignore_border, channel_last, ctx,
                                        func_name):
    # pytest.skip('`>3`-dimensional inputs are not supported.')
    from nbla_test_utils import backward_function_tester, cap_ignore_region
    if channel_last and not func_name.endswith('Cudnn'):
        pytest.skip('Channel last is only supported in Cudnn so far')
    if channel_last:
        t = refs.ChannelLastToFirstTranspose(len(inshape), len(kernel))
        inshape = tuple(inshape[i] for i in t.inv_axes)
    rng = np.random.RandomState(seed)
    inputs = [rng.randn(*inshape).astype(np.float32)]
    func_args = [kernel, stride, ignore_border, pad, channel_last]
    # 2nd-order
    backward_function_tester(rng,
                             F.max_pooling,
                             inputs=inputs,
                             func_args=func_args,
                             ctx=ctx)
    # 3rd-order
    import nnabla as nn
    y = F.max_pooling(nn.Variable(inputs[0].shape), *func_args)
    ginputs = [rng.randn(*y.shape), inputs[0]]
    backward_function_tester(rng,
                             F.max_pooling_backward,
                             inputs=ginputs,
                             func_args=func_args,
                             ctx=ctx,
                             backward=[True, False],
                             non_accum_check=True)
Example #7
def test_average_pooling_2d_double_backward(seed, inshape, kernel, stride, pad, ignore_border,
                                            channel_last,
                                            including_pad, ctx, func_name):
    from nbla_test_utils import backward_function_tester, grad_function_forward_function_output
    from nnabla.backward_function.average_pooling import AveragePoolingDataGrad
    if channel_last and not func_name.endswith('Cudnn'):
        pytest.skip('Channel last is only supported in Cudnn so far')
    if channel_last:
        t = refs.ChannelLastToFirstTranspose(len(inshape), len(kernel))
        inshape = tuple(inshape[i] for i in t.inv_axes)
    rng = np.random.RandomState(seed)
    inputs = [rng.randn(*inshape).astype(np.float32)]
    func_args = [kernel, stride, ignore_border,
                 pad, channel_last, including_pad]
    # 2nd-order
    backward_function_tester(rng, F.average_pooling,
                             inputs=inputs, func_args=func_args,
                             ctx=ctx)
    # 3rd-order
    average_pooling_data_grad, y = grad_function_forward_function_output(AveragePoolingDataGrad,
                                                                         F.average_pooling,
                                                                         ctx, inputs,
                                                                         *func_args)
    average_pooling_data_grad.xshape = inputs[0].shape
    ginputs = [rng.randn(*y.shape)]
    backward_function_tester(rng, average_pooling_data_grad,
                             inputs=ginputs, func_args=[],
                             ctx=ctx)
Example #8
def ref_unpooling(x, kernel, channel_last):
    if channel_last:
        n_sdim = len(kernel)
        t = refs.ChannelLastToFirstTranspose(x.ndim, n_sdim)
        x = t(x)
    y = x
    for ind, p in enumerate(kernel[::-1]):
        y = y.repeat(p, axis=y.ndim - (ind + 1))
    if channel_last:
        y = t.inv(y)
    return y
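ref_unpooling expands each spatial axis by repeating its elements kernel[d] times, walking the kernel from the last axis backwards. A tiny standalone check of that repeat pattern in plain NumPy, with an assumed 2x3 kernel:

import numpy as np

# Nearest-neighbor unpooling by per-axis repetition, exactly the loop used
# in ref_unpooling above.
x = np.array([[1, 2],
              [3, 4]])
y = x
for ind, p in enumerate((2, 3)[::-1]):        # kernel = (2, 3)
    y = y.repeat(p, axis=y.ndim - (ind + 1))
assert y.shape == (4, 6)
# Each input element becomes a 2x3 block; the top-left block is all 1s.
assert np.array_equal(y[:2, :3], np.full((2, 3), 1))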
Example #9
def ref_sum_pooling(x, kernel, stride, ignore_border, pad, channel_last):
    if channel_last:
        t = refs.ChannelLastToFirstTranspose(x.ndim, len(kernel))
        x = t(x)
        y = ref_sum_pooling(x, kernel, stride, ignore_border, pad, False)
        return t.inv(y)
    if len(kernel) == 3:
        y = ref_sum_pooling_3d(x, kernel, stride, ignore_border, pad)
        return y
    y = ref_sum_pooling_2d(x, kernel, stride, ignore_border, pad)
    return y
Example #10
def test_sum_pooling_3d(seed, inshape, kernel, stride, pad, ignore_border, channel_last,
                        ctx, func_name):
    from nbla_test_utils import function_tester
    if channel_last and not func_name.endswith('Cudnn'):
        pytest.skip('Channel last is only supported in Cudnn so far')
    if channel_last:
        t = refs.ChannelLastToFirstTranspose(len(inshape), len(kernel))
        inshape = tuple(inshape[i] for i in t.inv_axes)
    rng = np.random.RandomState(seed)
    inputs = [rng.randn(*inshape).astype(np.float32)]
    func_args = [kernel, stride, ignore_border, pad, channel_last]
    function_tester(rng, F.sum_pooling, ref_sum_pooling, inputs=inputs,
                    func_args=func_args, func_name=func_name, ctx=ctx,
                    atol_f=1e-6, atol_b=1e-2)
Example #11
def ref_average_pooling(x, kernel, stride, ignore_border, pad, channel_last, including_pad):
    if channel_last:
        t = refs.ChannelLastToFirstTranspose(x.ndim, len(kernel))
        x = t(x)
        y = ref_average_pooling(
            x, kernel, stride, ignore_border, pad, False, including_pad)
        y = t.inv(y)
        return y
    if len(kernel) == 3:
        y = ref_average_pooling_3d(
            x, kernel, stride, ignore_border, pad, including_pad)
        return y
    y = ref_average_pooling_2d(
        x, kernel, stride, ignore_border, pad, including_pad)
    return y
Example #12
def ref_interpolate(x,
                    scale,
                    output_size,
                    mode,
                    align_corners=True,
                    half_pixel=False,
                    half_pixel_for_nn=False,
                    channel_last=False):
    assert scale or output_size, 'Need either scale or output_size.'
    assert not scale or len(scale) in (1, 2, 3), 'Only 1D/2D/3D'
    assert not output_size or len(output_size) in (1, 2, 3), 'Only 1D/2D/3D'

    if channel_last:
        n_sdim = len(scale) if scale else len(output_size)
        t = refs.ChannelLastToFirstTranspose(x.ndim, n_sdim)
        x = t(x)

    if not output_size:
        output_size = np.floor(np.array(scale) * x.shape[-len(scale):])
        output_size = tuple(map(int, output_size))

    if mode == "nearest":
        if len(output_size) == 1:
            out = ref_nearest_interpolate_1d(x, output_size, align_corners,
                                             half_pixel, half_pixel_for_nn)
            out = t.inv(out) if channel_last else out
        if len(output_size) == 2:
            out = ref_nearest_interpolate_2d(x, output_size, align_corners,
                                             half_pixel, half_pixel_for_nn)
            out = t.inv(out) if channel_last else out
        if len(output_size) == 3:
            out = ref_nearest_interpolate_3d(x, output_size, align_corners,
                                             half_pixel, half_pixel_for_nn)
            out = t.inv(out) if channel_last else out
    elif mode == "linear":
        if len(output_size) == 1:
            out = ref_linear_interpolate_1d(x, output_size, align_corners,
                                            half_pixel)

        if len(output_size) == 2:
            out = ref_linear_interpolate_2d(x, output_size, align_corners,
                                            half_pixel)

        if len(output_size) == 3:
            out = ref_linear_interpolate_3d(x, output_size, align_corners,
                                            half_pixel)
        out = t.inv(out) if channel_last else out
    return out
Example #13
def test_max_pooling_3d_double_backward(seed, inshape, kernel, stride, pad, ignore_border, channel_last,
                                        ctx, func_name):
    # pytest.skip('`>3`-dimensional inputs are not supported.')
    # TODO: some tests fail
    from nbla_test_utils import backward_function_tester, cap_ignore_region
    if channel_last and not func_name.endswith('Cudnn'):
        pytest.skip('Channel last is only supported in Cudnn so far')
    if channel_last:
        t = refs.ChannelLastToFirstTranspose(len(inshape), len(kernel))
        inshape = tuple(inshape[i] for i in t.inv_axes)
    rng = np.random.RandomState(seed)
    inputs = [rng.randn(*inshape).astype(np.float32)]
    func_args = [kernel, stride, ignore_border, pad, channel_last]
    backward_function_tester(rng, F.max_pooling, None, inputs=inputs,
                             func_args=func_args, func_name=func_name, ctx=ctx,
                             atol_f=1e-2, atol_b=1e-1, atol_accum=1e-1, dstep=1e-3)
Example #14
def ref_interpolate(x, scale, output_size, mode, align_corners=None, channel_last=False):
    assert scale or output_size, 'Need either scale or output_size.'
    assert not scale or len(scale) in (1, 2, 3), 'Only 1D/2D/3D'
    assert not output_size or len(output_size) in (1, 2, 3), 'Only 1D/2D/3D'

    if channel_last:
        n_sdim = len(scale) if scale else len(output_size)
        t = refs.ChannelLastToFirstTranspose(x.ndim, n_sdim)
        x = t(x)

    if not output_size:
        output_size = np.floor(np.array(scale) * x.shape[-len(scale):])
        output_size = tuple(map(int, output_size))

    if mode == "nearest":
        osize = output_size
        isize = x.shape[-len(osize):]
        scale = [i / o for i, o in zip(isize, osize)]
        index = [s * (np.arange(o) + 0.5) for s, o in zip(scale, osize)]
        index = [idx.astype(np.int32) for idx in index]
        index = [np.minimum(idx, i - 1) for idx, i in zip(index, isize)]
        xx = x.reshape(-1, *isize)
        ib = np.arange(xx.shape[0])
        yy = xx[np.ix_(ib, *index)]
        out = yy.reshape(x.shape[:-len(osize)] + osize)
        out = t.inv(out) if channel_last else out
        return out

    elif mode == "linear":
        if len(output_size) == 1:
            out = ref_linear_interpolate_1d(
                x, output_size, mode, align_corners)

        if len(output_size) == 2:
            out = ref_linear_interpolate_2d(
                x, output_size, mode, align_corners)

        if len(output_size) == 3:
            out = ref_linear_interpolate_3d(
                x, output_size, mode, align_corners)
        out = t.inv(out) if channel_last else out
        return out
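The "nearest" branch above computes one index vector per spatial axis, flattens the leading axes, and gathers with np.ix_. A small standalone run of the same recipe, assuming a (1, 4) input upscaled to output size 8:

import numpy as np

# Per-axis nearest-neighbor index vectors, fanned out with np.ix_ as in the
# "nearest" branch above.
x = np.array([[10., 20., 30., 40.]])          # shape (1, 4)
osize = (8,)
isize = x.shape[-len(osize):]
scale = [i / o for i, o in zip(isize, osize)]  # 4/8 = 0.5
index = [(s * (np.arange(o) + 0.5)).astype(np.int32)
         for s, o in zip(scale, osize)]
index = [np.minimum(idx, i - 1) for idx, i in zip(index, isize)]
xx = x.reshape(-1, *isize)
ib = np.arange(xx.shape[0])
yy = xx[np.ix_(ib, *index)]
out = yy.reshape(x.shape[:-len(osize)] + osize)
assert np.array_equal(out[0], [10., 10., 20., 20., 30., 30., 40., 40.])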
Example #15
def core_test_convolution_double_backward(inshape,
                                          kernel,
                                          outmaps,
                                          pad,
                                          stride,
                                          dilation,
                                          group,
                                          channel_last,
                                          with_bias,
                                          seed,
                                          ctx,
                                          func_name,
                                          non_accum_check=True,
                                          atol_f=1e-4,
                                          atol_b=1e-3,
                                          atol_accum=8e-2,
                                          dstep=1e-3):
    from nbla_test_utils import backward_function_tester, grad_function_forward_function_output
    from nnabla.backward_function.convolution import ConvolutionDataGrad, ConvolutionFilterGrad
    if func_name == 'ConvolutionCuda':
        pytest.skip(
            'CUDA Convolution N-D is only supported in CUDNN extension')
    if channel_last and not func_name.endswith('Cudnn'):
        pytest.skip(
            'channel_last=True is only supported in CUDNN backend so far.')
    if channel_last and func_name.endswith('Cudnn') and (
            np.any(np.asarray(dilation) > 1) or group > 1):
        import nnabla_ext.cuda as nc
        major, minor, revision = map(int, nc.__cudnn_version__.split('.'))
        version = major * 1000 + minor * 100
        if version < 7200:
            pytest.skip(
                'channel_last dilated convolution does not work in CUDNN {}.'.
                format(version))

    base_axis = len(inshape) - len(kernel) - 1
    inmaps = inshape[base_axis]
    if channel_last:
        t = refs.ChannelLastToFirstTranspose(len(inshape), len(kernel))
        inshape = tuple(inshape[i] for i in t.inv_axes)
    rng = np.random.RandomState(seed)
    i = np.clip(rng.randn(*inshape).astype(np.float32), -0.8, 0.8)
    kshape = (outmaps, ) + (inmaps // group, ) + kernel
    if channel_last:
        t = refs.ChannelLastToFirstTranspose(len(kshape), len(kernel))
        kshape = tuple(kshape[i] for i in t.inv_axes)
    k = np.clip(rng.randn(*kshape).astype(np.float32), -0.8, 0.8)
    b = None
    if with_bias:
        b = np.clip(rng.randn(outmaps).astype(np.float32), -0.8, 0.8)
    inputs = [i, k, b]
    atol_half = 1.0 if inmaps > 64 else 1e-1
    func_args = [base_axis, pad, stride, dilation, group, channel_last]
    # Convolution
    backward_function_tester(rng,
                             F.convolution,
                             inputs,
                             func_args=func_args,
                             atol_f=atol_f,
                             atol_accum=atol_accum,
                             dstep=dstep,
                             ctx=ctx)
    # DataGrad
    df, y = grad_function_forward_function_output(ConvolutionDataGrad,
                                                  F.convolution, ctx, inputs,
                                                  *func_args)
    df.xshape = i.shape
    ginputs = [rng.randn(*y.shape), k]
    backward_function_tester(rng,
                             df,
                             ginputs,
                             func_args=[],
                             atol_f=atol_f,
                             atol_b=atol_b,
                             atol_accum=atol_accum,
                             dstep=dstep,
                             ctx=ctx,
                             non_accum_check=non_accum_check)

    # FilterGrad
    df, y = grad_function_forward_function_output(ConvolutionFilterGrad,
                                                  F.convolution, ctx, inputs,
                                                  *func_args)
    df.wshape = k.shape
    ginputs = [rng.randn(*y.shape), i]
    backward_function_tester(rng,
                             df,
                             ginputs,
                             func_args=[],
                             atol_f=atol_f,
                             atol_b=atol_b,
                             atol_accum=atol_accum,
                             dstep=dstep,
                             ctx=ctx,
                             non_accum_check=non_accum_check)
Example #16
def test_deconvolution_2d_double_backward(inshape, kernel, outmaps, pad,
                                          stride, dilation, group, with_bias,
                                          channel_last, output_padding, seed,
                                          ctx, func_name):
    from nbla_test_utils import function_tester, backward_function_tester, grad_function_forward_function_output
    from nnabla.backward_function.deconvolution import DeconvolutionDataGrad, DeconvolutionFilterGrad

    if channel_last and not func_name.endswith('Cudnn'):
        pytest.skip('channel_last=True is only supported in CUDNN backend.')
    base_axis = len(inshape) - len(kernel) - 1
    inmaps = inshape[base_axis]
    if channel_last:
        t = refs.ChannelLastToFirstTranspose(len(inshape), len(kernel))
        inshape = tuple(inshape[i] for i in t.inv_axes)
    rng = np.random.RandomState(seed)
    i = np.clip(rng.randn(*inshape).astype(np.float32), -0.5, 0.5)
    kshape = (inmaps, ) + (outmaps // group, ) + kernel
    if channel_last:
        t = refs.ChannelLastToFirstTranspose(len(kshape), len(kernel))
        kshape = tuple(kshape[i] for i in t.inv_axes)
    k = np.clip(rng.randn(*kshape).astype(np.float32), -0.5, 0.5)
    base_axis = len(inshape) - 3
    b = None
    if with_bias:
        b = np.clip(rng.randn(outmaps).astype(np.float32), -0.5, 0.5)
    inputs = [i, k, b]
    func_args = [
        base_axis, pad, stride, dilation, group, channel_last, output_padding
    ]
    # Deconvolution
    backward_function_tester(rng,
                             F.deconvolution,
                             inputs,
                             func_args=func_args,
                             ctx=ctx,
                             atol_accum=1e-1)

    # DataGrad
    df, y = grad_function_forward_function_output(DeconvolutionDataGrad,
                                                  F.deconvolution, ctx, inputs,
                                                  *func_args)
    df.xshape = i.shape
    ginputs = [rng.randn(*y.shape), k]
    backward_function_tester(rng,
                             df,
                             ginputs,
                             ctx=ctx,
                             atol_accum=1e-1,
                             non_accum_check=True)

    # FilterGrad
    df, y = grad_function_forward_function_output(DeconvolutionFilterGrad,
                                                  F.deconvolution, ctx, inputs,
                                                  *func_args)
    df.wshape = k.shape
    ginputs = [rng.randn(*y.shape), i]
    backward_function_tester(rng,
                             df,
                             ginputs,
                             func_args=[],
                             ctx=ctx,
                             atol_accum=1e-1,
                             non_accum_check=True)
Example #17
def test_forward_backward_2d(inshape, kernel, out_channels, pad, stride,
                             dilation, group, deformable_group, with_mask,
                             channel_last, with_bias, seed, ctx, func_name):
    if channel_last:
        pytest.skip(
            'channel_last=True is not supported in any backend so far.')

    import platform
    if platform.machine().startswith("arm"):
        pytest.skip('Skip the arm platform temporarily.')

    rng = np.random.RandomState(seed)

    # Create arguments
    base_axis = len(inshape) - len(kernel) - 1
    func_args = [
        base_axis, pad, stride, dilation, group, deformable_group, channel_last
    ]

    # Compute shapes
    in_channels = inshape[base_axis]
    kshape = (out_channels, in_channels // group) + kernel
    offset_channels = 2 * deformable_group * kernel[0] * kernel[1]
    offset_shape = inshape[0:base_axis] + \
        (offset_channels,) + inshape[base_axis + 1:]
    mask_shape = inshape[0:base_axis] + \
        (deformable_group * kernel[0] * kernel[1],) + inshape[base_axis + 1:]
    if channel_last:
        t = refs.ChannelLastToFirstTranspose(len(inshape), len(kernel))
        inshape = tuple(inshape[i] for i in t.inv_axes)
        t = refs.ChannelLastToFirstTranspose(len(offset_shape), len(kernel))
        offset_shape = tuple(offset_shape[i] for i in t.inv_axes)
        t = refs.ChannelLastToFirstTranspose(len(kshape), len(kernel))
        kshape = tuple(kshape[i] for i in t.inv_axes)

    # Create inputs
    x = rng.randn(*inshape).astype(np.float32)
    w = rng.randn(*kshape).astype(np.float32)
    b = rng.randn(out_channels).astype(np.float32) if with_bias else None

    # Because numerical gradient cannot be calculated correctly
    # near the input boundary, offsets are generated to avoid this case.
    # 1. Generate offsets in [-1.9, 1.9].
    offsets = (3.8 * rng.rand(*offset_shape).astype(np.float32)) - 1.9
    # 2. Ad hoc: remove values in the dstep-neighborhood of {-1, 0, 1} by
    #    treating the 0.1-neighborhood (large enough to cover the
    #    dstep-neighborhood) as bad and shifting those values by +0.5
    #    (which must be larger than 2 * dstep).
    offsets += np.logical_or(
        np.abs(offsets - np.floor(offsets)) < 0.1,
        np.abs(offsets - np.ceil(offsets)) < 0.1).astype(np.float32) * 0.5

    mask = rng.rand(*mask_shape).astype(np.float32) if with_mask else None

    inputs = [x, w, offsets, mask, b]

    # Test
    atol_half = 1.0 if in_channels > 64 else 1.5e-1
    function_tester(rng,
                    F.deformable_convolution,
                    ref_deformable_convolution_2d,
                    inputs,
                    func_args,
                    atol_f=1e-4,
                    atol_b=1e-2,
                    atol_accum=1e-5,
                    dstep=1e-2,
                    ctx=ctx,
                    func_name=func_name,
                    atol_half=atol_half)
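The offset-generation comments above argue that shifting every offset lying within 0.1 of an integer by +0.5 leaves all offsets clear of the bilinear-interpolation kinks that break finite differencing. A quick standalone check of that claim, re-applying the code's own predicate after the shift:

import numpy as np

def near_int(o):
    # Same predicate as in the test: within 0.1 of the nearest integer.
    return np.logical_or(np.abs(o - np.floor(o)) < 0.1,
                         np.abs(o - np.ceil(o)) < 0.1)

rng = np.random.RandomState(0)
offsets = (3.8 * rng.rand(10000).astype(np.float32)) - 1.9
offsets += near_int(offsets).astype(np.float32) * 0.5
# Shifted values land at distance >= 0.4 from any integer, unshifted ones
# were already outside the 0.1 band, so nothing remains near an integer.
assert not near_int(offsets).any()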