Example #1
def test_average_pooling_2d_double_backward(seed, inshape, kernel, stride, pad, ignore_border,
                                            channel_last,
                                            including_pad, ctx, func_name):
    from nbla_test_utils import backward_function_tester, grad_function_forward_function_output
    from nnabla.backward_function.average_pooling import AveragePoolingDataGrad
    if channel_last and not func_name.endswith('Cudnn'):
        pytest.skip('Channel last is only supported in Cudnn so far')
    if channel_last:
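        # Permute the channel-first test shape into its channel-last layout.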
        t = refs.ChannelLastToFirstTranspose(len(inshape), len(kernel))
        inshape = tuple(inshape[i] for i in t.inv_axes)
    rng = np.random.RandomState(seed)
    inputs = [rng.randn(*inshape).astype(np.float32)]
    func_args = [kernel, stride, ignore_border,
                 pad, channel_last, including_pad]
    # 2nd-order
    backward_function_tester(rng, F.average_pooling,
                             inputs=inputs, func_args=func_args,
                             ctx=ctx)
    # 3rd-order
    average_pooling_data_grad, y = grad_function_forward_function_output(AveragePoolingDataGrad,
                                                                         F.average_pooling,
                                                                         ctx, inputs,
                                                                         *func_args)
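    # The data-grad function cannot infer the forward input shape from its
    # incoming gradient alone, so provide it explicitly.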
    average_pooling_data_grad.xshape = inputs[0].shape
    ginputs = [rng.randn(*y.shape)]
    backward_function_tester(rng, average_pooling_data_grad,
                             inputs=ginputs, func_args=[],
                             ctx=ctx)
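
All of these tests follow the same idea: build the function's backward graph and then check that graph's own gradients numerically. A minimal sketch of double differentiation in nnabla, assuming only that nnabla and NumPy are installed (the shape and kernel below are arbitrary choices, not taken from the test suite):

import numpy as np
import nnabla as nn
import nnabla.functions as F

# Build a small graph and differentiate it twice with nn.grad.
x = nn.Variable((2, 3, 6, 6), need_grad=True)
x.d = np.random.randn(*x.shape)
y = F.average_pooling(x, kernel=(2, 2))
# First-order gradient of a scalar output w.r.t. x, kept as a graph
# so it can itself be differentiated.
gx = nn.grad([F.sum(y)], [x])[0]
# Second-order gradient: this is what the "2nd-order" tester checks
# against numerical differentiation.
ggx = nn.grad([F.sum(gx)], [x])[0]
ggx.forward()

For a linear operation like average pooling this second derivative is identically zero; the testers therefore also exercise gradients with respect to the incoming grad_output, which is what the "3rd-order" blocks with the *DataGrad functions cover.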
Example #2
def test_batch_matmul_double_backward(seed, reduce_dim, row_a, col_b,
                                      transpose_a, transpose_b, batch_dims_a,
                                      batch_dims_b, ctx, func_name):
    from nbla_test_utils import backward_function_tester
    if transpose_a:
        shape_a = (reduce_dim, row_a)
    else:
        shape_a = (row_a, reduce_dim)
    if transpose_b:
        shape_b = (col_b, reduce_dim)
    else:
        shape_b = (reduce_dim, col_b)
    shape_a = batch_dims_a + shape_a
    shape_b = batch_dims_b + shape_b

    rng = np.random.RandomState(seed)
    # Input
    inputs = [
        rng.randn(*shape_a).astype(np.float32),
        rng.randn(*shape_b).astype(np.float32),
    ]
    backward_function_tester(rng,
                             F.batch_matmul,
                             inputs,
                             func_args=[transpose_a, transpose_b],
                             atol_accum=1e-1,
                             dstep=1e-3,
                             ctx=ctx)
Example #3
def test_istft_double_backward(ctx, seed, window_size, stride, fft_size, window_type, center, pad_mode, as_stft_backward):
    backend = ctx.backend[0].split(":")[0]
    if backend == 'cuda':
        pytest.skip('CUDA Convolution N-D is only supported in CUDNN extension')

    if not as_stft_backward:
        if pad_mode != "constant":
            pytest.skip(
                '`pad_mode != "constant"` is only for `as_stft_backward == True`')

    from nbla_test_utils import backward_function_tester
    rng = np.random.RandomState(seed)

    # Generate istft inputs by calling stft
    x_shape = create_stft_input_shape(window_size)
    stft_input = rng.randn(*x_shape).astype(np.float32)
    y_r, y_i = ref_stft(stft_input, window_size, stride,
                        fft_size, window_type, center, pad_mode, False)
    istft_inputs = [y_r, y_i]

    if not as_stft_backward:
        # Skip when the NOLA (nonzero overlap-add) condition is violated;
        # iSTFT cannot reconstruct the signal in that case.
        length = x_shape[1]
        if is_nola_violation(window_type, window_size, stride, fft_size, length, center):
            pytest.skip('NOLA condition violation.')

    rng = np.random.RandomState(seed)
    func_args = [window_size, stride, fft_size,
                 window_type, center, pad_mode, as_stft_backward]
    backward_function_tester(rng, F.istft,
                             inputs=istft_inputs,
                             func_args=func_args,
                             ctx=ctx,
                             atol_accum=6e-2)
Example #4
def test_broadcast_double_backward(align, ndim, broadcast_dim, seed, fname, ctx, func_name):
    from nbla_test_utils import cap_ignore_region, backward_function_tester

    rng = np.random.RandomState(seed)
    shape = rng.randint(2, 5, size=(ndim,))
    inshape = shape.copy()
    inshape[broadcast_dim] = 1

    if ndim == 0:
        # Performing 0-dim array test too.
        inputs = [np.array(rng.randn()).astype("float32")]
        backward_function_tester(rng, F.broadcast,
                                 inputs=inputs,
                                 func_args=[shape], func_kwargs={},
                                 ctx=ctx)

    if not align:
        # Trailing pattern, e.g., inshape = (3, 4), shape = (2, 3, 4)
        if np.all(broadcast_dim) or not np.any(broadcast_dim):
            pytest.skip(
                "An all-true or all-false broadcast_dim does not need to be tested.")

        inshape = inshape[np.logical_not(broadcast_dim)]
        shape1 = shape[broadcast_dim]
        shape0 = shape[np.logical_not(broadcast_dim)]
        # Broadcast dims lead, remaining dims trail; these are arrays, so
        # concatenate explicitly (`+` would add elementwise).
        shape = np.concatenate([shape1, shape0])

    inputs = [np.array(rng.randn(*inshape)).astype("float32")]
    backward_function_tester(rng, F.broadcast, inputs,
                             func_args=[shape], func_kwargs={},
                             dstep=1e-3,
                             ctx=ctx)
Example #5
def test_deconvolution_2d_double_backward(inshape, kernel, outmaps, pad,
                                          stride, dilation, group, with_bias,
                                          channel_last, output_padding,
                                          seed, ctx, func_name):
    from nbla_test_utils import function_tester, backward_function_tester

    if channel_last and not func_name.endswith('Cudnn'):
        pytest.skip('channel_last=True is only supported in CUDNN backend.')
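    # base_axis: the channel axis; dimensions before it are batch dimensions.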
    base_axis = len(inshape) - len(kernel) - 1
    inmaps = inshape[base_axis]
    if channel_last:
        t = refs.ChannelLastToFirstTranspose(len(inshape), len(kernel))
        inshape = tuple(inshape[i] for i in t.inv_axes)
    rng = np.random.RandomState(seed)
    i = rng.randn(*inshape).astype(np.float32)
    kshape = (inmaps,) + (outmaps // group,) + kernel
    if channel_last:
        t = refs.ChannelLastToFirstTranspose(len(kshape), len(kernel))
        kshape = tuple(kshape[i] for i in t.inv_axes)
    k = rng.randn(*kshape).astype(np.float32)
    base_axis = len(inshape) - 3
    b = None
    if with_bias:
        b = rng.randn(outmaps).astype(np.float32)
    inputs = [i, k, b]
    func_args = [base_axis, pad, stride, dilation, group, channel_last,
                 output_padding]
    backward_function_tester(rng, F.deconvolution, None, inputs,
                             func_args=func_args,
                             atol_f=1e-4, atol_b=1e-1, atol_accum=1e-1, dstep=1e-3,
                             ctx=ctx, func_name=func_name)
Example #6
def test_slice_double_backward(seed, inshape, start, stop, step, ctx, fname):
    from nbla_test_utils import backward_function_tester, cap_ignore_region
    rng = np.random.RandomState(seed)
    x = rng.randn(*inshape).astype(np.float32)
    backward_function_tester(rng, F.slice, None, [x], ctx=ctx, func_name=fname,
                             func_args=[start, stop, step], atol_f=1e-4,
                             atol_b=1e-2, atol_accum=1e-2, dstep=1e-4)
Example #7
def test_max_pooling_3d_double_backward(seed, inshape, kernel, stride, pad,
                                        ignore_border, channel_last, ctx,
                                        func_name):
    # pytest.skip('`>3` dimensions are not supported.')
    from nbla_test_utils import backward_function_tester, cap_ignore_region
    if channel_last and not func_name.endswith('Cudnn'):
        pytest.skip('Channel last is only supported in Cudnn so far')
    if channel_last:
        t = refs.ChannelLastToFirstTranspose(len(inshape), len(kernel))
        inshape = tuple(inshape[i] for i in t.inv_axes)
    rng = np.random.RandomState(seed)
    inputs = [rng.randn(*inshape).astype(np.float32)]
    func_args = [kernel, stride, ignore_border, pad, channel_last]
    # 2nd-order
    backward_function_tester(rng,
                             F.max_pooling,
                             inputs=inputs,
                             func_args=func_args,
                             ctx=ctx)
    # 3rd-order
    import nnabla as nn
    y = F.max_pooling(nn.Variable(inputs[0].shape), *func_args)
    ginputs = [rng.randn(*y.shape), inputs[0]]
    backward_function_tester(rng,
                             F.max_pooling_backward,
                             inputs=ginputs,
                             func_args=func_args,
                             ctx=ctx,
                             backward=[True, False],
                             non_accum_check=True)
Example #8
def test_deconvolution_2d_double_backward(inshape, kernel, outmaps, pad,
                                          stride, dilation, group, with_bias,
                                          seed, ctx, func_name):
    from nbla_test_utils import backward_function_tester

    rng = np.random.RandomState(seed)
    i = rng.randn(*inshape).astype(np.float32)
    inmaps = inshape[-3]
    kshape = (inmaps, ) + (outmaps // group, ) + kernel
    k = rng.randn(*kshape).astype(np.float32)
    base_axis = len(inshape) - 3
    b = None
    if with_bias:
        b = rng.randn(outmaps).astype(np.float32) * 1e3  # long tailed
    inputs = [i, k, b]
    backward_function_tester(
        rng,
        F.deconvolution,
        None,
        inputs,
        func_args=[base_axis, pad, stride, dilation, group],
        atol_f=1e-4,
        atol_b=1e-1,
        atol_accum=1e-1,
        dstep=1e-3,
        ctx=ctx,
        func_name=func_name)
Example #9
def test_average_pooling_3d_double_backward(seed, inshape, kernel, stride, pad,
                                            ignore_border, channel_last,
                                            including_pad, ctx, func_name):
    from nbla_test_utils import backward_function_tester
    if channel_last:
        pytest.skip('Channel last is not supported in the double backward.')
    # if channel_last and not func_name.endswith('Cudnn'):
    #     pytest.skip('Channel last is only supported in Cudnn so far')
    # if channel_last:
    #     t = refs.ChannelLastToFirstTranspose(len(inshape), len(kernel))
    #     inshape = tuple(inshape[i] for i in t.inv_axes)
    rng = np.random.RandomState(seed)
    inputs = [rng.randn(*inshape).astype(np.float32)]
    func_args = [
        kernel, stride, ignore_border, pad, channel_last, including_pad
    ]
    backward_function_tester(rng,
                             F.average_pooling,
                             None,
                             inputs=inputs,
                             func_args=func_args,
                             func_name=func_name,
                             ctx=ctx,
                             atol_f=1e-6,
                             atol_b=1e-2,
                             atol_accum=1e-2)
Example #10
def test_softmax_cross_entropy_double_backward(seed, axis, ctx, func_name):
    from nbla_test_utils import backward_function_tester
    ishape = [2, 3, 4]
    rng = np.random.RandomState(seed)

    l_shape = list(ishape)
    l_shape[axis] = 1
    n_class = ishape[axis]

    inputs = [
        rng.randn(*ishape).astype(np.float32) * 2,
        # np.int was removed in NumPy 1.24; use an explicit integer dtype.
        rng.randint(0, n_class, size=l_shape).astype(np.int32)
    ]

    backward_function_tester(rng,
                             F.softmax_cross_entropy,
                             ref_softmax_cross_entropy,
                             inputs,
                             func_args=[axis],
                             backward=[True, False],
                             atol_b=1e-3,
                             atol_accum=1e-3,
                             dstep=1e-3,
                             ctx=ctx,
                             func_name=func_name)
Example #11
def test_max_pooling_3d_double_backward(seed, inshape, kernel, stride, pad,
                                        ignore_border, channel_last, ctx,
                                        func_name):
    # pytest.skip('`>3` dimensions are not supported.')
    # TODO: some tests fail
    from nbla_test_utils import backward_function_tester, cap_ignore_region
    if channel_last:
        pytest.skip('Channel last is not supported in the double backward.')
    # if channel_last and not func_name.endswith('Cudnn'):
    #     pytest.skip('Channel last is only supported in Cudnn so far')
    # if channel_last:
    #     t = refs.ChannelLastToFirstTranspose(len(inshape), len(kernel))
    #     inshape = tuple(inshape[i] for i in t.inv_axes)
    rng = np.random.RandomState(seed)
    inputs = [rng.randn(*inshape).astype(np.float32)]
    func_args = [kernel, stride, ignore_border, pad, channel_last]
    backward_function_tester(rng,
                             F.max_pooling,
                             None,
                             inputs=inputs,
                             func_args=func_args,
                             func_name=func_name,
                             ctx=ctx,
                             atol_f=1e-2,
                             atol_b=1e-1,
                             atol_accum=1e-1,
                             dstep=1e-3)
Example #12
def test_sum_pooling_3d_double_backward(seed, inshape, kernel, stride, pad, ignore_border, channel_last,
                                        ctx, func_name):
    from nbla_test_utils import backward_function_tester, grad_function_forward_function_output
    from nnabla.backward_function.sum_pooling import SumPoolingDataGrad
    if channel_last and not func_name.endswith('Cudnn'):
        pytest.skip('Channel last is only supported in Cudnn so far')
    if channel_last:
        t = refs.ChannelLastToFirstTranspose(len(inshape), len(kernel))
        inshape = tuple(inshape[i] for i in t.inv_axes)
    if not ignore_border and func_name.endswith('Cudnn'):
        pytest.skip('ignore_border=False in Cudnn is not supported.')
    rng = np.random.RandomState(seed)
    inputs = [rng.randn(*inshape).astype(np.float32)]
    func_args = [kernel, stride, ignore_border, pad, channel_last]
    # 2nd-order
    backward_function_tester(rng, F.sum_pooling, inputs=inputs,
                             func_args=func_args, ctx=ctx)
    # 3rd-order
    df, y = grad_function_forward_function_output(SumPoolingDataGrad,
                                                  F.sum_pooling,
                                                  ctx, inputs,
                                                  *func_args)
    df.xshape = inputs[0].shape
    ginputs = [rng.randn(*y.shape)]
    backward_function_tester(rng, df,
                             inputs=ginputs, ctx=ctx, atol_accum=3e-2, non_accum_check=True)
Example #13
def test_gru_double_backward(seed, num_layers, dropout, bidirectional, training,
                             seq_len, batch_size, input_size, hidden_size, with_bias, ctx, func_name):
    from nbla_test_utils import backward_function_tester

    with nn.context_scope(ctx):
        rng = np.random.RandomState(seed)
        num_directions = 1
        if bidirectional:
            num_directions = 2
        inputs = [rng.randn(seq_len, batch_size,
                            input_size).astype(np.float32) * 0.1]
        inputs += [rng.randn(num_layers, num_directions,
                             batch_size, hidden_size).astype(np.float32)]
        inputs += [rng.randn(num_directions, 3, hidden_size,
                             input_size + hidden_size).astype(np.float32)]
        if num_layers > 1:
            inputs += [rng.randn(max(1, num_layers-1), num_directions, 3, hidden_size,
                                 num_directions*hidden_size + hidden_size).astype(np.float32)]
        else:
            inputs += [None]
        if with_bias:
            inputs += [rng.randn(num_layers, num_directions,
                                 4, hidden_size).astype(np.float32)]
        else:
            inputs += [None]

        backward = [training for _ in inputs]

        backward_function_tester(rng, F.gru, inputs, func_kwargs=dict(
            num_layers=num_layers, dropout=dropout, bidirectional=bidirectional,
            training=training), atol_f=1e-6, dstep=1e-3, backward=backward,
                                 ctx=ctx, skip_backward_check=True)
Example #14
def test_unpooling_double_backward(seed, inshape, kernel, channel_last, ctx,
                                   func_name):
    if channel_last and func_name == "Unpooling":
        pytest.skip("Unpooling with channel_last is only supported in CUDA.")
    if channel_last and len(inshape) == len(kernel):
        pytest.skip(
            "len(input shape) == len(kernel) is only valid for the channel first."
        )

    from nbla_test_utils import backward_function_tester, grad_function_forward_function_output
    from nnabla.backward_function.unpooling import UnpoolingDataGrad
    rng = np.random.RandomState(seed)
    inputs = [rng.randn(*inshape).astype(np.float32)]
    func_args = [kernel, channel_last]
    # 2nd-order
    backward_function_tester(rng,
                             F.unpooling,
                             inputs=inputs,
                             func_args=func_args,
                             ctx=ctx)
    # 3rd-order
    df, y = grad_function_forward_function_output(UnpoolingDataGrad,
                                                  F.unpooling, ctx, inputs,
                                                  *func_args)
    df.xshape = inputs[0].shape
    ginputs = [rng.randn(*y.shape)]
    backward_function_tester(rng,
                             df,
                             inputs=ginputs,
                             ctx=ctx,
                             non_accum_check=True)
Example #15
def test_fft_double_backward(seed, ctx, func_name, batch_dims,
                             signal_ndim, dims, normalized):

    if func_name == "IFFTCuda" and sys.platform == 'win32':
        from nnabla_ext import cuda
        if cuda._version.__cuda_version__ == '11.4':
            pytest.skip("Skip win32+CUDA114 tests")

    if func_name == "IFFT":
        pytest.skip("Not implemented in CPU.")

    from nbla_test_utils import backward_function_tester, convert_to_float2_array, convert_to_complex_array
    rng = np.random.RandomState(seed)
    shape = batch_dims + dims
    x_data_complex = rng.rand(*shape) + 1j * rng.rand(*shape)
    x_data = convert_to_float2_array(x_data_complex)
    inputs = [x_data]
    func_args = [signal_ndim, normalized]
    backward_function_tester(rng,
                             F.ifft,
                             inputs,
                             func_args=func_args,
                             atol_f=1e-4,
                             atol_accum=5e-2,
                             backward=[True],
                             ctx=ctx)
Example #16
def test_interpolate_nearest_double_backward(seed, inshape, outsize, scale,
                                             sdim_only, align_corners,
                                             half_pixel, half_pixel_for_nn,
                                             channel_last, ctx, func_name):
    if channel_last and func_name == "Interpolate":
        pytest.skip("Interpolate with channel_last is only supported in CUDA.")
    if sdim_only and channel_last:
        pytest.skip(
            "Interpolate for spatial dimension only data is only supported for channel_first option."
        )

    from nbla_test_utils import backward_function_tester
    rng = np.random.RandomState(seed)
    inputs = [rng.randn(*inshape).astype(np.float32)]
    func_args = [
        scale, outsize, 'nearest', align_corners, half_pixel,
        half_pixel_for_nn, channel_last
    ]
    backward_function_tester(rng,
                             F.interpolate,
                             ref_interpolate,
                             inputs,
                             func_name=func_name,
                             func_args=func_args,
                             atol_f=1e-6,
                             atol_b=1e-2,
                             atol_accum=1e-2,
                             dstep=2e-3,
                             ctx=ctx)
Example #17
def test_pad_constant_double_backward(seed, ctx, func_name, inshape, pad_width,
                                      constant_value):
    from nbla_test_utils import backward_function_tester, grad_function_forward_function_output
    from nnabla.backward_function.pad import PadDataGrad
    rng = np.random.RandomState(seed)
    inputs = [rng.randn(*inshape).astype(np.float32)]
    func_args = [pad_width, "constant", constant_value]
    # 2nd-order
    backward_function_tester(rng, F.pad, inputs, ctx=ctx, func_args=func_args)

    # 3rd-order
    # The constant-padded region receives no gradient, so the constant
    # value is always zero after the 1st-order derivative.
    func_args = [pad_width, "constant", 0]
    df, y = grad_function_forward_function_output(PadDataGrad, F.pad, ctx,
                                                  inputs, *func_args)
    df.xshape = inputs[0].shape
    ginputs = [rng.randn(*y.shape)]
    backward_function_tester(rng,
                             df,
                             ginputs,
                             func_args=[],
                             ctx=ctx,
                             atol_f=1e-6,
                             atol_accum=5e-2,
                             non_accum_check=True)
Example #18
def test_transpose_double_backward(seed, inshape, axes, ctx, func_name):
    from nbla_test_utils import backward_function_tester, grad_function_forward_function_output
    from nnabla.backward_function.transpose import TransposeDataGrad
    rng = np.random.RandomState(seed)
    # Input
    inputs = [rng.randn(*inshape).astype(np.float32)]
    func_args = [axes]
    # 2nd-order
    backward_function_tester(rng,
                             F.transpose,
                             inputs,
                             func_args=func_args,
                             ctx=ctx)
    # 3rd-order
    df, y = grad_function_forward_function_output(TransposeDataGrad,
                                                  F.transpose, ctx, inputs,
                                                  *func_args)
    df.xshape = inputs[0].shape
    ginputs = [rng.randn(*y.shape)]
    backward_function_tester(rng,
                             df,
                             ginputs,
                             func_args=[],
                             ctx=ctx,
                             non_accum_check=True)
Example #19
def test_activation_double_backward(act_name, seed, ctx, func_name):
    from nbla_test_utils import backward_function_tester
    act = getattr(F, act_name)
    rng = np.random.RandomState(seed)
    inputs = [rng.randn(2, 3, 4).astype(np.float32) * 2]

    backward_function_tester(rng, act, inputs, ctx=ctx)
Example #20
def test_softmax_double_backward(seed, axis, ctx, func_name):
    from nbla_test_utils import backward_function_tester
    rng = np.random.RandomState(seed)
    inputs = [rng.randn(2, 3, 4).astype(np.float32) * 2]
    backward_function_tester(rng, F.softmax, None, inputs, func_args=[axis],
                             ctx=ctx, func_name=func_name,
                             atol_b=1e-4, atol_accum=1e-4, dstep=1e-3)
Example #21
def test_concatenate_double_backward(seed, axis, different_size, num_inputs,
                                     ctx, func_name):
    from nbla_test_utils import cap_ignore_region, backward_function_tester, grad_function_forward_function_output
    from nnabla.backward_function.concatenate import ConcatenateDataGrad
    rng = np.random.RandomState(seed)
    shape0 = [2, 3, 4]
    inputs = []
    for i in range(num_inputs):
        inputs.append(rng.randn(*shape0).astype(np.float32))
        shape0[axis] += int(different_size)
    func_kwargs = dict(axis=axis)

    # 2nd-order
    backward_function_tester(rng,
                             F.concatenate,
                             inputs=inputs,
                             func_args=[],
                             func_kwargs=func_kwargs,
                             atol_accum=1e-2,
                             dstep=1e-3,
                             ctx=ctx)
    # 3rd-order
    df, y = grad_function_forward_function_output(ConcatenateDataGrad,
                                                  F.concatenate, ctx, inputs,
                                                  *[], **func_kwargs)
    df.xshapes = [x.shape for x in inputs]
    ginputs = [rng.randn(*y.shape)]
    backward_function_tester(rng, df, ginputs, ctx=ctx, non_accum_check=True)
Example #22
def test_embed_double_backward(seed, shape_x, shape_w, ctx, func_name):
    from nbla_test_utils import backward_function_tester, grad_function_forward_function_output
    from nnabla.backward_function.embed import EmbedFilterGrad
    rng = np.random.RandomState(seed)
    n_class = shape_w[0]
    x = rng.randint(0, n_class - 1, shape_x).astype(np.int32)
    w = rng.randn(*shape_w).astype(np.float32)
    inputs = [x, w]
    # Embed
    backward_function_tester(rng,
                             F.embed,
                             inputs,
                             ctx=ctx,
                             backward=[False, True])
    # FilterGrad
    df, y = grad_function_forward_function_output(EmbedFilterGrad, F.embed,
                                                  ctx, inputs)
    df.wshape = inputs[1].shape
    ginputs = [rng.randn(*y.shape), inputs[0]]
    backward_function_tester(rng,
                             df,
                             ginputs,
                             func_args=[],
                             backward=[True, False],
                             atol_accum=3e-2,
                             dstep=1e-3,
                             ctx=ctx,
                             non_accum_check=True)
Example #23
def test_broadcast_double_backward(ndim, broadcast_dim, seed, fname, ctx, func_name):
    from nbla_test_utils import cap_ignore_region, backward_function_tester

    rng = np.random.RandomState(seed)
    shape = rng.randint(2, 5, size=(ndim,))
    inshape = shape.copy()
    inshape[broadcast_dim] = 1
    if ndim == 0:
        # Performing 0-dim array test too.
        inputs = [np.array(rng.randn()).astype("float32")]
        backward_function_tester(rng, F.broadcast, None,
                                 inputs=inputs,
                                 func_args=[shape], func_kwargs={},
                                 atol_b=1e-3,
                                 atol_accum=1e-3,
                                 dstep=1e-3,
                                 ctx=ctx, func_name=None,
                                 disable_half_test=False)

    inputs = [np.array(rng.randn(*inshape)).astype("float32")]
    backward_function_tester(rng, F.broadcast, None,
                             inputs=inputs,
                             func_args=[shape], func_kwargs={},
                             atol_f=1e-3,
                             atol_b=2e-2,
                             atol_accum=2e-2,
                             dstep=1e-3,
                             ctx=ctx, func_name=None,
                             disable_half_test=False)
Example #24
def test_r_pow_scalar_double_backward(seed, val, ctx, func_name):
    from nbla_test_utils import backward_function_tester
    rng = np.random.RandomState(seed)
    inputs = [rng.randn(2, 3, 4).astype(np.float32) * 2]
    backward_function_tester(rng, F.r_pow_scalar, inputs,
                             func_args=[val], atol_accum=3e-2,
                             dstep=1e-3,
                             ctx=ctx)
Example #25
def test_sign_double_backward(seed, alpha, ctx, func_name):
    from nbla_test_utils import backward_function_tester
    rng = np.random.RandomState(seed)
    inputs = [rng.randn(2, 3, 4).astype(np.float32) * 2]

    backward_function_tester(rng, F.sign, inputs, func_args=[alpha],
                             ctx=ctx,
                             dstep=1e-3)
Example #26
def test_crelu_double_backward(seed, axis, ctx, func_name):
    from nbla_test_utils import cap_ignore_region, backward_function_tester
    rng = np.random.RandomState(seed)
    inputs = [
        cap_ignore_region(
            rng.randn(2, 3, 4).astype(np.float32) * 2, (-1e-3, 1e-3))
    ]
    backward_function_tester(rng, F.crelu, inputs, func_args=[axis], ctx=ctx)
Example #27
def test_epsilon_insensitive_loss_double_backward(seed, ctx, func_name, epsilon):
    from nbla_test_utils import backward_function_tester
    rng = np.random.RandomState(seed)
    inputs = [rng.randn(2, 3, 4).astype(np.float32) * 2 for _ in range(2)]
    backward_function_tester(rng, F.epsilon_insensitive_loss,
                             None, inputs,
                             func_args=[epsilon],
                             atol_b=5e-3, atol_accum=5e-3, ctx=ctx, func_name=func_name)
Example #28
def test_double_backward(seed, ishape, index, oshape, ctx, func_name):
    from nbla_test_utils import backward_function_tester
    rng = np.random.RandomState(seed)
    inputs = [rng.randn(*ishape).astype(np.float32),
              np.array(index)]  # scatter_nd indices must stay integral
    backward_function_tester(rng, F.scatter_nd, inputs,
                             func_args=[oshape], ctx=ctx, backward=[
                                 True, False],
                             atol_accum=1e-2)
Example #29
def test_norm_double_backward(seed, p, axis, keepdims, inshape, ctx, func_name):
    from nbla_test_utils import backward_function_tester
    rng = np.random.RandomState(seed)
    inputs = [rng.randn(*inshape).astype(np.float32)]
    backward_function_tester(rng, F.norm,
                             inputs=inputs,
                             func_args=[p, axis, keepdims],
                             ctx=ctx)
Example #30
def test_softplus_double_backward(seed, beta, ctx, func_name):
    from nbla_test_utils import cap_ignore_region, backward_function_tester
    rng = np.random.RandomState(seed)
    inputs = [rng.randn(2, 3, 4).astype(np.float32) * 2]
    backward_function_tester(rng,
                             F.softplus,
                             inputs,
                             func_args=[beta],
                             ctx=ctx)
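
A note on the tolerances used throughout: `dstep` sets the step of a central-difference approximation and `atol_b` / `atol_accum` bound its deviation from the analytic backward pass. A standalone sketch of that comparison in plain NumPy (an illustration of the idea, not the actual `nbla_test_utils` implementation):

import numpy as np

def numerical_grad(f, x, dstep=1e-3):
    # Central differences: perturb one element at a time.
    g = np.zeros_like(x)
    it = np.nditer(x, flags=['multi_index'])
    for _ in it:
        idx = it.multi_index
        orig = x[idx]
        x[idx] = orig + dstep
        fp = f(x)
        x[idx] = orig - dstep
        fm = f(x)
        x[idx] = orig  # restore the perturbed element
        g[idx] = (fp - fm) / (2 * dstep)
    return g

# The gradient of sum(x**2) is 2*x; the numerical estimate should agree
# within tolerances comparable to the atol values used above.
x = np.random.randn(2, 3)
assert np.allclose(numerical_grad(lambda a: (a ** 2).sum(), x), 2 * x, atol=1e-4)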