Example #1
def test_r_div_scalar_forward_backward(seed, val, ctx, func_name):
    from nbla_test_utils import function_tester, cap_ignore_region
    rng = np.random.RandomState(seed)
    inputs = [
        cap_ignore_region(
            rng.randn(2, 3, 4).astype(np.float32) * 3, (-0.5, 0.5))]
    function_tester(rng, F.r_div_scalar, lambda x, y: y / x, inputs,
                    func_args=[val], dstep=1e-4, atol_b=1e-1,
                    ctx=ctx, func_name=func_name)
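Every example on this page preprocesses its random inputs with cap_ignore_region so that no sample falls in a region where the function under test is singular or non-smooth (here, away from x = 0, where y / x diverges and a finite-difference gradient check cannot work). The helper lives in nbla_test_utils; a minimal sketch of its presumed behavior, assuming values strictly inside the interval are clamped to the interval's lower bound:

import numpy as np

def cap_ignore_region(arr, region):
    # Clamp every element strictly inside (lo, hi) to lo, so no input
    # lands in the region the numerical gradient check must avoid.
    lo, hi = sorted(region)
    arr = arr.copy()
    arr[np.logical_and(arr > lo, arr < hi)] = lo
    return arr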
Example #2
def test_crelu_forward_backward(seed, axis, ctx, func_name):
    from nbla_test_utils import cap_ignore_region, function_tester
    rng = np.random.RandomState(seed)
    inputs = [
        cap_ignore_region(
            rng.randn(2, 3, 4).astype(np.float32) * 2,
            (-1e-3, 1e-3))]
    function_tester(rng, F.crelu, ref_crelu, inputs, func_args=[axis],
                    ctx=ctx, func_name=func_name, atol_b=1e-2)
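ref_crelu is defined elsewhere in the test module and not shown here. A plausible NumPy reference, assuming CReLU concatenates ReLU(x) and ReLU(-x) along the given axis (doubling that dimension):

import numpy as np

def ref_crelu(x, axis=1):
    # Stack the positive part of x and the positive part of -x.
    return np.concatenate([np.maximum(x, 0), np.maximum(-x, 0)], axis=axis)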
Example #3
File: test_abs.py  Project: zwsong/nnabla
def test_abs_forward_backward(seed, ctx, func_name):
    from nbla_test_utils import cap_ignore_region, function_tester
    rng = np.random.RandomState(seed)
    inputs = [
        cap_ignore_region(
            rng.randn(2, 3, 4).astype(np.float32) * 2,
            (-1e-3, 1e-3))]
    function_tester(rng, F.abs, np.abs, inputs,
                    ctx=ctx, func_name=func_name)
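The (-1e-3, 1e-3) cap matters here because np.abs has a kink at zero, where a central finite difference does not match the analytic gradient used by the backward pass. A quick numeric illustration, with a representative dstep of 1e-3:

import numpy as np

dstep = 1e-3
x = 1e-4  # inside the ignored region
fd = (np.abs(x + dstep) - np.abs(x - dstep)) / (2 * dstep)
print(fd)  # 0.1, while the analytic gradient sign(x) is 1.0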
Example #4
def test_absolute_error_forward_backward(seed, ctx, func_name):
    from nbla_test_utils import cap_ignore_region, function_tester
    rng = np.random.RandomState(seed)
    inputs = []
    for _ in range(2):
        inputs.append(
            cap_ignore_region(
                rng.randn(2, 3).astype(np.float32) * 2,
                (-1e-3, 1e-3)))

    function_tester(rng, F.absolute_error, lambda x, y: np.abs(x - y), inputs,
                    ctx=ctx, func_name=func_name,
                    atol_b=1e-2)  # NOTE: with atol_b=1e-3, a numerical error occurs.
Example #5
def test_r_div_scalar_forward_backward(seed, val, ctx, func_name):
    from nbla_test_utils import function_tester, cap_ignore_region
    rng = np.random.RandomState(seed)
    inputs = [
        cap_ignore_region(
            rng.randn(2, 3, 4).astype(np.float32) * 3, (-0.5, 0.5))
    ]
    function_tester(rng,
                    F.r_div_scalar,
                    lambda x, y: y / x,
                    inputs,
                    func_args=[val],
                    dstep=1e-4,
                    atol_b=1e-1,
                    ctx=ctx,
                    func_name=func_name)
Example #6
def test_floor_forward_backward(seed, ctx, func_name):
    from nbla_test_utils import cap_ignore_region, function_tester
    rng = np.random.RandomState(seed)
    inputs = [
        cap_ignore_region(
            rng.randn(2, 3, 4).astype(np.float32) * 2,
            (-1e-3, 1e-3))]

    function_tester(rng, F.floor,
                    ref_floor,
                    inputs,
                    atol_b=1e-3, backward=[True],
                    ctx=ctx, func_name=func_name,
                    ref_grad=ref_grad_floor)
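ref_floor and ref_grad_floor are defined elsewhere in the test file. floor is piecewise constant, so its gradient is zero almost everywhere; a plausible pair of references under that assumption (the exact ref_grad signature expected by function_tester may differ):

import numpy as np

def ref_floor(x):
    return np.floor(x)

def ref_grad_floor(x, dy, **kwargs):
    # floor is flat between integers, so the propagated gradient is zero.
    return np.zeros_like(dy).flatten()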
Example #7
def test_leaky_relu_double_backward(seed, ctx, func_name, alpha, inplace):
    from nbla_test_utils import cap_ignore_region, backward_function_tester
    rng = np.random.RandomState(seed)
    inputs = [
        cap_ignore_region(
            rng.randn(2, 3, 4).astype(np.float32) * 2, (-1e-3, 1e-3))
    ]
    backward_function_tester(rng,
                             F.leaky_relu,
                             inputs=inputs,
                             func_args=[alpha, inplace],
                             func_kwargs={},
                             atol_accum=1e-3,
                             dstep=1e-3,
                             backward_b=[True, False],
                             ctx=ctx)
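No reference function is passed here; backward_function_tester presumably checks the double backward against numerical differentiation of the function's own first backward. For context, LeakyReLU computes y = x for x > 0 and y = alpha * x otherwise; a NumPy sketch of the forward pass:

import numpy as np

def ref_leaky_relu(x, alpha=0.1):
    return np.where(x > 0, x, alpha * x)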
Example #8
def test_crelu_double_backward(seed, axis, ctx, func_name):
    from nbla_test_utils import cap_ignore_region, backward_function_tester
    rng = np.random.RandomState(seed)
    inputs = [
        cap_ignore_region(
            rng.randn(2, 3, 4).astype(np.float32) * 2, (-1e-3, 1e-3))
    ]
    backward_function_tester(rng,
                             F.crelu,
                             None,
                             inputs,
                             func_args=[axis],
                             ctx=ctx,
                             func_name=func_name,
                             atol_b=1e-3,
                             atol_accum=1e-3)
Example #9
def test_r_sub_scalar_double_backward(seed, val, ctx, func_name):
    from nbla_test_utils import backward_function_tester, cap_ignore_region
    rng = np.random.RandomState(seed)
    inputs = [
        cap_ignore_region(rng.randn(2, 3).astype(np.float32) * 3, (-0.5, 0.5))
    ]
    backward_function_tester(rng,
                             F.r_sub_scalar,
                             None,
                             inputs=inputs,
                             func_args=[val],
                             func_kwargs={},
                             atol_b=1e-3,
                             atol_accum=1e-3,
                             dstep=1e-3,
                             ctx=ctx,
                             func_name=None,
                             disable_half_test=False)
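r_sub_scalar computes val - x, i.e. the scalar is the left operand, mirroring r_div_scalar's y / x in Example #1. A hypothetical forward reference, were one needed:

def ref_r_sub_scalar(x, y):
    # y is the scalar `val`; the input x is the right operand.
    return y - x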
Example #10
def test_norm_normalization_forward_backward(eps, axis, p, shape, seed, ctx, func_name):
    from nbla_test_utils import cap_ignore_region, function_tester
    from sys import platform
    if platform == "darwin":
        pytest.skip("NormNormalization is not supported in macOS.")

    rng = np.random.RandomState(seed)
    inputs = [cap_ignore_region(
        rng.randn(*shape).astype(np.float32) * 2, (-1e-3, 1e-3))]
    func_args = [p, axis, eps]
    function_tester(rng, F.norm_normalization, ref_norm_normalization, inputs,
                    ctx=ctx, func_name=func_name, func_args=func_args,
                    backward=[True], disable_half_test=False,
                    # The backward test on macOS doesn't pass with this tolerance.
                    # Does the Eigen library used in the CPU computation backend
                    # produce different results on different platforms?
                    # atol_b=3e-3,
                    atol_b=1e-2, atol_accum=1e-2)
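ref_norm_normalization is defined elsewhere in the test file. A sketch of a plausible reference, assuming the function divides x by its Lp norm over the given axes; where exactly eps enters (inside the root, outside it, or as a floor on the norm) is an assumption here, so check the actual reference in the repository:

import numpy as np

def ref_norm_normalization(x, p, axis, eps=1e-12):
    # Lp norm over `axis`, kept broadcastable against x.
    norm = np.sum(np.abs(x) ** p, axis=axis, keepdims=True) ** (1. / p)
    return x / (norm + eps)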
Example #11
def test_norm_normalization_double_backward(eps, axis, p, shape, seed, ctx,
                                            func_name):
    from sys import platform
    if platform == "darwin":
        pytest.skip("NormNormalization is not supported in macOS.")

    from nbla_test_utils import cap_ignore_region, backward_function_tester
    rng = np.random.RandomState(seed)
    inputs = [
        cap_ignore_region(
            rng.randn(*shape).astype(np.float32) * 2, (-1e-3, 1e-3))
    ]
    func_args = [p, axis, eps]
    backward_function_tester(rng,
                             F.norm_normalization,
                             inputs=inputs,
                             func_args=func_args,
                             ctx=ctx)
Example #12
def test_hard_tanh_double_backward(seed, ctx, func_name):
    from nbla_test_utils import backward_function_tester, cap_ignore_region
    rng = np.random.RandomState(seed)
    inputs = [
        cap_ignore_region(rng.randn(2, 3).astype(np.float32), (-0.9, 0.9))
    ]
    backward_function_tester(rng,
                             F.hard_tanh,
                             None,
                             inputs=inputs,
                             func_args=[],
                             func_kwargs={},
                             atol_b=1e-3,
                             atol_accum=1e-3,
                             dstep=1e-3,
                             ctx=ctx,
                             func_name=None,
                             disable_half_test=True)
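hard_tanh clips its input to [-1, 1]. A NumPy reference for the forward pass, assuming the standard definition y = min(1, max(-1, x)):

import numpy as np

def ref_hard_tanh(x):
    # Identity on (-1, 1), saturating at the boundaries.
    return np.clip(x, -1.0, 1.0)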
Example #13
def test_relu_double_backward(seed, ctx, func_name):
    from nbla_test_utils import cap_ignore_region, backward_function_tester
    rng = np.random.RandomState(seed)
    inputs = [
        cap_ignore_region(
            rng.randn(2, 3, 4).astype(np.float32) * 2, (-1e-3, 1e-3))
    ]
    backward_function_tester(rng,
                             F.relu,
                             None,
                             inputs=inputs,
                             func_args=[],
                             func_kwargs={},
                             atol_b=1e-3,
                             atol_accum=1e-3,
                             dstep=1e-3,
                             ctx=ctx,
                             func_name=None,
                             disable_half_test=False)
Example #14
def test_dropout_forward_backward(p, seed, ctx, func_name):
    from nbla_test_utils import cap_ignore_region, function_tester
    rng = np.random.RandomState(seed)
    inputs = [
        cap_ignore_region(
            rng.randn(2, 3, 4).astype(np.float32) * 2, (-1e-3, 1e-3))
    ]  # Ensure there are no zeros.
    i = nn.Variable(inputs[0].shape, need_grad=True)
    i.d = inputs[0]
    # NNabla forward
    with nn.context_scope(ctx), nn.auto_forward():
        o = F.dropout(i, p)
    scale = 1. / (1. - p)
    mask = o.d != 0
    assert_allclose(o.d, i.d * mask * scale)
    assert o.parent.name == func_name

    # NNabla backward
    orig_grad = rng.randn(*i.shape).astype(i.data.dtype)
    i.g[...] = orig_grad
    o_grad = rng.randn(*i.shape).astype(i.data.dtype)
    o.backward(o_grad)
    ref_grad = o_grad * mask * scale

    # Verify
    assert_allclose(i.g, orig_grad + ref_grad)

    # Check if accum option works
    i.g[...] = 1
    o.g = o_grad
    o.parent.backward([i], [o], [False])
    assert_allclose(i.g, ref_grad)

    # Check accum=False with NaN gradient
    i.g = np.float32('nan')
    o.parent.backward([i], [o], [False])
    assert not np.any(np.isnan(i.g))

    # Check if need_grad works
    i.g[...] = 0
    i.need_grad = False
    o.backward(o_grad)
    assert np.all(i.g == 0)
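This test relies on the inverted-dropout identity: kept units are rescaled by 1 / (1 - p) so that the expected output equals the input, and the same mask * scale factor reappears in the backward pass. A quick standalone check of the identity:

import numpy as np

p = 0.5
scale = 1. / (1. - p)
rng = np.random.RandomState(0)
x = np.ones(1000000, dtype=np.float32)
mask = rng.rand(x.size) >= p  # keep each unit with probability 1 - p
y = x * mask * scale
print(y.mean())  # ~1.0: E[y] equals x under inverted dropout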
Example #15
def test_dropout_forward_backward(p, seed, ctx, func_name):
    from nbla_test_utils import cap_ignore_region, function_tester
    rng = np.random.RandomState(seed)
    inputs = [
        cap_ignore_region(
            rng.randn(2, 3, 4).astype(np.float32) * 2,
            (-1e-3, 1e-3))]  # Ensure there are no zeros.
    i = nn.Variable(inputs[0].shape, need_grad=True)
    i.d = inputs[0]
    # NNabla forward
    with nn.context_scope(ctx), nn.auto_forward():
        o = F.dropout(i, p)
    scale = 1. / (1. - p)
    mask = o.d != 0
    assert np.allclose(o.d, i.d * mask * scale)
    assert o.parent.name == func_name

    # NNabla backward
    orig_grad = rng.randn(*i.shape).astype(i.data.dtype)
    i.g[...] = orig_grad
    o_grad = rng.randn(*i.shape).astype(i.data.dtype)
    o.backward(o_grad)
    ref_grad = o_grad * mask * scale

    # Verify
    assert np.allclose(i.g, orig_grad + ref_grad)

    # Check if accum option works
    i.g[...] = 1
    o.g = o_grad
    o.parent.backward([i], [o], [False])
    assert np.allclose(i.g, ref_grad)

    # Check accum=False with NaN gradient
    i.g = np.float32('nan')
    o.parent.backward([i], [o], [False])
    assert not np.any(np.isnan(i.g))

    # Check if need_grad works
    i.g[...] = 0
    i.need_grad = False
    o.backward(o_grad)
    assert np.all(i.g == 0)
Example #16
def test_absolute_error_forward_backward(seed, ctx, func_name):
    from nbla_test_utils import cap_ignore_region, function_tester
    rng = np.random.RandomState(seed)
    inputs = []
    for _ in range(2):
        inputs.append(
            cap_ignore_region(
                rng.randn(2, 3).astype(np.float32) * 2, (-1e-3, 1e-3)))

    function_tester(
        rng,
        F.absolute_error,
        lambda x, y: np.abs(x - y),
        inputs,
        ctx=ctx,
        func_name=func_name,
        atol_b=1e-2)  # NOTE: with atol_b=1e-3, a numerical error occurs.
Example #17
def test_dropout_double_backward(p, seed, ctx, func_name):
    from nbla_test_utils import cap_ignore_region, backward_function_tester

    rng = np.random.RandomState(seed)
    inpd = cap_ignore_region(
        rng.randn(2, 3, 4).astype(np.float32) * 2,
        (-1e-3, 1e-3))  # Ensure there are no zeros.
    inp = nn.Variable.from_numpy_array(inpd).apply(need_grad=True)
    # ONLY test the double backward
    with nn.context_scope(ctx):
        dout = F.dropout(inp, p, seed)
        out = F.sigmoid(dout)

    # Check the gradient w.r.t. dy only, since there is no backward w.r.t. x
    grads = nn.grad([out], [inp])
    grad = grads[0]
    grad.forward()
    grad.backward(1.0, clear_buffer=False)
    g_dy = grad.parent.inputs[1].g
    scale = 1. / (1. - p)
    mask = dout.d != 0
    assert np.allclose(g_dy, mask * scale)
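The final assertion holds because dropout's backward pass, dx = dy * mask * scale, is linear in dy, so its derivative w.r.t. dy is exactly mask * scale. A small NumPy sketch of that linearity:

import numpy as np

mask = np.array([1., 0., 1.])
scale = 2.0  # 1 / (1 - p) with p = 0.5
dy = np.array([0.3, -1.2, 0.7])
dx = dy * mask * scale  # dropout backward: linear in dy
eps = 1e-3
dx_eps = (dy + eps) * mask * scale  # perturb the incoming gradient
print((dx_eps - dx) / eps)  # ~[2., 0., 2.], i.e. mask * scale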