Example 1
def test_gradients(backend_tests, custom_args):
    test_idx, f, flag, dim = custom_args

    # backend_tests fixture will parameterize over cpu and gpu
    # backends as well as float16 and float32
    # pull be and dtype set up by the fixture
    be = NervanaObject.be
    dtype = be.default_dtype

    # tensors
    tensors = gen_backend_tensors([np, be],
                                  5, [dim] * 5, [flag] * 5,
                                  dtype=dtype)

    # compare function value and gradient
    numpy_func_val = call_func(f, np, tensors[0])
    backend_func_val = call_func(f, be, tensors[1])
    numerical_gradient = get_numerical_gradient(f, tensors[0])
    ad = get_audiff_gradient(f, be, tensors[1])
    autodiff_gradient = ad.get_grad_asnumpyarray(tensors[1])

    # TODO: stricter test to fix numerical issues
    assert_tensors_allclose(numpy_func_val,
                            backend_func_val,
                            rtol=1e-2,
                            atol=1e-2)
    assert_tensors_allclose(numerical_gradient,
                            autodiff_gradient,
                            rtol=1e-02,
                            atol=1e-3)

    # cleanup diff tree
    ad.cleanup()
    dtype = None
    be = None
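
All six snippets are excerpted from neon's backend test suite and omit their imports. A hedged reconstruction of the missing header, assuming the comparison helpers live in a local utils module next to the tests (exact module paths vary across neon releases):

import numpy as np
from neon import NervanaObject           # assumed path
from neon.backends import gen_backend    # used by Examples 5 and 6
# from neon.backends.nervanagpu import NervanaGPU   # used by Example 3; assumed path
# the test helpers are assumed to live alongside the tests:
from utils import (gen_backend_tensors, call_func, get_numerical_gradient,
                   get_audiff_gradient, tensors_allclose, assert_tensors_allclose)
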
Example 2
def test_gradients(backend_tests, custom_args):
    test_idx, f, flag, dim = custom_args

    # backend_tests fixture will parameterize over cpu and gpu
    # backends as well as float16 and float32
    # pull be and dtype set up by the fixture
    be = NervanaObject.be
    dtype = be.default_dtype

    # tensors
    tensors = gen_backend_tensors([np, be], [dim] * 5, [flag] * 5, dtype=dtype)

    # compare function value and gradient
    numpy_func_val = call_func(f, np, tensors[0])
    backend_func_val = call_func(f, be, tensors[1])
    numerical_gradient = get_numerical_gradient(f, tensors[0])
    ad = get_audiff_gradient(f, be, tensors[1])
    autodiff_gradient = ad.get_grad_asnumpyarray(tensors[1])

    # TODO: stricter test to fix numerical issues
    assert tensors_allclose(numpy_func_val, backend_func_val, rtol=1e-2, atol=1e-2)
    assert tensors_allclose(numerical_gradient, autodiff_gradient, rtol=1e-02, atol=1e-3)

    # cleanup diff tree
    ad.cleanup()
    dtype = None
    be = None
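
Examples 1 and 2 rely on a backend_tests fixture to stand up the backend and a custom_args fixture to supply (test_idx, f, flag, dim). A minimal pytest sketch of how such a parameterized fixture could look; the expression strings, flags, and dimensions below are hypothetical placeholders, not values from the neon suite:

import itertools
import pytest

FUNCS = ["x0 + x1 * x2", "x3 / (x4 + 1.0)"]  # assumed expression format
FLAGS = ["pos_rand", "rand"]                  # assumed init flags
DIMS = [(16, 16), (32, 32)]

@pytest.fixture(params=[(i,) + combo for i, combo in
                        enumerate(itertools.product(FUNCS, FLAGS, DIMS))])
def custom_args(request):
    # yields (test_idx, f, flag, dim) tuples, matching the unpacking
    # at the top of test_gradients
    return request.param
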
Example 3
def test_gradients(custom_args):
    test_idx, f, flag, dim, dtype, backend_type = custom_args

    be = backend_type(default_dtype=dtype)

    # tensors
    tensors = gen_backend_tensors([np, be],
                                  5, [dim] * 5, [flag] * 5,
                                  dtype=dtype)

    # compare function value and gradient
    numpy_func_val = call_func(f, np, tensors[0])
    backend_func_val = call_func(f, be, tensors[1])
    numerical_gradient = get_numerical_gradient(f, tensors[0])
    autodiff_gradient = get_audiff_gradient(f, be, tensors[1])

    # TODO: stricter test to fix numerical issues
    assert_tensors_allclose(numpy_func_val,
                            backend_func_val,
                            rtol=0.,
                            atol=1e-2)
    assert_tensors_allclose(numerical_gradient,
                            autodiff_gradient,
                            rtol=1e-02,
                            atol=1e-3)

    if backend_type is NervanaGPU:
        be.ctx.detach()
    del be
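
Unlike the fixture-based variants, Example 3 constructs the backend directly from backend_type and tears it down by hand, detaching the CUDA context when the backend is a NervanaGPU. A sketch of the same setup and teardown with the cleanup moved into a try/finally, so the context is released even when an assertion fails (the constructor kwarg and the ctx attribute come from the example itself; the wrapper is an assumption):

import numpy as np

def run_with_backend(backend_type, work, dtype=np.float32):
    be = backend_type(default_dtype=dtype)   # direct construction, as in Example 3
    try:
        return work(be)
    finally:
        # mirror the `backend_type is NervanaGPU` check without the import
        if type(be).__name__ == "NervanaGPU":
            be.ctx.detach()                  # release the CUDA context
        del be
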
Example 4
def test_vs_numpy(backend_tests, custom_args):
    test_idx, f, flag, dim = custom_args

    # backend
    be = NervanaObject.be
    dtype = be.default_dtype

    # tensors
    tensors = gen_backend_tensors([np, be], [dim] * 4, [flag] * 4, dtype=dtype)

    # compare function values
    numpy_func_val = call_func(f, np, tensors[0])
    backend_func_val = call_func(f, be, tensors[1])

    assert tensors_allclose(numpy_func_val, backend_func_val, rtol=1e-2, atol=1e-2)
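
Note the two comparison idioms across these snippets: Examples 2 and 4 use a bare assert on a tensors_allclose predicate, while the others call assert_tensors_allclose, which presumably raises a diagnostic error on mismatch. A numpy-only stand-in for the predicate form, assuming the inputs convert cleanly to ndarrays (the real helper presumably also copies backend tensors back to host first):

import numpy as np

def tensors_allclose(outputs, references, rtol=1e-2, atol=1e-2):
    # hypothetical stand-in with the same rtol/atol semantics as numpy.allclose
    return np.allclose(np.asarray(outputs), np.asarray(references),
                       rtol=rtol, atol=atol)
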
Example 5
def test_vs_numpy(custom_args):
    test_idx, f, flag, dim, dtype, backend_type = custom_args

    # backend
    be = gen_backend(backend_type, default_dtype=dtype)

    # tensors
    tensors = gen_backend_tensors(
        [np, be], 5, [dim] * 5, [flag] * 5, dtype=dtype)

    # compare function values
    numpy_func_val = call_func(f, np, tensors[0])
    backend_func_val = call_func(f, be, tensors[1])

    assert_tensors_allclose(
        numpy_func_val, backend_func_val, rtol=1e-2, atol=1e-2)
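
Every snippet builds its inputs through gen_backend_tensors, which in the five-argument form of Examples 1, 3, 5 and 6 takes the list of backends, a tensor count, per-tensor dimensions, and per-tensor flags. A plausible sketch of what it does, with the flag handling simplified to strictly positive random initialization (the be.array upload call and the role of flags are assumptions):

import numpy as np

def gen_backend_tensors(backends, n_tensors, dims, flags, dtype=np.float32):
    # Draw each tensor once in numpy, then mirror it into every backend so
    # numpy and the neon backend compute on identical inputs. 'flags' is
    # assumed to pick the initialization scheme; this sketch always draws
    # strictly positive values so log/sqrt-style ops stay defined.
    assert len(dims) == n_tensors and len(flags) == n_tensors
    base = [np.random.uniform(0.1, 1.0, size=d).astype(dtype) for d in dims]
    out = []
    for be in backends:
        if be is np:
            out.append([v.copy() for v in base])
        else:
            out.append([be.array(v) for v in base])  # assumed upload API
    return out
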
Example 6
def test_gradients(custom_args):
    test_idx, f, flag, dim, dtype, backend_type = custom_args
    be = gen_backend(backend_type, default_dtype=dtype)

    # tensors
    tensors = gen_backend_tensors(
        [np, be], 5, [dim] * 5, [flag] * 5, dtype=dtype)

    # compare function value and gradient
    numpy_func_val = call_func(f, np, tensors[0])
    backend_func_val = call_func(f, be, tensors[1])
    numerical_gradient = get_numerical_gradient(f, tensors[0])
    autodiff_gradient = get_audiff_gradient(f, be, tensors[1])

    # TODO: stricter test to fix numerical issues
    assert_tensors_allclose(
        numpy_func_val, backend_func_val, rtol=1e-2, atol=1e-2)
    assert_tensors_allclose(
        numerical_gradient, autodiff_gradient, rtol=1e-02, atol=1e-3)
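
The gradient checks compare the backend's autodiff result against get_numerical_gradient, which presumably approximates the gradient on the numpy side by finite differences. A minimal central-difference sketch, assuming f has already been wrapped into a plain callable over the list of numpy tensors (e.g. via call_func):

import numpy as np

def numerical_gradient(func, tensors, eps=1e-4):
    # central differences: perturb one element at a time and approximate
    # d(sum(func(tensors))) / d(tensors[i][idx])
    grads = [np.zeros_like(t) for t in tensors]
    for t, g in zip(tensors, grads):
        it = np.nditer(t, flags=["multi_index"])
        while not it.finished:
            idx = it.multi_index
            orig = t[idx]
            t[idx] = orig + eps
            f_plus = np.sum(func(tensors))
            t[idx] = orig - eps
            f_minus = np.sum(func(tensors))
            t[idx] = orig
            g[idx] = (f_plus - f_minus) / (2 * eps)
            it.iternext()
    return grads

An approximation like this is accurate only to O(eps^2) plus rounding error, which is consistent with the loose tolerances (rtol=1e-2, atol=1e-3) used in the asserts above.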