# Example 1
def test_gradients(backend_tests, custom_args):
    """Check an expression's value and autodiff gradient on the active backend.

    The backend's function value is compared against a NumPy reference, and
    the autodiff gradient against a numerical (finite-difference) gradient.

    Arguments:
        backend_tests: fixture that parameterizes over cpu, gpu, and mkl
            backends as well as float16 and float32 dtypes.
        custom_args: tuple ``(test_idx, f, flag, dim)`` — expression ``f``,
            tensor-generation flag, and tensor dimensions.
    """
    test_idx, f, flag, dim = custom_args

    # pull the backend and dtype set up by the backend_tests fixture
    be = NervanaObject.be
    dtype = be.default_dtype

    # matching NumPy / backend operand tensors (5 operands per expression)
    tensors = gen_backend_tensors([np, be], [dim] * 5, [flag] * 5, dtype=dtype)

    # compare function value and gradient
    numpy_func_val = call_func(f, np, tensors[0])
    backend_func_val = call_func(f, be, tensors[1])
    numerical_gradient = get_numerical_gradient(f, tensors[0])
    ad = get_audiff_gradient(f, be, tensors[1])
    autodiff_gradient = ad.get_grad_asnumpyarray(tensors[1])

    # TODO: stricter test to fix numerical issues
    assert tensors_allclose(numpy_func_val, backend_func_val, rtol=1e-2, atol=1e-2)
    assert tensors_allclose(numerical_gradient, autodiff_gradient, rtol=1e-02, atol=1e-3)

    # cleanup diff tree (rebinding the locals be/dtype to None afterwards was
    # a no-op — they go out of scope when the function returns — so removed)
    ad.cleanup()
# Example 2
def test_gradients(backend_tests, custom_args):
    """Check an expression's value and autodiff gradient on the active backend.

    The backend's function value is compared against a NumPy reference, and
    the autodiff gradient against a numerical (finite-difference) gradient.

    Arguments:
        backend_tests: fixture that parameterizes over cpu, gpu, and mkl
            backends as well as float16 and float32 dtypes.
        custom_args: tuple ``(test_idx, f, flag, dim)`` — expression ``f``,
            tensor-generation flag, and tensor dimensions.
    """
    test_idx, f, flag, dim = custom_args

    # pull the backend and dtype set up by the backend_tests fixture
    be = NervanaObject.be
    dtype = be.default_dtype

    # matching NumPy / backend operand tensors (5 operands per expression)
    tensors = gen_backend_tensors([np, be], [dim] * 5, [flag] * 5, dtype=dtype)

    # compare function value and gradient
    numpy_func_val = call_func(f, np, tensors[0])
    backend_func_val = call_func(f, be, tensors[1])
    numerical_gradient = get_numerical_gradient(f, tensors[0])
    ad = get_audiff_gradient(f, be, tensors[1])
    autodiff_gradient = ad.get_grad_asnumpyarray(tensors[1])

    # TODO: stricter test to fix numerical issues
    assert tensors_allclose(numpy_func_val,
                            backend_func_val,
                            rtol=1e-2,
                            atol=1e-2)
    assert tensors_allclose(numerical_gradient,
                            autodiff_gradient,
                            rtol=1e-02,
                            atol=1e-3)

    # cleanup diff tree (rebinding the locals be/dtype to None afterwards was
    # a no-op — they go out of scope when the function returns — so removed)
    ad.cleanup()
def test_vs_numpy(backend_tests, custom_args):
    """Evaluate the expression with NumPy and the backend; values must agree."""
    _, expr, tensor_flag, tensor_dim = custom_args

    # backend and dtype come from the backend_tests fixture
    backend = NervanaObject.be
    precision = backend.default_dtype

    # parallel NumPy / backend operand tensors (4 per expression)
    operand_sets = gen_backend_tensors(
        [np, backend], [tensor_dim] * 4, [tensor_flag] * 4, dtype=precision)

    # evaluate on both and require approximate agreement (loose tolerances)
    reference_val = call_func(expr, np, operand_sets[0])
    backend_val = call_func(expr, backend, operand_sets[1])
    assert tensors_allclose(reference_val, backend_val, rtol=1e-2, atol=1e-2)
# Example 4
def test_vs_numpy(backend_tests, custom_args):
    """Compare a backend evaluation of the expression against NumPy."""
    # unpack the parameterized case; the index itself is unused here
    case_idx, func, gen_flag, shape = custom_args

    # the backend_tests fixture has already installed the backend
    be = NervanaObject.be

    # four operand tensors per expression, built for NumPy and the backend
    all_tensors = gen_backend_tensors(
        [np, be], [shape] * 4, [gen_flag] * 4, dtype=be.default_dtype)
    np_operands, be_operands = all_tensors[0], all_tensors[1]

    # both evaluations must be close within loose tolerances
    assert tensors_allclose(call_func(func, np, np_operands),
                            call_func(func, be, be_operands),
                            rtol=1e-2, atol=1e-2)
# Example 5
def test_vs_numpy(backend_tests, custom_args):
    """Compare backend function values against a NumPy reference.

    On the GPU backend running on a TITAN Xp the comparison is a known
    failure (issue #854) and the test is marked xfail; on every other
    backend/platform a comparison failure propagates and fails the test.

    Arguments:
        backend_tests: fixture parameterizing over backends and dtypes.
        custom_args: tuple ``(test_idx, f, flag, dim)`` describing the
            expression, tensor-generation flag, and tensor dimensions.

    Fixes over the previous version: the original raised NameError when the
    PLATFORM env var was set (``gpu_info`` unbound) or when the GPU was not a
    TITAN Xp (``platform`` unbound), compared the bytes output of
    ``check_output`` against a str (always False on Python 3), and silently
    swallowed the AssertionError for non-GPU backends.
    """
    test_idx, f, flag, dim = custom_args

    # backend
    be = NervanaObject.be
    dtype = be.default_dtype

    # tensors
    tensors = gen_backend_tensors([np, be], [dim] * 5, [flag] * 5, dtype=dtype)

    # compare function value and gradient
    numpy_func_val = call_func(f, np, tensors[0])
    backend_func_val = call_func(f, be, tensors[1])

    try:
        assert tensors_allclose(numpy_func_val,
                                backend_func_val,
                                rtol=1e-2,
                                atol=1e-2)
    except AssertionError:
        # only the GPU backend on TITAN Xp is excused; anything else re-raises
        if not isinstance(NervanaObject.be, NervanaGPU):
            raise

        platform = os.getenv("PLATFORM", "")
        if not platform:
            # no PLATFORM override: identify the GPU via nvidia-smi
            if os.path.exists("/usr/bin/nvidia-smi"):
                cmd = ('/usr/bin/nvidia-smi -q | grep "Product Name" | '
                       'tail -1 | cut -f 2 -d \':\' | cut -f 2,3 -d \' \'')
                gpu_info = subp.check_output(cmd, shell=True)
                # check_output returns bytes on Python 3; decode to compare
                if isinstance(gpu_info, bytes):
                    gpu_info = gpu_info.decode()
            else:
                gpu_info = "unknown"

            if gpu_info == 'TITAN Xp\n':
                platform = "TITANXP"

        if platform == 'TITANXP':
            pytest.xfail(reason="xfail issue #854 with {} PLATFORM".format(
                platform))
        else:
            raise
def test_vs_numpy(backend_tests, custom_args):
    """Compare backend function values against a NumPy reference.

    On the GPU backend running on a TITAN Xp the comparison is a known
    failure (issue #854) and the test is marked xfail; on every other
    backend/platform a comparison failure propagates and fails the test.

    Arguments:
        backend_tests: fixture parameterizing over backends and dtypes.
        custom_args: tuple ``(test_idx, f, flag, dim)`` describing the
            expression, tensor-generation flag, and tensor dimensions.

    Fixes over the previous version: the original raised NameError when the
    PLATFORM env var was set (``gpu_info`` unbound) or when the GPU was not a
    TITAN Xp (``platform`` unbound), compared the bytes output of
    ``check_output`` against a str (always False on Python 3), and silently
    swallowed the AssertionError for non-GPU backends.
    """
    test_idx, f, flag, dim = custom_args

    # backend
    be = NervanaObject.be
    dtype = be.default_dtype

    # tensors
    tensors = gen_backend_tensors([np, be], [dim] * 5, [flag] * 5, dtype=dtype)

    # compare function value and gradient
    numpy_func_val = call_func(f, np, tensors[0])
    backend_func_val = call_func(f, be, tensors[1])

    try:
        assert tensors_allclose(numpy_func_val, backend_func_val, rtol=1e-2, atol=1e-2)
    except AssertionError:
        # only the GPU backend on TITAN Xp is excused; anything else re-raises
        if not isinstance(NervanaObject.be, NervanaGPU):
            raise

        platform = os.getenv("PLATFORM", "")
        if not platform:
            # no PLATFORM override: identify the GPU via nvidia-smi
            if os.path.exists("/usr/bin/nvidia-smi"):
                cmd = ('/usr/bin/nvidia-smi -q | grep "Product Name" | '
                       'tail -1 | cut -f 2 -d \':\' | cut -f 2,3 -d \' \'')
                gpu_info = subp.check_output(cmd, shell=True)
                # check_output returns bytes on Python 3; decode to compare
                if isinstance(gpu_info, bytes):
                    gpu_info = gpu_info.decode()
            else:
                gpu_info = "unknown"

            if gpu_info == 'TITAN Xp\n':
                platform = "TITANXP"

        if platform == 'TITANXP':
            pytest.xfail(reason="xfail issue #854 with {} PLATFORM".format(platform))
        else:
            raise