Example #1
def test_gradients(backend_tests, custom_args):
    test_idx, f, flag, dim = custom_args

    # the backend_tests fixture parameterizes over the cpu, gpu, and mkl
    # backends as well as float16 and float32; pull the backend (be) and
    # dtype it set up
    be = NervanaObject.be
    dtype = be.default_dtype

    # tensors
    tensors = gen_backend_tensors([np, be], [dim] * 5, [flag] * 5, dtype=dtype)

    # compare function value and gradient
    numpy_func_val = call_func(f, np, tensors[0])
    backend_func_val = call_func(f, be, tensors[1])
    numerical_gradient = get_numerical_gradient(f, tensors[0])
    ad = get_audiff_gradient(f, be, tensors[1])
    autodiff_gradient = ad.get_grad_asnumpyarray(tensors[1])

    # TODO: stricter test to fix numerical issues
    assert tensors_allclose(numpy_func_val,
                            backend_func_val,
                            rtol=1e-2,
                            atol=1e-2)
    assert tensors_allclose(numerical_gradient,
                            autodiff_gradient,
                            rtol=1e-02,
                            atol=1e-3)

    # cleanup diff tree
    ad.cleanup()
    dtype = None
    be = None
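The helpers used in these tests (gen_backend_tensors, call_func, get_numerical_gradient, get_audiff_gradient, tensors_allclose) are local to the test module and are not shown in this listing. Purely as an illustration, here is a minimal sketch of what call_func and a central-difference numerical gradient could look like, under the assumption that f accepts the backend followed by the input tensors:

import numpy as np


def call_func(f, backend, tensors):
    # sketch (assumption): evaluate f on the given backend and its tensors
    return f(backend, *tensors)


def get_numerical_gradient(f, tensors, epsilon=1e-4):
    # sketch: central-difference gradient of sum(f) w.r.t. each numpy tensor
    grads = [np.zeros_like(t) for t in tensors]
    for t, g in zip(tensors, grads):
        it = np.nditer(t, flags=['multi_index'])
        for _ in it:
            idx = it.multi_index
            saved = t[idx]
            t[idx] = saved + epsilon
            f_plus = np.sum(call_func(f, np, tensors))
            t[idx] = saved - epsilon
            f_minus = np.sum(call_func(f, np, tensors))
            t[idx] = saved
            g[idx] = (f_plus - f_minus) / (2.0 * epsilon)
    return grads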
Example #4
def test_vs_numpy(backend_tests, custom_args):
    test_idx, f, flag, dim = custom_args

    # backend
    be = NervanaObject.be
    dtype = be.default_dtype

    # tensors
    tensors = gen_backend_tensors([np, be], [dim] * 4, [flag] * 4, dtype=dtype)

    # compare function values
    numpy_func_val = call_func(f, np, tensors[0])
    backend_func_val = call_func(f, be, tensors[1])

    assert tensors_allclose(numpy_func_val, backend_func_val, rtol=1e-2, atol=1e-2)
Example #5
    def download(self, download_func, request):
        """download
        """
        def process_request(request):
            """ process request """
            for method in self.methods["process_request"]:
                method(request)
            return download_func(request)

        def process_response(response):
            """ process response """
            for method in self.methods["process_response"]:
                response = method(request, response)
                if isinstance(response, Request):
                    return response
            return response

        def process_exception(exception):
            """ process exception """
            for method in self.methods["process_exception"]:
                response = method(request, exception)
                if response:
                    return response
            return exception

        return call_func(process_request, process_exception, process_response,
                         request)
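call_func here is a project-local helper that chains the three middleware stages; its implementation is not part of this listing. A minimal sketch of the semantics the code above appears to rely on: run the request hook, route any raised exception to the exception hook, and otherwise hand the result to the response hook.

def call_func(process_request, process_exception, process_response, request):
    # sketch (assumption): chain the middleware stages for a single request
    try:
        response = process_request(request)
    except Exception as exc:
        return process_exception(exc)
    return process_response(response)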
Example #7
def test_vs_numpy(backend_tests, custom_args):
    test_idx, f, flag, dim = custom_args

    # backend
    be = NervanaObject.be
    dtype = be.default_dtype

    # tensors
    tensors = gen_backend_tensors([np, be], [dim] * 5, [flag] * 5, dtype=dtype)

    # compare function values
    numpy_func_val = call_func(f, np, tensors[0])
    backend_func_val = call_func(f, be, tensors[1])

    try:
        assert tensors_allclose(numpy_func_val,
                                backend_func_val,
                                rtol=1e-2,
                                atol=1e-2)
    except AssertionError:
        # xfail for gpu backend on TITAN XP platforms
        if isinstance(NervanaObject.be, NervanaGPU):
            platform = os.getenv("PLATFORM", "")
            gpu_info = ""

            if not platform:
                if os.path.exists("/usr/bin/nvidia-smi"):
                    # pull the "Product Name" field, e.g. "TITAN Xp"
                    cmd = ('/usr/bin/nvidia-smi -q | grep "Product Name" | tail -1 | '
                           'cut -f 2 -d \':\' | cut -f 2,3 -d \' \'')
                    gpu_info = subp.check_output(cmd, shell=True).decode()
                else:
                    gpu_info = "unknown"

                if gpu_info.strip() == 'TITAN Xp':
                    platform = "TITANXP"

            if platform == 'TITANXP':
                pytest.xfail(reason="xfail issue #854 with {} PLATFORM".format(
                    platform))
            else:
                assert tensors_allclose(numpy_func_val,
                                        backend_func_val,
                                        rtol=1e-2,
                                        atol=1e-2)
        else:
            # re-raise on non-GPU backends so a real mismatch is not swallowed
            raise
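As an aside, nvidia-smi also exposes a structured query interface that avoids the grep/cut pipeline; a sketch of the equivalent lookup (--query-gpu and --format are standard nvidia-smi flags, and splitlines()[-1] mirrors the tail -1 above):

names = subp.check_output(
    ["/usr/bin/nvidia-smi", "--query-gpu=name", "--format=csv,noheader"]
).decode().splitlines()
gpu_info = names[-1] if names else "unknown"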
Example #9
    def handle_noargs(self, **options):
        for d in settings.DAEMON_FNS:
            print("Starting daemon %s" % d)
            call_func(d)
        reactor.run()
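handle_noargs comes from Django's old NoArgsCommand API (deprecated in 1.8, removed in 1.10). A rough modern equivalent is sketched below, assuming settings.DAEMON_FNS still lists the daemons to start before running the Twisted reactor; call_func is the same project-local helper used in the snippet above.

from django.conf import settings
from django.core.management.base import BaseCommand
from twisted.internet import reactor


class Command(BaseCommand):
    help = "Start the configured daemons, then run the Twisted reactor"

    def handle(self, *args, **options):
        for d in settings.DAEMON_FNS:
            self.stdout.write("Starting daemon %s" % d)
            call_func(d)  # project-local helper, as in the snippet above
        reactor.run()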