Example #1
0
def test_gradients_complex(ffmt, integrator, use_richardson_extrapolation,
                           device):
    """Verify autograd gradient flow through one integrator on one device.

    Integrates a neural-network-controlled damped oscillator, takes the
    Jacobian of the final state w.r.t. the initial state, and checks that
    the Jacobian's first-order prediction of a small initial-condition
    perturbation matches the true difference between two integrations to
    within 4x the integration tolerances.
    """
    if use_richardson_extrapolation and integrator.__implicit__:
        pytest.skip(
            "Richardson Extrapolation is too slow with implicit methods")
    D.set_float_fmt(ffmt)

    print("Testing {} float format".format(D.float_fmt()))

    import torch

    torch.set_printoptions(precision=17)

    device = torch.device(device)

    torch.autograd.set_detect_anomaly(False)  # Enable if a test fails

    class NNController(torch.nn.Module):
        # Small MLP controller: maps (y, dy[, t]) to a sigmoid-bounded output.
        def __init__(self,
                     in_dim=2,
                     out_dim=2,
                     inter_dim=50,
                     append_time=False):
            super().__init__()

            self.append_time = append_time

            self.net = torch.nn.Sequential(
                torch.nn.Linear(in_dim + (1 if append_time else 0), inter_dim),
                torch.nn.Softplus(), torch.nn.Linear(inter_dim, out_dim),
                torch.nn.Sigmoid())

            # Xavier-initialised weights and zero biases for the linear layers.
            for idx, m in enumerate(self.net.modules()):
                if isinstance(m, torch.nn.Linear):
                    torch.nn.init.xavier_normal_(m.weight, gain=1.0)
                    torch.nn.init.constant_(m.bias, 0.0)

        def forward(self, t, y, dy):
            if self.append_time:
                return self.net(
                    torch.cat([y.view(-1), dy.view(-1),
                               t.view(-1)]))
            else:
                return self.net(torch.cat([y, dy]))

    class SimpleODE(torch.nn.Module):
        # Damped oscillator dy/dt = A @ y plus an NN-controlled forcing term.
        def __init__(self, inter_dim=10, k=1.0):
            super().__init__()
            self.nn_controller = NNController(in_dim=4,
                                              out_dim=1,
                                              inter_dim=inter_dim)
            # System matrix of a damped oscillator with spring constant k;
            # registered as a (frozen) Parameter so it moves with the module.
            self.A = torch.nn.Parameter(
                torch.tensor([[0.0, 1.0], [-k, -1.0]], requires_grad=False))

        def forward(self, t, y, params=None):
            # Coerce plain scalars/arrays to tensors so autograd can trace.
            if not isinstance(t, torch.Tensor):
                torch_t = torch.tensor(t)
            else:
                torch_t = t
            if not isinstance(y, torch.Tensor):
                torch_y = torch.tensor(y)
            else:
                torch_y = y
            if params is not None:
                if not isinstance(params, torch.Tensor):
                    torch_params = torch.tensor(params)
                else:
                    torch_params = params

            dy = torch.matmul(self.A, torch_y)

            # NOTE(review): ``torch_params`` above is never used — the raw
            # ``params`` is passed through here; confirm this is intended.
            controller_effect = self.nn_controller(
                torch_t, torch_y, dy) if params is None else params

            # The control term only forces the velocity component, rescaled
            # from the sigmoid's (0, 1) range to (-1, 1).
            return dy + torch.cat(
                [torch.tensor([0.0]).to(dy), (controller_effect * 2.0 - 1.0)])

    method = integrator
    if use_richardson_extrapolation:
        method = de.integrators.generate_richardson_integrator(method)

    with de.utilities.BlockTimer(section_label="Integrator Tests"):
        yi1 = D.array([1.0, 0.0], requires_grad=True).to(device)
        df = SimpleODE(k=1.0)

        a = de.OdeSystem(df,
                         yi1,
                         t=(0, 0.1),
                         dt=1e-3,
                         rtol=D.epsilon()**0.5,
                         atol=D.epsilon()**0.5)
        a.set_method(method)
        print("Testing {} with dt = {:.4e}".format(a.integrator, a.dt))

        a.integrate(eta=True)

        # First-order prediction of the effect of perturbing the initial
        # condition, via the Jacobian d(y_final)/d(y_initial).
        dyfdyi = D.jacobian(a.y[-1], a.y[0])
        dyi = D.array([0.0, 1.0]).to(device) * D.epsilon()**0.5
        dyf = D.einsum("nk,k->n", dyfdyi, dyi)
        yi2 = yi1 + dyi

        print(a.y[-1].device)

        # Re-integrate from the perturbed initial condition over the same span.
        b = de.OdeSystem(df,
                         yi2,
                         t=(0, a.t[-1]),
                         dt=a.dt,
                         rtol=a.rtol,
                         atol=a.atol)
        b.set_method(method)
        b.integrate(eta=True)

        true_diff = b.y[-1] - a.y[-1]

        print(D.norm(true_diff - dyf), D.epsilon()**0.5)

        # The Jacobian-based prediction should match to within 4x tolerance.
        assert (D.allclose(true_diff, dyf, rtol=4 * a.rtol, atol=4 * a.atol))
        print("{} method test succeeded!".format(a.integrator))
        print("")

    print("{} backend test passed successfully!".format(D.backend()))
def test_gradients():
    """Check gradient propagation through every available integrator.

    For each float format and each method exposed by
    ``de.available_methods``, integrates a small NN-controlled damped
    oscillator, computes the Jacobian of the final state w.r.t. the
    initial state, and verifies that the Jacobian's first-order
    prediction of a small initial-condition perturbation matches the
    true difference between two integrations to within 4x tolerance.

    Raises:
        RuntimeError: chained from the underlying failure, naming the
            integration method that failed.
    """
    for ffmt in D.available_float_fmt():
        D.set_float_fmt(ffmt)

        print("Testing {} float format".format(D.float_fmt()))

        import torch

        torch.set_printoptions(precision=17)
        torch.set_num_threads(1)

        # Anomaly detection localises NaN/inf gradients if a method fails.
        torch.autograd.set_detect_anomaly(True)

        class NNController(torch.nn.Module):
            # Small MLP controller: maps (y, dy[, t]) to a sigmoid output.
            def __init__(self,
                         in_dim=2,
                         out_dim=2,
                         inter_dim=50,
                         append_time=False):
                super().__init__()

                self.append_time = append_time

                self.net = torch.nn.Sequential(
                    torch.nn.Linear(in_dim + (1 if append_time else 0),
                                    inter_dim), torch.nn.Softplus(),
                    torch.nn.Linear(inter_dim, out_dim), torch.nn.Sigmoid())

                # Xavier-initialised weights, zero biases for linear layers.
                for m in self.net.modules():
                    if isinstance(m, torch.nn.Linear):
                        torch.nn.init.xavier_normal_(m.weight, gain=1.0)
                        torch.nn.init.constant_(m.bias, 0.0)

            def forward(self, t, y, dy):
                if self.append_time:
                    return self.net(
                        torch.cat([y.view(-1),
                                   dy.view(-1),
                                   t.view(-1)]))
                return self.net(torch.cat([y, dy]))

        class SimpleODE(torch.nn.Module):
            # Damped oscillator dy/dt = A @ y plus an NN-controlled forcing.
            def __init__(self, inter_dim=10, k=1.0):
                super().__init__()
                self.nn_controller = NNController(in_dim=4,
                                                  out_dim=1,
                                                  inter_dim=inter_dim)
                # System matrix with spring constant k; requires_grad so
                # gradients also flow back into A.
                self.A = torch.tensor([[0.0, 1.0], [-k, -1.0]],
                                      requires_grad=True)

            def forward(self, t, y, params=None):
                # Coerce plain scalars/arrays to tensors for autograd.
                torch_t = t if isinstance(t, torch.Tensor) else torch.tensor(t)
                torch_y = y if isinstance(y, torch.Tensor) else torch.tensor(y)
                if params is not None:
                    torch_params = params if isinstance(
                        params, torch.Tensor) else torch.tensor(params)

                dy = torch.matmul(self.A, torch_y)

                # Fix: use the coerced ``torch_params`` — previously the raw
                # ``params`` was used, leaving the coercion above dead code.
                controller_effect = self.nn_controller(
                    torch_t, torch_y, dy) if params is None else torch_params

                # The control term only forces the velocity component,
                # rescaled from the sigmoid's (0, 1) range to (-1, 1).
                return dy + torch.cat(
                    [torch.tensor([0.0]), (controller_effect * 2.0 - 1.0)])

        with de.utilities.BlockTimer(section_label="Integrator Tests"):
            for i in sorted(set(de.available_methods(False).values()),
                            key=lambda x: x.__name__):
                try:
                    yi1 = D.array([1.0, 0.0], requires_grad=True)
                    df = SimpleODE(k=1.0)

                    a = de.OdeSystem(df,
                                     yi1,
                                     t=(0, 1.),
                                     dt=0.0675,
                                     rtol=D.epsilon()**0.5,
                                     atol=D.epsilon()**0.5)
                    a.set_method(i)
                    a.integrate(eta=True)

                    # First-order prediction of the effect of perturbing the
                    # initial condition, via the Jacobian dy_final/dy_initial.
                    dyfdyi = D.jacobian(a.y[-1], a.y[0])
                    dyi = D.array([0.0, 1.0]) * D.epsilon()**0.5
                    dyf = D.einsum("nk,k->n", dyfdyi, dyi)
                    yi2 = yi1 + dyi

                    # Re-integrate from the perturbed initial condition.
                    b = de.OdeSystem(df,
                                     yi2,
                                     t=(0, 1.),
                                     dt=0.0675,
                                     rtol=D.epsilon()**0.5,
                                     atol=D.epsilon()**0.5)
                    b.set_method(i)
                    b.integrate(eta=True)

                    true_diff = b.y[-1] - a.y[-1]

                    print(D.norm(true_diff - dyf), D.epsilon()**0.5)

                    assert (D.allclose(true_diff,
                                       dyf,
                                       rtol=4 * D.epsilon()**0.5,
                                       atol=4 * D.epsilon()**0.5))
                    print("{} method test succeeded!".format(a.integrator))
                except Exception as err:
                    # Chain the original failure instead of swallowing it with
                    # a bare ``except:`` (which also caught SystemExit and
                    # KeyboardInterrupt and discarded the traceback cause).
                    # Use ``i`` rather than ``a.integrator`` so a failure
                    # before ``a`` is bound cannot mask the error as a
                    # NameError.
                    raise RuntimeError(
                        "Test failed for integration method: {}".format(
                            i.__name__)) from err
            print("")

        print("{} backend test passed successfully!".format(D.backend()))
Example #3
0
def test_einsum_within_tolerance(ffmt):
    """Full einsum contraction of a matrix whose entries sum to zero is ~0."""
    D.set_float_fmt(ffmt)
    total = D.einsum("nm->", D.array([[1.0, 2.0], [-2.0, -1.0]]))
    bound = 2 * D.epsilon()
    assert -bound <= total <= bound
Example #4
0
def test_backend():
    """Smoke-test the active desolver backend's math primitives.

    For every available float format: checks the reported machine epsilon,
    dtype casting, tensor contraction, and a battery of elementary and
    special math functions against reference values (mostly numpy), each
    to within a few machine epsilons of the active precision. Any failure
    is reported with the backend name and re-raised.
    """
    try:
        import desolver as de
        import desolver.backend as D
        import numpy as np
        import scipy
        
        # If the backend was pinned via the environment, verify it took effect.
        if "DES_BACKEND" in os.environ:
            assert(D.backend() == os.environ['DES_BACKEND'])
        
        if D.backend() not in ['torch']:
            # Default datatype test
            for i in D.available_float_fmt():
                D.set_float_fmt(i)
                assert(D.array(1.0).dtype  == D.float_fmts[D.float_fmt()])

        # Expected machine epsilon for each supported float format.
        expected_eps = {'float16': 5e-3, 'float32': 5e-7, 'float64': 5e-16, 'gdual_double': 5e-16, 'gdual_vdouble': 5e-16, 'gdual_real128': 5e-16}
        test_array   = np.array([1], dtype=np.int64)
        # Test Function Evals
        for i in D.available_float_fmt():
            D.set_float_fmt(i)
            assert(D.float_fmt() == str(i))
            assert(D.epsilon() == expected_eps[str(i)])
            assert(isinstance(D.available_float_fmt(), list))
            if not i.startswith('gdual'):
                assert(D.cast_to_float_fmt(test_array).dtype == str(i))
            
            arr1 = D.array([[2.0, 1.0],[1.0, 0.0]])
            arr2 = D.array([[1.0, 1.0],[-1.0, 1.0]])
            
            # Contraction over the first 1 and 2 dimensions (skipped for
            # gdual formats, which don't support these ops).
            if not i.startswith('gdual'):
                arr3 = D.contract_first_ndims(arr1, arr2, 1)
                arr4 = D.contract_first_ndims(arr1, arr2, 2)
            
                true_arr3 = D.array([1.0, 1.0])
                true_arr4 = D.array(2.)

                assert(D.norm(arr3 - true_arr3) <= 2 * D.epsilon())
                assert(D.norm(arr4 - true_arr4) <= 2 * D.epsilon())
            
            de.utilities.warning("Testing float format {}".format(D.float_fmt()))
            
            pi = D.to_float(D.pi)
            
            # Mathematical constants against numpy references.
            assert(np.pi - 2*D.epsilon()          <= pi                        <= np.pi + 2*D.epsilon())
            assert(np.e - 2*D.epsilon()           <= D.to_float(D.e)           <= np.e + 2*D.epsilon())
            assert(np.euler_gamma - 2*D.epsilon() <= D.to_float(D.euler_gamma) <= np.euler_gamma + 2*D.epsilon())
            
            # Trigonometric identities at pi.
            assert(-2*D.epsilon() <= D.sin(pi) <= 2*D.epsilon())
            assert(-2*D.epsilon() <= D.cos(pi)+1 <= 2*D.epsilon())
            assert(-2*D.epsilon() <= D.tan(pi) <= 2*D.epsilon())

            assert(D.asin(D.to_float(1)) == pi/2)
            assert(D.acos(D.to_float(1)) == 0)
            assert(D.atan(D.to_float(1)) == pi/4)
            assert(D.atan2(D.to_float(1), D.to_float(1)) == pi/4)

            assert(D.sinh(pi)        == np.sinh(pi))
            assert(D.cosh(pi)        == np.cosh(pi))
            assert(D.tanh(pi)        == np.tanh(pi))

            # Elementary functions against precomputed reference values.
            assert(-3.141592653589793 - 2*D.epsilon()  <= D.neg(pi)   <= -3.141592653589793 + 2*D.epsilon())
            assert(31.00627668029982 - 10*D.epsilon()  <= D.pow(pi,3) <= 31.00627668029982 + 10*D.epsilon())
            assert(3.141592653589793 - 2*D.epsilon()   <= D.abs(pi)   <= 3.141592653589793 + 2*D.epsilon())
            assert(1.77245385090551603 - 2*D.epsilon() <= D.sqrt(pi)  <= 1.77245385090551603 + 2*D.epsilon())
            assert(23.1406926327792690 - 10*D.epsilon()<= D.exp(pi)   <= 23.1406926327792690 + 10*D.epsilon())
            assert(22.1406926327792690 - 10*D.epsilon()<= D.expm1(pi) <= 22.1406926327792690 + 10*D.epsilon())
            assert(1.14472988584940017 - 2*D.epsilon() <= D.log(pi)   <= 1.14472988584940017 + 2*D.epsilon())
            assert(1.14472988584940017 - 2*D.epsilon() <= D.log(pi)   <= 1.14472988584940017 + 2*D.epsilon())
            assert(0.49714987269413385 - 2*D.epsilon() <= D.log10(pi) <= 0.49714987269413385 + 2*D.epsilon())
            assert(1.42108041279429263 - 2*D.epsilon() <= D.log1p(pi) <= 1.42108041279429263 + 2*D.epsilon())
            assert(1.65149612947231880 - 2*D.epsilon() <= D.log2(pi)  <= 1.65149612947231880 + 2*D.epsilon())

            assert(4.14159265358979324 - 2*D.epsilon() <= D.add(pi,1) <= 4.14159265358979324 + 2*D.epsilon())
            assert(2.14159265358979324 - 2*D.epsilon() <= D.sub(pi,1) <= 2.14159265358979324 + 2*D.epsilon())
            assert(D.div(pi,1)       == pi)
            assert(D.mul(pi,1)       == pi)

            assert(0.31830988618379067 - 2*D.epsilon() <= D.reciprocal(pi)  <= 0.31830988618379067 + 2*D.epsilon())
            
            # Rounding/remainder ops are not defined for gdual formats.
            if not i.startswith('gdual'):
                assert(0.14159265358979324 - 2*D.epsilon() <= D.remainder(pi,3) <= 0.14159265358979324 + 2*D.epsilon())
                assert(D.ceil(pi)        == 4)
                assert(D.floor(pi)       == 3)
                assert(D.round(pi)       == 3)
                assert(1.1415926535897931 - 2*D.epsilon()  <= D.fmod(pi,2) <= 1.1415926535897931 + 2*D.epsilon())

                assert(D.clip(pi,1,2)    == 2)
                assert(D.sign(pi)        == 1)
                assert(D.trunc(pi)       == 3)

                # Special functions (digamma, inverse erf, multivariate
                # log-gamma, fractional part).
                assert(0.9772133079420067 - 2*D.epsilon()    <= D.digamma(pi)             <= 0.9772133079420067 + 2*D.epsilon())
                assert(0.4769362762044699 - 2*D.epsilon()    <= D.erfinv(D.to_float(0.5)) <= 0.4769362762044699 + 2*D.epsilon())
                assert(1.7891115385869942 - 2*D.epsilon()    <= D.mvlgamma(pi, 2)         <= 1.7891115385869942 + 2*D.epsilon())
                assert(D.frac(pi)            == pi - 3)
                
            assert(0.9999911238536324 - 2*D.epsilon()    <= D.erf(pi)                 <= 0.9999911238536324 + 2*D.epsilon())
            assert(8.8761463676416054e-6 - 2*D.epsilon() <= D.erfc(pi)                <= 8.8761463676416054e-6 + 2*D.epsilon())
            
            assert(0.9585761678336372 - 2*D.epsilon()    <= D.sigmoid(pi)             <= 0.9585761678336372 + 2*D.epsilon())

            assert(0.5641895835477563 - 2*D.epsilon()    <= D.rsqrt(pi)               <= 0.5641895835477563 + 2*D.epsilon())
            assert(pi + 0.5 - 2*D.epsilon()              <= D.lerp(pi,pi+1,0.5)       <= pi + 0.5 + 2*D.epsilon())
            
            
            assert(D.addcdiv(pi,1,D.to_float(3),D.to_float(2))   == pi + (1 * (3 / 2)))
            assert(D.addcmul(pi,1,D.to_float(3),D.to_float(2))   == pi + (1 * (3 * 2)))
            
            if not i.startswith('gdual'):
                assert(-2*D.epsilon() <= D.einsum("nm->", D.array([[1.0, 2.0], [-2.0, -1.0]])) <= 2*D.epsilon())
    except:
        # Report which backend failed, then re-raise the original error.
        print("{} Backend Test Failed".format(D.backend()))
        raise
    print("{} Backend Test Succeeded".format(D.backend()))