Example 1
def test_float_formats_typical_shape(ffmt, integrator,
                                     use_richardson_extrapolation, device):
    if use_richardson_extrapolation and integrator.__implicit__:
        pytest.skip(
            "Richardson Extrapolation is too slow with implicit methods")
    D.set_float_fmt(ffmt)

    if D.backend() == 'torch':
        import torch

        torch.set_printoptions(precision=17)

        torch.autograd.set_detect_anomaly(False)  # Enable if a test fails

        device = torch.device(device)

    print("Testing {} float format".format(D.float_fmt()))

    from .common import set_up_basic_system

    de_mat, rhs, analytic_soln, y_init, dt, _ = set_up_basic_system(
        integrator, hook_jacobian=True)

    y_init = D.array([1., 0.])

    if D.backend() == 'torch':
        y_init = y_init.to(device)

    a = de.OdeSystem(rhs,
                     y0=y_init,
                     dense_output=False,
                     t=(0, D.pi / 4),
                     dt=D.pi / 64,
                     rtol=D.epsilon()**0.5,
                     atol=D.epsilon()**0.5)

    method = integrator
    method_tolerance = a.atol * 10 + D.epsilon()
    if use_richardson_extrapolation:
        method = de.integrators.generate_richardson_integrator(method)
        method_tolerance = method_tolerance * 5

    with de.utilities.BlockTimer(section_label="Integrator Tests") as sttimer:
        a.set_method(method)
        print("Testing {} with dt = {:.4e}".format(a.integrator, a.dt))

        a.integrate(eta=True)

        print("Average step-size:",
              D.mean(D.abs(D.array(a.t[1:]) - D.array(a.t[:-1]))))
        max_diff = D.max(D.abs(analytic_soln(a.t[-1], y_init) - a.y[-1]))
        if a.integrator.adaptive:
            assert max_diff <= method_tolerance, "{} Failed with max_diff from analytical solution = {}".format(
                a.integrator, max_diff)
        if a.integrator.__implicit__:
            assert rhs.analytic_jacobian_called and a.njev > 0, "Analytic jacobian was not called as part of the integration"
        a.reset()
    print("")

    print("{} backend test passed successfully!".format(D.backend()))
Example 2
def test_brentsroot_same_sign():
    if D.backend() == 'torch':
        import torch

        torch.set_printoptions(precision=17)

        torch.autograd.set_detect_anomaly(True)

    a = D.array(1.0)
    b = D.array(1.0)

    gt_root = -b / a
    lb, ub = -b / a - 1, -b / a - 2

    fun = lambda x: a * x + b

    assert (D.to_numpy(D.to_float(D.abs(fun(gt_root)))) <= 32 * D.epsilon())

    root, success = de.utilities.optimizer.brentsroot(fun, [lb, ub],
                                                      4 * D.epsilon(),
                                                      verbose=True)

    assert (np.isinf(root))
    assert (not success)
Example 3
def test_brentsroot_wrong_order():
    if D.backend() == 'torch':
        import torch

        torch.set_printoptions(precision=17)

        torch.autograd.set_detect_anomaly(True)

    a = D.array(1.0)
    b = D.array(1.0)

    gt_root = -b / a
    lb, ub = -b / a - 1, -b / a + 1

    fun = lambda x: a * x + b

    assert (D.to_numpy(D.to_float(D.abs(fun(gt_root)))) <= 32 * D.epsilon())

    root, success = de.utilities.optimizer.brentsroot(fun, [ub, lb],
                                                      4 * D.epsilon(),
                                                      verbose=True)

    assert (success)
    assert (np.allclose(D.to_numpy(D.to_float(gt_root)),
                        D.to_numpy(D.to_float(root)), 32 * D.epsilon(),
                        32 * D.epsilon()))
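
Together, the two brentsroot tests above pin down the bracketing contract: a sign change inside the interval (given in either order) yields convergence, while a same-sign interval returns an infinite root with success == False. A minimal success-path sketch reusing the call signature from the tests, with an illustrative linear function:

import desolver as de
import desolver.backend as D

fun = lambda x: 2.0 * x - 1.0  # single root at x = 0.5
root, success = de.utilities.optimizer.brentsroot(fun, [0.0, 1.0],
                                                  4 * D.epsilon(),
                                                  verbose=False)
assert success
assert abs(D.to_numpy(D.to_float(root)) - 0.5) <= 32 * D.epsilon()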
Example 4
def test_newtonraphson_pytorch_jacobian(ffmt, tol):
    print("Set dtype to:", ffmt)
    D.set_float_fmt(ffmt)
    np.random.seed(21)

    if tol is not None:
        tol = tol * D.epsilon()

    if D.backend() == 'torch':
        import torch

        torch.set_printoptions(precision=17)

        torch.autograd.set_detect_anomaly(False)

    if ffmt == 'gdual_vdouble':
        pytest.skip("Root-finding is ill-conceived with vectorised gduals")

    for _ in range(10):
        ac_prod = D.array(np.random.uniform(0.9, 1.1))
        a = D.array(np.random.uniform(-1, 1))
        a = D.to_float(-1 * (a <= 0) + 1 * (a > 0))
        c = ac_prod / a
        b = D.sqrt(0.01 + 4 * ac_prod)

        gt_root1 = -b / (2 * a) - 0.1 / (2 * a)
        gt_root2 = -b / (2 * a) + 0.1 / (2 * a)

        ub = -b / (2 * a) - 0.2 / (2 * a)
        lb = -b / (2 * a) - 0.4 / (2 * a)

        x0 = D.array(np.random.uniform(ub, lb))

        fun = lambda x: a * x**2 + b * x + c

        assert (D.to_numpy(D.to_float(D.abs(fun(gt_root1)))) <=
                32 * D.epsilon())
        assert (D.to_numpy(D.to_float(D.abs(fun(gt_root2)))) <=
                32 * D.epsilon())

        root, (success, num_iter,
               prec) = de.utilities.optimizer.newtonraphson(fun,
                                                            x0,
                                                            tol=tol,
                                                            verbose=True)

        if tol is None:
            tol = D.epsilon()
        conv_root1 = np.allclose(D.to_numpy(D.to_float(gt_root1)),
                                 D.to_numpy(D.to_float(root)), 128 * tol,
                                 32 * tol)
        conv_root2 = np.allclose(D.to_numpy(D.to_float(gt_root2)),
                                 D.to_numpy(D.to_float(root)), 128 * tol,
                                 32 * tol)
        print(conv_root1, conv_root2, root, gt_root1, gt_root2, x0,
              root - gt_root1, root - gt_root2, num_iter, prec)

        assert (success)
        assert (conv_root1 or conv_root2)
        assert (D.to_numpy(D.to_float(D.abs(fun(root)))) <= 32 * tol)
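
For reference, the same newtonraphson call shape on a scalar problem; under the PyTorch backend the test above obtains its jacobian from automatic differentiation, so no jac argument is needed (the quadratic here is illustrative):

import desolver as de
import desolver.backend as D

fun = lambda x: x**2 - 2.0  # roots at +/- sqrt(2)
root, (success, num_iter, prec) = de.utilities.optimizer.newtonraphson(
    fun, D.array(1.0), tol=None, verbose=False)
assert success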
Example 5
def test_non_callable_rhs(ffmt):
    with pytest.raises(TypeError):
        D.set_float_fmt(ffmt)

        if D.backend() == 'torch':
            import torch

            torch.set_printoptions(precision=17)

            torch.autograd.set_detect_anomaly(True)

        print("Testing {} float format".format(D.float_fmt()))

        from . import common

        (de_mat, rhs, analytic_soln, y_init, dt,
         _) = common.set_up_basic_system()

        a = de.OdeSystem(de_mat,
                         y0=y_init,
                         dense_output=False,
                         t=(0, 2 * D.pi),
                         dt=dt,
                         rtol=D.epsilon()**0.5,
                         atol=D.epsilon()**0.5,
                         constants=dict(k=1.0))
Example 6
def rhs(t, state, **kwargs):
    nonlocal de_mat
    if D.backend() == 'torch':
        de_mat = de_mat.to(state.device)
    out = de_mat @ state
    out[1] += t
    return out
Example 7
def test_dt_dir_fix(ffmt):
    D.set_float_fmt(ffmt)

    if D.backend() == 'torch':
        import torch

        torch.set_printoptions(precision=17)

        torch.autograd.set_detect_anomaly(True)

    print("Testing {} float format".format(D.float_fmt()))

    de_mat = D.array([[0.0, 1.0], [-1.0, 0.0]])

    @de.rhs_prettifier("""[vx, -x+t]""")
    def rhs(t, state, k, **kwargs):
        return de_mat @ state + D.array([0.0, t])

    y_init = D.array([1., 0.])

    a = de.OdeSystem(rhs,
                     y0=y_init,
                     dense_output=False,
                     t=(0, 2 * D.pi),
                     dt=-0.01,
                     rtol=D.epsilon()**0.5,
                     atol=D.epsilon()**0.5,
                     constants=dict(k=1.0))
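
The test above hands OdeSystem a negative dt against a forward time span (0, 2*pi); the name suggests the constructor is expected to correct the step direction to match. A sketch of that check, continuing from the objects defined above and assuming a.dt exposes the corrected step:

assert a.dt > 0  # assumed behaviour: negative step flipped to match t=(0, 2*pi)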
Example 8
def rhs(t, state, **kwargs):
    nonlocal de_mat
    extra = D.array([0.0, t])
    if D.backend() == 'torch':
        de_mat = de_mat.to(state.device)
        extra = extra.to(state.device)
    return D.sum(de_mat[:, :, None, None, None] * state,
                 axis=1) + extra[:, None, None, None]
Example 9
class PyAudiTestCase:
    @pytest.mark.skipif(D.backend() != 'numpy'
                        or 'gdual_double' not in D.available_float_fmt(),
                        reason="PyAudi Tests")
    def test_gdual_double(self):
        D.set_float_fmt('gdual_double')
        x1 = D.gdual_double(-0.5, 'x', 5)
        x2 = D.gdual_double(0.5, 'y', 5)
        self.do(x1, x2)

    @pytest.mark.skipif(D.backend() != 'numpy'
                        or 'gdual_vdouble' not in D.available_float_fmt(),
                        reason="PyAudi Tests")
    def test_gdual_vdouble(self):
        D.set_float_fmt('gdual_vdouble')
        x1 = D.gdual_vdouble([-0.5, -0.5], 'x', 5)
        x2 = D.gdual_vdouble([0.5, 0.5], 'y', 5)
        self.do(x1, x2)

    def do(self, x1, x2):
        pass
Example 10
def test_newtonraphson_dims(ffmt, tol, dim):
    print("Set dtype to:", ffmt)
    D.set_float_fmt(ffmt)
    np.random.seed(30)

    if tol is not None:
        tol = tol * D.epsilon()

    if D.backend() == 'torch':
        import torch

        torch.set_printoptions(precision=17)

        torch.autograd.set_detect_anomaly(False)

    if ffmt == 'gdual_vdouble':
        pytest.skip("Root-finding is ill-conceived with vectorised gduals")

    shift = D.array(np.random.uniform(1, 10, size=(dim, )))
    exponent = D.array(np.random.uniform(1, 5, size=(dim, )))
    gt_root1 = shift**(1 / exponent)
    gt_root2 = -shift**(1 / exponent)

    def fun(x):
        return x**exponent - shift

    def jac(x):
        return D.diag(exponent * D.reshape(x, (-1, ))**(exponent - 1))

    x0 = D.array(np.random.uniform(1, 3, size=(dim, )))
    print(gt_root1, gt_root2)
    print(x0)
    print(fun(x0))
    print(jac(x0))

    root, (success, num_iter,
           prec) = de.utilities.optimizer.newtonraphson(fun,
                                                        x0,
                                                        jac=jac,
                                                        tol=tol,
                                                        verbose=True)

    if tol is None:
        tol = D.epsilon()
    assert (success)
    conv_root1 = D.stack([
        D.array(np.allclose(D.to_numpy(D.to_float(r1)),
                            D.to_numpy(D.to_float(r)), 128 * tol, 32 * tol),
                dtype=D.bool) for r, r1 in zip(root, gt_root1)
    ])
    conv_root2 = D.stack([
        D.array(np.allclose(D.to_numpy(D.to_float(r2)),
                            D.to_numpy(D.to_float(r)), 128 * tol, 32 * tol),
                dtype=D.bool) for r, r2 in zip(root, gt_root2)
    ])
    assert (D.all(conv_root1 | conv_root2))
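
The same pattern generalises to vector problems with an analytic jacobian supplied via jac=; a compact sketch with hand-picked roots (values illustrative):

import desolver as de
import desolver.backend as D

exponent = D.array([2.0, 3.0])
shift = D.array([2.0, 8.0])
fun = lambda x: x**exponent - shift  # roots: sqrt(2) and 2
jac = lambda x: D.diag(exponent * D.reshape(x, (-1,))**(exponent - 1))
root, (success, num_iter, prec) = de.utilities.optimizer.newtonraphson(
    fun, D.array([1.5, 1.5]), jac=jac, tol=None, verbose=False)
assert success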
Example 11
def test_dense_output(ffmt, use_richardson_extrapolation):
    D.set_float_fmt(ffmt)

    if D.backend() == 'torch':
        import torch

        torch.set_printoptions(precision=17)

        torch.autograd.set_detect_anomaly(True)

    print("Testing {} float format".format(D.float_fmt()))

    from . import common

    (de_mat, rhs, analytic_soln, y_init, dt, a) = common.set_up_basic_system()

    assert (a.integration_status == "Integration has not been run.")

    if use_richardson_extrapolation:
        a.method = de.integrators.generate_richardson_integrator(a.method)
    a.rtol = a.atol = D.epsilon()**0.75
    a.integrate()

    assert (a.integration_status == "Integration completed successfully.")

    assert (D.max(D.abs(a[0].y - analytic_soln(a[0].t, y_init))) <=
            4 * D.epsilon())
    assert (D.max(D.abs(a[0].t)) <= 4 * D.epsilon())
    assert (D.max(D.abs(a[-1].y - analytic_soln(a[-1].t, y_init))) <=
            10 * D.epsilon()**0.5)

    assert (D.max(D.abs(a[a[0].t].y - analytic_soln(a[0].t, y_init))) <=
            4 * D.epsilon())
    assert (D.max(D.abs(a[a[0].t].t)) <= 4 * D.epsilon())
    assert (D.max(D.abs(a[a[-1].t].y - analytic_soln(a[-1].t, y_init))) <=
            10 * D.epsilon()**0.5)

    assert (D.max(D.abs(D.stack(a[a[0].t:a[-1].t].y) - D.stack(a.y))) <=
            4 * D.epsilon())
    assert (D.max(D.abs(D.stack(a[:a[-1].t].y) - D.stack(a.y))) <=
            4 * D.epsilon())

    assert (D.max(D.abs(D.stack(a[a[0].t:a[-1].t:2].y) - D.stack(a.y[::2]))) <=
            4 * D.epsilon())
    assert (D.max(D.abs(D.stack(a[a[0].t::2].y) - D.stack(a.y[::2]))) <=
            4 * D.epsilon())
    assert (D.max(D.abs(D.stack(a[:a[-1].t:2].y) - D.stack(a.y[::2]))) <=
            4 * D.epsilon())

    np.random.seed(42)
    sample_points = D.array(np.random.uniform(a.t[0], a.t[-1], 1024))
    assert (D.max(
        D.abs(a.sol(sample_points) -
              analytic_soln(sample_points, y_init).T)).item() <= D.epsilon()**
            0.5)
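
The indexing-heavy assertions above come down to three behaviours: integer indices return stored (t, y) records, float indices resolve to the stored step nearest that time, and a.sol interpolates anywhere inside the integrated interval. A self-contained sketch of the basic usage, with an illustrative oscillator RHS:

import desolver as de
import desolver.backend as D

rhs = lambda t, state, **kwargs: D.array([[0.0, 1.0], [-1.0, 0.0]]) @ state
a = de.OdeSystem(rhs, y0=D.array([1., 0.]), dense_output=True,
                 t=(0, 2 * D.pi), dt=0.01,
                 rtol=D.epsilon()**0.5, atol=D.epsilon()**0.5)
a.integrate()
mid_t = 0.5 * (a.t[0] + a.t[-1])
print(a[0].t, a[0].y)  # first stored step, by index
print(a[mid_t].y)      # stored step nearest to mid_t, by float time
print(a.sol(mid_t))    # dense interpolant evaluated at mid_t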
Example 12
class PyAudiMatrixTestCase:
    @pytest.mark.skipif(D.backend() != 'numpy'
                        or 'gdual_double' not in D.available_float_fmt(),
                        reason="PyAudi Tests")
    def test_gdual_double_matrix(self):
        D.set_float_fmt('gdual_double')
        A = D.array([
            [D.gdual_double(-1.0, 'a11', 5),
             D.gdual_double(3 / 2, 'a12', 5)],
            [D.gdual_double(1.0, 'a21', 5),
             D.gdual_double(-1.0, 'a22', 5)],
        ])
        self.do(A)

    @pytest.mark.skipif(D.backend() != 'numpy'
                        or 'gdual_double' not in D.available_float_fmt(),
                        reason="PyAudi Tests")
    def test_gdual_double_matrix_big(self):
        D.set_float_fmt('gdual_double')
        np.random.seed(23)
        A1 = self.generate_random_nondegenerate_matrix(4)

        A = []
        for idx in range(A1.shape[0]):
            A.append([])
            for jdx in range(A1.shape[1]):
                A[idx].append(
                    D.gdual_double(A1[idx, jdx],
                                   'a{}{}'.format(idx + 1, jdx + 1), 1))
        A = D.array(A)
        self.do(A)

    def generate_random_nondegenerate_matrix(self, size):
        A = np.random.normal(size=(size, size))
        while np.abs(np.linalg.det(D.to_float(A))) <= 1e-5:
            A = np.random.normal(size=(size, size), scale=250.0)
        return A

    def do(self, A):
        pass
Example 13
def test_event_detection():
    for ffmt in D.available_float_fmt():
        if ffmt == 'float16':
            continue
        D.set_float_fmt(ffmt)

        print("Testing event detection for float format {}".format(D.float_fmt()))

        de_mat = D.array([[0.0, 1.0],[-1.0, 0.0]])

        @de.rhs_prettifier("""[vx, -x+t]""")
        def rhs(t, state, **kwargs):
            return de_mat @ state + D.array([0.0, t])

        def analytic_soln(t, initial_conditions):
            c1 = initial_conditions[0]
            c2 = initial_conditions[1] - 1

            return D.array([
                c2 * D.sin(t) + c1 * D.cos(t) + t,
                c2 * D.cos(t) - c1 * D.sin(t) + 1
            ])
        
        y_init = D.array([1., 0.])

        def time_event(t, y, **kwargs):
            return t - D.pi/8
        
        time_event.is_terminal = True
        time_event.direction   = 0

        a = de.OdeSystem(rhs, y0=y_init, dense_output=True, t=(0, D.pi/4), dt=0.01, rtol=D.epsilon()**0.5, atol=D.epsilon()**0.5)

        with de.utilities.BlockTimer(section_label="Integrator Tests") as sttimer:
            for i in sorted(set(de.available_methods(False).values()), key=lambda x:x.__name__):
                try:
                    a.set_method(i)
                    print("Testing {}".format(a.integrator))
                    a.integrate(eta=True, events=time_event)

                    if D.abs(a.t[-1] - D.pi/8) > 10*D.epsilon():
                        print("Event detection with integrator {} failed with t[-1] = {}".format(a.integrator, a.t[-1]))
                        raise RuntimeError("Failed to detect event for integrator {}".format(str(i)))
                    else:
                        print("Event detection with integrator {} succeeded with t[-1] = {}".format(a.integrator, a.t[-1]))
                    a.reset()
                except Exception as e:
                    raise RuntimeError("Test failed for integration method: {}".format(a.integrator)) from e
            print("")

        print("{} backend test passed successfully!".format(D.backend()))
Example 14
def test_brentsrootvec(ffmt, tol):
    print("Set dtype to:", ffmt)
    D.set_float_fmt(ffmt)
    if tol is not None:
        tol = tol * D.epsilon()

    if D.backend() == 'torch':
        import torch

        torch.set_printoptions(precision=17)

        torch.autograd.set_detect_anomaly(True)

    if ffmt == 'gdual_vdouble':
        pytest.skip("Root-finding is ill-conceived with vectorised gduals")

    for _ in range(10):
        slope_list = D.array(
            np.copysign(np.random.uniform(0.9, 1.1, size=25),
                        np.random.uniform(-1, 1, size=25)))
        intercept_list = slope_list

        gt_root_list = -intercept_list / slope_list

        fun_list = [(lambda m, b: lambda x: m * x + b)(m, b)
                    for m, b in zip(slope_list, intercept_list)]

        assert all(
            D.to_numpy(D.to_float(D.abs(f(r)))) <= 32 * D.epsilon()
            for f, r in zip(fun_list, gt_root_list))

        root_list, success = de.utilities.optimizer.brentsrootvec(
            fun_list, [D.min(gt_root_list) - 1.,
                       D.max(gt_root_list) + 1.],
            tol,
            verbose=True)

        assert (np.all(D.to_numpy(success)))
        assert (np.allclose(D.to_numpy(D.to_float(gt_root_list)),
                            D.to_numpy(D.to_float(root_list)),
                            32 * D.epsilon(), 32 * D.epsilon()))

        assert all(
            D.to_numpy(D.to_float(D.abs(f(r)))) <= 32 * D.epsilon()
            for f, r in zip(fun_list, root_list))
Example 15
def test_wrong_tf(ffmt):
    with pytest.raises(ValueError):
        D.set_float_fmt(ffmt)

        if D.backend() == 'torch':
            import torch

            torch.set_printoptions(precision=17)

            torch.autograd.set_detect_anomaly(True)

        print("Testing {} float format".format(D.float_fmt()))

        from . import common

        (de_mat, rhs, analytic_soln, y_init, dt,
         a) = common.set_up_basic_system()

        a.tf = 0.0
Example 16
def test_brentsroot(ffmt, tol):
    print("Set dtype to:", ffmt)
    D.set_float_fmt(ffmt)

    if tol is not None:
        tol = tol * D.epsilon()

    if D.backend() == 'torch':
        import torch

        torch.set_printoptions(precision=17)

        torch.autograd.set_detect_anomaly(True)

    for _ in range(10):
        ac_prod = D.array(np.random.uniform(0.9, 1.1))
        a = D.array(np.random.uniform(-1, 1))
        a = D.to_float(-1 * (a <= 0) + 1 * (a > 0))
        c = ac_prod / a
        b = D.sqrt(0.01 + 4 * ac_prod)

        gt_root = -b / (2 * a) - 0.1 / (2 * a)

        ub = -b / (2 * a)
        lb = -b / (2 * a) - 1.0 / (2 * a)

        fun = lambda x: a * x**2 + b * x + c

        assert (D.to_numpy(D.to_float(D.abs(fun(gt_root)))) <=
                32 * D.epsilon())

        root, success = de.utilities.optimizer.brentsroot(fun, [lb, ub],
                                                          tol,
                                                          verbose=True)

        assert (success)
        assert (np.allclose(D.to_numpy(D.to_float(gt_root)),
                            D.to_numpy(D.to_float(root)), 32 * D.epsilon(),
                            32 * D.epsilon()))
        assert (D.to_numpy(D.to_float(D.abs(fun(root)))) <= 32 * D.epsilon())
Example 17
def test_integration_and_nearest_float_no_dense_output(ffmt):
    D.set_float_fmt(ffmt)

    if D.backend() == 'torch':
        import torch

        torch.set_printoptions(precision=17)

        torch.autograd.set_detect_anomaly(True)

    print("Testing {} float format".format(D.float_fmt()))

    de_mat = D.array([[0.0, 1.0], [-1.0, 0.0]])

    @de.rhs_prettifier("""[vx, -x+t]""")
    def rhs(t, state, k, **kwargs):
        return de_mat @ state + D.array([0.0, t])

    y_init = D.array([1., 0.])

    a = de.OdeSystem(rhs,
                     y0=y_init,
                     dense_output=False,
                     t=(0, 2 * D.pi),
                     dt=0.01,
                     rtol=D.epsilon()**0.5,
                     atol=D.epsilon()**0.5,
                     constants=dict(k=1.0))

    assert (a.integration_status == "Integration has not been run.")

    a.integrate()

    assert (a.sol is None)

    assert (a.integration_status == "Integration completed successfully.")

    assert (D.abs(a.t[-2] - a[2 * D.pi].t) <= D.abs(a.dt))
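
Without dense output there is no interpolant (a.sol is None), but float indexing still resolves to the stored step nearest the requested time, which is what the final assertion exercises; continuing from the test above:

nearest = a[2 * D.pi]  # stored step closest to t = 2*pi
assert D.abs(nearest.t - 2 * D.pi) <= D.abs(a.dt)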
Example 18
def test_integration_and_representation(ffmt):
    D.set_float_fmt(ffmt)

    if D.backend() == 'torch':
        import torch

        torch.set_printoptions(precision=17)

        torch.autograd.set_detect_anomaly(True)

    print("Testing {} float format".format(D.float_fmt()))

    from . import common

    (de_mat, rhs, analytic_soln, y_init, dt, a) = common.set_up_basic_system()

    assert (a.integration_status == "Integration has not been run.")

    a.integrate()

    assert (a.integration_status == "Integration completed successfully.")

    print(str(a))
    print(repr(a))
    assert (D.max(D.abs(a.sol(a.t[0]) - y_init)) <= 8 * D.epsilon()**0.5)
    assert (D.max(D.abs(a.sol(a.t[-1]) - analytic_soln(a.t[-1], y_init))) <=
            8 * D.epsilon()**0.5)
    assert (D.max(D.abs(a.sol(a.t).T - analytic_soln(a.t, y_init))) <=
            8 * D.epsilon()**0.5)

    for i in a:
        assert (D.max(D.abs(i.y - analytic_soln(i.t, y_init))) <=
                8 * D.epsilon()**0.5)

    assert (len(a.y) == len(a))
    assert (len(a.t) == len(a))
Example 19
def test_gradients_simple_oscillator(ffmt, integrator,
                                     use_richardson_extrapolation, device):
    if use_richardson_extrapolation and integrator.__implicit__:
        pytest.skip(
            "Richardson Extrapolation is too slow with implicit methods")
    D.set_float_fmt(ffmt)

    print("Testing {} float format".format(D.float_fmt()))

    import torch

    torch.set_printoptions(precision=17)

    device = torch.device(device)

    torch.autograd.set_detect_anomaly(False)  # Enable if a test fails

    def rhs(t, state, k, m, **kwargs):
        return D.array([[0.0, 1.0], [-k / m, 0.0]], device=device) @ state

    csts = dict(k=1.0, m=1.0)
    T = 2 * D.pi * D.sqrt(D.array(csts['m'] / csts['k'])).to(device)
    dt = max(0.5 * (D.epsilon()**0.5)**(1.0 / (max(2, integrator.order - 1))),
             5e-2)

    def true_solution_sho(t, initial_state, k, m):
        w2 = D.array(k / m).to(device)
        w = D.sqrt(w2)
        A = D.sqrt(initial_state[0]**2 + initial_state[1]**2 / w2)
        phi = D.atan2(-initial_state[1], w * initial_state[0])
        return D.stack([A * D.cos(w * t + phi), -w * A * D.sin(w * t + phi)]).T

    method = integrator
    if use_richardson_extrapolation:
        method = de.integrators.generate_richardson_integrator(method)

    with de.utilities.BlockTimer(section_label="Integrator Tests"):
        y_init = D.array([1., 1.], requires_grad=True).to(device)

        a = de.OdeSystem(rhs,
                         y_init,
                         t=(0, T),
                         dt=T * dt,
                         rtol=D.epsilon()**0.5,
                         atol=D.epsilon()**0.5,
                         constants=csts)
        a.set_method(method)
        print("Testing {} with dt = {:.4e}".format(a.integrator, a.dt))

        a.integrate(eta=True)

        Jy = D.jacobian(a.y[-1], a.y[0])
        true_Jy = D.jacobian(true_solution_sho(a.t[-1], a.y[0], **csts),
                             a.y[0])

        print(a.integrator.adaptive,
              D.mean(D.abs(D.stack(a.t[1:]) - D.stack(a.t[:-1]))),
              D.norm(true_Jy - Jy),
              D.epsilon()**0.5)

        if a.integrator.adaptive:
            assert (D.allclose(true_Jy,
                               Jy,
                               rtol=4 * a.rtol**0.75,
                               atol=4 * a.atol**0.75))
        print("{} method test succeeded!".format(a.integrator))
        print("")

    print("{} backend test passed successfully!".format(D.backend()))
Example 20
def test_gradients_complex(ffmt, integrator, use_richardson_extrapolation,
                           device):
    if use_richardson_extrapolation and integrator.__implicit__:
        pytest.skip(
            "Richardson Extrapolation is too slow with implicit methods")
    D.set_float_fmt(ffmt)

    print("Testing {} float format".format(D.float_fmt()))

    import torch

    torch.set_printoptions(precision=17)

    device = torch.device(device)

    torch.autograd.set_detect_anomaly(False)  # Enable if a test fails

    class NNController(torch.nn.Module):
        def __init__(self,
                     in_dim=2,
                     out_dim=2,
                     inter_dim=50,
                     append_time=False):
            super().__init__()

            self.append_time = append_time

            self.net = torch.nn.Sequential(
                torch.nn.Linear(in_dim + (1 if append_time else 0), inter_dim),
                torch.nn.Softplus(), torch.nn.Linear(inter_dim, out_dim),
                torch.nn.Sigmoid())

            for idx, m in enumerate(self.net.modules()):
                if isinstance(m, torch.nn.Linear):
                    torch.nn.init.xavier_normal_(m.weight, gain=1.0)
                    torch.nn.init.constant_(m.bias, 0.0)

        def forward(self, t, y, dy):
            if self.append_time:
                return self.net(
                    torch.cat([y.view(-1), dy.view(-1),
                               t.view(-1)]))
            else:
                return self.net(torch.cat([y, dy]))

    class SimpleODE(torch.nn.Module):
        def __init__(self, inter_dim=10, k=1.0):
            super().__init__()
            self.nn_controller = NNController(in_dim=4,
                                              out_dim=1,
                                              inter_dim=inter_dim)
            self.A = torch.nn.Parameter(
                torch.tensor([[0.0, 1.0], [-k, -1.0]], requires_grad=False))

        def forward(self, t, y, params=None):
            if not isinstance(t, torch.Tensor):
                torch_t = torch.tensor(t)
            else:
                torch_t = t
            if not isinstance(y, torch.Tensor):
                torch_y = torch.tensor(y)
            else:
                torch_y = y
            if params is not None:
                if not isinstance(params, torch.Tensor):
                    torch_params = torch.tensor(params)
                else:
                    torch_params = params

            dy = torch.matmul(self.A, torch_y)

            controller_effect = self.nn_controller(
                torch_t, torch_y, dy) if params is None else params

            return dy + torch.cat(
                [torch.tensor([0.0]).to(dy), (controller_effect * 2.0 - 1.0)])

    method = integrator
    if use_richardson_extrapolation:
        method = de.integrators.generate_richardson_integrator(method)

    with de.utilities.BlockTimer(section_label="Integrator Tests"):
        yi1 = D.array([1.0, 0.0], requires_grad=True).to(device)
        df = SimpleODE(k=1.0)

        a = de.OdeSystem(df,
                         yi1,
                         t=(0, 0.1),
                         dt=1e-3,
                         rtol=D.epsilon()**0.5,
                         atol=D.epsilon()**0.5)
        a.set_method(method)
        print("Testing {} with dt = {:.4e}".format(a.integrator, a.dt))

        a.integrate(eta=True)

        dyfdyi = D.jacobian(a.y[-1], a.y[0])
        dyi = D.array([0.0, 1.0]).to(device) * D.epsilon()**0.5
        dyf = D.einsum("nk,k->n", dyfdyi, dyi)
        yi2 = yi1 + dyi

        print(a.y[-1].device)

        b = de.OdeSystem(df,
                         yi2,
                         t=(0, a.t[-1]),
                         dt=a.dt,
                         rtol=a.rtol,
                         atol=a.atol)
        b.set_method(method)
        b.integrate(eta=True)

        true_diff = b.y[-1] - a.y[-1]

        print(D.norm(true_diff - dyf), D.epsilon()**0.5)

        assert (D.allclose(true_diff, dyf, rtol=4 * a.rtol, atol=4 * a.atol))
        print("{} method test succeeded!".format(a.integrator))
        print("")

    print("{} backend test passed successfully!".format(D.backend()))
Example 21
def test_gradients_simple_decay(ffmt, integrator, use_richardson_extrapolation,
                                device):
    if use_richardson_extrapolation and integrator.__implicit__:
        pytest.skip(
            "Richardson Extrapolation is too slow with implicit methods")
    D.set_float_fmt(ffmt)
    if integrator.__symplectic__:
        pytest.skip(
            "Exponential decay system is not in the form compatible with symplectic integrators"
        )
    print("Testing {} float format".format(D.float_fmt()))

    import torch

    torch.set_printoptions(precision=17)

    device = torch.device(device)

    torch.autograd.set_detect_anomaly(False)  # Enable if a test fails

    def rhs(t, state, k, **kwargs):
        return -k * state

    def rhs_jac(t, state, k, **kwargs):
        return -k

    rhs.jac = rhs_jac

    y_init = D.array(5.0, requires_grad=True)
    csts = dict(k=D.array(1.0, device=device))
    dt = max(0.5 * (D.epsilon()**0.5)**(1.0 / (max(2, integrator.order - 1))),
             5e-2)

    def true_solution_decay(t, initial_state, k):
        return initial_state * D.exp(-k * t)

    method = integrator
    if use_richardson_extrapolation:
        method = de.integrators.generate_richardson_integrator(method)

    with de.utilities.BlockTimer(section_label="Integrator Tests"):
        y_init = D.ones((1, ), requires_grad=True).to(device)
        y_init = y_init * D.e

        a = de.OdeSystem(rhs,
                         y_init,
                         t=(0, 1.0),
                         dt=dt,
                         rtol=D.epsilon()**0.5,
                         atol=D.epsilon()**0.5,
                         constants=csts)
        a.set_method(method)
        print("Testing {} with dt = {:.4e}".format(a.integrator, a.dt))

        a.integrate(eta=True)

        Jy = D.jacobian(a.y[-1], a.y[0])
        true_Jy = D.jacobian(true_solution_decay(a.t[-1], a.y[0], **csts),
                             a.y[0])

        print(a.y[-1], true_solution_decay(a.t[-1], a.y[0], **csts),
              D.abs(a.y[-1] - true_solution_decay(a.t[-1], a.y[0], **csts)))
        print(a.integrator.adaptive,
              D.mean(D.abs(D.stack(a.t[1:]) - D.stack(a.t[:-1]))),
              D.norm(true_Jy - Jy), 32 * a.rtol)
        print(a.integrator.adaptive, true_Jy, Jy)

        if a.integrator.adaptive:
            assert (D.allclose(true_Jy, Jy, rtol=32 * a.rtol,
                               atol=32 * a.atol))
        print("{} method test succeeded!".format(a.integrator))
        print("")

    print("{} backend test passed successfully!".format(D.backend()))
Example 22
import pytest
import desolver as de
import desolver.backend as D
import numpy as np
from .common import ffmt_param, integrator_param, richardson_param, device_param, dt_param, dense_output_param


@pytest.mark.torch_gradients
@pytest.mark.skipif(D.backend() != 'torch', reason="PyTorch Unavailable")
@ffmt_param
@integrator_param
@richardson_param
@device_param
def test_gradients_simple_decay(ffmt, integrator, use_richardson_extrapolation,
                                device):
    if use_richardson_extrapolation and integrator.__implicit__:
        pytest.skip(
            "Richardson Extrapolation is too slow with implicit methods")
    D.set_float_fmt(ffmt)
    if integrator.__symplectic__:
        pytest.skip(
            "Exponential decay system is not in the form compatible with symplectic integrators"
        )
    print("Testing {} float format".format(D.float_fmt()))

    import torch

    torch.set_printoptions(precision=17)

    device = torch.device(device)
Example 23
class PyAudiLinearSystemTestCase:
    @pytest.mark.skipif(D.backend() != 'numpy'
                        or 'gdual_double' not in D.available_float_fmt(),
                        reason="PyAudi Tests")
    def test_gdual_double_solve_linear(self):
        D.set_float_fmt('gdual_double')
        A = D.array([
            [D.gdual_double(-1.0, 'a11', 5),
             D.gdual_double(3 / 2, 'a12', 5)],
            [D.gdual_double(1.0, 'a21', 5),
             D.gdual_double(-1.0, 'a22', 5)],
        ])
        b = D.array([[D.gdual_double(1.0, 'b1', 5)],
                     [D.gdual_double(1.0, 'b2', 5)]])
        self.do(A, b)

    @pytest.mark.skipif(D.backend() != 'numpy'
                        or 'gdual_vdouble' not in D.available_float_fmt(),
                        reason="PyAudi Tests")
    def test_gdual_vdouble_solve_linear(self):
        D.set_float_fmt('gdual_vdouble')
        A = D.array([
            [
                D.gdual_vdouble([-1.0, 1 / 2], 'a11', 5),
                D.gdual_vdouble([3 / 2, 3 / 2], 'a12', 5)
            ],
            [
                D.gdual_vdouble([1.0, 1.0], 'a21', 5),
                D.gdual_vdouble([-1.0, -1.0], 'a22', 5)
            ],
        ])
        b = D.array([[D.gdual_vdouble([1.0, -1.0], 'b1', 5)],
                     [D.gdual_vdouble([1.0, 1.0], 'b2', 5)]])
        self.do(A, b)

    @pytest.mark.skipif(D.backend() != 'numpy'
                        or 'gdual_double' not in D.available_float_fmt(),
                        reason="PyAudi Tests")
    def test_gdual_double_solve_linear_big(self):
        D.set_float_fmt('gdual_double')
        np.random.seed(22)
        A1 = self.generate_random_nondegenerate_matrix(60)

        A = []
        b = []
        for idx in range(A1.shape[0]):
            A.append([])
            for jdx in range(A1.shape[1]):
                A[idx].append(
                    D.gdual_double(A1[idx, jdx],
                                   'a{}{}'.format(idx + 1, jdx + 1), 1))
            b.append([D.gdual_double(1.0, 'b{}'.format(idx + 1), 1)])

        A = D.array(A)
        b = D.array(b)
        self.do(A, b)

    @pytest.mark.skipif(D.backend() != 'numpy'
                        or 'gdual_vdouble' not in D.available_float_fmt(),
                        reason="PyAudi Tests")
    def test_gdual_vdouble_solve_linear_big(self):
        D.set_float_fmt('gdual_vdouble')
        np.random.seed(22)
        A1 = self.generate_random_nondegenerate_matrix(12)
        A2 = self.generate_random_nondegenerate_matrix(12)

        A = []
        b = []
        for idx in range(A1.shape[0]):
            A.append([])
            for jdx in range(A1.shape[1]):
                A[idx].append(
                    D.gdual_vdouble([A1[idx, jdx], A2[idx, jdx]],
                                    'a{}{}'.format(idx + 1, jdx + 1), 1))
            b.append([D.gdual_vdouble([1.0, -1.0], 'b{}'.format(idx + 1), 1)])

        A = D.array(A)
        b = D.array(b)
        self.do(A, b)

    def generate_random_nondegenerate_matrix(self, size):
        A = np.random.normal(size=(size, size))
        while np.abs(np.linalg.det(D.to_float(A))) <= 1e-5:
            A = np.random.normal(size=(size, size), scale=250.0)
        return A

    def do(self, A, b):
        pass
Example 24
import desolver as de
import desolver.backend as D
import pytest


integrator_set = set(de.available_methods(False).values())
integrator_set = sorted(integrator_set, key=lambda x: x.__name__)
explicit_integrator_set = [
    pytest.param(intg, marks=pytest.mark.explicit) for intg in integrator_set if not intg.__implicit__
]
implicit_integrator_set = [
    pytest.param(intg, marks=pytest.mark.implicit) for intg in integrator_set if intg.__implicit__
]


if D.backend() == 'torch':
    devices_set = [pytest.param('cpu', marks=pytest.mark.cpu)]
    import torch
    if torch.cuda.is_available():
        devices_set.insert(0, pytest.param('cuda', marks=pytest.mark.gpu))
else:
    devices_set = [None]
    
dt_set   = [D.pi / 307, D.pi / 512]
ffmt_set = D.available_float_fmt()

ffmt_param         = pytest.mark.parametrize('ffmt', ffmt_set)
integrator_param   = pytest.mark.parametrize('integrator', explicit_integrator_set + implicit_integrator_set)
richardson_param   = pytest.mark.parametrize('use_richardson_extrapolation', [True, False])
device_param       = pytest.mark.parametrize('device', devices_set)
dt_param           = pytest.mark.parametrize('dt', dt_set)
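
These shared parametrisations are meant to be stacked as decorators, which is how the tests above consume them; a minimal consumer (test_example is a hypothetical name) would look like:

import desolver.backend as D
from .common import ffmt_param, device_param

@ffmt_param
@device_param
def test_example(ffmt, device):
    D.set_float_fmt(ffmt)
    assert D.float_fmt() == ffmt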
Example 25
def test_logical_xor_where():
    a = D.array([True, False, False, True], dtype=D.bool)
    b = D.array([False, False, True, True], dtype=D.bool)
    ref = D.array([True, False, True, False], dtype=D.bool)
    where = D.array([True, False, False, True], dtype=D.bool)
    assert (D.all(D.logical_xor(a, b, where=where)[where] == ref[where]))


def test_logical_xor_out_where():
    a = D.array([True, False, False, True], dtype=D.bool)
    b = D.array([False, False, True, True], dtype=D.bool)
    ref = D.array([True, False, True, False], dtype=D.bool)
    out = D.zeros_like(a, dtype=D.bool)
    where = D.array([True, False, False, True], dtype=D.bool)
    D.logical_xor(a, b, out=out, where=where)
    assert (D.all(out[where] == ref[where]))


@pytest.mark.skipif(D.backend() == 'numpy', reason="Numpy is Reference")
def test_linspace():
    ref_lin = np.linspace(1.0, 10.0, num=100)
    test_lin = D.linspace(1.0, 10.0, num=100)
    assert (np.all(
        np.abs(ref_lin - test_lin.cpu().numpy()) / ref_lin <= 10 *
        D.epsilon()))


@pytest.mark.skipif(D.backend() == 'numpy', reason="Numpy is Reference")
def test_logspace():
    ref_lin = np.logspace(-10.0, 10.0, num=100)
    test_lin = D.logspace(-10.0, 10.0, num=100)
    assert (np.all(
        np.abs(ref_lin - test_lin.cpu().numpy()) / ref_lin <= 10 *
        D.epsilon()))
Example 26
def rhs_jac(t, state, **kwargs):
    nonlocal de_mat
    rhs.analytic_jacobian_called = True
    if D.backend() == 'torch':
        de_mat = de_mat.to(state.device)
    return de_mat
Example 27
        tol = D.epsilon()
    assert (success)
    conv_root1 = D.stack([
        D.array(np.allclose(D.to_numpy(D.to_float(r1)),
                            D.to_numpy(D.to_float(r)), 128 * tol, 32 * tol),
                dtype=D.bool) for r, r1 in zip(root, gt_root1)
    ])
    conv_root2 = D.stack([
        D.array(np.allclose(D.to_numpy(D.to_float(r2)),
                            D.to_numpy(D.to_float(r)), 128 * tol, 32 * tol),
                dtype=D.bool) for r, r2 in zip(root, gt_root2)
    ])
    assert (D.all(conv_root1 | conv_root2))


@pytest.mark.skipif(D.backend() != 'torch',
                    reason="Pytorch backend required to test jacobian via AD")
@pytest.mark.parametrize('ffmt', D.available_float_fmt())
@pytest.mark.parametrize('tol', [None, 40, 1])
def test_newtonraphson_pytorch_jacobian(ffmt, tol):
    print("Set dtype to:", ffmt)
    D.set_float_fmt(ffmt)
    np.random.seed(21)

    if tol is not None:
        tol = tol * D.epsilon()

    if D.backend() == 'torch':
        import torch

        torch.set_printoptions(precision=17)
Example 28
def test_gradients():
    for ffmt in D.available_float_fmt():
        D.set_float_fmt(ffmt)

        print("Testing {} float format".format(D.float_fmt()))

        import torch

        torch.set_printoptions(precision=17)
        torch.set_num_threads(1)

        torch.autograd.set_detect_anomaly(True)

        class NNController(torch.nn.Module):
            def __init__(self,
                         in_dim=2,
                         out_dim=2,
                         inter_dim=50,
                         append_time=False):
                super().__init__()

                self.append_time = append_time

                self.net = torch.nn.Sequential(
                    torch.nn.Linear(in_dim + (1 if append_time else 0),
                                    inter_dim), torch.nn.Softplus(),
                    torch.nn.Linear(inter_dim, out_dim), torch.nn.Sigmoid())

                for idx, m in enumerate(self.net.modules()):
                    if isinstance(m, torch.nn.Linear):
                        torch.nn.init.xavier_normal_(m.weight, gain=1.0)
                        torch.nn.init.constant_(m.bias, 0.0)

            def forward(self, t, y, dy):
                if self.append_time:
                    return self.net(
                        torch.cat([y.view(-1),
                                   dy.view(-1),
                                   t.view(-1)]))
                else:
                    return self.net(torch.cat([y, dy]))

        class SimpleODE(torch.nn.Module):
            def __init__(self, inter_dim=10, k=1.0):
                super().__init__()
                self.nn_controller = NNController(in_dim=4,
                                                  out_dim=1,
                                                  inter_dim=inter_dim)
                self.A = torch.tensor([[0.0, 1.0], [-k, -1.0]],
                                      requires_grad=True)

            def forward(self, t, y, params=None):
                if not isinstance(t, torch.Tensor):
                    torch_t = torch.tensor(t)
                else:
                    torch_t = t
                if not isinstance(y, torch.Tensor):
                    torch_y = torch.tensor(y)
                else:
                    torch_y = y
                if params is not None:
                    if not isinstance(params, torch.Tensor):
                        torch_params = torch.tensor(params)
                    else:
                        torch_params = params

                dy = torch.matmul(self.A, torch_y)

                controller_effect = self.nn_controller(
                    torch_t, torch_y, dy) if params is None else params

                return dy + torch.cat(
                    [torch.tensor([0.0]), (controller_effect * 2.0 - 1.0)])

        with de.utilities.BlockTimer(section_label="Integrator Tests"):
            for i in sorted(set(de.available_methods(False).values()),
                            key=lambda x: x.__name__):
                try:
                    yi1 = D.array([1.0, 0.0], requires_grad=True)
                    df = SimpleODE(k=1.0)

                    a = de.OdeSystem(df,
                                     yi1,
                                     t=(0, 1.),
                                     dt=0.0675,
                                     rtol=D.epsilon()**0.5,
                                     atol=D.epsilon()**0.5)
                    a.set_method(i)
                    a.integrate(eta=True)

                    dyfdyi = D.jacobian(a.y[-1], a.y[0])
                    dyi = D.array([0.0, 1.0]) * D.epsilon()**0.5
                    dyf = D.einsum("nk,k->n", dyfdyi, dyi)
                    yi2 = yi1 + dyi

                    b = de.OdeSystem(df,
                                     yi2,
                                     t=(0, 1.),
                                     dt=0.0675,
                                     rtol=D.epsilon()**0.5,
                                     atol=D.epsilon()**0.5)
                    b.set_method(i)
                    b.integrate(eta=True)

                    true_diff = b.y[-1] - a.y[-1]

                    print(D.norm(true_diff - dyf), D.epsilon()**0.5)

                    assert (D.allclose(true_diff,
                                       dyf,
                                       rtol=4 * D.epsilon()**0.5,
                                       atol=4 * D.epsilon()**0.5))
                    print("{} method test succeeded!".format(a.integrator))
                except Exception as e:
                    raise RuntimeError(
                        "Test failed for integration method: {}".format(
                            a.integrator)) from e
            print("")

        print("{} backend test passed successfully!".format(D.backend()))
Example 29
def test_float_formats_atypical_shape(ffmt, integrator,
                                      use_richardson_extrapolation, device):
    if use_richardson_extrapolation and integrator.__implicit__:
        pytest.skip(
            "Richardson Extrapolation is too slow with implicit methods")
    D.set_float_fmt(ffmt)

    if D.backend() == 'torch':
        import torch

        torch.set_printoptions(precision=17)

        torch.autograd.set_detect_anomaly(False)  # Enable if a test fails

        device = torch.device(device)

    print("Testing {} float format".format(D.float_fmt()))

    from .common import set_up_basic_system

    de_mat, _, analytic_soln, y_init, dt, _ = set_up_basic_system(integrator)

    @de.rhs_prettifier("""[vx, -x+t]""")
    def rhs(t, state, **kwargs):
        nonlocal de_mat
        extra = D.array([0.0, t])
        if D.backend() == 'torch':
            de_mat = de_mat.to(state.device)
            extra = extra.to(state.device)
        return D.sum(de_mat[:, :, None, None, None] * state,
                     axis=1) + extra[:, None, None, None]

    y_init = D.array([[[[1., 0.]] * 1] * 1] * 3).T

    print(rhs(0.0, y_init).shape)

    if D.backend() == 'torch':
        y_init = y_init.contiguous().to(device)

    a = de.OdeSystem(rhs,
                     y0=y_init,
                     dense_output=False,
                     t=(0, D.pi / 4),
                     dt=D.pi / 64,
                     rtol=D.epsilon()**0.5,
                     atol=D.epsilon()**0.5)

    method = integrator
    method_tolerance = a.atol * 10 + D.epsilon()
    if use_richardson_extrapolation:
        method = de.integrators.generate_richardson_integrator(method)
        method_tolerance = method_tolerance * 5

    with de.utilities.BlockTimer(section_label="Integrator Tests") as sttimer:
        a.set_method(method)
        print("Testing {} with dt = {:.4e}".format(a.integrator, a.dt))

        a.integrate(eta=True)

        max_diff = D.max(D.abs(analytic_soln(a.t[-1], y_init) - a.y[-1]))
        if a.integrator.adaptive:
            assert max_diff <= method_tolerance, "{} Failed with max_diff from analytical solution = {}".format(
                a.integrator, max_diff)
        a.reset()
    print("")

    print("{} backend test passed successfully!".format(D.backend()))
Example 30
import desolver as de
import desolver.backend as D
import numpy as np


@np.testing.dec.skipif(D.backend() != 'torch', "PyTorch Unavailable")
def test_gradients():
    for ffmt in D.available_float_fmt():
        D.set_float_fmt(ffmt)

        print("Testing {} float format".format(D.float_fmt()))

        import torch

        torch.set_printoptions(precision=17)
        torch.set_num_threads(1)

        torch.autograd.set_detect_anomaly(True)

        class NNController(torch.nn.Module):
            def __init__(self,
                         in_dim=2,
                         out_dim=2,
                         inter_dim=50,
                         append_time=False):
                super().__init__()

                self.append_time = append_time

                self.net = torch.nn.Sequential(
                    torch.nn.Linear(in_dim + (1 if append_time else 0),