Example no. 1
0
def test_newtonraphson_dims(ffmt, tol, dim):
    """Check that the Newton-Raphson optimizer finds a root of x**p - s
    in `dim` dimensions, for the float format `ffmt` and tolerance `tol`.

    The converged root must match either the positive or the negative
    analytic root componentwise.
    """
    print("Set dtype to:", ffmt)
    D.set_float_fmt(ffmt)
    np.random.seed(30)

    # Scale the requested tolerance by machine epsilon of the active format.
    tol = None if tol is None else tol * D.epsilon()

    if D.backend() == 'torch':
        import torch
        torch.set_printoptions(precision=17)
        torch.autograd.set_detect_anomaly(False)

    if ffmt == 'gdual_vdouble':
        pytest.skip("Root-finding is ill-conceived with vectorised gduals")

    offset = D.array(np.random.uniform(1, 10, size=(dim, )))
    power = D.array(np.random.uniform(1, 5, size=(dim, )))
    pos_root = offset**(1 / power)
    neg_root = -offset**(1 / power)

    def fun(x):
        return x**power - offset

    def jac(x):
        return D.diag(power * D.reshape(x, (-1, ))**(power - 1))

    guess = D.array(np.random.uniform(1, 3, size=(dim, )))
    print(pos_root, neg_root)
    print(guess)
    print(fun(guess))
    print(jac(guess))

    root, (success, num_iter, prec) = de.utilities.optimizer.newtonraphson(
        fun, guess, jac=jac, tol=tol, verbose=True)

    if tol is None:
        tol = D.epsilon()
    assert success

    def _close(expected, found):
        # Componentwise closeness, wrapped back into a backend bool array.
        return D.array(np.allclose(D.to_numpy(D.to_float(expected)),
                                   D.to_numpy(D.to_float(found)),
                                   128 * tol, 32 * tol),
                       dtype=D.bool)

    near_pos = D.stack([_close(r1, r) for r, r1 in zip(root, pos_root)])
    near_neg = D.stack([_close(r2, r) for r, r2 in zip(root, neg_root)])
    assert D.all(near_pos | near_neg)
Example no. 2
0
def test_dense_output(ffmt, use_richardson_extrapolation):
    """Integrate a basic linear system and validate its dense output.

    Checks endpoint states, time-keyed indexing, time-range slicing (with
    and without strides), and interpolation at random sample points, all
    against the analytic solution.
    """
    D.set_float_fmt(ffmt)

    if D.backend() == 'torch':
        import torch

        torch.set_printoptions(precision=17)

        # Anomaly detection slows torch dramatically; every sibling test
        # keeps it off.  Enable if a test fails.
        torch.autograd.set_detect_anomaly(False)

    print("Testing {} float format".format(D.float_fmt()))

    from . import common

    (de_mat, rhs, analytic_soln, y_init, dt, a) = common.set_up_basic_system()

    assert (a.integration_status == "Integration has not been run.")

    if use_richardson_extrapolation:
        a.method = de.integrators.generate_richardson_integrator(a.method)
    a.rtol = a.atol = D.epsilon()**0.75
    a.integrate()

    assert (a.integration_status == "Integration completed successfully.")

    # Endpoint states by integer index must match the analytic solution.
    assert (D.max(D.abs(a[0].y - analytic_soln(a[0].t, y_init))) <=
            4 * D.epsilon())
    assert (D.max(D.abs(a[0].t)) <= 4 * D.epsilon())
    assert (D.max(D.abs(a[-1].y - analytic_soln(a[-1].t, y_init))) <=
            10 * D.epsilon()**0.5)

    # Indexing by time value must agree with indexing by position.
    assert (D.max(D.abs(a[a[0].t].y - analytic_soln(a[0].t, y_init))) <=
            4 * D.epsilon())
    assert (D.max(D.abs(a[a[0].t].t)) <= 4 * D.epsilon())
    assert (D.max(D.abs(a[a[-1].t].y - analytic_soln(a[-1].t, y_init))) <=
            10 * D.epsilon()**0.5)

    # Time-range slices must reproduce the stored trajectory.
    assert (D.max(D.abs(D.stack(a[a[0].t:a[-1].t].y) - D.stack(a.y))) <=
            4 * D.epsilon())
    assert (D.max(D.abs(D.stack(a[:a[-1].t].y) - D.stack(a.y))) <=
            4 * D.epsilon())

    # Strided time-range slices must reproduce the strided trajectory.
    assert (D.max(D.abs(D.stack(a[a[0].t:a[-1].t:2].y) - D.stack(a.y[::2]))) <=
            4 * D.epsilon())
    assert (D.max(D.abs(D.stack(a[a[0].t::2].y) - D.stack(a.y[::2]))) <=
            4 * D.epsilon())
    assert (D.max(D.abs(D.stack(a[:a[-1].t:2].y) - D.stack(a.y[::2]))) <=
            4 * D.epsilon())

    # Interpolated dense output at random times within the solution span.
    np.random.seed(42)
    sample_points = D.array(np.random.uniform(a.t[0], a.t[-1], 1024))
    assert (D.max(
        D.abs(a.sol(sample_points) -
              analytic_soln(sample_points, y_init).T)).item() <= D.epsilon()**
            0.5)
Example no. 3
0
 def analytic_soln(t, initial_conditions):
     """Analytic solution of the basic test system at time(s) t.

     Returns the stacked pair of state components built from sin/cos of t
     plus linear/constant drift terms.
     """
     c1 = initial_conditions[0]
     c2 = initial_conditions[1] - 1

     # Hoist the conversions: the original recomputed D.asarray(t) five
     # times and D.to_float(...) four times inside a single expression.
     t_arr = D.asarray(t)
     t_flt = D.to_float(t_arr)

     return D.stack([
         c2 * D.sin(t_flt) + c1 * D.cos(t_flt) + t_arr,
         c2 * D.cos(t_flt) - c1 * D.sin(t_flt) + 1
     ])
Example no. 4
0
def test_gradients_simple_oscillator(ffmt, integrator,
                                     use_richardson_extrapolation, device):
    """Check the autograd Jacobian of the final state w.r.t. the initial
    state for a simple harmonic oscillator, against the Jacobian of the
    closed-form solution (torch backend only, on the given device)."""
    if use_richardson_extrapolation and integrator.__implicit__:
        pytest.skip(
            "Richardson Extrapolation is too slow with implicit methods")
    D.set_float_fmt(ffmt)

    print("Testing {} float format".format(D.float_fmt()))

    import torch

    torch.set_printoptions(precision=17)

    device = torch.device(device)

    torch.autograd.set_detect_anomaly(False)  # Enable if a test fails

    def rhs(t, state, k, m, **kwargs):
        # SHO dynamics: d/dt [x, v] = [[0, 1], [-k/m, 0]] @ [x, v].
        return D.array([[0.0, 1.0], [-k / m, 0.0]], device=device) @ state

    csts = dict(k=1.0, m=1.0)
    # One oscillation period for the chosen constants.
    T = 2 * D.pi * D.sqrt(D.array(csts['m'] / csts['k'])).to(device)
    # Step size scaled to the integrator order so truncation error sits
    # near the float format's precision floor, but never below 5e-2.
    dt = max(0.5 * (D.epsilon()**0.5)**(1.0 / (max(2, integrator.order - 1))),
             5e-2)

    def true_solution_sho(t, initial_state, k, m):
        # Closed-form SHO state [x(t), v(t)] in amplitude/phase form.
        w2 = D.array(k / m).to(device)
        w = D.sqrt(w2)
        A = D.sqrt(initial_state[0]**2 + initial_state[1]**2 / w2)
        phi = D.atan2(-initial_state[1], w * initial_state[0])
        return D.stack([A * D.cos(w * t + phi), -w * A * D.sin(w * t + phi)]).T

    method = integrator
    if use_richardson_extrapolation:
        method = de.integrators.generate_richardson_integrator(method)

    with de.utilities.BlockTimer(section_label="Integrator Tests"):
        # requires_grad=True so the solution carries an autograd graph
        # back to the initial state.
        y_init = D.array([1., 1.], requires_grad=True).to(device)

        a = de.OdeSystem(rhs,
                         y_init,
                         t=(0, T),
                         dt=T * dt,
                         rtol=D.epsilon()**0.5,
                         atol=D.epsilon()**0.5,
                         constants=csts)
        a.set_method(method)
        print("Testing {} with dt = {:.4e}".format(a.integrator, a.dt))

        a.integrate(eta=True)

        # Numeric Jacobian via autograd vs. Jacobian of the analytic map.
        Jy = D.jacobian(a.y[-1], a.y[0])
        true_Jy = D.jacobian(true_solution_sho(a.t[-1], a.y[0], **csts),
                             a.y[0])

        print(a.integrator.adaptive,
              D.mean(D.abs(D.stack(a.t[1:]) - D.stack(a.t[:-1]))),
              D.norm(true_Jy - Jy),
              D.epsilon()**0.5)

        # Only adaptive integrators are held to the tolerance-derived bound.
        if a.integrator.adaptive:
            assert (D.allclose(true_Jy,
                               Jy,
                               rtol=4 * a.rtol**0.75,
                               atol=4 * a.atol**0.75))
        print("{} method test succeeded!".format(a.integrator))
        print("")

    print("{} backend test passed successfully!".format(D.backend()))
Example no. 5
0
def test_gradients_simple_decay(ffmt, integrator, use_richardson_extrapolation,
                                device):
    """Check the autograd Jacobian of the final state w.r.t. the initial
    state for an exponential-decay ODE, against the Jacobian of the
    closed-form solution (torch backend only, on the given device)."""
    if use_richardson_extrapolation and integrator.__implicit__:
        pytest.skip(
            "Richardson Extrapolation is too slow with implicit methods")
    D.set_float_fmt(ffmt)
    if integrator.__symplectic__:
        pytest.skip(
            "Exponential decay system is not in the form compatible with symplectic integrators"
        )
    print("Testing {} float format".format(D.float_fmt()))

    import torch

    torch.set_printoptions(precision=17)

    device = torch.device(device)

    torch.autograd.set_detect_anomaly(False)  # Enable if a test fails

    def rhs(t, state, k, **kwargs):
        return -k * state

    def rhs_jac(t, state, k, **kwargs):
        return -k

    # Attach the analytic Jacobian of the RHS for implicit methods.
    rhs.jac = rhs_jac

    csts = dict(k=D.array(1.0, device=device))
    # Step size scaled to the integrator order so truncation error sits
    # near the float format's precision floor, but never below 5e-2.
    dt = max(0.5 * (D.epsilon()**0.5)**(1.0 / (max(2, integrator.order - 1))),
             5e-2)

    def true_solution_decay(t, initial_state, k):
        return initial_state * D.exp(-k * t)

    method = integrator
    if use_richardson_extrapolation:
        method = de.integrators.generate_richardson_integrator(method)

    with de.utilities.BlockTimer(section_label="Integrator Tests"):
        # NOTE(review): a dead `y_init = D.array(5.0, requires_grad=True)`
        # assignment earlier in this test was removed — it was overwritten
        # here before any use.  requires_grad=True keeps the autograd
        # graph rooted at the initial state.
        y_init = D.ones((1, ), requires_grad=True).to(device)
        y_init = y_init * D.e

        a = de.OdeSystem(rhs,
                         y_init,
                         t=(0, 1.0),
                         dt=dt,
                         rtol=D.epsilon()**0.5,
                         atol=D.epsilon()**0.5,
                         constants=csts)
        a.set_method(method)
        print("Testing {} with dt = {:.4e}".format(a.integrator, a.dt))

        a.integrate(eta=True)

        # Numeric Jacobian via autograd vs. Jacobian of the analytic map.
        Jy = D.jacobian(a.y[-1], a.y[0])
        true_Jy = D.jacobian(true_solution_decay(a.t[-1], a.y[0], **csts),
                             a.y[0])

        print(a.y[-1], true_solution_decay(a.t[-1], a.y[0], **csts),
              D.abs(a.y[-1] - true_solution_decay(a.t[-1], a.y[0], **csts)))
        print(a.integrator.adaptive,
              D.mean(D.abs(D.stack(a.t[1:]) - D.stack(a.t[:-1]))),
              D.norm(true_Jy - Jy), 32 * a.rtol)
        print(a.integrator.adaptive, true_Jy, Jy)

        # Only adaptive integrators are held to the tolerance-derived bound.
        if a.integrator.adaptive:
            assert (D.allclose(true_Jy, Jy, rtol=32 * a.rtol,
                               atol=32 * a.atol))
        print("{} method test succeeded!".format(a.integrator))
        print("")

    print("{} backend test passed successfully!".format(D.backend()))
Example no. 6
0
 def true_solution_sho(t, initial_state, k, m):
     """Closed-form simple-harmonic-oscillator state [x(t), v(t)].

     Rewrites the initial state in amplitude/phase form for angular
     frequency sqrt(k/m), then evaluates position and velocity at t.
     """
     omega_sq = D.array(k / m).to(device)
     omega = D.sqrt(omega_sq)
     amplitude = D.sqrt(initial_state[0]**2 + initial_state[1]**2 / omega_sq)
     phase = D.atan2(-initial_state[1], omega * initial_state[0])
     position = amplitude * D.cos(omega * t + phase)
     velocity = -omega * amplitude * D.sin(omega * t + phase)
     return D.stack([position, velocity]).T