def test_float_formats_typical_shape(ffmt, integrator, use_richardson_extrapolation, device):
    """Integrate the basic 2D linear system and compare the endpoint against
    the analytic solution for a given float format / integrator / device
    combination, optionally wrapped in Richardson extrapolation."""
    if use_richardson_extrapolation and integrator.__implicit__:
        pytest.skip("Richardson Extrapolation is too slow with implicit methods")
    D.set_float_fmt(ffmt)
    if D.backend() == 'torch':
        import torch
        torch.set_printoptions(precision=17)
        torch.autograd.set_detect_anomaly(False)  # Enable if a test fails
        device = torch.device(device)
    print("Testing {} float format".format(D.float_fmt()))
    from .common import set_up_basic_system
    de_mat, rhs, analytic_soln, y_init, dt, _ = set_up_basic_system(
        integrator, hook_jacobian=True)
    y_init = D.array([1., 0.])
    if D.backend() == 'torch':
        y_init = y_init.to(device)
    a = de.OdeSystem(rhs, y0=y_init, dense_output=False, t=(0, D.pi / 4),
                     dt=D.pi / 64, rtol=D.epsilon()**0.5, atol=D.epsilon()**0.5)
    method = integrator
    # Adaptive integrators must land within 10x atol (5x looser through the
    # Richardson wrapper, which changes the error constant).
    method_tolerance = a.atol * 10 + D.epsilon()
    if use_richardson_extrapolation:
        method = de.integrators.generate_richardson_integrator(method)
        method_tolerance = method_tolerance * 5
    with de.utilities.BlockTimer(section_label="Integrator Tests") as sttimer:
        a.set_method(method)
        print("Testing {} with dt = {:.4e}".format(a.integrator, a.dt))
        a.integrate(eta=True)
        print("Average step-size:",
              D.mean(D.abs(D.array(a.t[1:]) - D.array(a.t[:-1]))))
        max_diff = D.max(D.abs(analytic_soln(a.t[-1], y_init) - a.y[-1]))
        if a.integrator.adaptive:
            assert max_diff <= method_tolerance, "{} Failed with max_diff from analytical solution = {}".format(
                a.integrator, max_diff)
        if a.integrator.__implicit__:
            # Fixed message: it is shown when the assert FAILS, i.e. when the
            # analytic jacobian was NOT used during integration.
            assert rhs.analytic_jacobian_called and a.njev > 0, "Analytic jacobian was not called as part of integration"
        a.reset()
    print("")
    print("{} backend test passed successfully!".format(D.backend()))
def test_dt_dir_fix(ffmt):
    """Construct an OdeSystem whose initial step size is negative while the
    time span runs forward; construction must not fail."""
    D.set_float_fmt(ffmt)
    if D.backend() == 'torch':
        import torch
        torch.set_printoptions(precision=17)
        torch.autograd.set_detect_anomaly(True)
    print("Testing {} float format".format(D.float_fmt()))

    de_mat = D.array([[0.0, 1.0], [-1.0, 0.0]])

    @de.rhs_prettifier("""[vx, -x+t]""")
    def rhs(t, state, k, **kwargs):
        return de_mat @ state + D.array([0.0, t])

    y_init = D.array([1., 0.])
    # NOTE(review): the test only constructs the system and never integrates
    # or asserts on `a` — confirm whether an assertion on a.dt was intended.
    a = de.OdeSystem(rhs, y0=y_init, dense_output=False, t=(0, 2 * D.pi),
                     dt=-0.01, rtol=D.epsilon()**0.5, atol=D.epsilon()**0.5,
                     constants=dict(k=1.0))
def test_integration_and_nearestfloat_no_dense_output():
    """Integrate without dense output across all float formats and check that
    indexing the solution by a time value returns the nearest stored step."""
    for ffmt in D.available_float_fmt():
        D.set_float_fmt(ffmt)
        print("Testing {} float format".format(D.float_fmt()))

        de_mat = D.array([[0.0, 1.0], [-1.0, 0.0]])

        @de.rhs_prettifier("""[vx, -x+t]""")
        def rhs(t, state, k, **kwargs):
            return de_mat @ state + D.array([0.0, t])

        # The unused analytic_soln helper from the original has been removed;
        # this test only checks status strings and nearest-time indexing.
        y_init = D.array([1., 0.])
        a = de.OdeSystem(rhs, y0=y_init, dense_output=False, t=(0, 2 * D.pi),
                         dt=0.01, rtol=D.epsilon()**0.5, atol=D.epsilon()**0.5,
                         constants=dict(k=1.0))
        assert (a.integration_status() == "Integration has not been run.")
        a.integrate()
        assert (a.integration_status() == "Integration completed successfully.")
        # a[t] returns the state nearest to t, so the second-to-last stored
        # time must be within one step of the requested endpoint.
        assert (D.abs(a.t[-2] - a[2 * D.pi].t) <= D.abs(a.dt))
def test_non_callable_rhs():
    """Passing a non-callable rhs (the plain system matrix) to OdeSystem must
    raise a TypeError, for every float format.

    NOTE(review): this duplicates the name of the parametrized
    test_non_callable_rhs(ffmt) elsewhere in the file — one of the two will
    shadow the other at collection time; consider renaming."""
    for ffmt in D.available_float_fmt():
        D.set_float_fmt(ffmt)
        print("Testing {} float format".format(D.float_fmt()))

        de_mat = D.array([[0.0, 1.0], [-1.0, 0.0]])
        y_init = D.array([1., 0.])
        # The original built the system without asserting that the
        # non-callable rhs is rejected; wrap it so the test actually checks.
        with pytest.raises(TypeError):
            a = de.OdeSystem(de_mat, y0=y_init, dense_output=False,
                             t=(0, 2 * D.pi), dt=0.01,
                             rtol=D.epsilon()**0.5, atol=D.epsilon()**0.5,
                             constants=dict(k=1.0))
            a.tf = 0.0
def test_non_callable_rhs(ffmt):
    """A non-callable rhs (the system matrix itself) must raise TypeError."""
    D.set_float_fmt(ffmt)
    if D.backend() == 'torch':
        import torch
        torch.set_printoptions(precision=17)
        torch.autograd.set_detect_anomaly(True)
    print("Testing {} float format".format(D.float_fmt()))
    from . import common
    (de_mat, rhs, analytic_soln, y_init, dt, _) = common.set_up_basic_system()
    # Keep the raises block minimal: only the code under test lives inside it,
    # so a setup failure cannot masquerade as the expected TypeError.
    with pytest.raises(TypeError):
        a = de.OdeSystem(de_mat, y0=y_init, dense_output=False,
                         t=(0, 2 * D.pi), dt=dt,
                         rtol=D.epsilon()**0.5, atol=D.epsilon()**0.5,
                         constants=dict(k=1.0))
        a.tf = 0.0
def test_dense_output(ffmt, use_richardson_extrapolation):
    """Check dense-output interpolation, time-based indexing, and slicing
    against the analytic solution of the basic test system."""
    D.set_float_fmt(ffmt)
    if D.backend() == 'torch':
        import torch
        torch.set_printoptions(precision=17)
        torch.autograd.set_detect_anomaly(True)
    print("Testing {} float format".format(D.float_fmt()))
    from . import common
    (de_mat, rhs, analytic_soln, y_init, dt, a) = common.set_up_basic_system()
    assert (a.integration_status == "Integration has not been run.")
    if use_richardson_extrapolation:
        a.method = de.integrators.generate_richardson_integrator(a.method)
        a.rtol = a.atol = D.epsilon()**0.75
    a.integrate()
    assert (a.integration_status == "Integration completed successfully.")
    # Endpoint states must match the analytic solution...
    assert (D.max(D.abs(a[0].y - analytic_soln(a[0].t, y_init))) <= 4 * D.epsilon())
    assert (D.max(D.abs(a[0].t)) <= 4 * D.epsilon())
    assert (D.max(D.abs(a[-1].y - analytic_soln(a[-1].t, y_init))) <= 10 * D.epsilon()**0.5)
    # ...and time-based indexing must agree with positional indexing.
    assert (D.max(D.abs(a[a[0].t].y - analytic_soln(a[0].t, y_init))) <= 4 * D.epsilon())
    assert (D.max(D.abs(a[a[0].t].t)) <= 4 * D.epsilon())
    assert (D.max(D.abs(a[a[-1].t].y - analytic_soln(a[-1].t, y_init))) <= 10 * D.epsilon()**0.5)
    # Time-sliced views must match the stored trajectory, with and without
    # explicit start/stop and with a stride.
    assert (D.max(D.abs(D.stack(a[a[0].t:a[-1].t].y) - D.stack(a.y))) <= 4 * D.epsilon())
    assert (D.max(D.abs(D.stack(a[:a[-1].t].y) - D.stack(a.y))) <= 4 * D.epsilon())
    assert (D.max(D.abs(D.stack(a[a[0].t:a[-1].t:2].y) - D.stack(a.y[::2]))) <= 4 * D.epsilon())
    assert (D.max(D.abs(D.stack(a[a[0].t::2].y) - D.stack(a.y[::2]))) <= 4 * D.epsilon())
    assert (D.max(D.abs(D.stack(a[:a[-1].t:2].y) - D.stack(a.y[::2]))) <= 4 * D.epsilon())
    # The interpolant must agree with the analytic solution at random times.
    np.random.seed(42)
    sample_points = D.array(np.random.uniform(a.t[0], a.t[-1], 1024))
    assert (D.max(
        D.abs(a.sol(sample_points) -
              analytic_soln(sample_points, y_init).T)).item() <= D.epsilon()**0.5)
def test_integration_and_representation():
    """Integrate with dense output for every float format and check str/repr
    plus interpolant accuracy against the analytic solution."""
    for ffmt in D.available_float_fmt():
        D.set_float_fmt(ffmt)
        print("Testing {} float format".format(D.float_fmt()))

        de_mat = D.array([[0.0, 1.0], [-1.0, 0.0]])

        @de.rhs_prettifier("""[vx, -x+t]""")
        def rhs(t, state, k, **kwargs):
            return de_mat @ state + D.array([0.0, t])

        def analytic_soln(t, initial_conditions):
            c1 = initial_conditions[0]
            c2 = initial_conditions[1] - 1
            return D.stack([
                c2 * D.sin(D.to_float(D.asarray(t))) + c1 * D.cos(D.to_float(D.asarray(t))) + D.asarray(t),
                c2 * D.cos(D.to_float(D.asarray(t))) - c1 * D.sin(D.to_float(D.asarray(t))) + 1
            ])

        # Removed: unused kbinterrupt_cb helper and the no-op
        # `try: ... except: raise` wrapper around the assertions.
        y_init = D.array([1., 0.])
        a = de.OdeSystem(rhs, y0=y_init, dense_output=True, t=(0, 2 * D.pi),
                         dt=0.01, rtol=D.epsilon()**0.5, atol=D.epsilon()**0.5,
                         constants=dict(k=1.0))
        a.integrate()
        print(str(a))
        print(repr(a))
        assert (D.max(D.abs(a.sol(a.t[0]) - y_init)) <= 8 * D.epsilon()**0.5)
        assert (D.max(D.abs(a.sol(a.t[-1]) - analytic_soln(a.t[-1], y_init))) <= 8 * D.epsilon()**0.5)
        assert (D.max(D.abs(a.sol(a.t).T - analytic_soln(a.t, y_init))) <= 8 * D.epsilon()**0.5)
def test_event_detection():
    """A terminal time event at t = pi/8 must stop every available
    integrator within tolerance of the event time."""
    for ffmt in D.available_float_fmt():
        if ffmt == 'float16':
            continue  # float16 lacks the precision for event localisation
        D.set_float_fmt(ffmt)
        print("Testing event detection for float format {}".format(D.float_fmt()))

        de_mat = D.array([[0.0, 1.0], [-1.0, 0.0]])

        @de.rhs_prettifier("""[vx, -x+t]""")
        def rhs(t, state, **kwargs):
            return de_mat @ state + D.array([0.0, t])

        # Removed: unused analytic_soln helper.
        y_init = D.array([1., 0.])

        def time_event(t, y, **kwargs):
            return t - D.pi / 8

        time_event.is_terminal = True
        time_event.direction = 0

        a = de.OdeSystem(rhs, y0=y_init, dense_output=True, t=(0, D.pi / 4),
                         dt=0.01, rtol=D.epsilon()**0.5, atol=D.epsilon()**0.5)

        with de.utilities.BlockTimer(section_label="Integrator Tests") as sttimer:
            for i in sorted(set(de.available_methods(False).values()),
                            key=lambda x: x.__name__):
                try:
                    a.set_method(i)
                    print("Testing {}".format(a.integrator))
                    a.integrate(eta=True, events=time_event)
                    if D.abs(a.t[-1] - D.pi / 8) > 10 * D.epsilon():
                        print("Event detection with integrator {} failed with t[-1] = {}".format(a.integrator, a.t[-1]))
                        raise RuntimeError("Failed to detect event for integrator {}".format(str(i)))
                    else:
                        print("Event detection with integrator {} succeeded with t[-1] = {}".format(a.integrator, a.t[-1]))
                    a.reset()
                except Exception as e:
                    # The original had an unreachable second raise after
                    # `raise e`; chain the root cause instead.
                    raise RuntimeError("Test failed for integration method: {}".format(a.integrator)) from e
        print("")
        print("{} backend test passed successfully!".format(D.backend()))
def test_integration_and_representation():
    """Integrate with dense output for every float format and check status
    reporting, str/repr, interpolation accuracy, and the iteration protocol."""
    for ffmt in D.available_float_fmt():
        D.set_float_fmt(ffmt)
        print("Testing {} float format".format(D.float_fmt()))

        de_mat = D.array([[0.0, 1.0], [-1.0, 0.0]])

        @de.rhs_prettifier("""[vx, -x+t]""")
        def rhs(t, state, k, **kwargs):
            return de_mat @ state + D.array([0.0, t])

        def analytic_soln(t, initial_conditions):
            c1 = initial_conditions[0]
            c2 = initial_conditions[1] - 1
            return D.stack([
                c2 * D.sin(D.to_float(D.asarray(t))) + c1 * D.cos(D.to_float(D.asarray(t))) + D.asarray(t),
                c2 * D.cos(D.to_float(D.asarray(t))) - c1 * D.sin(D.to_float(D.asarray(t))) + 1
            ])

        y_init = D.array([1., 0.])
        a = de.OdeSystem(rhs, y0=y_init, dense_output=True, t=(0, 2 * D.pi),
                         dt=0.01, rtol=D.epsilon()**0.5, atol=D.epsilon()**0.5,
                         constants=dict(k=1.0))
        assert (a.integration_status() == "Integration has not been run.")
        a.integrate()
        assert (a.integration_status() == "Integration completed successfully.")
        # The no-op `try: ... except: raise` wrapper has been removed.
        print(str(a))
        print(repr(a))
        assert (D.max(D.abs(a.sol(a.t[0]) - y_init)) <= 8 * D.epsilon()**0.5)
        assert (D.max(D.abs(a.sol(a.t[-1]) - analytic_soln(a.t[-1], y_init))) <= 8 * D.epsilon()**0.5)
        assert (D.max(D.abs(a.sol(a.t).T - analytic_soln(a.t, y_init))) <= 8 * D.epsilon()**0.5)
        # Iterating the system yields states matching the analytic solution.
        for i in a:
            assert (D.max(D.abs(i.y - analytic_soln(i.t, y_init))) <= 8 * D.epsilon()**0.5)
        assert (len(a.y) == len(a))
        assert (len(a.t) == len(a))
def test_wrong_tf(ffmt):
    """Setting tf equal to t0 must raise a ValueError."""
    D.set_float_fmt(ffmt)
    if D.backend() == 'torch':
        import torch
        torch.set_printoptions(precision=17)
        torch.autograd.set_detect_anomaly(True)
    print("Testing {} float format".format(D.float_fmt()))
    from . import common
    (de_mat, rhs, analytic_soln, y_init, dt, a) = common.set_up_basic_system()
    # Only the assignment under test lives inside the raises block so that a
    # setup failure is not mistaken for the expected ValueError.
    with pytest.raises(ValueError):
        a.tf = 0.0
def test_integration_and_nearest_float_no_dense_output(ffmt):
    """Without dense output a.sol is None, and indexing by a time value falls
    back to the nearest stored step."""
    D.set_float_fmt(ffmt)
    if D.backend() == 'torch':
        import torch
        torch.set_printoptions(precision=17)
        torch.autograd.set_detect_anomaly(True)
    print("Testing {} float format".format(D.float_fmt()))

    de_mat = D.array([[0.0, 1.0], [-1.0, 0.0]])

    @de.rhs_prettifier("""[vx, -x+t]""")
    def rhs(t, state, k, **kwargs):
        return de_mat @ state + D.array([0.0, t])

    y_init = D.array([1., 0.])
    a = de.OdeSystem(rhs, y0=y_init, dense_output=False, t=(0, 2 * D.pi),
                     dt=0.01, rtol=D.epsilon()**0.5, atol=D.epsilon()**0.5,
                     constants=dict(k=1.0))
    assert (a.integration_status == "Integration has not been run.")
    a.integrate()
    assert (a.sol is None)
    assert (a.integration_status == "Integration completed successfully.")
    # Nearest-step lookup: the second-to-last time must lie within one step
    # of the requested endpoint.
    assert (D.abs(a.t[-2] - a[2 * D.pi].t) <= D.abs(a.dt))
def test_integration_and_representation(ffmt):
    """Dense-output integration: status strings, printable representations,
    interpolant accuracy, and the iteration protocol."""
    D.set_float_fmt(ffmt)
    if D.backend() == 'torch':
        import torch
        torch.set_printoptions(precision=17)
        torch.autograd.set_detect_anomaly(True)
    print("Testing {} float format".format(D.float_fmt()))
    from . import common
    (de_mat, rhs, analytic_soln, y_init, dt, a) = common.set_up_basic_system()
    assert (a.integration_status == "Integration has not been run.")
    a.integrate()
    assert (a.integration_status == "Integration completed successfully.")
    print(str(a))
    print(repr(a))
    # The interpolant must reproduce the initial state and track the
    # analytic solution at the endpoint and at every stored time.
    assert (D.max(D.abs(a.sol(a.t[0]) - y_init)) <= 8 * D.epsilon()**0.5)
    assert (D.max(D.abs(a.sol(a.t[-1]) - analytic_soln(a.t[-1], y_init))) <= 8 * D.epsilon()**0.5)
    assert (D.max(D.abs(a.sol(a.t).T - analytic_soln(a.t, y_init))) <= 8 * D.epsilon()**0.5)
    for state in a:
        assert (D.max(D.abs(state.y - analytic_soln(state.t, y_init))) <= 8 * D.epsilon()**0.5)
    assert (len(a.y) == len(a))
    assert (len(a.t) == len(a))
def test_gradients_simple_decay(ffmt, integrator, use_richardson_extrapolation, device):
    """Backpropagate through exponential decay and compare the Jacobian of
    the final state w.r.t. the initial state against the analytic one.

    (The closing print's string literal was split across lines in the
    original paste; it is rejoined here.)"""
    if use_richardson_extrapolation and integrator.__implicit__:
        pytest.skip("Richardson Extrapolation is too slow with implicit methods")
    D.set_float_fmt(ffmt)
    if integrator.__symplectic__:
        pytest.skip("Exponential decay system is not in the form compatible with symplectic integrators")
    print("Testing {} float format".format(D.float_fmt()))
    import torch
    torch.set_printoptions(precision=17)
    device = torch.device(device)
    torch.autograd.set_detect_anomaly(False)  # Enable if a test fails

    def rhs(t, state, k, **kwargs):
        return -k * state

    def rhs_jac(t, state, k, **kwargs):
        return -k

    rhs.jac = rhs_jac

    y_init = D.array(5.0, requires_grad=True)
    csts = dict(k=D.array(1.0, device=device))
    # Scale the step with the integrator order so all methods reach a
    # comparable local error; floor at 5e-2 to bound the runtime.
    dt = max(0.5 * (D.epsilon()**0.5)**(1.0 / (max(2, integrator.order - 1))), 5e-2)

    def true_solution_decay(t, initial_state, k):
        return initial_state * D.exp(-k * t)

    method = integrator
    if use_richardson_extrapolation:
        method = de.integrators.generate_richardson_integrator(method)
    with de.utilities.BlockTimer(section_label="Integrator Tests"):
        y_init = D.ones((1, ), requires_grad=True).to(device)
        y_init = y_init * D.e
        a = de.OdeSystem(rhs, y_init, t=(0, 1.0), dt=dt,
                         rtol=D.epsilon()**0.5, atol=D.epsilon()**0.5,
                         constants=csts)
        a.set_method(method)
        print("Testing {} with dt = {:.4e}".format(a.integrator, a.dt))
        a.integrate(eta=True)
        Jy = D.jacobian(a.y[-1], a.y[0])
        true_Jy = D.jacobian(true_solution_decay(a.t[-1], a.y[0], **csts), a.y[0])
        print(a.y[-1], true_solution_decay(a.t[-1], a.y[0], **csts),
              D.abs(a.y[-1] - true_solution_decay(a.t[-1], a.y[0], **csts)))
        print(a.integrator.adaptive,
              D.mean(D.abs(D.stack(a.t[1:]) - D.stack(a.t[:-1]))),
              D.norm(true_Jy - Jy), 32 * a.rtol)
        print(a.integrator.adaptive, true_Jy, Jy)
        if a.integrator.adaptive:
            assert (D.allclose(true_Jy, Jy, rtol=32 * a.rtol, atol=32 * a.atol))
        print("{} method test succeeded!".format(a.integrator))
    print("")
    print("{} backend test passed successfully!".format(D.backend()))
def test_getter_setters():
    """Exercise the property-based configuration API of OdeSystem across all
    available float formats."""
    for ffmt in D.available_float_fmt():
        D.set_float_fmt(ffmt)
        print("Testing {} float format".format(D.float_fmt()))

        de_mat = D.array([[0.0, 1.0], [-1.0, 0.0]])

        @de.rhs_prettifier("""[vx, -x+t]""")
        def rhs(t, state, **kwargs):
            return de_mat @ state + D.array([0.0, t])

        def analytic_soln(t, initial_conditions):
            c1 = initial_conditions[0]
            c2 = initial_conditions[1] - 1
            return D.array([
                c2 * D.sin(t) + c1 * D.cos(t) + t,
                c2 * D.cos(t) - c1 * D.sin(t) + 1
            ])

        def kbinterrupt_cb(ode_sys):
            if ode_sys[-1][0] > D.pi:
                raise KeyboardInterrupt("Test Interruption and Catching")

        y_init = D.array([1., 0.])
        a = de.OdeSystem(rhs, y0=y_init, dense_output=True, t=(0, 2 * D.pi),
                         dt=0.01, rtol=D.epsilon()**0.5, atol=D.epsilon()**0.5)
        # Constructor arguments must be reflected by the properties.
        assert (a.t0 == 0)
        assert (a.tf == 2 * D.pi)
        assert (a.dt == 0.01)
        assert (a.get_current_time() == a.t0)
        assert (a.rtol == D.epsilon()**0.5)
        assert (a.atol == D.epsilon()**0.5)
        assert (D.norm(a.y[0] - y_init) <= 2 * D.epsilon())
        assert (D.norm(a.y[-1] - y_init) <= 2 * D.epsilon())
        a.set_kick_vars([True, False])
        assert (a.staggered_mask == [True, False])
        pval = 3 * D.pi
        a.tf = pval
        assert (a.tf == pval)
        pval = -1.0
        a.t0 = pval
        assert (a.t0 == pval)
        assert (a.dt == 0.01)
        a.rtol = 1e-3
        assert (a.rtol == 1e-3)
        a.atol = 1e-3
        assert (a.atol == 1e-3)
        # Both setter styles must accept every registered integrator.
        for method in de.available_methods():
            a.set_method(method)
            assert (isinstance(a.integrator, de.available_methods(False)[method]))
        for method in de.available_methods():
            a.method = method
            assert (isinstance(a.integrator, de.available_methods(False)[method]))
        # Constants behave as a mutable mapping and can be replaced/cleared.
        a.constants['k'] = 5.0
        assert (a.constants['k'] == 5.0)
        a.constants.pop('k')
        assert ('k' not in a.constants.keys())
        new_constants = dict(k=10.0)
        a.constants = new_constants
        assert (a.constants['k'] == 10.0)
        del a.constants
        assert (not bool(a.constants))
def test_event_detection_numerous_events(ffmt, integrator, use_richardson_extrapolation, device, dt, dense_output=False):
    """Detect a battery of 32 state-crossing events and verify each detected
    event actually lies on its zero-crossing."""
    if use_richardson_extrapolation and integrator.__implicit__:
        pytest.skip("Richardson Extrapolation is too slow with implicit methods")
    D.set_float_fmt(ffmt)
    if D.backend() == 'torch':
        import torch
        torch.set_printoptions(precision=17)
        torch.autograd.set_detect_anomaly(False)  # Enable if a test fails
        device = torch.device(device)
    print("Testing event detection for float format {}".format(D.float_fmt()))
    from .common import set_up_basic_system
    de_mat, rhs, analytic_soln, y_init, _, _ = set_up_basic_system(
        integrator, hook_jacobian=True)
    if D.backend() == 'torch':
        y_init = y_init.to(device)

    event_times = D.linspace(0, D.pi / 4, 32)

    class ev_proto:
        # Fires when state component `component` crosses the analytic
        # solution's value at time `ev_time`.
        def __init__(self, ev_time, component):
            self.ev_time = ev_time
            self.component = component

        def __call__(self, t, y, **csts):
            return y[self.component] - analytic_soln(self.ev_time, y_init)[self.component]

        def __repr__(self):
            return "<ev_proto({}, {})>".format(self.ev_time, self.component)

    events = [ev_proto(ev_t, 0) for ev_t in event_times]

    a = de.OdeSystem(rhs, y0=y_init, dense_output=dense_output,
                     t=(0, D.pi / 4), dt=dt,
                     rtol=D.epsilon()**0.5, atol=D.epsilon()**0.75)
    method = integrator
    if use_richardson_extrapolation:
        method = de.integrators.generate_richardson_integrator(method)
    with de.utilities.BlockTimer(section_label="Integrator Tests") as sttimer:
        a.set_method(method)
        print("Testing {} with dt = {:.4e}".format(a.integrator, a.dt))
        a.integrate(eta=True, events=events)
        print(a)
        print(a.events)
        print(len(events) - len(a.events))
        # Up to three events may be missed/merged near the boundaries.
        assert (len(events) - 3 <= len(a.events) <= len(events))
        for ev_detected in a.events:
            assert (D.max(
                D.abs(ev_detected.event(ev_detected.t, ev_detected.y,
                                        **a.constants))) <= 4 * D.epsilon())
def test_float_formats():
    """Run every available integrator on the basic system for every float
    format, checking KeyboardInterrupt recovery and adaptive tolerances."""
    for ffmt in D.available_float_fmt():
        D.set_float_fmt(ffmt)
        print("Testing {} float format".format(D.float_fmt()))

        de_mat = D.array([[0.0, 1.0], [-1.0, 0.0]])

        @de.rhs_prettifier("""[vx, -x+t]""")
        def rhs(t, state, **kwargs):
            return de_mat @ state + D.array([0.0, t])

        def analytic_soln(t, initial_conditions):
            c1 = initial_conditions[0]
            c2 = initial_conditions[1] - 1
            return D.array([
                c2 * D.sin(t) + c1 * D.cos(t) + t,
                c2 * D.cos(t) - c1 * D.sin(t) + 1
            ])

        def kbinterrupt_cb(ode_sys):
            # Deliberately interrupts the first integration partway through.
            if ode_sys[-1][0] > D.pi:
                raise KeyboardInterrupt("Test Interruption and Catching")

        y_init = D.array([1., 0.])
        a = de.OdeSystem(rhs, y0=y_init, dense_output=True, t=(0, 2 * D.pi),
                         dt=0.01, rtol=D.epsilon()**0.5, atol=D.epsilon()**0.5)

        with de.utilities.BlockTimer(section_label="Integrator Tests") as sttimer:
            for i in sorted(set(de.available_methods(False).values()),
                            key=lambda x: x.__name__):
                if "Heun-Euler" in i.__name__ and D.float_fmt() == "gdual_real128":
                    print("skipping {} due to ridiculous timestep requirements.".format(i))
                    continue
                try:
                    a.set_method(i)
                    print("Testing {}".format(a.integrator))
                    # The first integration is interrupted by the callback;
                    # the interrupt must be recoverable.
                    try:
                        a.integrate(callback=kbinterrupt_cb, eta=True)
                    except KeyboardInterrupt:
                        pass
                    # The no-op `try: a.integrate(...) except: raise` wrapper
                    # has been removed.
                    a.integrate(eta=True)
                    max_diff = D.max(D.abs(analytic_soln(a.t[-1], a.y[0]) - a.y[-1]))
                    if a.method.__adaptive__ and max_diff >= a.atol * 10 + D.epsilon():
                        print("{} Failed with max_diff from analytical solution = {}".format(a.integrator, max_diff))
                        raise RuntimeError("Failed to meet tolerances for adaptive integrator {}".format(str(i)))
                    else:
                        print("{} Succeeded with max_diff from analytical solution = {}".format(a.integrator, max_diff))
                    a.reset()
                except Exception as e:
                    print(e)
                    # Chain the root cause instead of discarding it.
                    raise RuntimeError("Test failed for integration method: {}".format(a.integrator)) from e
        print("")
        print("{} backend test passed successfully!".format(D.backend()))
def test_set_float_fmt(ffmt):
    """Setting a float format must be reflected by D.float_fmt()."""
    D.set_float_fmt(ffmt)
    assert D.float_fmt() == str(ffmt)
def test_default_float_fmt(ffmt):
    """Arrays created after set_float_fmt must carry the matching dtype."""
    D.set_float_fmt(ffmt)
    assert D.array(1.0).dtype == D.float_fmts[D.float_fmt()]
def test_event_detection_single(ffmt, integrator, use_richardson_extrapolation, device, dt, dense_output=False):
    """A single terminal time event at t = pi/8 must terminate integration
    and be reported exactly once."""
    if use_richardson_extrapolation and integrator.__implicit__:
        pytest.skip("Richardson Extrapolation is too slow with implicit methods")
    D.set_float_fmt(ffmt)
    if D.backend() == 'torch':
        import torch
        torch.set_printoptions(precision=17)
        torch.autograd.set_detect_anomaly(False)  # Enable if a test fails
        device = torch.device(device)
    print("Testing event detection for float format {}".format(D.float_fmt()))
    from .common import set_up_basic_system
    de_mat, rhs, analytic_soln, y_init, _, _ = set_up_basic_system(
        integrator, hook_jacobian=True)
    if D.backend() == 'torch':
        y_init = y_init.to(device)

    def time_event(t, y, **kwargs):
        out = D.array(t - D.pi / 8)
        if D.backend() == 'torch':
            out = out.to(device)
        return out

    time_event.is_terminal = True
    time_event.direction = 0

    a = de.OdeSystem(rhs, y0=y_init, dense_output=dense_output,
                     t=(0, D.pi / 4), dt=dt,
                     rtol=D.epsilon()**0.5, atol=D.epsilon()**0.75)
    a.set_kick_vars(D.array([0, 1], dtype=bool))
    method = integrator
    if use_richardson_extrapolation:
        method = de.integrators.generate_richardson_integrator(method)
    with de.utilities.BlockTimer(section_label="Integrator Tests") as sttimer:
        a.set_method(method)
        print("Testing {} with dt = {:.4e}".format(a.integrator, a.dt))
        a.integrate(eta=True, events=time_event)
        assert (a.integration_status == "Integration terminated upon finding a triggered event.")
        print(a)
        print(a.events)
        assert (D.abs(a.t[-1] - D.pi / 8) <= D.epsilon()**0.5)
        assert (len(a.events) == 1)
        assert (a.events[0].event == time_event)
        print("Event detection with integrator {} succeeded with t[-1] = {}, diff = {}".format(a.integrator, a.t[-1], a.t[-1] - D.pi / 8))
def test_gradients_complex(ffmt, integrator, use_richardson_extrapolation, device):
    """Differentiate through a neural-controlled linear ODE and compare the
    Jacobian-vector product against a finite-difference perturbation of the
    initial state."""
    if use_richardson_extrapolation and integrator.__implicit__:
        pytest.skip("Richardson Extrapolation is too slow with implicit methods")
    D.set_float_fmt(ffmt)
    print("Testing {} float format".format(D.float_fmt()))
    import torch
    torch.set_printoptions(precision=17)
    device = torch.device(device)
    torch.autograd.set_detect_anomaly(False)  # Enable if a test fails

    class NNController(torch.nn.Module):
        # Small MLP controller mapping (y, dy[, t]) to a sigmoid output.
        def __init__(self, in_dim=2, out_dim=2, inter_dim=50, append_time=False):
            super().__init__()
            self.append_time = append_time
            self.net = torch.nn.Sequential(
                torch.nn.Linear(in_dim + (1 if append_time else 0), inter_dim),
                torch.nn.Softplus(),
                torch.nn.Linear(inter_dim, out_dim),
                torch.nn.Sigmoid())
            for idx, m in enumerate(self.net.modules()):
                if isinstance(m, torch.nn.Linear):
                    torch.nn.init.xavier_normal_(m.weight, gain=1.0)
                    torch.nn.init.constant_(m.bias, 0.0)

        def forward(self, t, y, dy):
            if self.append_time:
                return self.net(torch.cat([y.view(-1), dy.view(-1), t.view(-1)]))
            else:
                return self.net(torch.cat([y, dy]))

    class SimpleODE(torch.nn.Module):
        # Damped oscillator with an additive learned control on the velocity.
        def __init__(self, inter_dim=10, k=1.0):
            super().__init__()
            self.nn_controller = NNController(in_dim=4, out_dim=1, inter_dim=inter_dim)
            self.A = torch.nn.Parameter(
                torch.tensor([[0.0, 1.0], [-k, -1.0]], requires_grad=False))

        def forward(self, t, y, params=None):
            torch_t = t if isinstance(t, torch.Tensor) else torch.tensor(t)
            torch_y = y if isinstance(y, torch.Tensor) else torch.tensor(y)
            if params is not None:
                torch_params = params if isinstance(params, torch.Tensor) else torch.tensor(params)
            dy = torch.matmul(self.A, torch_y)
            # Fixed: use the tensor-converted params; the original converted
            # them into torch_params and then ignored the conversion.
            controller_effect = self.nn_controller(torch_t, torch_y, dy) if params is None else torch_params
            return dy + torch.cat(
                [torch.tensor([0.0]).to(dy), (controller_effect * 2.0 - 1.0)])

    method = integrator
    if use_richardson_extrapolation:
        method = de.integrators.generate_richardson_integrator(method)
    with de.utilities.BlockTimer(section_label="Integrator Tests"):
        yi1 = D.array([1.0, 0.0], requires_grad=True).to(device)
        df = SimpleODE(k=1.0)
        a = de.OdeSystem(df, yi1, t=(0, 0.1), dt=1e-3,
                         rtol=D.epsilon()**0.5, atol=D.epsilon()**0.5)
        a.set_method(method)
        print("Testing {} with dt = {:.4e}".format(a.integrator, a.dt))
        a.integrate(eta=True)
        # Compare autograd's JVP against a finite-difference of a second run.
        dyfdyi = D.jacobian(a.y[-1], a.y[0])
        dyi = D.array([0.0, 1.0]).to(device) * D.epsilon()**0.5
        dyf = D.einsum("nk,k->n", dyfdyi, dyi)
        yi2 = yi1 + dyi
        print(a.y[-1].device)
        b = de.OdeSystem(df, yi2, t=(0, a.t[-1]), dt=a.dt, rtol=a.rtol, atol=a.atol)
        b.set_method(method)
        b.integrate(eta=True)
        true_diff = b.y[-1] - a.y[-1]
        print(D.norm(true_diff - dyf), D.epsilon()**0.5)
        assert (D.allclose(true_diff, dyf, rtol=4 * a.rtol, atol=4 * a.atol))
        print("{} method test succeeded!".format(a.integrator))
    print("")
    print("{} backend test passed successfully!".format(D.backend()))
def test_gradients():
    """For every integrator and float format, compare autograd's
    Jacobian-vector product of the final state against a finite-difference
    perturbation of the initial state for a neural-controlled linear ODE."""
    for ffmt in D.available_float_fmt():
        D.set_float_fmt(ffmt)
        print("Testing {} float format".format(D.float_fmt()))
        import torch
        torch.set_printoptions(precision=17)
        torch.set_num_threads(1)
        torch.autograd.set_detect_anomaly(True)

        class NNController(torch.nn.Module):
            # Small MLP controller mapping (y, dy[, t]) to a sigmoid output.
            def __init__(self, in_dim=2, out_dim=2, inter_dim=50, append_time=False):
                super().__init__()
                self.append_time = append_time
                self.net = torch.nn.Sequential(
                    torch.nn.Linear(in_dim + (1 if append_time else 0), inter_dim),
                    torch.nn.Softplus(),
                    torch.nn.Linear(inter_dim, out_dim),
                    torch.nn.Sigmoid())
                for idx, m in enumerate(self.net.modules()):
                    if isinstance(m, torch.nn.Linear):
                        torch.nn.init.xavier_normal_(m.weight, gain=1.0)
                        torch.nn.init.constant_(m.bias, 0.0)

            def forward(self, t, y, dy):
                if self.append_time:
                    return self.net(torch.cat([y.view(-1), dy.view(-1), t.view(-1)]))
                else:
                    return self.net(torch.cat([y, dy]))

        class SimpleODE(torch.nn.Module):
            def __init__(self, inter_dim=10, k=1.0):
                super().__init__()
                self.nn_controller = NNController(in_dim=4, out_dim=1, inter_dim=inter_dim)
                self.A = torch.tensor([[0.0, 1.0], [-k, -1.0]], requires_grad=True)

            def forward(self, t, y, params=None):
                torch_t = t if isinstance(t, torch.Tensor) else torch.tensor(t)
                torch_y = y if isinstance(y, torch.Tensor) else torch.tensor(y)
                if params is not None:
                    torch_params = params if isinstance(params, torch.Tensor) else torch.tensor(params)
                dy = torch.matmul(self.A, torch_y)
                # Fixed: use the tensor-converted params; the original
                # converted them into torch_params and then ignored it.
                controller_effect = self.nn_controller(torch_t, torch_y, dy) if params is None else torch_params
                return dy + torch.cat(
                    [torch.tensor([0.0]), (controller_effect * 2.0 - 1.0)])

        with de.utilities.BlockTimer(section_label="Integrator Tests"):
            for i in sorted(set(de.available_methods(False).values()),
                            key=lambda x: x.__name__):
                try:
                    yi1 = D.array([1.0, 0.0], requires_grad=True)
                    df = SimpleODE(k=1.0)
                    a = de.OdeSystem(df, yi1, t=(0, 1.), dt=0.0675,
                                     rtol=D.epsilon()**0.5, atol=D.epsilon()**0.5)
                    a.set_method(i)
                    a.integrate(eta=True)
                    dyfdyi = D.jacobian(a.y[-1], a.y[0])
                    dyi = D.array([0.0, 1.0]) * D.epsilon()**0.5
                    dyf = D.einsum("nk,k->n", dyfdyi, dyi)
                    yi2 = yi1 + dyi
                    b = de.OdeSystem(df, yi2, t=(0, 1.), dt=0.0675,
                                     rtol=D.epsilon()**0.5, atol=D.epsilon()**0.5)
                    b.set_method(i)
                    b.integrate(eta=True)
                    true_diff = b.y[-1] - a.y[-1]
                    print(D.norm(true_diff - dyf), D.epsilon()**0.5)
                    assert (D.allclose(true_diff, dyf,
                                       rtol=4 * D.epsilon()**0.5,
                                       atol=4 * D.epsilon()**0.5))
                    print("{} method test succeeded!".format(a.integrator))
                except Exception as e:
                    # Chain the root cause instead of swallowing it with a
                    # bare except.
                    raise RuntimeError("Test failed for integration method: {}".format(a.integrator)) from e
        print("")
        print("{} backend test passed successfully!".format(D.backend()))
def test_getter_setters(ffmt):
    """Exercise the property-based configuration API of OdeSystem for a
    single float format."""
    D.set_float_fmt(ffmt)
    if D.backend() == 'torch':
        import torch
        torch.set_printoptions(precision=17)
        torch.autograd.set_detect_anomaly(True)
    print("Testing {} float format".format(D.float_fmt()))

    de_mat = D.array([[0.0, 1.0], [-1.0, 0.0]])

    @de.rhs_prettifier("""[vx, -x+t]""")
    def rhs(t, state, **kwargs):
        return de_mat @ state + D.array([0.0, t])

    y_init = D.array([1., 0.])
    a = de.OdeSystem(rhs, y0=y_init, dense_output=True, t=(0, 2 * D.pi),
                     dt=0.01, rtol=D.epsilon()**0.5, atol=D.epsilon()**0.5)
    # Constructor arguments must be reflected by the properties.
    assert (a.t0 == 0)
    assert (a.tf == 2 * D.pi)
    assert (a.dt == 0.01)
    assert (a.get_current_time() == a.t0)
    assert (a.rtol == D.epsilon()**0.5)
    assert (a.atol == D.epsilon()**0.5)
    assert (D.norm(a.y[0] - y_init) <= 2 * D.epsilon())
    assert (D.norm(a.y[-1] - y_init) <= 2 * D.epsilon())
    a.set_kick_vars([True, False])
    assert (a.staggered_mask == [True, False])
    pval = 3 * D.pi
    a.tf = pval
    assert (a.tf == pval)
    pval = -1.0
    a.t0 = pval
    assert (a.t0 == pval)
    assert (a.dt == 0.01)
    a.rtol = 1e-3
    assert (a.rtol == 1e-3)
    a.atol = 1e-3
    assert (a.atol == 1e-3)
    # Both setter styles must accept every registered integrator.
    for method in de.available_methods():
        a.set_method(method)
        assert (isinstance(a.integrator, de.available_methods(False)[method]))
    for method in de.available_methods():
        a.method = method
        assert (isinstance(a.integrator, de.available_methods(False)[method]))
    # Constants behave as a mutable mapping and can be replaced/cleared.
    a.constants['k'] = 5.0
    assert (a.constants['k'] == 5.0)
    a.constants.pop('k')
    assert ('k' not in a.constants.keys())
    new_constants = dict(k=10.0)
    a.constants = new_constants
    assert (a.constants['k'] == 10.0)
    del a.constants
    assert (not bool(a.constants))
def test_gradients_simple_oscillator(ffmt, integrator, use_richardson_extrapolation, device):
    """Compare the autograd Jacobian of a simple harmonic oscillator's final
    state w.r.t. its initial state against the analytic Jacobian over one
    period."""
    if use_richardson_extrapolation and integrator.__implicit__:
        pytest.skip("Richardson Extrapolation is too slow with implicit methods")
    D.set_float_fmt(ffmt)
    print("Testing {} float format".format(D.float_fmt()))
    import torch
    torch.set_printoptions(precision=17)
    device = torch.device(device)
    torch.autograd.set_detect_anomaly(False)  # Enable if a test fails

    def rhs(t, state, k, m, **kwargs):
        return D.array([[0.0, 1.0], [-k / m, 0.0]], device=device) @ state

    csts = dict(k=1.0, m=1.0)
    T = 2 * D.pi * D.sqrt(D.array(csts['m'] / csts['k'])).to(device)
    # Scale the step with the integrator order so all methods reach a
    # comparable local error; floor at 5e-2 to bound the runtime.
    dt = max(0.5 * (D.epsilon()**0.5)**(1.0 / (max(2, integrator.order - 1))), 5e-2)

    def true_solution_sho(t, initial_state, k, m):
        w2 = D.array(k / m).to(device)
        w = D.sqrt(w2)
        A = D.sqrt(initial_state[0]**2 + initial_state[1]**2 / w2)
        phi = D.atan2(-initial_state[1], w * initial_state[0])
        return D.stack([A * D.cos(w * t + phi), -w * A * D.sin(w * t + phi)]).T

    method = integrator
    if use_richardson_extrapolation:
        method = de.integrators.generate_richardson_integrator(method)
    with de.utilities.BlockTimer(section_label="Integrator Tests"):
        y_init = D.array([1., 1.], requires_grad=True).to(device)
        a = de.OdeSystem(rhs, y_init, t=(0, T), dt=T * dt,
                         rtol=D.epsilon()**0.5, atol=D.epsilon()**0.5,
                         constants=csts)
        a.set_method(method)
        print("Testing {} with dt = {:.4e}".format(a.integrator, a.dt))
        a.integrate(eta=True)
        Jy = D.jacobian(a.y[-1], a.y[0])
        true_Jy = D.jacobian(true_solution_sho(a.t[-1], a.y[0], **csts), a.y[0])
        print(a.integrator.adaptive,
              D.mean(D.abs(D.stack(a.t[1:]) - D.stack(a.t[:-1]))),
              D.norm(true_Jy - Jy), D.epsilon()**0.5)
        if a.integrator.adaptive:
            assert (D.allclose(true_Jy, Jy,
                               rtol=4 * a.rtol**0.75,
                               atol=4 * a.atol**0.75))
        print("{} method test succeeded!".format(a.integrator))
    print("")
    print("{} backend test passed successfully!".format(D.backend()))
def test_getter_setters_legacy():
    """Exercise the legacy getter/setter API of OdeSystem across all float formats.

    Renamed from ``test_getter_setters``: this module already defines a
    parametrized ``test_getter_setters(ffmt)``, and the duplicate name meant
    the later definition shadowed the earlier one, so only one of the two
    tests was ever collected and run by pytest.

    Also removed:
    - the unused local helpers ``analytic_soln`` and ``kbinterrupt_cb``
      (defined but never referenced in this test), and
    - the ``try/except`` wrappers that re-raised every failure as
      ``RuntimeError`` — pytest already reports the original exception with a
      full traceback, and the wrappers only obscured it.
    """
    for ffmt in D.available_float_fmt():
        D.set_float_fmt(ffmt)
        print("Testing {} float format".format(D.float_fmt()))

        de_mat = D.array([[0.0, 1.0], [-1.0, 0.0]])

        @de.rhs_prettifier("""[vx, -x+t]""")
        def rhs(t, state, **kwargs):
            return de_mat @ state + D.array([0.0, t])

        y_init = D.array([1., 0.])

        a = de.OdeSystem(rhs,
                         y0=y_init,
                         dense_output=True,
                         t=(0, 2 * D.pi),
                         dt=0.01,
                         rtol=D.epsilon()**0.5,
                         atol=D.epsilon()**0.5)

        # Constructor state as seen through the legacy getters.
        assert (a.get_end_time() == 2 * D.pi)
        assert (a.get_start_time() == 0)
        assert (a.dt == 0.01)
        assert (a.rtol == D.epsilon()**0.5)
        assert (a.atol == D.epsilon()**0.5)
        assert (D.norm(a.y[0] - y_init) <= 2 * D.epsilon())
        assert (D.norm(a.y[-1] - y_init) <= 2 * D.epsilon())

        a.set_kick_vars([True, False])
        assert (a.staggered_mask == [True, False])

        # Mutating the integration bounds through the legacy setters.
        pval = 3 * D.pi
        a.set_end_time(pval)
        assert (a.get_end_time() == pval)

        pval = -1.0
        a.set_start_time(pval)
        assert (a.get_start_time() == pval)
        assert (a.get_step_size() == 0.01)

        # Mutating the tolerances through the legacy setters.
        a.set_rtol(1e-3)
        assert (a.get_rtol() == 1e-3)
        a.set_atol(1e-3)
        assert (a.get_atol() == 1e-3)

        a.set_method("RK45CK")
        assert (isinstance(a.integrator, de.available_methods(False)["RK45CK"]))

        # Legacy constants API: add_constants / consts / remove_constants.
        a.add_constants(k=5.0)
        assert (a.consts['k'] == 5.0)
        a.remove_constants('k')
        assert ('k' not in a.consts.keys())
def test_float_formats_atypical_shape(ffmt, integrator, use_richardson_extrapolation, device):
    """Integrate the basic system with a higher-rank state tensor and check it against the analytic solution."""
    if use_richardson_extrapolation and integrator.__implicit__:
        pytest.skip(
            "Richardson Extrapolation is too slow with implicit methods")

    D.set_float_fmt(ffmt)

    if D.backend() == 'torch':
        import torch
        torch.set_printoptions(precision=17)
        torch.autograd.set_detect_anomaly(False)  # Enable if a test fails
        device = torch.device(device)

    print("Testing {} float format".format(D.float_fmt()))

    from .common import set_up_basic_system

    de_mat, _, analytic_soln, y_init, dt, _ = set_up_basic_system(integrator)

    @de.rhs_prettifier("""[vx, -x+t]""")
    def rhs(t, state, **kwargs):
        nonlocal de_mat
        extra = D.array([0.0, t])
        if D.backend() == 'torch':
            # Keep the system matrix and forcing term on the state's device.
            de_mat = de_mat.to(state.device)
            extra = extra.to(state.device)
        # Broadcast the 2x2 system matrix over the trailing state dimensions.
        return D.sum(de_mat[:, :, None, None, None] * state, axis=1) + extra[:, None, None, None]

    # Deliberately non-flat state: shape (2, 1, 1, 3) instead of a vector.
    y_init = D.array([[[[1., 0.]] * 1] * 1] * 3).T
    print(rhs(0.0, y_init).shape)
    if D.backend() == 'torch':
        y_init = y_init.contiguous().to(device)

    a = de.OdeSystem(rhs,
                     y0=y_init,
                     dense_output=False,
                     t=(0, D.pi / 4),
                     dt=D.pi / 64,
                     rtol=D.epsilon()**0.5,
                     atol=D.epsilon()**0.5)

    chosen_method = integrator
    tolerance = a.atol * 10 + D.epsilon()
    if use_richardson_extrapolation:
        chosen_method = de.integrators.generate_richardson_integrator(chosen_method)
        tolerance = tolerance * 5

    with de.utilities.BlockTimer(section_label="Integrator Tests") as sttimer:
        a.set_method(chosen_method)
        print("Testing {} with dt = {:.4e}".format(a.integrator, a.dt))
        a.integrate(eta=True)

        max_diff = D.max(D.abs(analytic_soln(a.t[-1], y_init) - a.y[-1]))
        # Only adaptive integrators are held to the tolerance bound.
        if a.integrator.adaptive:
            assert max_diff <= tolerance, "{} Failed with max_diff from analytical solution = {}".format(
                a.integrator, max_diff)
        a.reset()

    print("")
    print("{} backend test passed successfully!".format(D.backend()))
def test_backend():
    """Smoke-test the desolver backend across all float formats.

    Checks, per format: the reported epsilon, dtype handling, tensor
    contraction, and the elementwise math API against numpy reference values.

    Fix: the original asserted ``D.log(pi)`` twice back-to-back (a
    copy-paste duplicate); one of the two identical assertions was removed.
    """
    try:
        import desolver as de
        import desolver.backend as D
        import numpy as np
        import scipy  # imported to verify scipy is importable alongside the backend

        if "DES_BACKEND" in os.environ:
            assert(D.backend() == os.environ['DES_BACKEND'])

        if D.backend() not in ['torch']:
            # Default datatype test
            for i in D.available_float_fmt():
                D.set_float_fmt(i)
                assert(D.array(1.0).dtype == D.float_fmts[D.float_fmt()])

        # Expected machine epsilon for each supported float format.
        expected_eps = {'float16': 5e-3, 'float32': 5e-7, 'float64': 5e-16,
                        'gdual_double': 5e-16, 'gdual_vdouble': 5e-16,
                        'gdual_real128': 5e-16}

        test_array = np.array([1], dtype=np.int64)

        # Test Function Evals
        for i in D.available_float_fmt():
            D.set_float_fmt(i)
            assert(D.float_fmt() == str(i))
            assert(D.epsilon() == expected_eps[str(i)])
            assert(isinstance(D.available_float_fmt(), list))

            if not i.startswith('gdual'):
                assert(D.cast_to_float_fmt(test_array).dtype == str(i))

            arr1 = D.array([[2.0, 1.0], [1.0, 0.0]])
            arr2 = D.array([[1.0, 1.0], [-1.0, 1.0]])

            if not i.startswith('gdual'):
                # contract_first_ndims contracts the first n dimensions of
                # its operands: n=1 gives a vector, n=2 a scalar here.
                arr3 = D.contract_first_ndims(arr1, arr2, 1)
                arr4 = D.contract_first_ndims(arr1, arr2, 2)
                true_arr3 = D.array([1.0, 1.0])
                true_arr4 = D.array(2.)
                assert(D.norm(arr3 - true_arr3) <= 2 * D.epsilon())
                assert(D.norm(arr4 - true_arr4) <= 2 * D.epsilon())

            de.utilities.warning("Testing float format {}".format(D.float_fmt()))

            pi = D.to_float(D.pi)

            # Mathematical constants.
            assert(np.pi - 2*D.epsilon() <= pi <= np.pi + 2*D.epsilon())
            assert(np.e - 2*D.epsilon() <= D.to_float(D.e) <= np.e + 2*D.epsilon())
            assert(np.euler_gamma - 2*D.epsilon() <= D.to_float(D.euler_gamma) <= np.euler_gamma + 2*D.epsilon())

            # Trigonometric and hyperbolic functions.
            assert(-2*D.epsilon() <= D.sin(pi) <= 2*D.epsilon())
            assert(-2*D.epsilon() <= D.cos(pi)+1 <= 2*D.epsilon())
            assert(-2*D.epsilon() <= D.tan(pi) <= 2*D.epsilon())
            assert(D.asin(D.to_float(1)) == pi/2)
            assert(D.acos(D.to_float(1)) == 0)
            assert(D.atan(D.to_float(1)) == pi/4)
            assert(D.atan2(D.to_float(1), D.to_float(1)) == pi/4)
            assert(D.sinh(pi) == np.sinh(pi))
            assert(D.cosh(pi) == np.cosh(pi))
            assert(D.tanh(pi) == np.tanh(pi))

            # Elementwise arithmetic and transcendental functions.
            assert(-3.141592653589793 - 2*D.epsilon() <= D.neg(pi) <= -3.141592653589793 + 2*D.epsilon())
            assert(31.00627668029982 - 10*D.epsilon() <= D.pow(pi,3) <= 31.00627668029982 + 10*D.epsilon())
            assert(3.141592653589793 - 2*D.epsilon() <= D.abs(pi) <= 3.141592653589793 + 2*D.epsilon())
            assert(1.77245385090551603 - 2*D.epsilon() <= D.sqrt(pi) <= 1.77245385090551603 + 2*D.epsilon())
            assert(23.1406926327792690 - 10*D.epsilon() <= D.exp(pi) <= 23.1406926327792690 + 10*D.epsilon())
            assert(22.1406926327792690 - 10*D.epsilon() <= D.expm1(pi) <= 22.1406926327792690 + 10*D.epsilon())
            assert(1.14472988584940017 - 2*D.epsilon() <= D.log(pi) <= 1.14472988584940017 + 2*D.epsilon())
            assert(0.49714987269413385 - 2*D.epsilon() <= D.log10(pi) <= 0.49714987269413385 + 2*D.epsilon())
            assert(1.42108041279429263 - 2*D.epsilon() <= D.log1p(pi) <= 1.42108041279429263 + 2*D.epsilon())
            assert(1.65149612947231880 - 2*D.epsilon() <= D.log2(pi) <= 1.65149612947231880 + 2*D.epsilon())
            assert(4.14159265358979324 - 2*D.epsilon() <= D.add(pi,1) <= 4.14159265358979324 + 2*D.epsilon())
            assert(2.14159265358979324 - 2*D.epsilon() <= D.sub(pi,1) <= 2.14159265358979324 + 2*D.epsilon())
            assert(D.div(pi,1) == pi)
            assert(D.mul(pi,1) == pi)
            assert(0.31830988618379067 - 2*D.epsilon() <= D.reciprocal(pi) <= 0.31830988618379067 + 2*D.epsilon())

            if not i.startswith('gdual'):
                # Operations not supported for generalized-dual formats.
                assert(0.14159265358979324 - 2*D.epsilon() <= D.remainder(pi,3) <= 0.14159265358979324 + 2*D.epsilon())
                assert(D.ceil(pi) == 4)
                assert(D.floor(pi) == 3)
                assert(D.round(pi) == 3)
                assert(1.1415926535897931 - 2*D.epsilon() <= D.fmod(pi,2) <= 1.1415926535897931 + 2*D.epsilon())
                assert(D.clip(pi,1,2) == 2)
                assert(D.sign(pi) == 1)
                assert(D.trunc(pi) == 3)
                assert(0.9772133079420067 - 2*D.epsilon() <= D.digamma(pi) <= 0.9772133079420067 + 2*D.epsilon())
                assert(0.4769362762044699 - 2*D.epsilon() <= D.erfinv(D.to_float(0.5)) <= 0.4769362762044699 + 2*D.epsilon())
                assert(1.7891115385869942 - 2*D.epsilon() <= D.mvlgamma(pi, 2) <= 1.7891115385869942 + 2*D.epsilon())
                assert(D.frac(pi) == pi - 3)
                assert(0.9999911238536324 - 2*D.epsilon() <= D.erf(pi) <= 0.9999911238536324 + 2*D.epsilon())
                assert(8.8761463676416054e-6 - 2*D.epsilon() <= D.erfc(pi) <= 8.8761463676416054e-6 + 2*D.epsilon())
                assert(0.9585761678336372 - 2*D.epsilon() <= D.sigmoid(pi) <= 0.9585761678336372 + 2*D.epsilon())
                assert(0.5641895835477563 - 2*D.epsilon() <= D.rsqrt(pi) <= 0.5641895835477563 + 2*D.epsilon())
                assert(pi + 0.5 - 2*D.epsilon() <= D.lerp(pi,pi+1,0.5) <= pi + 0.5 + 2*D.epsilon())
                assert(D.addcdiv(pi,1,D.to_float(3),D.to_float(2)) == pi + (1 * (3 / 2)))
                assert(D.addcmul(pi,1,D.to_float(3),D.to_float(2)) == pi + (1 * (3 * 2)))

            if not i.startswith('gdual'):
                assert(-2*D.epsilon() <= D.einsum("nm->", D.array([[1.0, 2.0], [-2.0, -1.0]])) <= 2*D.epsilon())
    except:
        # Bare except is deliberate: flag the failing backend for ANY
        # exception type, then re-raise so pytest still sees the failure.
        print("{} Backend Test Failed".format(D.backend()))
        raise
    print("{} Backend Test Succeeded".format(D.backend()))