def test_auto_regression_operator_on_ode_with_isolated_perturbations():
    """Trains an auto-regression operator on a Lotka-Volterra ODE using
    isolated random perturbations and checks that its coarse solution stays
    close to a fine DOP853 reference solution.
    """
    set_random_seed(0)

    diff_eq = LotkaVolterraEquation(2., 1., .8, 1.)
    cp = ConstrainedProblem(diff_eq)
    ic = ContinuousInitialCondition(cp, lambda _: np.array([1., 2.]))
    ivp = InitialValueProblem(cp, (0., 10.), ic)

    # Fine-grained reference operator used both as the training oracle and
    # as the ground truth for the final accuracy check.
    oracle = ODEOperator('DOP853', .001)
    ref_solution = oracle.solve(ivp)

    ml_op = AutoRegressionOperator(2.5, True)
    ml_op.train(
        ivp,
        oracle,
        RandomForestRegressor(),
        25,
        lambda t, y: y + np.random.normal(0., .01, size=y.shape),
        isolate_perturbations=True)
    ml_solution = ml_op.solve(ivp)

    assert ml_solution.vertex_oriented
    assert ml_solution.d_t == 2.5
    assert ml_solution.discrete_y().shape == (4, 2)

    diff = ref_solution.diff([ml_solution])
    assert np.all(diff.matching_time_points == np.linspace(2.5, 10., 4))
    assert np.max(np.abs(diff.differences[0])) < .01
def test_pidon_operator_on_spherical_pde():
    """Trains a PIDON operator on a 3D diffusion equation over a spherical
    mesh and checks that the training loss decreases and the solution has
    the expected shape.
    """
    set_random_seed(0)

    diff_eq = DiffusionEquation(3)
    mesh = Mesh(
        [(1., 11.), (0., 2 * np.pi), (.25 * np.pi, .75 * np.pi)],
        [2., np.pi / 5., np.pi / 4],
        CoordinateSystem.SPHERICAL)
    bcs = [
        (DirichletBoundaryCondition(
            lambda x, t: np.ones((len(x), 1)), is_static=True),
         DirichletBoundaryCondition(
            lambda x, t: np.full((len(x), 1), 1. / 11.), is_static=True)),
        (NeumannBoundaryCondition(
            lambda x, t: np.zeros((len(x), 1)), is_static=True),
         NeumannBoundaryCondition(
            lambda x, t: np.zeros((len(x), 1)), is_static=True)),
        (NeumannBoundaryCondition(
            lambda x, t: np.zeros((len(x), 1)), is_static=True),
         NeumannBoundaryCondition(
            lambda x, t: np.zeros((len(x), 1)), is_static=True))
    ]
    cp = ConstrainedProblem(diff_eq, mesh, bcs)
    # Initial condition consistent with the 1/r Dirichlet boundary values.
    ic = ContinuousInitialCondition(cp, lambda x: 1. / x[:, :1])
    t_interval = (0., .5)
    ivp = InitialValueProblem(cp, t_interval, ic)

    sampler = UniformRandomCollocationPointSampler()
    pidon = PIDONOperator(sampler, .001, True)
    training_loss_history, test_loss_history = pidon.train(
        cp,
        t_interval,
        training_data_args=DataArgs(
            y_0_functions=[ic.y_0],
            n_domain_points=20,
            n_boundary_points=10,
            n_batches=1
        ),
        model_args=ModelArgs(
            latent_output_size=20,
            branch_hidden_layer_sizes=[30, 30],
            trunk_hidden_layer_sizes=[30, 30],
        ),
        optimization_args=OptimizationArgs(
            optimizer=optimizers.Adam(learning_rate=2e-5),
            epochs=3,
            verbose=False
        )
    )

    assert len(training_loss_history) == 3
    # The weighted total loss must strictly decrease between epochs.
    for i in range(2):
        assert np.all(
            training_loss_history[i + 1].weighted_total_loss.numpy() <
            training_loss_history[i].weighted_total_loss.numpy())

    solution = pidon.solve(ivp)
    assert solution.d_t == .001
    assert solution.discrete_y().shape == (500, 6, 11, 3, 1)
def test_pidon_operator_on_ode_with_analytic_solution():
    """Trains a PIDON operator (with a secondary L-BFGS-style optimization
    phase) on a population growth ODE and compares its solution to the
    known analytic solution y_0 * e^(r * t).
    """
    set_random_seed(0)

    r = 4.
    y_0 = 1.

    diff_eq = PopulationGrowthEquation(r)
    cp = ConstrainedProblem(diff_eq)
    t_interval = (0., .25)
    ic = ContinuousInitialCondition(cp, lambda _: np.array([y_0]))

    sampler = UniformRandomCollocationPointSampler()
    pidon = PIDONOperator(sampler, .001, True)
    training_loss_history, test_loss_history = pidon.train(
        cp,
        t_interval,
        training_data_args=DataArgs(
            y_0_functions=[ic.y_0],
            n_domain_points=25,
            n_batches=5,
            n_ic_repeats=5
        ),
        model_args=ModelArgs(
            latent_output_size=1,
            trunk_hidden_layer_sizes=[50, 50, 50],
        ),
        optimization_args=OptimizationArgs(
            optimizer=optimizers.SGD(),
            epochs=100,
            verbose=False
        ),
        secondary_optimization_args=SecondaryOptimizationArgs(
            max_iterations=100,
            verbose=False
        )
    )

    # 100 epochs plus one extra entry from the secondary optimization.
    assert len(training_loss_history) == 101
    assert len(test_loss_history) == 0
    assert training_loss_history[-1].weighted_total_loss.numpy() < 5e-5

    ivp = InitialValueProblem(
        cp,
        t_interval,
        ic,
        lambda _ivp, t, x: np.array([y_0 * np.e ** (r * t)])
    )

    solution = pidon.solve(ivp)
    assert solution.d_t == .001
    assert solution.discrete_y().shape == (250, 1)

    analytic_y = np.array([ivp.exact_y(t) for t in solution.t_coordinates])
    assert np.mean(np.abs(analytic_y - solution.discrete_y())) < 1e-3
    assert np.max(np.abs(analytic_y - solution.discrete_y())) < 2.5e-3
def test_pidon_operator_on_pde_system():
    """Trains a PIDON operator on the Navier-Stokes equation system with
    Dirichlet boundary conditions matching the initial condition and checks
    loss decrease and solution shape.
    """
    set_random_seed(0)

    diff_eq = NavierStokesEquation()
    mesh = Mesh([(-2.5, 2.5), (0., 4.)], [1., 1.])
    ic_function = vectorize_ic_function(lambda x: [
        2. * x[0] - 4.,
        2. * x[0] ** 2 + 3. * x[1] - x[0] * x[1] ** 2,
        4. * x[0] - x[1] ** 2,
        2. * x[0] * x[1] - 3.
    ])
    # The boundary values are pinned to the initial condition on all sides.
    bcs = [
        (DirichletBoundaryCondition(
            lambda x, t: ic_function(x), is_static=True),
         DirichletBoundaryCondition(
            lambda x, t: ic_function(x), is_static=True))
    ] * 2
    cp = ConstrainedProblem(diff_eq, mesh, bcs)
    ic = ContinuousInitialCondition(cp, ic_function)
    t_interval = (0., .5)
    ivp = InitialValueProblem(cp, t_interval, ic)

    sampler = UniformRandomCollocationPointSampler()
    pidon = PIDONOperator(sampler, .001, True)
    training_loss_history, test_loss_history = pidon.train(
        cp,
        t_interval,
        training_data_args=DataArgs(
            y_0_functions=[ic.y_0],
            n_domain_points=20,
            n_boundary_points=10,
            n_batches=1
        ),
        model_args=ModelArgs(
            latent_output_size=20,
            branch_hidden_layer_sizes=[20, 20],
            trunk_hidden_layer_sizes=[20, 20],
        ),
        optimization_args=OptimizationArgs(
            optimizer=optimizers.Adam(learning_rate=1e-5),
            epochs=3,
            verbose=False
        )
    )

    assert len(training_loss_history) == 3
    for i in range(2):
        assert np.all(
            training_loss_history[i + 1].weighted_total_loss.numpy() <
            training_loss_history[i].weighted_total_loss.numpy())

    solution = pidon.solve(ivp)
    assert solution.d_t == .001
    assert solution.discrete_y().shape == (500, 6, 5, 4)
def test_pidon_operator_in_ar_mode_on_pde():
    """Trains a PIDON operator in auto-regression mode on a 1D wave
    equation, using a family of beta-distribution initial conditions, and
    checks the coarse solution's time coordinates and shape.
    """
    set_random_seed(0)

    diff_eq = WaveEquation(1)
    mesh = Mesh([(0., 1.)], (.2,))
    bcs = [
        (NeumannBoundaryCondition(
            lambda x, t: np.zeros((len(x), 2)), is_static=True),
         NeumannBoundaryCondition(
            lambda x, t: np.zeros((len(x), 2)), is_static=True)),
    ]
    cp = ConstrainedProblem(diff_eq, mesh, bcs)
    t_interval = (0., 1.)
    ic = BetaInitialCondition(cp, [(3.5, 3.5), (3.5, 3.5)])
    ivp = InitialValueProblem(cp, t_interval, ic)

    # Train on a family of beta ICs that brackets the test IC's parameters.
    training_y_0_functions = [
        BetaInitialCondition(cp, [(p, p), (p, p)]).y_0
        for p in [2., 3., 4., 5.]
    ]

    sampler = UniformRandomCollocationPointSampler()
    pidon = PIDONOperator(sampler, .25, False, auto_regression_mode=True)
    assert pidon.auto_regression_mode

    # In AR mode, training covers only a single coarse time step.
    pidon.train(
        cp,
        (0., .25),
        training_data_args=DataArgs(
            y_0_functions=training_y_0_functions,
            n_domain_points=50,
            n_boundary_points=20,
            n_batches=2
        ),
        model_args=ModelArgs(
            latent_output_size=50,
            branch_hidden_layer_sizes=[50, 50],
            trunk_hidden_layer_sizes=[50, 50],
        ),
        optimization_args=OptimizationArgs(
            optimizer=optimizers.Adam(learning_rate=1e-4),
            epochs=2,
            ic_loss_weight=10.,
            verbose=False
        )
    )

    sol = pidon.solve(ivp)
    assert np.allclose(sol.t_coordinates, [.25, .5, .75, 1.])
    assert sol.discrete_y().shape == (4, 5, 2)
def test_pidon_operator_in_ar_mode_on_ode():
    """Trains a PIDON operator in auto-regression mode on a population
    growth ODE over a range of initial values and checks the coarse
    solution's time coordinates and shape.
    """
    set_random_seed(0)

    diff_eq = PopulationGrowthEquation()
    cp = ConstrainedProblem(diff_eq)
    t_interval = (0., 1.)
    ic = ContinuousInitialCondition(cp, lambda _: np.array([1.]))
    ivp = InitialValueProblem(cp, t_interval, ic)

    sampler = UniformRandomCollocationPointSampler()
    pidon = PIDONOperator(sampler, .25, True, auto_regression_mode=True)
    assert pidon.auto_regression_mode

    pidon.train(
        cp,
        (0., .25),
        training_data_args=DataArgs(
            # The _y_0=y_0 default argument binds each loop value eagerly,
            # avoiding the late-binding closure pitfall.
            y_0_functions=[
                lambda _, _y_0=y_0: np.array([_y_0])
                for y_0 in np.linspace(.5, 2., 10)
            ],
            n_domain_points=50,
            n_batches=1
        ),
        model_args=ModelArgs(
            latent_output_size=1,
            trunk_hidden_layer_sizes=[50, 50, 50],
        ),
        optimization_args=OptimizationArgs(
            # Optimizer supplied as a Keras config dict rather than an
            # instance; the learning rate follows an exponential decay.
            optimizer={
                'class_name': 'Adam',
                'config': {
                    'learning_rate': optimizers.schedules.ExponentialDecay(
                        1e-2, decay_steps=25, decay_rate=.95)
                }
            },
            epochs=5,
            verbose=False
        )
    )

    sol = pidon.solve(ivp)
    assert np.allclose(sol.t_coordinates, [.25, .5, .75, 1.])
    assert sol.discrete_y().shape == (4, 1)
def test_auto_regression_operator_on_pde():
    """Trains an auto-regression operator backed by a DeepONet regressor on
    a 2D wave equation and checks its coarse solution against an FDM
    reference solution.
    """
    set_random_seed(0)

    diff_eq = WaveEquation(2)
    mesh = Mesh([(-5., 5.), (-5., 5.)], [1., 1.])
    bcs = [(DirichletBoundaryCondition(lambda x, t: np.zeros((len(x), 2)),
                                       is_static=True),
            DirichletBoundaryCondition(lambda x, t: np.zeros((len(x), 2)),
                                       is_static=True))] * 2
    cp = ConstrainedProblem(diff_eq, mesh, bcs)
    ic = GaussianInitialCondition(
        cp,
        [(np.array([0., 2.5]), np.array([[.1, 0.], [0., .1]]))] * 2,
        [3., .0])
    ivp = InitialValueProblem(cp, (0., 10.), ic)

    # Fine FDM operator serving as both training oracle and ground truth.
    oracle = FDMOperator(RK4(), ThreePointCentralDifferenceMethod(), .1)
    ref_solution = oracle.solve(ivp)

    ml_op = AutoRegressionOperator(2.5, True)
    ml_op.train(
        ivp,
        oracle,
        SKLearnKerasRegressor(
            DeepONet([
                np.prod(cp.y_shape(True)).item(),
                100,
                50,
                diff_eq.y_dimension * 10
            ], [1 + diff_eq.x_dimension, 50, 50, diff_eq.y_dimension * 10],
                diff_eq.y_dimension),
            optimizer=optimizers.Adam(
                learning_rate=optimizers.schedules.ExponentialDecay(
                    1e-2, decay_steps=500, decay_rate=.95)),
            batch_size=968,
            epochs=500,
        ),
        20,
        # Perturbation magnitude grows with time.
        lambda t, y: y + np.random.normal(0., t / 75., size=y.shape))
    ml_solution = ml_op.solve(ivp)

    assert ml_solution.vertex_oriented
    assert ml_solution.d_t == 2.5
    assert ml_solution.discrete_y().shape == (4, 11, 11, 2)

    diff = ref_solution.diff([ml_solution])
    assert np.all(diff.matching_time_points == np.linspace(2.5, 10., 4))
    assert np.max(np.abs(diff.differences[0])) < .5
from pararealml.utils.rand import set_random_seed
from pararealml.utils.tf import use_cpu, use_deterministic_ops

# Pin execution to the CPU with deterministic TensorFlow ops and a fixed
# seed so the test results are reproducible across runs.
use_cpu()
use_deterministic_ops()
set_random_seed(0)
def test_pidon_operator_on_ode_system():
    """Trains a PIDON operator on the Lotka-Volterra ODE system with
    separate training and test initial conditions and checks that both loss
    histories decrease before solving an unseen IVP.
    """
    set_random_seed(0)

    diff_eq = LotkaVolterraEquation()
    cp = ConstrainedProblem(diff_eq)
    t_interval = (0., .5)

    sampler = UniformRandomCollocationPointSampler()
    pidon = PIDONOperator(sampler, .01, True)

    # Training ICs form a grid around the test ICs in prey/predator space.
    training_y_0_functions = [
        lambda _: np.array([47.5, 25.]),
        lambda _: np.array([47.5, 27.5]),
        lambda _: np.array([50., 22.5]),
        lambda _: np.array([50., 27.5]),
        lambda _: np.array([52.5, 22.5]),
        lambda _: np.array([52.5, 25.]),
    ]
    test_y_0_functions = [
        lambda _: np.array([47.5, 22.5]),
        lambda _: np.array([50., 25.]),
        lambda _: np.array([52.5, 27.5])
    ]

    training_loss_history, test_loss_history = pidon.train(
        cp,
        t_interval,
        training_data_args=DataArgs(
            y_0_functions=training_y_0_functions,
            n_domain_points=50,
            n_batches=3
        ),
        test_data_args=DataArgs(
            y_0_functions=test_y_0_functions,
            n_domain_points=20,
            n_batches=1
        ),
        model_args=ModelArgs(
            latent_output_size=20,
            branch_hidden_layer_sizes=[20, 20, 20],
            trunk_hidden_layer_sizes=[20, 20, 20],
        ),
        optimization_args=OptimizationArgs(
            optimizer={
                'class_name': 'Adam',
                'config': {
                    'learning_rate': 1e-4
                }
            },
            epochs=3,
            ic_loss_weight=2.,
            verbose=False
        )
    )

    assert len(training_loss_history) == 3
    assert len(test_loss_history) == 3
    for i in range(2):
        assert np.sum(
            training_loss_history[i + 1].weighted_total_loss.numpy()) < \
            np.sum(training_loss_history[i].weighted_total_loss.numpy())
        assert np.sum(test_loss_history[i + 1].weighted_total_loss.numpy()) < \
            np.sum(test_loss_history[i].weighted_total_loss.numpy())

    ic = ContinuousInitialCondition(cp, lambda _: np.array([50., 25.]))
    ivp = InitialValueProblem(cp, t_interval, ic)

    solution = pidon.solve(ivp)
    assert solution.d_t == .01
    assert solution.discrete_y().shape == (50, 2)
def test_pidon_operator_on_pde_with_dynamic_boundary_conditions():
    """Trains a PIDON operator on a 1D diffusion equation whose Neumann
    boundary conditions depend on time (hence not static) and checks the
    loss histories and solution shape.
    """
    set_random_seed(0)

    diff_eq = DiffusionEquation(1, .25)
    mesh = Mesh([(0., 1.)], (.1,))
    # Time-dependent boundary flux; no is_static flag since the values
    # change with t.
    bcs = [
        (NeumannBoundaryCondition(lambda x, t: np.full((len(x), 1), t)),
         NeumannBoundaryCondition(lambda x, t: np.full((len(x), 1), t))),
    ]
    cp = ConstrainedProblem(diff_eq, mesh, bcs)
    t_interval = (0., .5)

    training_y_0_functions = [
        BetaInitialCondition(cp, [(p, p)]).y_0 for p in [2., 3., 4., 5.]
    ]
    test_y_0_functions = [
        BetaInitialCondition(cp, [(p, p)]).y_0 for p in [2.5, 3.5, 4.5]
    ]

    sampler = UniformRandomCollocationPointSampler()
    pidon = PIDONOperator(sampler, .001, True)
    training_loss_history, test_loss_history = pidon.train(
        cp,
        t_interval,
        training_data_args=DataArgs(
            y_0_functions=training_y_0_functions,
            n_domain_points=50,
            n_boundary_points=20,
            n_batches=2
        ),
        test_data_args=DataArgs(
            y_0_functions=test_y_0_functions,
            n_domain_points=25,
            n_boundary_points=10,
            n_batches=1
        ),
        model_args=ModelArgs(
            latent_output_size=50,
            branch_hidden_layer_sizes=[50, 50],
            trunk_hidden_layer_sizes=[50, 50],
        ),
        optimization_args=OptimizationArgs(
            optimizer={
                'class_name': 'Adam',
                'config': {
                    'learning_rate': 1e-4
                }
            },
            epochs=3,
            ic_loss_weight=10.,
            verbose=False
        )
    )

    assert len(training_loss_history) == 3
    assert len(test_loss_history) == 3
    for i in range(2):
        assert training_loss_history[i + 1].weighted_total_loss.numpy() < \
            training_loss_history[i].weighted_total_loss.numpy()
        assert test_loss_history[i + 1].weighted_total_loss.numpy() < \
            test_loss_history[i].weighted_total_loss.numpy()

    ic = BetaInitialCondition(cp, [(3.5, 3.5)])
    ivp = InitialValueProblem(cp, t_interval, ic)

    solution = pidon.solve(ivp)
    assert solution.d_t == .001
    assert solution.discrete_y().shape == (500, 11, 1)
import numpy as np
from sklearn.ensemble import RandomForestRegressor

from pararealml import *
from pararealml.operators.fdm import *
from pararealml.operators.ml.auto_regression import *
from pararealml.utils.rand import SEEDS, set_random_seed

set_random_seed(SEEDS[0])

# 2D Cahn-Hilliard problem on a 50x50 domain with zero-flux (Neumann)
# boundaries on all sides.
gamma = .01
diff_eq = CahnHilliardEquation(2, gamma=gamma)
mesh = Mesh([(0., 50.), (0., 50.)], [1., 1.])
bcs = [
    (NeumannBoundaryCondition(
        lambda x, t: np.zeros((len(x), 2)), is_static=True),
     NeumannBoundaryCondition(
        lambda x, t: np.zeros((len(x), 2)), is_static=True))
] * 2
cp = ConstrainedProblem(diff_eq, mesh, bcs)

diff = ThreePointCentralDifferenceMethod()
# Random concentration field in [-0.05, 0.05]; the second solution
# component is derived from it as y^3 - y - gamma * laplacian(y).
y_0_0 = .05 * np.random.uniform(-1., 1., mesh.vertices_shape + (1,))
y_0_1 = y_0_0 ** 3 - y_0_0 - gamma * diff.laplacian(
    y_0_0, mesh, cp.create_boundary_constraints(True)[1][:, :1])
ic = DiscreteInitialCondition(
    cp, np.concatenate([y_0_0, y_0_1], axis=-1), True)
ivp = InitialValueProblem(cp, (0., 5.), ic)

# Reference solution computed with a Crank-Nicolson FDM operator.
fdm_op = FDMOperator(CrankNicolsonMethod(), diff, .01)
fdm_sol = fdm_op.solve(ivp)
fdm_sol_y = fdm_sol.discrete_y(fdm_op.vertex_oriented)