def test_correct_caching2(self):
    # setup equation
    eqn = Seir(population=1)
    # setup ode solver
    ti = 0.
    tf = 2.
    n_steps = 3
    rk = RKSolver(ti, tf, n_steps)
    rk.output_frequency = 1
    rk.set_output_storing_flag(True)
    rk.equation = eqn
    u0 = np.array([100., 0., 10., 0.])
    du0_dp = np.zeros((eqn.n_components(), eqn.n_parameters()))
    rk.set_initial_condition(u0, du0_dp)
    rk.set_output_gradient_flag(True)
    # setup cached simulation object
    cached_sim = CachedSEIRSimulation(rk)
    params = np.array([2.3, 0.2, 1. / 3., 1. / 4.])
    (f, df) = cached_sim(params)
    f1 = np.copy(f)
    df1 = np.copy(df)
    # clobber both returned arrays in place; the cached values must survive
    f *= 0.
    df *= 0.
    (f2, df2) = cached_sim(params)
    assert np.allclose(f1, f2)
    assert np.allclose(df1, df2)
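# A minimal sketch of the caching contract the tests above and below rely on,
# not the project's actual CachedSEIRSimulation: the solver runs only when the
# parameter vector changes (a repeated call is a cache hit; a perturbed
# parameter forces a re-solve), and the wrapper returns copies so in-place
# edits by the caller cannot corrupt the cached arrays. The mapping of params
# onto (beta, sigma, gamma, kappa) is an assumption here.
import numpy as np

class _CachedSimulationSketch:
    def __init__(self, solver):
        self._solver = solver
        self._params = None
        self._outputs = None

    def __call__(self, params):
        if self._params is None or not np.array_equal(params, self._params):
            self._params = np.copy(params)
            eqn = self._solver.equation
            # assumed parameter layout, matching the 4-vector used in the tests
            (eqn.beta, eqn.sigma, eqn.gamma, eqn.kappa) = params
            self._solver.solve()
            self._outputs = self._solver.get_outputs()
        return tuple(np.copy(o) for o in self._outputs)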
def test_rk4_solver(self):
    theta = -1.2
    eqn = SimpleEquation(theta)
    u_exact = lambda t: eqn.solution(t)[0]
    ti = 0
    tf = 5 * math.pi / 3
    u0 = u_exact(ti)
    rk = RKSolver(ti, tf)
    rk.equation = eqn
    rk.set_initial_condition(u0)
    n = 4
    err = np.zeros((n,))
    dt = np.zeros((n,))
    n_steps = 100
    for i in range(n):
        n_steps *= (i + 1)
        dt[i] = 1. / n_steps
        rk.n_steps = n_steps
        rk.solve()
        err[i] = abs(rk.state() - u_exact(tf))
    conv_rate = np.polyfit(np.log(dt), np.log(err), 1)[0]
    self.assertAlmostEqual(conv_rate, 4.0, 1)
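# Why the polyfit slope above measures the order: classical RK4 has global
# error err ~ C * dt**4, so log(err) ~ 4*log(dt) + log(C), and the slope of a
# linear fit in log-log space recovers the order. A self-contained check of
# that idea on synthetic data (independent of RKSolver):
import numpy as np

dt_demo = np.array([1e-2, 5e-3, 2.5e-3, 1.25e-3])
err_demo = 3.0 * dt_demo**4                  # model err = C * dt^4 exactly
slope = np.polyfit(np.log(dt_demo), np.log(err_demo), 1)[0]
assert abs(slope - 4.0) < 1e-10              # the fitted slope is the order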
def test_correct_caching3(self):
    # setup equation
    eqn = Seir(population=1)
    # setup ode solver
    ti = 0.
    tf = 2.
    n_steps = 3
    rk = RKSolver(ti, tf, n_steps)
    rk.output_frequency = 1
    rk.set_output_storing_flag(True)
    rk.equation = eqn
    u0 = np.array([100., 0., 10., 0.])
    rk.set_initial_condition(u0)
    # setup cached simulation object
    cached_sim = CachedSEIRSimulation(rk)
    cached_sim.set_gradient_flag(False)
    params = np.array([2.3, 0.2, 1. / 3., 1. / 4.])
    f = cached_sim(params)[0]
    f1 = np.copy(f)
    params2 = np.array([2.32, 0.2, 1. / 3., 1. / 4.])
    f2 = cached_sim(params2)[0]
    assert not np.allclose(f1, f2)
class TestModelGradOp(utt.InferShapeTester):
    rng = np.random.RandomState(43)

    def setUp(self):
        super(TestModelGradOp, self).setUp()
        self.setUpModel()

    def setUpModel(self):
        # set ode solver
        ti = 0
        tf = 20
        n_steps = tf
        self.rk = RKSolver(ti, tf, n_steps)
        self.rk.output_frequency = 1
        self.rk.set_output_storing_flag(True)
        eqn = Seir()
        eqn.tau = 5
        self.rk.equation = eqn
        n_pop = 7E6
        u0 = np.array([n_pop - 1, 0, 1, 0])
        u0 /= n_pop
        du0_dp = np.zeros((eqn.n_components(), eqn.n_parameters()))
        self.rk.set_initial_condition(u0, du0_dp)
        # set cached_sim object
        cached_sim = CachedSEIRSimulation2(self.rk)
        cached_sim.set_gradient_flag(True)
        # set theano model op object
        self.op_class = ModelGradOp(cached_sim)

    def test_perform(self):
        b = theano.tensor.dscalar('myvar0')
        s = theano.tensor.dscalar('myvar1')
        g = theano.tensor.dscalar('myvar2')
        k = theano.tensor.dscalar('myvar3')
        t = theano.tensor.dscalar('myvar4')
        dL_df = theano.tensor.matrix()
        f = theano.function([b, s, g, k, t, dL_df],
                            self.op_class((b, s, g, k, t), dL_df))
        s_val = 1. / 5.2
        g_val = 1. / 2.28
        b_val = 2.13 * g_val
        k_val = 1.1
        t_val = 10
        dL_df_val = np.random.rand(1, 21)
        out = f(b_val, s_val, g_val, k_val, t_val, dL_df_val)
        self.rk.equation.beta = b_val
        self.rk.equation.sigma = s_val
        self.rk.equation.gamma = g_val
        self.rk.equation.kappa = k_val
        self.rk.equation.tint = t_val
        self.rk.solve()
        (_, _, df_dp) = self.rk.get_outputs()
        out_act = df_dp[0, :, :] @ dL_df_val.T
        out_act = np.reshape(out_act, (self.rk.equation.n_parameters(),))
        assert np.allclose(out_act, out)
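# What the assert in test_perform above encodes: the backward pass hands the
# gradient op dL/df, the gradient of the loss with respect to the stored
# outputs, and expects back dL/dp = (df/dp) @ (dL/df)^T by the chain rule,
# i.e. a vector-Jacobian product. A tiny standalone illustration with made-up
# shapes (3 parameters, 21 stored outputs):
import numpy as np

df_dp = np.random.rand(3, 21)         # Jacobian block: n_parameters x n_outputs
dL_df = np.random.rand(1, 21)         # upstream gradient from the loss
dL_dp = (df_dp @ dL_df.T).reshape(3)  # one scalar gradient per parameter
assert dL_dp.shape == (3,)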
class TestModelOp(utt.InferShapeTester):
    rng = np.random.RandomState(43)

    def setUp(self):
        super(TestModelOp, self).setUp()
        self.setUpModel()

    def setUpModel(self):
        # set ode solver
        ti = 0
        tf = 20
        n_steps = tf
        self.rk = RKSolver(ti, tf, n_steps)
        self.rk.output_frequency = 1
        self.rk.set_output_storing_flag(True)
        eqn = Seir()
        self.rk.equation = eqn
        n_pop = 7E6
        u0 = np.array([n_pop - 1, 0, 1, 0])
        u0 /= n_pop
        du0_dp = np.zeros((eqn.n_components(), eqn.n_parameters()))
        self.rk.set_initial_condition(u0, du0_dp)
        # set cached_sim object
        cached_sim = CachedSEIRSimulation(self.rk)
        cached_sim.set_gradient_flag(True)
        # set theano model op object
        self.op_class = ModelOp(cached_sim)

    #@unittest.skip("changed output of SEIR eq")
    def test_perform(self):
        b = theano.tensor.dscalar('myvar0')
        s = theano.tensor.dscalar('myvar1')
        g = theano.tensor.dscalar('myvar2')
        f = theano.function([b, s, g], self.op_class((b, s, g)))
        s_val = 1. / 5.2
        g_val = 1. / 2.28
        b_val = 2.13 * g_val
        out = f(b_val, s_val, g_val)
        self.rk.equation.beta = b_val
        self.rk.equation.sigma = s_val
        self.rk.equation.gamma = g_val
        self.rk.solve()
        (_, out_act, _) = self.rk.get_outputs()
        assert np.allclose(out_act, out)

    def test_grad(self):
        s_val = 1. / 5.2
        g_val = 1. / 2.28
        b_val = 2.13 * g_val
        rng = np.random.RandomState(42)
        theano.tensor.verify_grad(self.op_class, [(b_val, s_val, g_val)], rng=rng)
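# Note on test_grad above: theano's verify_grad numerically differentiates the
# op at the supplied point and compares the result against the symbolic
# gradient Theano builds through the op's grad() method, raising if the two
# disagree beyond a tolerance; the seeded RandomState keeps the random
# projection reproducible. The same call pattern on a stock op, for context:
import numpy as np
import theano.tensor

rng_demo = np.random.RandomState(42)
theano.tensor.verify_grad(theano.tensor.sum, [np.random.rand(3)], rng=rng_demo)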
def test_interface(self):
    rk = RKSolver(0, 1)
    rk.initial_time = 0.5
    rk.final_time = 5
    rk.n_steps = 10
    self.assertEqual(rk.initial_time, 0.5)
    self.assertEqual(rk.final_time, 5)
    self.assertEqual(rk.n_steps, 10)
    eqn = Seir()
    rk.equation = eqn
    self.assertEqual(rk.equation, eqn)
    rk.output_frequency = 20
    self.assertEqual(rk.output_frequency, 20)
def test_rk4_gradient_computation2(self):
    n_pop = 7E6
    sigma = 1 / 5.2
    gamma = 1 / 2.28
    beta = 2.13 * gamma
    eqn = Seir(beta, sigma, gamma)
    ti = 0
    tf = 218
    n_steps = 2 * tf
    rk = RKSolver(ti, tf, n_steps)
    rk.equation = eqn
    u0 = np.array([n_pop - 1, 0, 1, 0])
    u0 /= n_pop
    du0_dp = np.zeros((eqn.n_components(), eqn.n_parameters()))
    rk.set_initial_condition(u0, du0_dp)
    rk.set_output_gradient_flag(True)
    rk.solve()
    (u, du_dp) = rk.state()
    rk.set_output_gradient_flag(False)
    epsi = 0.001
    # perturb beta
    eqn.beta = beta + epsi
    rk.solve()
    u_p1 = rk.state()
    eqn.beta = beta - epsi
    rk.solve()
    u_m1 = rk.state()
    diff = np.linalg.norm(du_dp[:, 0] - (u_p1 - u_m1) / (2 * epsi)) / np.linalg.norm(u0)
    np.testing.assert_almost_equal(diff, 0, 5)
    # reset
    eqn.beta = beta
    # perturb sigma
    eqn.sigma = sigma + epsi
    rk.solve()
    u_p1 = rk.state()
    eqn.sigma = sigma - epsi
    rk.solve()
    u_m1 = rk.state()
    diff = np.linalg.norm(du_dp[:, 1] - (u_p1 - u_m1) / (2 * epsi)) / np.linalg.norm(u0)
    np.testing.assert_almost_equal(diff, 0, 5)
    # reset
    eqn.sigma = sigma
    # perturb gamma
    eqn.gamma = gamma + epsi
    rk.solve()
    u_p1 = rk.state()
    eqn.gamma = gamma - epsi
    rk.solve()
    u_m1 = rk.state()
    diff = np.linalg.norm(du_dp[:, 2] - (u_p1 - u_m1) / (2 * epsi)) / np.linalg.norm(u0)
    np.testing.assert_almost_equal(diff, 0, 5)
    # reset
    eqn.gamma = gamma
    # perturb kappa
    kappa = 1
    eqn.kappa = kappa + epsi
    rk.solve()
    u_p1 = rk.state()
    eqn.kappa = kappa - epsi
    rk.solve()
    u_m1 = rk.state()
    diff = np.linalg.norm(du_dp[:, 3] - (u_p1 - u_m1) / (2 * epsi)) / np.linalg.norm(u0)
    np.testing.assert_almost_equal(diff, 0, 5)
    # reset
    eqn.kappa = kappa
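# The four perturb/solve/compare blocks above all apply the same second-order
# central difference,
#     du/dp_i ~ (u(p_i + eps) - u(p_i - eps)) / (2 * eps),
# one parameter at a time, against the corresponding column of du_dp. A
# hypothetical helper that factors out the pattern (attribute names assumed to
# match the Seir equation object used in these tests):
def _central_difference_column(rk, eqn, name, base, epsi=0.001):
    setattr(eqn, name, base + epsi)
    rk.solve()
    u_p1 = rk.state()
    setattr(eqn, name, base - epsi)
    rk.solve()
    u_m1 = rk.state()
    setattr(eqn, name, base)  # restore the unperturbed value
    return (u_p1 - u_m1) / (2 * epsi)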
def test_constructor(self):
    rk = RKSolver(0.5, 3, 2)
    self.assertEqual(rk.initial_time, 0.5)
    self.assertEqual(rk.final_time, 3)
    self.assertEqual(rk.n_steps, 2)
def test_rk4_gradient_computation(self):
    theta = -1.2
    eqn = SimpleEquation(theta)
    ti = 0
    tf = 5 * math.pi / 3
    (u0, du0_dp) = eqn.solution(ti)
    rk = RKSolver(ti, tf)
    rk.equation = eqn
    rk.set_initial_condition(u0, du0_dp)
    rk.set_output_gradient_flag(True)
    n = 4
    err_soln = np.zeros((n,))
    err_grad = np.zeros((n,))
    dt = np.zeros((n,))
    n_steps = 100
    for i in range(n):
        n_steps *= (i + 1)
        dt[i] = 1. / n_steps
        rk.n_steps = n_steps
        rk.solve()
        err_soln[i] = abs(rk.state()[0] - eqn.solution(tf)[0])
        err_grad[i] = abs(rk.state()[1] - eqn.solution(tf)[1])
    conv_rate = np.polyfit(np.log(dt), np.log(err_soln), 1)[0]
    self.assertAlmostEqual(conv_rate, 4.0, 1)
    conv_rate = np.polyfit(np.log(dt), np.log(err_grad), 1)[0]
    self.assertAlmostEqual(conv_rate, 4.0, 1)
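# Here rk.state() returns the pair (u, du/dtheta): the solver integrates the
# forward-sensitivity equation alongside the state. For a scalar model
# u' = theta*u, the sensitivity s = du/dtheta satisfies s' = u + theta*s, with
# exact solution u = u0*exp(theta*t) and s = t*u0*exp(theta*t). A standalone
# RK4 check of that augmented system (hypothetical scalar model, not the
# project's SimpleEquation):
import numpy as np

theta, u0, tf, n = -1.2, 1.0, 2.0, 2000
h = tf / n
rhs = lambda y: np.array([theta * y[0], y[0] + theta * y[1]])
y = np.array([u0, 0.0])                # state and sensitivity, s(0) = 0
for _ in range(n):
    k1 = rhs(y)
    k2 = rhs(y + 0.5 * h * k1)
    k3 = rhs(y + 0.5 * h * k2)
    k4 = rhs(y + h * k3)
    y = y + (h / 6.0) * (k1 + 2. * k2 + 2. * k3 + k4)
exact = np.array([u0 * np.exp(theta * tf), tf * u0 * np.exp(theta * tf)])
assert np.allclose(y, exact, atol=1e-9)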