Example #1
    def test_correct_caching3(self):
        # setup equation
        eqn = Seir(population=1)

        # setup ode solver
        ti = 0.
        tf = 2.
        n_steps = 3
        rk = RKSolver(ti, tf, n_steps)
        rk.output_frequency = 1
        rk.set_output_storing_flag(True)
        rk.equation = eqn
        u0 = np.array([100., 0., 10., 0.])
        rk.set_initial_condition(u0)

        # setup cached simulation object
        cached_sim = CachedSEIRSimulation(rk)
        cached_sim.set_gradient_flag(False)

        params = np.array([2.3, 0.2, 1. / 3., 1. / 4.])

        f = cached_sim(params)[0]

        f1 = np.copy(f)

        params2 = np.array([2.32, 0.2, 1. / 3., 1. / 4.])

        f2 = cached_sim(params2)[0]

        # a perturbed parameter vector must not be served from the cache
        assert not np.allclose(f1, f2)
Example #2
    def test_correct_caching2(self):
        # setup equation
        eqn = Seir(population=1)

        # setup ode solver
        ti = 0.
        tf = 2.
        n_steps = 3
        rk = RKSolver(ti, tf, n_steps)
        rk.output_frequency = 1
        rk.set_output_storing_flag(True)
        rk.equation = eqn
        u0 = np.array([100., 0., 10., 0.])
        du0_dp = np.zeros((eqn.n_components(), eqn.n_parameters()))
        rk.set_initial_condition(u0, du0_dp)
        rk.set_output_gradient_flag(True)

        # setup cached simulation object
        cached_sim = CachedSEIRSimulation(rk)

        params = np.array([2.3, 0.2, 1. / 3., 1. / 4.])

        (f, df) = cached_sim(params)

        f1 = np.copy(f)
        df1 = np.copy(df)

        # clobber the returned arrays; the cached values must stay intact
        f *= 0.
        df *= 0.

        (f2, df2) = cached_sim(params)

        assert np.allclose(f1, f2)
        assert np.allclose(df1, df2)
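
The two caching tests above exercise a wrapper that memoizes solver runs per parameter vector and hands back copies, so that in-place edits by the caller cannot corrupt the cache. The project's CachedSEIRSimulation class is not shown here; the following is only a minimal sketch of that idea, and update_parameters is a hypothetical stand-in for however the real class pushes parameters into the equation.

import numpy as np

class ParamCachedSimulation:
    """Memoize solver outputs keyed by the parameter vector (illustrative sketch)."""

    def __init__(self, solver):
        self._solver = solver
        self._cache = {}

    def __call__(self, params):
        key = np.asarray(params, dtype=float).tobytes()
        if key not in self._cache:
            self._cache[key] = self._run(params)
        # hand back copies so the caller's in-place edits leave the cache intact
        return tuple(np.copy(a) for a in self._cache[key])

    def _run(self, params):
        # hypothetical hook: the real class presumably pushes the parameters
        # (e.g. beta/sigma/gamma/kappa) into the equation before solving
        self._solver.equation.update_parameters(params)
        self._solver.solve()
        state = self._solver.state()
        return state if isinstance(state, tuple) else (state,)
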
Example #3
    def test_rk4_gradient_computation(self):
        theta = -1.2
        eqn = SimpleEquation(theta)

        ti = 0
        tf = 5*math.pi/3
        (u0, du0_dp) = eqn.solution(ti)
        rk = RKSolver(ti, tf)
        rk.equation = eqn
        rk.set_initial_condition(u0, du0_dp)
        rk.set_output_gradient_flag(True)
        n = 4
        err_soln = np.zeros((n,))
        err_grad = np.zeros((n,))
        dt = np.zeros((n,))
        n_steps = 100
        # refine the time step and record the solution and gradient errors
        for i in range(n):
            n_steps *= (i+1)
            dt[i] = 1./n_steps
            rk.n_steps = n_steps
            rk.solve()
            err_soln[i] = abs(rk.state()[0] - eqn.solution(tf)[0])
            err_grad[i] = abs(rk.state()[1] - eqn.solution(tf)[1])

        conv_rate = np.polyfit(np.log(dt), np.log(err_soln), 1)[0]
        self.assertAlmostEqual(conv_rate, 4.0, 1)

        conv_rate = np.polyfit(np.log(dt), np.log(err_grad), 1)[0]
        self.assertAlmostEqual(conv_rate, 4.0, 1)
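
Example #3 checks that the parameter sensitivity propagated by the solver (rk.state()[1]) converges at the same fourth-order rate as the solution itself. SimpleEquation's actual form is not shown here; as a self-contained illustration of the same idea, the sketch below augments the scalar ODE du/dt = theta*u with its forward-sensitivity equation ds/dt = u + theta*s (where s = du/dtheta), integrates both with a hand-rolled classical RK4, and recovers an observed order close to 4 for both components.

import numpy as np

def rhs(t, y, theta):
    u, s = y                                    # s approximates du/dtheta
    return np.array([theta * u, u + theta * s])

def rk4(f, y0, ti, tf, n_steps, theta):
    dt = (tf - ti) / n_steps
    t, y = ti, np.array(y0, dtype=float)
    for _ in range(n_steps):
        k1 = f(t, y, theta)
        k2 = f(t + dt / 2, y + dt / 2 * k1, theta)
        k3 = f(t + dt / 2, y + dt / 2 * k2, theta)
        k4 = f(t + dt, y + dt * k3, theta)
        y = y + dt / 6 * (k1 + 2 * k2 + 2 * k3 + k4)
        t += dt
    return y

theta, ti, tf = -1.2, 0.0, 1.0
exact = np.array([np.exp(theta * tf), tf * np.exp(theta * tf)])  # u and du/dtheta
steps = np.array([20, 40, 80, 160])
errs = np.array([np.abs(rk4(rhs, [1.0, 0.0], ti, tf, n, theta) - exact)
                 for n in steps])
rates = np.polyfit(np.log(1.0 / steps), np.log(errs), 1)[0]
print(rates)  # both entries should be close to 4
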
Example #4
    def test_rk4_solver(self):
        theta = -1.2
        eqn = SimpleEquation(theta)
        u_exact = lambda t: eqn.solution(t)[0]

        ti = 0
        tf = 5*math.pi/3
        u0 = u_exact(ti)
        rk = RKSolver(ti, tf)
        rk.equation = eqn
        rk.set_initial_condition(u0)

        n = 4
        err = np.zeros((n,))
        dt = np.zeros((n,))
        n_steps = 100
        # refine the time step and record the error against the exact solution
        for i in range(n):
            n_steps *= (i+1)
            dt[i] = 1./n_steps
            rk.n_steps = n_steps
            rk.solve()
            err[i] = abs(rk.state() - u_exact(tf))

        conv_rate = np.polyfit(np.log(dt), np.log(err), 1)[0]
        self.assertAlmostEqual(conv_rate, 4.0, 1)
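
Both convergence tests above estimate the order of accuracy from the slope of a least-squares line in log-log coordinates: if err is approximately C * dt**p, then log(err) = p*log(dt) + log(C), so the leading polyfit coefficient approximates p (about 4 for classical RK4). A tiny synthetic check of that extraction, using the same step counts produced by n_steps *= (i+1):

import numpy as np

dt = 1.0 / np.array([100., 200., 600., 2400.])  # refinement path used by the tests
err = 3.7 * dt**4                               # synthetic data obeying err = C * dt**4
p = np.polyfit(np.log(dt), np.log(err), 1)[0]
assert abs(p - 4.0) < 1e-8                      # the fitted slope recovers the order
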
Example #5
    def test_interface(self):
        rk = RKSolver(0, 1)
        rk.initial_time = 0.5
        rk.final_time = 5
        rk.n_steps = 10
        self.assertEqual(rk.initial_time, 0.5)
        self.assertEqual(rk.final_time, 5)
        self.assertEqual(rk.n_steps, 10)

        eqn = Seir()
        rk.equation = eqn
        self.assertEqual(rk.equation, eqn)

        rk.output_frequency = 20
        self.assertEqual(rk.output_frequency, 20)
Example #6
    def test_rk4_gradient_computation2(self):
        n_pop = 7E6
        sigma = 1/5.2
        gamma = 1/2.28
        beta = 2.13*gamma
        eqn = Seir(beta, sigma, gamma)

        ti = 0
        tf = 218
        n_steps = 2*tf
        rk = RKSolver(ti, tf, n_steps)
        rk.equation = eqn

        u0 = np.array([n_pop - 1, 0, 1, 0])
        u0 /= n_pop
        du0_dp = np.zeros((eqn.n_components(), eqn.n_parameters()))
        rk.set_initial_condition(u0, du0_dp)
        rk.set_output_gradient_flag(True)

        rk.solve()
        (u, du_dp) = rk.state()

        rk.set_output_gradient_flag(False)

        epsi = 0.001

        # perturb beta
        eqn.beta = beta + epsi
        rk.solve()
        u_p1 = rk.state()

        eqn.beta = beta - epsi
        rk.solve()
        u_m1 = rk.state()
        diff = np.linalg.norm(du_dp[:,0] - (u_p1 - u_m1)/(2*epsi))/np.linalg.norm(u0)
        np.testing.assert_almost_equal(diff, 0, 5)
        # reset
        eqn.beta = beta

        # perturb sigma
        eqn.sigma = sigma + epsi
        rk.solve()
        u_p1 = rk.state()

        eqn.sigma = sigma - epsi
        rk.solve()
        u_m1 = rk.state()
        diff = np.linalg.norm(du_dp[:,1] - (u_p1 - u_m1)/(2*epsi))/np.linalg.norm(u0)
        np.testing.assert_almost_equal(diff, 0, 5)
        # reset
        eqn.sigma = sigma

        # perturb gamma
        eqn.gamma = gamma + epsi
        rk.solve()
        u_p1 = rk.state()

        eqn.gamma = gamma - epsi
        rk.solve()
        u_m1 = rk.state()
        diff = np.linalg.norm(du_dp[:,2] - (u_p1 - u_m1)/(2*epsi))/np.linalg.norm(u0)
        np.testing.assert_almost_equal(diff, 0, 5)
        # reset
        eqn.gamma = gamma

        # perturb kappa
        kappa = 1  # not set above; the test assumes the equation's default kappa is 1
        eqn.kappa = kappa + epsi
        rk.solve()
        u_p1 = rk.state()

        eqn.kappa = kappa - epsi
        rk.solve()
        u_m1 = rk.state()
        diff = np.linalg.norm(du_dp[:,3] - (u_p1 - u_m1)/(2*epsi))/np.linalg.norm(u0)
        np.testing.assert_almost_equal(diff, 0, 5)
        # reset
        eqn.kappa = kappa
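
Example #6 validates each column of the propagated sensitivity du_dp against a second-order central difference, du/dp ≈ (u(p+eps) - u(p-eps)) / (2*eps), whose truncation error is O(eps**2) ≈ 1e-6 for eps = 1e-3, consistent with the 5-decimal tolerance in the assertions. Below is a generic sketch of that check for an arbitrary forward model; the helper name and the normalization are illustrative only, not part of the project.

import numpy as np

def check_gradient_column(forward, params, i, analytic_col, eps=1e-3, decimal=5):
    """Compare the i-th column of an analytic Jacobian against a central difference."""
    p_plus = params.astype(float)
    p_minus = params.astype(float)
    p_plus[i] += eps
    p_minus[i] -= eps
    fd_col = (forward(p_plus) - forward(p_minus)) / (2.0 * eps)
    rel_diff = np.linalg.norm(analytic_col - fd_col) / np.linalg.norm(forward(params))
    np.testing.assert_almost_equal(rel_diff, 0.0, decimal)
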