Example #1
    def test_chkrebtii(self):
        # LHS vector of ODE
        w_mat = np.array([[0.0, 0.0, 1.0]])

        # These parameters define the order of the ODE and of the IBM prior process
        n_deriv = [2]
        n_deriv_prior = [4]

        # The solution is sought on the interval [tmin, tmax], discretized into n_eval steps.
        n_eval = 100
        tmin = 0
        tmax = 10

        # IBM process scale factor
        sigma = [.5]

        # Initial value, x0, for the IVP
        x0 = np.array([-1., 0., 1.])
        x0_state = zero_pad(x0, n_deriv, n_deriv_prior)
        W = zero_pad(w_mat, n_deriv, n_deriv_prior)

        # Get parameters needed to run the solver
        dt = (tmax - tmin) / n_eval
        # The Kalman prior parameters needed by the solver are returned in kinit
        kinit = ibm_init(dt, n_deriv_prior, sigma)
        z_state = rand_mat(n_eval, sum(n_deriv_prior))

        # Get Cython solution
        kode_cy = KalmanODE_cy(W, tmin, tmax, n_eval, chkrebtii_kalman,
                               **kinit)
        kode_cy.z_state = z_state
        # Run the solver to get an approximation
        ksim_cy = kode_cy.solve_sim(x0_state)

        # Get Eigen solution
        kode_ei = KalmanODE_ei(W, tmin, tmax, n_eval, chkrebtii_kalman,
                               **kinit)
        kode_ei.z_state = z_state
        ksim_ei = kode_ei.solve_sim(x0_state)

        # Get Numba solution
        kode_nb = KalmanODE_nb(W, tmin, tmax, n_eval, chkrebtii_kalman_nb,
                               **kinit)
        kode_nb.z_state = z_state
        ksim_nb = kode_nb.solve_sim(x0_state, W, None)

        # Get Python solution
        kalmanode_py = KalmanODE_py(W, tmin, tmax, n_eval, chkrebtii_kalman,
                                    **kinit)  # Initialize the class
        kalmanode_py.z_state = z_state
        ksim_py = kalmanode_py.solve_sim(x0_state, W)

        self.assertLessEqual(rel_err(ksim_cy[:, 0], ksim_py[:, 0]), 0.001)
        self.assertLessEqual(rel_err(ksim_cy[1:, 1], ksim_py[1:, 1]), 0.001)
        self.assertLessEqual(rel_err(ksim_cy[:, 0], ksim_ei[:, 0]), 0.001)
        self.assertLessEqual(rel_err(ksim_cy[1:, 1], ksim_ei[1:, 1]), 0.001)
        self.assertLessEqual(rel_err(ksim_cy[:, 0], ksim_nb[:, 0]), 0.001)
        self.assertLessEqual(rel_err(ksim_cy[1:, 1], ksim_nb[1:, 1]), 0.001)
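The ODE right-hand side `chkrebtii_kalman` is not shown in this test. Below is a minimal sketch of what it plausibly computes, inferred from `w_mat = [[0., 0., 1.]]` (which selects the second derivative) and `x0 = [-1., 0., 1.]`: the second-order ODE x''(t) = sin(2t) - x(t) from the Chkrebtii et al. example. The exact call signature each backend expects (extra `theta` or output-buffer arguments) is an assumption here.

import numpy as np

# Hypothetical sketch of the test's RHS; the actual helper may differ in signature.
def chkrebtii_kalman(x, t, theta=None):
    # x is the zero-padded state (x, x', x'', x'''); only x[0] enters the RHS.
    # Returns the value that W @ x, i.e. x''(t), should equal.
    return np.array([np.sin(2 * t) - x[0]])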
Example #2
def chkrebtii_example():
    r"Produces the graph in Figure 1 of the paper."
    # 2.  Define the IVP

    W = np.array([[0.0, 0.0, 1.0]])  # LHS vector of ODE
    x0 = np.array([-1., 0., 1.])  # initial value for the IVP

    # Time interval on which a solution is sought.
    tmin = 0
    tmax = 10

    # 3.  Define the prior process
    #
    # We use a solution prior with one more derivative than specified in the
    # IVP.  To do this, we pad the original IVP with zeros using the
    # convenience function zero_pad().

    n_deriv = [2]  # number of derivatives in IVP
    n_deriv_prior = [4]  # number of derivatives in IBM prior

    # zero padding
    W_pad = zero_pad(W, n_deriv, n_deriv_prior)
    x0_pad = zero_pad(x0, n_deriv, n_deriv_prior)

    # IBM process scale factor
    sigma = [.5]

    # 4.  Instantiate the ODE solver object.

    n_points = 80  # number of steps into which the time interval is discretized
    dt = (tmax - tmin) / n_points  # step size

    # generate the Kalman parameters corresponding to the prior
    prior = ibm_init(dt, n_deriv_prior, sigma)

    # instantiate the ODE solver
    ode = KalmanODE(W=W_pad,
                    tmin=tmin,
                    tmax=tmax,
                    n_eval=n_points,
                    fun=ode_fun,
                    **prior)

    # 5.  Evaluate the ODE solution

    # deterministic output: posterior mean
    mut, Sigmat = ode.solve_mv(x0=x0_pad)

    # probabilistic output: draw from posterior
    xt = ode.solve_sim(x0=x0_pad)

    # Produces the graph in Figure 1
    draws = 100
    readme_graph(ode_fun, n_deriv, n_deriv_prior, tmin, tmax, W, x0, draws)
    return
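A hedged usage sketch of the two kinds of output above: `mut` (posterior mean from `solve_mv`) and `xt` (a single posterior draw from `solve_sim`). It continues with the variables defined inside `chkrebtii_example()` and uses plain matplotlib rather than rodeo's `readme_graph` helper; column 0 of the solver output is assumed to hold the solution value x(t).

import numpy as np
import matplotlib.pyplot as plt

tseq = np.linspace(tmin, tmax, n_points + 1)   # solver grid, including t = tmin
plt.plot(tseq, mut[:, 0], label="posterior mean of x(t)")
plt.plot(tseq, xt[:, 0], label="one posterior draw of x(t)", alpha=0.7)
plt.xlabel("t")
plt.legend()
plt.show()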
Example #3
def readme_kalman_draw(fun, n_deriv, n_deriv_prior, n_eval, tmin, tmax, sigma,
                       w_mat, init, draws):
    """Return `draws` independent posterior draws of the ODE solution."""
    dt = (tmax - tmin) / n_eval
    X = np.zeros((draws, n_eval + 1, sum(n_deriv_prior)))
    # pad the IVP inputs to the dimension of the IBM prior
    W = zero_pad(w_mat, n_deriv, n_deriv_prior)
    x0_state = zero_pad(init, n_deriv, n_deriv_prior)
    prior = ibm_init(dt, n_deriv_prior, sigma)
    kalmanode = KalmanODE(W, tmin, tmax, n_eval, fun, **prior)
    for i in range(draws):
        X[i] = kalmanode.solve_sim(x0_state, W)
        # clear the cached random draws so the next call uses fresh randomness
        del kalmanode.z_state
    return X
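A hedged usage sketch, assuming the Chkrebtii setup from the earlier examples and a right-hand side like the one sketched after Example #1; the argument values below are illustrative, not from the source.

import numpy as np

X = readme_kalman_draw(fun=chkrebtii_kalman, n_deriv=[2], n_deriv_prior=[4],
                       n_eval=100, tmin=0, tmax=10, sigma=[.5],
                       w_mat=np.array([[0., 0., 1.]]),
                       init=np.array([-1., 0., 1.]), draws=100)

# Pointwise 95% interval for the solution x(t) (state column 0) across draws.
lower, upper = np.quantile(X[:, :, 0], [0.025, 0.975], axis=0)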
Example #4
    def test_chkrebtii(self):
        # LHS vector of ODE
        w_mat = np.array([[0.0, 0.0, 1.0]])

        # These parameters define the order of the ODE and of the IBM prior process
        n_deriv = [2]
        n_deriv_prior = [4]

        # The solution is sought on the interval [tmin, tmax], discretized into n_eval steps.
        n_eval = 300
        tmin = 0
        tmax = 10

        # IBM process scale factor
        sigma = [.5]

        # Initial value, x0, for the IVP
        x0 = np.array([-1., 0., 1.])
        x0_state = zero_pad(x0, n_deriv, n_deriv_prior)
        W = zero_pad(w_mat, n_deriv, n_deriv_prior)

        # Get parameters needed to run the solver
        dt = (tmax - tmin) / n_eval
        # The Kalman prior parameters needed by the solver are returned in kinit
        kinit = ibm_init(dt, n_deriv_prior, sigma)

        # Initialize the Kalman class
        kalmanode = KalmanODE(W, tmin, tmax, n_eval, chkrebtii_kalman, **kinit)
        # Run the solver to get an approximation
        kalman_sim = kalmanode.solve_sim(x0_state)

        # Get deterministic solution from odeint
        tseq = np.linspace(tmin, tmax, n_eval + 1)
        detode = integrate.odeint(chkrebtii_odeint, [-1, 0], tseq)
        self.assertLessEqual(rel_err(kalman_sim[:, 0], detode[:, 0]), 10.0)
        self.assertLessEqual(rel_err(kalman_sim[1:, 1], detode[1:, 1]), 10.0)
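For reference, a sketch of the first-order form that `chkrebtii_odeint` presumably implements for `scipy.integrate.odeint`, inferred from the initial value `[-1, 0]` and the second-order ODE x''(t) = sin(2t) - x(t); the actual helper lives in the test module.

import numpy as np

# Hypothetical sketch of the odeint RHS used for the deterministic comparison.
def chkrebtii_odeint(x_t, t):
    # state is (x, x'), so dx/dt = x' and dx'/dt = sin(2t) - x
    return [x_t[1], np.sin(2 * t) - x_t[0]]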
Example #5
File: mseir.py  Project: mlysy/rodeo
def mseir_example():
    "Perform parameter inference using the MSEIR function."
    # These parameters define the order of the ODE and of the IBM prior process
    n_deriv = [1] * 5  # number of derivatives per state variable in the IVP
    n_deriv_prior = [3] * 5
    state_ind = [0, 3, 6, 9, 12]  # Index of 0th derivative of each state

    # The solution is sought on the interval [tmin, tmax].
    tmin = 0
    tmax = 40

    # The remaining parameters can be tuned to the ODE; for this problem we use:
    n_var = 5
    sigma = [.1] * n_var

    # True parameter values and initial value, x0, for the IVP
    theta_true = (1.1, 0.7, 0.4, 0.005, 0.02, 0.03)  # True theta
    x0 = np.array([1000, 100, 50, 3, 3])
    v0 = mseir(x0, 0, theta_true)
    X0 = np.ravel([x0, v0], 'F')

    # W matrix: dimension is n_eq x (sum(n_deriv) + n_eq)
    W_mat = np.zeros((len(n_deriv), sum(n_deriv) + len(n_deriv)))
    for i in range(len(n_deriv)):
        W_mat[i, sum(n_deriv[:i]) + i + 1] = 1
    W = zero_pad(W_mat, n_deriv, n_deriv_prior)

    # logprior parameters
    n_theta = len(theta_true)
    phi_sd = np.ones(n_theta)

    # Observation noise
    gamma = 0.2

    # Number of samples to draw from posterior
    n_samples = 100000

    # Initialize inference class and simulate observed data
    inf = inference(state_ind, tmin, tmax, mseir)
    Y_t = inf.simulate(mseir, x0, theta_true, gamma)

    # Parameter inference using Euler's approximation
    hlst = np.array([0.1, 0.05, 0.02, 0.01, 0.005])
    theta_euler = np.zeros((len(hlst), n_samples, n_theta))
    for i in range(len(hlst)):
        phi_hat, phi_var = inf.phi_fit(Y_t, x0, hlst[i], theta_true, phi_sd,
                                       gamma, False)
        theta_euler[i] = inf.theta_sample(phi_hat, phi_var, n_samples)

    # Parameter inference using Kalman solver
    theta_kalman = np.zeros((len(hlst), n_samples, n_theta))
    for i in range(len(hlst)):
        ode_init = ibm_init(hlst[i], n_deriv_prior, sigma)
        x0_state = zero_pad(X0, n_deriv, n_deriv_prior)
        kinit = indep_init(ode_init, n_deriv_prior)
        n_eval = int((tmax - tmin) / hlst[i])
        kode = KalmanODE(W, tmin, tmax, n_eval, mseir, **kinit)
        inf.kode = kode
        inf.W = W
        phi_hat, phi_var = inf.phi_fit(Y_t, x0_state, hlst[i], theta_true,
                                       phi_sd, gamma, True)
        theta_kalman[i] = inf.theta_sample(phi_hat, phi_var, n_samples)

    # Produces the graph in Figure 4
    inf.theta_plot(theta_euler, theta_kalman, theta_true, hlst)
    return
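For clarity, the W-matrix loop above builds a selection matrix that picks out the first derivative of each of the five state variables; a standalone sketch of what it produces for n_deriv = [1] * 5:

import numpy as np

n_deriv = [1] * 5
# Each state contributes n_deriv[i] + 1 = 2 columns (value, first derivative),
# so W_mat is 5 x 10; row i has a single 1 in the column of state i's first derivative.
W_mat = np.zeros((len(n_deriv), sum(n_deriv) + len(n_deriv)))
for i in range(len(n_deriv)):
    W_mat[i, sum(n_deriv[:i]) + i + 1] = 1
# Ones end up at positions (0, 1), (1, 3), (2, 5), (3, 7), (4, 9).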
Example #6
# The remaining parameters can be tuned to the ODE; for this problem we use:
n_var = 2
sigma = [.1] * n_var

# Initial values and ODE parameters for the IVP
x0 = np.array([-1., 1.])
X0 = np.array([-1, 1, 1, 1 / 3])
w_mat = np.array([[0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0]])
W = zero_pad(w_mat, n_deriv, n_deriv_prior)
x0_state = zero_pad(X0, n_deriv, n_deriv_prior)
theta = np.array([0.2, 0.2, 3])

# Get parameters needed to run the solver
dt = (tmax - tmin) / n_eval
ode_init = ibm_init(dt, n_deriv_prior, sigma)
kinit = indep_init(ode_init, n_deriv_prior)
z_state = rand_mat(n_eval, p)

# pick ode function with ndarray or ctuple inputs
ode_fun = ode_fun_ct if use_ctuple else ode_fun_nd
if use_ctuple:
    theta = tuple(theta)

# Timings
n_loops = 100
# C++
kode_c = KalmanODE_c(W, tmin, tmax, n_eval, ode_fun, **kinit)
kode_c.z_state = z_state
time_c = timing(kode_c, x0_state, W, theta, n_loops)
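The `timing` helper is not shown in this snippet. Below is a minimal sketch of what such a helper could look like, assuming it reports average wall-clock time per solve; the exact `solve_sim` signature varies by backend (see Example #1), so treat the call inside the loop as illustrative.

import time

# Hypothetical sketch of the timing helper used above.
def timing(kode, x0_state, W, theta, n_loops):
    # average wall-clock seconds per call to solve_sim over n_loops repetitions
    start = time.perf_counter()
    for _ in range(n_loops):
        kode.solve_sim(x0_state, W, theta)
    return (time.perf_counter() - start) / n_loops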
Example #7
File: fitz.py  Project: mlysy/rodeo
def fitz_example():
    "Perform parameter inference using the FitzHugh-Nagumo function."
    # These parameters define the order of the ODE and of the IBM prior process
    n_deriv = [1, 1]  # number of derivatives per state variable in the IVP
    n_deriv_prior = [3, 3]
    state_ind = [0, 3]  # Index of 0th derivative of each state

    # The solution is sought on the interval [tmin, tmax].
    tmin = 0
    tmax = 40

    # The remaining parameters can be tuned to the ODE; for this problem we use:
    n_var = 2
    sigma = [.1] * n_var

    # Initial value, x0, for the IVP
    x0 = np.array([-1., 1.])
    v0 = np.array([1, 1 / 3])
    X0 = np.ravel([x0, v0], 'F')

    # pad the inputs
    w_mat = np.array([[0., 1., 0., 0.], [0., 0., 0., 1.]])
    W = zero_pad(w_mat, n_deriv, n_deriv_prior)
    x0_state = zero_pad(X0, n_deriv, n_deriv_prior)

    # logprior parameters
    theta_true = np.array([0.2, 0.2, 3])  # True theta
    n_theta = len(theta_true)
    phi_sd = np.ones(n_theta)

    # Observation noise
    gamma = 0.2

    # Number of samples to draw from posterior
    n_samples = 100000

    # Initialize inference class and simulate observed data
    inf = inference(state_ind, tmin, tmax, fitz)
    Y_t = inf.simulate(fitz, x0, theta_true, gamma)

    # Parameter inference using Euler's approximation
    hlst = np.array([0.1, 0.05, 0.02, 0.01, 0.005])
    theta_euler = np.zeros((len(hlst), n_samples, n_theta))
    for i in range(len(hlst)):
        phi_hat, phi_var = inf.phi_fit(Y_t, x0, hlst[i], theta_true, phi_sd,
                                       gamma, False)
        theta_euler[i] = inf.theta_sample(phi_hat, phi_var, n_samples)

    # Parameter inference using Kalman solver
    theta_kalman = np.zeros((len(hlst), n_samples, n_theta))
    for i in range(len(hlst)):
        ode_init = ibm_init(hlst[i], n_deriv_prior, sigma)
        kinit = indep_init(ode_init, n_deriv_prior)
        n_eval = int((tmax - tmin) / hlst[i])
        kode = KalmanODE(W, tmin, tmax, n_eval, fitz, **kinit)
        inf.kode = kode
        inf.W = W
        phi_hat, phi_var = inf.phi_fit(Y_t, x0_state, hlst[i], theta_true,
                                       phi_sd, gamma, True)
        theta_kalman[i] = inf.theta_sample(phi_hat, phi_var, n_samples)

    # Produces the graph in Figure 3
    inf.theta_plot(theta_euler, theta_kalman, theta_true, hlst)
    return
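The `fitz` right-hand side itself is not shown. As a sanity check on the setup above, here is a sketch of the FitzHugh-Nagumo equations with theta = (a, b, c); at x0 = (V, R) = (-1, 1) and theta = (0.2, 0.2, 3) they give v0 = (1, 1/3), matching the example. The actual rodeo `fitz` operates on the zero-padded solver state, so its indexing and signature differ from this plain version (named `fitz_rhs` here to avoid confusion).

import numpy as np

def fitz_rhs(x, t, theta):
    # FitzHugh-Nagumo: V' = c*(V - V**3/3 + R),  R' = -(V - a + b*R)/c
    a, b, c = theta
    V, R = x[0], x[1]
    return np.array([c * (V - V**3 / 3 + R), -(V - a + b * R) / c])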