示例#1
0
文件: BI_EKF.py 项目: wme7/CEDA
def BI_EKF(params):
    """Bayesian-inference EKF driver.

    Unpacks the experiment description in *params*, runs the adaptive
    extended Kalman filter once over the temporal window, and returns the
    filtered states together with the adapted noise variances and the
    usual diagnostics (log-likelihood, RMSE).
    """
    state_size = params['state_size']
    obs_size = params['observation_size']
    window = params['temporal_window_size']
    observations = params['observations']

    Xa, Pa, Xf, Pf, H, sigma2_Q_adapt, sigma2_R_adapt = _bayesian_inference_EKF(
        state_size, obs_size, window,
        params['initial_background_state'],
        params['initial_background_covariance'],
        params['initial_model_noise_variance'],
        params['initial_observation_noise_variance'],
        observations,
        params['model_dynamics'], params['model_jacobian'],
        params['observation_operator'], params['observation_jacobian'],
        params['adaptive_parameter'])

    # Score the run with the last adapted observation-noise variance.
    R_final = sigma2_R_adapt[window] * np.eye(obs_size, obs_size)
    loglik = _likelihood(Xf, Pf, observations, R_final, H)
    rmse = RMSE(Xa - params['true_state'])

    return {
        'filtered_states': Xa,
        'adaptive_model_noise_variance': sigma2_Q_adapt,
        'adaptive_observation_noise_variance': sigma2_R_adapt,
        'loglikelihood': loglik,
        'RMSE': rmse,
        'params': params
    }
示例#2
0
def CI_EKF(params):
    """Covariance-inflation EKF driver.

    Runs the adaptive covariance-inflation extended Kalman filter once and
    returns the filtered states, the adapted inflation factor and
    observation-noise variance, plus log-likelihood and RMSE diagnostics.
    """
    state_size = params['state_size']
    obs_size = params['observation_size']
    window = params['temporal_window_size']
    observations = params['observations']

    Xa, Pa, Xf, Pf, H, lambda_adapt, sigma2_R_adapt, F_all, K_all = \
        _adaptive_covariance_inflation_EKF(
            state_size, obs_size, window,
            params['initial_background_state'],
            params['initial_background_covariance'],
            params['initial_multiplicative_inflation'],
            params['initial_observation_noise_variance'],
            observations,
            params['model_dynamics'], params['model_jacobian'],
            params['observation_operator'], params['observation_jacobian'],
            params['adaptive_parameter'])

    # Evaluate the likelihood with the final adapted observation variance.
    R_final = sigma2_R_adapt[window] * np.eye(obs_size, obs_size)
    loglik = _likelihood(Xf, Pf, observations, R_final, H)
    rmse = RMSE(Xa - params['true_state'])

    return {
        'filtered_states': Xa,
        'adaptive_multiplicative_inflation': lambda_adapt,
        'adaptive_observation_noise_variance': sigma2_R_adapt,
        'loglikelihood': loglik,
        'RMSE': rmse,
        'params': params
    }
示例#3
0
文件: LI_EKS.py 项目: wme7/CEDA
def LI_EKF(params):
    """Lag-innovation adaptive EKF driver.

    Runs the adaptive EKF once with full model (Q) and observation (R)
    noise covariance matrices, and returns the filtered states, the
    adapted covariances, and log-likelihood / RMSE diagnostics.
    """
    state_size = params['state_size']
    obs_size = params['observation_size']
    window = params['temporal_window_size']
    observations = params['observations']

    # Optional template for a constant-structure Q. NOTE(review): baseQ is
    # not consumed in this function — kept to preserve the key lookup.
    if params['model_noise_covariance_structure'] == 'const':
        baseQ = params['model_noise_covariance_matrix_template']
    else:
        baseQ = None

    Xa, Pa, Xf, Pf, H, Q_adapt, R_adapt = _adaptive_EKF(
        state_size, obs_size, window,
        params['initial_background_state'],
        params['initial_background_covariance'],
        params['initial_model_noise_covariance'],
        params['initial_observation_noise_covariance'],
        observations,
        params['model_dynamics'], params['model_jacobian'],
        params['observation_operator'], params['observation_jacobian'],
        params['inflation_factor'], params['adaptive_parameter'])

    # Likelihood under the last adapted R (alternative: nanmedian over time).
    loglik = _likelihood(Xf, Pf, observations, R_adapt[:, :, window], H)
    rmse = RMSE(Xa - params['true_state'])

    return {
        'filtered_states': Xa,
        'LI_model_noise_covariance': Q_adapt,
        'LI_observation_noise_covariance': R_adapt,
        'loglikelihood': loglik,
        'RMSE': rmse,
        'params': params
    }
示例#4
0
文件: BI_EKF.py 项目: wme7/CEDA
def _bayesian_inference_EKF(Nx, No, T, xb, B, sigma2_Q_init, sigma2_R_init, Yo,
                            f, jacF, h, jacH, tau):
    Xa = np.zeros((Nx, T + 1))
    Xf = np.zeros((Nx, T))
    Pa = np.zeros((Nx, Nx, T + 1))
    Pf = np.zeros((Nx, Nx, T))
    F_all = np.zeros((Nx, Nx, T))
    H_all = np.zeros((No, Nx, T))

    K_all = np.zeros((Nx, No, T))
    d_all = np.zeros((No, T))
    sigma2_Q_adapt = np.zeros((T + 1))
    sigma2_R_adapt = np.zeros((T + 1))

    x = xb
    Xa[:, 0] = x
    P = B
    Pa[:, :, 0] = P
    sigma2_Q_adapt[0] = sigma2_Q_init
    sigma2_R_adapt[0] = sigma2_R_init
    sigma2_Q = sigma2_Q_init
    sigma2_R = sigma2_R_init

    for t in range(T):

        # Linearization
        F = jacF(x)
        H = jacH(x)
        F_all[:, :, t] = F
        H_all[:, :, t] = H

        # Forecast
        x = f(x)
        P = F.dot(P).dot(
            F.T) + sigma2_Q * np.eye(Nx, Nx)  # STEP 1 of Stroud et al. 2017
        P = .5 * (P + P.T)
        Pf[:, :, t] = P
        Xf[:, t] = x

        # Update
        if not np.isnan(Yo[0, t]):
            d = Yo[:, t] - h(x)
            S = H.dot(P).dot(H.T) + sigma2_R * np.eye(No, No)

            lik = _likelihood(Xf[:, t], Pf[:, :, t], Yo[:, t],
                              sigma2_R * np.eye(No, No),
                              H)  # STEP 2 of Stroud et al. 2017

            mean_theta, cov_theta  # STEP 3 of Stroud et al. 2017

            #random.multivariate_normal(mean_theta,cov_theta,NB_PARTICULES) # STEP 4 of Stroud et al. 2017

            K = P.dot(H.T).dot(inv(S))
            P = (np.eye(Nx) - K.dot(H)).dot(P)
            x = x + K.dot(d)
            K_all[:, :, t] = K
            d_all[:, t] = d
            Pa[:, :, t + 1] = P
            Xa[:, t + 1] = x
            # T+1 ou t ???

            d_of = Yo[:, t] - h(f(Xa[:, t]))
            d_oa = Yo[:, t] - h(Xa[:, t + 1])

            sigma2_R_tmp = 1
            sigma2_R_adapt[t +
                           1] = sigma2_R_adapt[t] + (sigma2_R_tmp -
                                                     sigma2_R_adapt[t]) / tau

            sigma2_Q_tmp = 1
            sigma2_Q_adapt[t +
                           1] = sigma2_Q_adapt[t] + (sigma2_Q_tmp -
                                                     sigma2_Q_adapt[t]) / tau

        else:
            K_all[:, :, t] = K
            d_all[:, t] = d
            Pa[:, :, t + 1] = P
            Xa[:, t + 1] = x
            # T+1 ou t ???
            sigma2_Q_adapt[t + 1] = sigma2_Q_adapt[t]
            sigma2_R_adapt[t + 1] = sigma2_R_adapt[t]
        sigma2_Q = sigma2_Q_adapt[t + 1]
        sigma2_R = sigma2_R_adapt[t + 1]

    return Xa, Pa, Xf, Pf, H_all, sigma2_Q_adapt, sigma2_R_adapt
示例#5
0
def CI_EKS(params):
    """Iterative covariance-inflation EKS driver.

    Alternates a smoothing pass (EKS) with a filtering pass (adaptive EKF)
    for ``nb_iterations`` iterations, re-estimating the multiplicative
    inflation and the observation-noise variance after each cycle.
    """
    n_iter = params['nb_iterations']
    Nx = params['state_size']
    No = params['observation_size']
    T = params['temporal_window_size']
    tau = params['adaptive_parameter']
    Yo = params['observations']
    Xt = params['true_state']
    f, jacF = params['model_dynamics'], params['model_jacobian']
    h, jacH = params['observation_operator'], params['observation_jacobian']

    xb = params['initial_background_state']
    B = params['initial_background_covariance']
    lmbda = params['initial_multiplicative_inflation']
    sigma2_R = params['initial_observation_noise_variance']

    loglik = np.zeros(n_iter)
    rmse = np.zeros(n_iter)
    lmbda_all = np.zeros(n_iter + 1)
    sigma2_R_all = np.zeros(n_iter + 1)
    Xs_all = np.zeros([Nx, T + 1, n_iter])

    lmbda_all[0] = lmbda
    sigma2_R_all[0] = sigma2_R

    for k in tqdm(range(n_iter)):
        # Smoothing pass with the current (lmbda, sigma2_R)
        Xs, Ps, Ps_lag, Xa, Pa, Xf, Pf, H = _adaptive_covariance_inflation_EKS(
            Nx, No, T, xb, B, lmbda, sigma2_R, Yo, f, jacF, h, jacH, tau)
        loglik[k] = _likelihood(Xf, Pf, Yo, sigma2_R * np.eye(No, No), H)
        rmse[k] = RMSE(Xs - Xt)

        # Re-center the background on the smoothed initial condition
        xb, B = Xs[:, 0], Ps[:, :, 0]

        # Filtering pass to re-estimate the adaptive parameters
        Xa, Pa, Xf, Pf, H, lmbda_adapt, sigma2_R_adapt, F_all, K_all = \
            _adaptive_covariance_inflation_EKF(
                Nx, No, T, xb, B, lmbda, sigma2_R, Yo, f, jacF, h, jacH, tau)
        # Robust summary over the window (median instead of last value)
        lmbda = np.nanmedian(lmbda_adapt)
        sigma2_R = np.nanmedian(sigma2_R_adapt)

        Xs_all[..., k] = Xs
        lmbda_all[k + 1] = lmbda
        sigma2_R_all[k + 1] = sigma2_R

    return {
        # Smoothed states for every outer iteration are kept.
        'smoothed_states': Xs_all,
        'adaptive_multiplicative_inflation': lmbda_all,
        'adaptive_observation_noise_variance': sigma2_R_all,
        'loglikelihood': loglik,
        'RMSE': rmse,
        'params': params
    }
示例#6
0
文件: LI_EKS.py 项目: wme7/CEDA
def LI_EKS(params):
    """Iterative lag-innovation EKS driver.

    Alternates a smoothing pass (EKS) with an adaptive filtering pass
    (adaptive EKF) for ``nb_iterations`` iterations, re-estimating the full
    model (Q) and observation (R) noise covariances after each cycle.
    """
    n_iter = params['nb_iterations']
    Nx = params['state_size']
    No = params['observation_size']
    T = params['temporal_window_size']
    alpha = params['inflation_factor']
    tau = params['adaptive_parameter']
    Yo = params['observations']
    Xt = params['true_state']
    f, jacF = params['model_dynamics'], params['model_jacobian']
    h, jacH = params['observation_operator'], params['observation_jacobian']

    xb = params['initial_background_state']
    B = params['initial_background_covariance']
    Q = params['initial_model_noise_covariance']
    R = params['initial_observation_noise_covariance']

    # Optional template for a constant-structure Q. NOTE(review): baseQ is
    # not consumed in this function — kept to preserve the key lookup.
    if params['model_noise_covariance_structure'] == 'const':
        baseQ = params['model_noise_covariance_matrix_template']
    else:
        baseQ = None

    loglik = np.zeros(n_iter)
    rmse = np.zeros(n_iter)
    cov_prob_li = np.zeros(n_iter)
    Q_all = np.zeros(np.r_[Q.shape, n_iter + 1])
    R_all = np.zeros(np.r_[R.shape, n_iter + 1])
    Xs_all = np.zeros([Nx, T + 1, n_iter])

    Q_all[:, :, 0] = Q
    R_all[:, :, 0] = R

    for k in tqdm(range(n_iter)):
        # Smoothing pass with the current (Q, R)
        Xs, Ps, Ps_lag, Xa, Pa, Xf, Pf, H = _EKS(
            Nx, No, T, xb, B, Q, R, Yo, f, jacF, h, jacH, alpha)
        loglik[k] = _likelihood(Xf, Pf, Yo, R, H)
        rmse[k] = RMSE(Xs - Xt)
        cov_prob_li[k] = cov_prob(Xs, Ps, Xt)

        # Re-center the background on the smoothed initial condition
        xb, B = Xs[:, 0], Ps[:, :, 0]

        # Filtering pass to re-estimate Q and R (robust median over time)
        Xa, Pa, Xf, Pf, H, Q_adapt, R_adapt = _adaptive_EKF(
            Nx, No, T, xb, B, Q, R, Yo, f, jacF, h, jacH, alpha, tau)
        Q = np.nanmedian(Q_adapt, 2)
        R = np.nanmedian(R_adapt, 2)

        Xs_all[..., k] = Xs
        Q_all[:, :, k + 1] = Q
        R_all[:, :, k + 1] = R

    return {
        # Smoothed states for every outer iteration are kept.
        'smoothed_states': Xs_all,
        'LI_model_noise_covariance': Q_all,
        'LI_observation_noise_covariance': R_all,
        'loglikelihood': loglik,
        'RMSE': rmse,
        'cov_prob': cov_prob_li,
        'params': params
    }