Example #1
# Assumed setup (the snippet does not show its imports; pykalman's own test
# suite loads the bundled robot dataset and uses nose-style asserts):
import numpy as np
from nose.tools import assert_true
from pykalman import KalmanFilter
from pykalman.datasets import load_robot

data = load_robot()

def test_kalman_fit():
    # check against MATLAB dataset
    kf = KalmanFilter(
        data.transition_matrix,
        data.observation_matrix,
        data.initial_transition_covariance,
        data.initial_observation_covariance,
        data.transition_offsets,
        data.observation_offset,
        data.initial_state_mean,
        data.initial_state_covariance,
        em_vars=['transition_covariance', 'observation_covariance'])

    loglikelihoods = np.zeros(5)
    for i in range(len(loglikelihoods)):
        loglikelihoods[i] = kf.loglikelihood(data.observations)
        kf.em(X=data.observations, n_iter=1)

    assert_true(np.allclose(loglikelihoods, data.loglikelihoods[:5]))

    # check that EM for all parameters is working
    kf.em_vars = 'all'
    n_timesteps = 30
    for i in range(len(loglikelihoods)):
        kf.em(X=data.observations[0:n_timesteps], n_iter=1)
        loglikelihoods[i] = kf.loglikelihood(data.observations[0:n_timesteps])
    for i in range(len(loglikelihoods) - 1):
        assert_true(loglikelihoods[i] < loglikelihoods[i + 1])
Example #2
# Assumed setup, as in Example #1 (np, KalmanFilter, data), plus:
import pickle
from io import BytesIO
try:
    from StringIO import StringIO  # Python 2; on Python 3 pickle needs a bytes buffer
except ImportError:
    from io import StringIO

def test_kalman_pickle():
    kf = KalmanFilter(
        data.transition_matrix,
        data.observation_matrix,
        data.transition_covariance,
        data.observation_covariance,
        data.transition_offsets,
        data.observation_offset,
        data.initial_state_mean,
        data.initial_state_covariance,
        em_vars='all')

    # train and get log likelihood
    X = data.observations[0:10]
    kf = kf.em(X, n_iter=5)
    loglikelihood = kf.loglikelihood(X)

    # pickle Kalman Filter
    store = StringIO()
    pickle.dump(kf, store)
    clf = pickle.load(StringIO(store.getvalue()))

    # check that the unpickled filter reproduces the original log likelihood
    np.testing.assert_almost_equal(loglikelihood, clf.loglikelihood(X))

    # store it as BytesIO as well
    store = BytesIO()
    pickle.dump(kf, store)
    kf = pickle.load(BytesIO(store.getvalue()))
Example #3
# Assumes np and KalmanFilter, plus module-level globals defined elsewhere:
# dt (time step), H (observation matrix), x_0 (initial state mean) and
# R (observation covariance).
def negative_log_likelihood_kf(params, data):
    """
    :param params: phi, theta
    :param data:  measurements
    """
    phi, theta, sigma, sigma_eps = params

    if phi < -1 or theta < -1 or phi > 1 or theta > 1:
        return np.inf

    F_cur = np.array([[1, 0, 0],
                      [0, phi, theta],
                      [0, 0, 0]
                      ])

    Q_cur = np.array([[sigma * sigma * dt, 0, 0],
                      [0, sigma_eps ** 2, sigma_eps ** 2],
                      [0, sigma_eps ** 2, sigma_eps ** 2]
                      ])

    kf = KalmanFilter(transition_matrices=F_cur,
                      observation_matrices=H,
                      initial_state_mean=x_0,
                      initial_state_covariance=Q_cur * 3,
                      transition_covariance=Q_cur,
                      transition_offsets=np.zeros(3),
                      observation_offsets=np.zeros(1),
                      observation_covariance=R,
                      em_vars=None
                      )
    return -kf.loglikelihood(data)
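
A usage sketch (not from the original source): the function above is shaped for a derivative-free optimizer, so presumably it is minimized along these lines; `measurements` and the starting values are illustrative assumptions.

from scipy.optimize import minimize

initial_guess = [0.5, 0.1, 0.2, 0.1]   # (phi, theta, sigma, sigma_eps)
result = minimize(
    negative_log_likelihood_kf,
    initial_guess,
    args=(measurements,),              # hypothetical observation series
    method='Nelder-Mead')              # simplex search; no gradients needed
phi_hat, theta_hat, sigma_hat, sigma_eps_hat = result.x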
Example #4
# Assumes np and KalmanFilter, plus `group2`, an open HDF5 group (h5py)
# holding detrended derivative traces.
def kalman_fit():

    smooth_x_dot = group2['N1']['processed'][
        'deltaFOverF_bc_detr_derivs'][:][:, 0:1500]

    smooth_x = np.ones(
        (smooth_x_dot.shape[0], smooth_x_dot.shape[1]
         ))  #observations corresponding to times [0...n_timesteps-1]

    for row in range(len(smooth_x_dot)):

        # integrate each derivative trace with a cumulative sum,
        # then shift by the initial value
        smooth_row = np.cumsum(smooth_x_dot[row])
        smooth_row = smooth_row + smooth_x_dot[row][0]  # add x[0]
        smooth_x[row] = smooth_row

    smooth_x = smooth_x.T

    kf = KalmanFilter(n_dim_state=10, n_dim_obs=smooth_x_dot.shape[0])

    # 30 EM iterations; one em() call per step is equivalent to n_iter=30
    kf = kf.em(X=smooth_x, n_iter=30, em_vars='all')

    train_likelihood = kf.loglikelihood(smooth_x)

    #testing data

    test_smooth_x_dot = group2['N1']['processed'][
        'deltaFOverF_bc_detr_derivs'][:][:, 1500:3000]

    test_smooth_x = np.ones(
        (test_smooth_x_dot.shape[0], test_smooth_x_dot.shape[1]))

    for row in range(len(test_smooth_x_dot)):

        smooth_row = np.cumsum(test_smooth_x_dot[row])
        smooth_row = smooth_row + test_smooth_x_dot[row][0]  # add x[0]
        test_smooth_x[row] = smooth_row

    test_smooth_x = test_smooth_x.T

    test_likelihood = kf.loglikelihood(test_smooth_x)

    return train_likelihood, test_likelihood
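
The per-row integration loops above are equivalent to a single vectorized cumulative sum; a compact sketch (same arithmetic, assuming rows index traces and columns index time):

# cumsum along time, shifted by each row's first derivative value
smooth_x = (np.cumsum(smooth_x_dot, axis=1) + smooth_x_dot[:, :1]).T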
Example #5
# Method of a model class whose likelihood is evaluated after solving the
# model with gensys; isnan, isinf and inf are assumed to come from numpy.
    def _log_likelihood(self, theta):
        Gamma0, Gamma1, Psi, Pi, C_in, obs_matrix, obs_offset = self._eval_matrix(theta, to_array=True)

        for mat in [Gamma0, Gamma1, Psi, Pi, C_in]:
            if isnan(mat).any() or isinf(mat).any():
                return -inf

        G1, C_out, impact, fmat, fwt, ywt, gev, eu, loose = gensys(Gamma0, Gamma1, C_in, Psi, Pi)

        if eu[0] == 1 and eu[1] == 1:
            # TODO add observation covariance to allow for measurement errors
            kf = KalmanFilter(G1, obs_matrix, impact @ impact.T, None, C_out.reshape(self.n_state),
                              obs_offset.reshape(self.n_obs))
            L = kf.loglikelihood(self.data)
        else:
            L = - inf

        return L
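
For readability, a keyword-argument rendering of the positional KalmanFilter call above (the positional order is pykalman's: transition matrices, observation matrices, transition covariance, observation covariance, then the offsets); a sketch, not code from the source:

kf = KalmanFilter(
    transition_matrices=G1,
    observation_matrices=obs_matrix,
    transition_covariance=impact @ impact.T,
    observation_covariance=None,                      # see TODO above
    transition_offsets=C_out.reshape(self.n_state),
    observation_offsets=obs_offset.reshape(self.n_obs))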
Example #6
def kalman(x, n_dim_state):
    # x: (n_dim_obs, n_timesteps) time series, one observed series per row;
    # n_dim_state: dimension of the latent state

    kf = KalmanFilter(n_dim_state=n_dim_state, n_dim_obs=x.shape[0])

    x = x.T

    converged = False

    tol = 1e-8

    LL = []

    i = 0
    while not converged:

        kf = kf.em(X=x, n_iter=1, em_vars='all')
        LL.append(kf.loglikelihood(x))

        if i <= 2:
            # burn-in: use an early-iteration likelihood as the baseline
            LLbase = LL[i]
        else:
            LLold = LL[i - 1]  # previous iteration's log likelihood
            # stop once one EM step improves the gain over the baseline by
            # less than a factor of (1 + tol)
            if (LL[i] - LLbase) < (1 + tol) * (LLold - LLbase):
                converged = True

        i = i + 1

    # 0-based index of the EM step at which convergence was declared
    return i - 1
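
A hypothetical call (assumption: one observed series per row, as the transpose inside the function implies):

n_steps = kalman(x, n_dim_state=10)  # index of the EM step that converged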
Example #7
# Assumes np, KalmanFilter, matplotlib.pyplot as plt, and `group2`, an open
# HDF5 group (h5py), as in Example #4.
def kalman(smooth_x_dot):

    # NB: the argument is immediately overridden with worm N3's data
    #smooth_x_dot = group2['N1']['processed']['deltaFOverF_bc_detr_derivs'][:][:,0:1000]
    smooth_x_dot = group2['N3']['processed']['deltaFOverF_bc_detr_derivs'][:]

    smooth_x = np.ones(
        (smooth_x_dot.shape[0], smooth_x_dot.shape[1]
         ))  #observations corresponding to times [0...n_timesteps-1]

    for row in range(len(smooth_x_dot)):

        smooth_row = np.cumsum(smooth_x_dot[row])
        smooth_row = smooth_row + smooth_x_dot[row][0]  # add x[0]
        smooth_x[row] = smooth_row

    smooth_x = smooth_x.T

    kf = KalmanFilter(n_dim_state=10, n_dim_obs=smooth_x_dot.shape[0])

    n_em_steps = 50
    estimates = np.ones(n_em_steps)

    for i in range(n_em_steps):

        kf = kf.em(X=smooth_x, n_iter=1, em_vars='all')
        estimates[i] = kf.loglikelihood(smooth_x)

    estimates3 = estimates

    plt.plot(estimates)
    plt.title('Log Likelihoods Worm 3')
    plt.xlabel('# of EM Steps')

    # fractional change in log likelihood between successive EM steps
    percent_difference = np.diff(estimates) / estimates[:-1]

    #############
    #blind state estimates
    n_dim_state = kf.transition_matrices.shape[0]
    n_timesteps = smooth_x.shape[0]
    blind_state_estimates = np.zeros((n_timesteps, n_dim_state))
    for t in range(n_timesteps - 1):
        if t == 0:
            blind_state_estimates[t] = kf.initial_state_mean
        # EM learns a single offset vector here, not one offset per timestep
        blind_state_estimates[t + 1] = (
            np.dot(kf.transition_matrices, blind_state_estimates[t]) +
            kf.transition_offsets)

    ########

    filtered_state_estimates = kf.filter(smooth_x)[
        0]  # [n_timesteps, n_dim_state]

    plt.plot(smooth_x[:, 0])  # first observed series over time

    plt.plot(filtered_state_estimates[:, 0])  # first filtered state dimension

    plt.plot(blind_state_estimates)

    print(kf.transition_matrices)

    print(kf.observation_matrices)

    #print(kf.transition_covariance)

    #print(kf.observation_covariance)

    #print(kf.transition_offsets)

    #print(kf.observation_offsets)

    #print(kf.initial_state_mean)

    #print(kf.initial_state_covariance)

    return kf.transition_matrices
Example #8
# (snippet begins mid-statement; the construction of kf and of the particle
# filters bpf, apf, iapf, oapf and faapf is not shown)
            transition_offset=transition_offset,
            observation_offset=observation_offset)

        mean_bpf, covs_bpf, ess_bpf, n_unique_bpf, w_vars_bpf, liks_bpf, joint_liks_bpf, times_bpf = bpf.filter(
            observations)
        mean_apf, covs_apf, ess_apf, n_unique_apf, w_vars_apf, liks_apf, joint_liks_apf, times_apf = apf.filter(
            observations)
        mean_iapf, covs_iapf, ess_iapf, n_unique_iapf, w_vars_iapf, liks_iapf, joint_liks_iapf, times_iapf = iapf.filter(
            observations)
        mean_oapf, covs_oapf, ess_oapf, n_unique_oapf, w_vars_oapf, liks_oapf, joint_liks_oapf, times_oapf = oapf.filter(
            observations)
        mean_faapf, covs_faapf, ess_faapf, n_unique_faapf, w_vars_faapf, liks_faapf, joint_liks_faapf, times_faapf = faapf.filter(
            observations)

        # true results given by KF
        true_logliks, true_loglik = kf.loglikelihood(observations)
        joint_true_logliks = np.cumsum(true_logliks)

        mean_kf, covs_kf = kf.filter(observations)

        approximate_mean_kf = []
        for mean, cov in zip(mean_kf, covs_kf):
            samples = random_state.multivariate_normal(mean=mean,
                                                       cov=cov,
                                                       size=n_particle)
            # approximate KF mean
            approximate_mean_kf.append(np.average(samples, axis=0))
        approximate_mean_kf = np.asarray(approximate_mean_kf)

        #MEANS
        mean_deviations_bpf.append(np.average(mse(mean_bpf, mean_kf)))
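
The helper `mse` is not shown in the fragment; presumably it computes a per-timestep squared error between two mean trajectories, along these lines (an assumption, not from the source):

import numpy as np

def mse(estimates, truth):
    # per-timestep squared error between two (n_timesteps, n_dim) mean arrays
    return np.sum((estimates - truth) ** 2, axis=-1)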
Example #9
# Assumes np, KalmanFilter, and an `observations` array defined elsewhere.
kf = KalmanFilter(
    transition_matrices=np.array([[1, 1], [0, 1]]),
    transition_covariance=np.array([[0, 0], [0, 0]]),
    initial_state_mean=[500, 0],
    # initial_state_covariance=[0, 0],
    em_vars=[
        'transition_matrices', 'transition_covariance', 'initial_state_mean',
        # 'initial_state_covariance',
    ])

loglikelihoods = np.zeros(10)
for i in range(len(loglikelihoods)):
    kf = kf.em(X=observations, n_iter=1)
    loglikelihoods[i] = kf.loglikelihood(observations)
    # X : [n_timesteps, n_dim_obs] array
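
A minimal way to synthesize an `observations` array compatible with this two-state constant-velocity model (illustrative, not from the source):

rng = np.random.RandomState(0)
n_timesteps = 100
A = np.array([[1, 1], [0, 1]])             # constant-velocity transition
states = np.zeros((n_timesteps, 2))
states[0] = [500, 1]                       # position, velocity
for t in range(1, n_timesteps):
    states[t] = A @ states[t - 1]
# identity observation model with additive Gaussian noise
observations = states + rng.normal(scale=2.0, size=states.shape)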
Example #10
# Assumes np, and KalmanFilter / AdditiveUnscentedKalmanFilter from pykalman.
def runFilter(observations, params, dname, filterType):
    """
    Run Kalman Filter (UKF/KF) and return smoothed means/covariances
    observations : nsample x T
    params       : {'dname'... contains all the necessary parameters for KF/UKF}
    filterType   : 'KF' or 'UKF'
    """
    s1 = set(params[dname].keys())
    s2 = set(['trans_fxn','obs_fxn','trans_cov','obs_cov','init_mu','init_cov',
              'trans_mult','obs_mult','trans_drift','obs_drift','baseline'])
    for k in s2:
        assert k in s1, k + ' not found in params'
    #assert s1.issubset(s2) and s1.issuperset(s2),'Missing in params: '+', '.join(list(s2.difference(s1)))
    assert filterType in ('KF', 'UKF'), 'Expecting KF/UKF'
    model,mean,var = None,None,None
    X = observations.squeeze()
    #assert len(X.shape)==2,'observations must be nsamples x T'
    if filterType=='KF':
        def setupArr(arr):
            if type(arr) is np.ndarray:
                return arr
            else:
                return np.array([arr])
        model=KalmanFilter(
                    transition_matrices   = setupArr(params[dname]['trans_mult']),  #multiplier for z_t-1
                    observation_matrices  = setupArr(params[dname]['obs_mult']).T, #multiplier for z_t
                    transition_covariance = setupArr(params[dname]['trans_cov_full']),  #transition cov
                    observation_covariance= setupArr(params[dname]['obs_cov_full']),  #obs cov
                    transition_offsets    = setupArr(params[dname]['trans_drift']),#additive const. in trans
                    observation_offsets   = setupArr(params[dname]['obs_drift']),   #additive const. in obs
                    initial_state_mean    = setupArr(params[dname]['init_mu']),
                    initial_state_covariance = setupArr(params[dname]['init_cov_full']))
    else:
        #In this case, the transition and emission function may have other parameters
        #Create wrapper functions that are instantiated w/ the true parameters
        #and pass them to the UKF
        def trans_fxn(z):
            return params[dname]['trans_fxn'](z, fxn_params = params[dname]['params'])
        def obs_fxn(z):
            return params[dname]['obs_fxn'](z, fxn_params = params[dname]['params'])

        model=AdditiveUnscentedKalmanFilter(
                    transition_functions  = trans_fxn, #params[dname]['trans_fxn'],
                    observation_functions = obs_fxn,  #params[dname]['obs_fxn'],
                    transition_covariance = np.array([params[dname]['trans_cov']]),  #transition cov
                    observation_covariance= np.array([params[dname]['obs_cov']]),  #obs cov
                    initial_state_mean    = np.array([params[dname]['init_mu']]),
                    initial_state_covariance = np.array(params[dname]['init_cov']))
    #Run smoothing algorithm with model
    dim_stoc = params[dname]['dim_stoc']
    if dim_stoc>1:
        mus     = np.zeros((X.shape[0],X.shape[1],dim_stoc))
        cov     = np.zeros((X.shape[0],X.shape[1],dim_stoc))
    else:
        mus     = np.zeros(X.shape)
        cov     = np.zeros(X.shape)
    ll      = 0
    for n in range(X.shape[0]):
        (smoothed_state_means, smoothed_state_covariances) = model.smooth(X[n,:])
        if dim_stoc>1:
            mus[n,:] = smoothed_state_means
            cov[n,:] = np.concatenate([np.diag(k)[None,:] for k in smoothed_state_covariances],axis=0)
        else:
            mus[n,:] = smoothed_state_means.ravel()
            cov[n,:] = smoothed_state_covariances.ravel()
        if filterType=='KF':
            ll      += model.loglikelihood(X[n,:])
    return mus,cov,ll
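
The assertion block at the top implies a specific params layout; a hypothetical dict satisfying the KF branch (all values illustrative; note that the KalmanFilter construction reads the *_full covariance entries, while the key check requires the plain ones):

import numpy as np

dname = 'synthetic'
params = {dname: {
    'trans_fxn': None, 'obs_fxn': None,            # only used by the UKF branch
    'trans_cov': 0.1, 'obs_cov': 0.2,              # required by the key check
    'trans_cov_full': 0.1, 'obs_cov_full': 0.2,    # used to build the KF
    'trans_mult': 0.9, 'obs_mult': 1.0,
    'trans_drift': 0.0, 'obs_drift': 0.0,
    'init_mu': 0.0, 'init_cov': 1.0, 'init_cov_full': 1.0,
    'baseline': 0.0,
    'dim_stoc': 1,
}}
observations = np.random.randn(5, 100)             # nsamples x T
mus, cov, ll = runFilter(observations, params, dname, 'KF')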
Example #11
# (snippet begins mid-call: the earlier KalmanFilter(...) arguments are not shown)
    data.observation_offset,
    data.initial_state_mean,
    data.initial_state_covariance,
    em_vars=[
      'transition_matrices', 'observation_matrices',
      'transition_covariance', 'observation_covariance',
      'observation_offsets', 'initial_state_mean',
      'initial_state_covariance'
    ]
)

# Learn good values for parameters named in `em_vars` using the EM algorithm
loglikelihoods = np.zeros(10)
for i in range(len(loglikelihoods)):
    kf = kf.em(X=data.observations, n_iter=1)
    loglikelihoods[i] = kf.loglikelihood(data.observations)

# Estimate the state without using any observations.  This will let us see how
# good we could do if we ran blind.
n_dim_state = data.transition_matrix.shape[0]
n_timesteps = data.observations.shape[0]
blind_state_estimates = np.zeros((n_timesteps, n_dim_state))
for t in range(n_timesteps - 1):
    if t == 0:
        blind_state_estimates[t] = kf.initial_state_mean
    blind_state_estimates[t + 1] = (
      np.dot(kf.transition_matrices, blind_state_estimates[t])
      + kf.transition_offsets[t]
    )

# Estimate the hidden states using observations up to and including time t
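
The snippet ends with this comment; in pykalman, the step it introduces is the filter pass, presumably:

filtered_state_estimates = kf.filter(data.observations)[0]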