Example #1
# Imports needed by this snippet (assuming pylds and statsmodels are installed)
import warnings

import numpy as np
from pylds.models import DefaultLDS
from statsmodels.tools import sm_exceptions


def fit_lds_gibbs(seq, inputs, guessed_dim, num_update_samples):
    """Fits an LDS model via Gibbs sampling. Returns the fitted eigenvalues."""
    if inputs is None:
        model = DefaultLDS(D_obs=1, D_latent=guessed_dim, D_input=0)
    else:
        model = DefaultLDS(D_obs=1, D_latent=guessed_dim, D_input=1)
    model.add_data(seq, inputs=inputs)
    ll = np.zeros(num_update_samples)
    # Run the Gibbs sampler
    for i in range(num_update_samples):
        try:
            model.resample_model()
        except AssertionError as e:
            warnings.warn(str(e), sm_exceptions.ConvergenceWarning)
            eigs = np.linalg.eigvals(model.A)
            return eigs[np.argsort(np.abs(eigs))[::-1]]
        ll[i] = model.log_likelihood()
    # Rough convergence check: treat the sampler as converged if the maximum
    # log likelihood over the last 10% of iterations exceeds the maximum over
    # all earlier iterations by less than the tolerance.
    recent_steps = int(num_update_samples / 10)
    tol = 1.0
    if np.max(ll[-recent_steps:]) - np.max(ll[:-recent_steps]) > tol:
        warnings.warn(
            'Questionable convergence. Log likelihood values: ' + str(ll),
            sm_exceptions.ConvergenceWarning)
    eigs = np.linalg.eigvals(model.A)
    return eigs[np.argsort(eigs.real)[::-1]]
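# A minimal usage sketch of the function above (hypothetical synthetic data;
# not part of the original snippet):
if __name__ == '__main__':
    T = 500
    seq = (np.sin(0.1 * np.arange(T)) + 0.1 * np.random.randn(T)).reshape(-1, 1)
    eigs = fit_lds_gibbs(seq, inputs=None, guessed_dim=2, num_update_samples=200)
    print('Fitted eigenvalues of the dynamics matrix:', eigs)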
Example #2
def fit_gaussian_lds_model(Xs, N_samples=100):
    testmodel = DefaultLDS(n=D,p=K)

    for X in Xs:
        testmodel.add_data(X)

    samples = []
    lls = []
    for smpl in progprint_xrange(N_samples):
        testmodel.resample_model()

        samples.append(testmodel.copy_sample())
        lls.append(testmodel.log_likelihood())

    lls = np.array(lls)
    return lls
Example #3
def fit_gaussian_lds_model(Xs, N_samples=100):
    testmodel = DefaultLDS(n=D, p=K)

    for X in Xs:
        testmodel.add_data(X)

    samples = []
    lls = []
    for smpl in progprint_xrange(N_samples):
        testmodel.resample_model()

        samples.append(testmodel.copy_sample())
        lls.append(testmodel.log_likelihood())

    lls = np.array(lls)
    return lls
Example #4
full_model.add_data(data, inputs=inputs)


# Fit with Gibbs sampling
def update(model):
    model.resample_model()
    return model.log_likelihood()


N_steps = 100
diag_lls = [update(diag_model) for _ in progprint_xrange(N_steps)]
full_lls = [update(full_model) for _ in progprint_xrange(N_steps)]

plt.figure()
plt.plot([0, N_steps],
         truemodel.log_likelihood() * np.ones(2),
         '--k',
         label="true")
plt.plot(diag_lls, label="diag cov.")
plt.plot(full_lls, label="full cov.")
plt.xlabel('iteration')
plt.ylabel('log likelihood')
plt.legend()

# Predict forward in time
T_given = 1800
T_predict = 200
given_data = data[:T_given]
given_inputs = inputs[:T_given]

preds = \
Example #5
## Mean field
# lls = [meanfield_update(model) for _ in progprint_xrange(N_samples)]

## SVI
# delay = 10.0
# forgetting_rate = 0.5
# stepsizes = (np.arange(N_samples) + delay)**(-forgetting_rate)
# minibatchsize = 500
# # [model.resample_model() for _ in progprint_xrange(100)]
# lls = [svi_update(model, stepsizes[itr], minibatchsize) for itr in progprint_xrange(N_samples)]
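# Note (not part of the original snippet): the commented-out SVI block uses a
# decaying step-size schedule of the form (iteration + delay)**(-forgetting_rate);
# with delay=10.0 and forgetting_rate=0.5 the first few step sizes are roughly
# 0.316, 0.302, 0.289, 0.277, 0.267.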

# Plot the log likelihood
plt.figure()
plt.plot(lls, '-b')
plt.plot([0, N_samples],
         truemodel.log_likelihood(data, mask=mask) * np.ones(2), '-k')
plt.xlabel('iteration')
plt.ylabel('log likelihood')

# Plot the inferred observation noise
plt.figure()
plt.plot(sigma_obs_smpls)
plt.xlabel("iteration")
plt.ylabel("sigma_obs")

# Smooth over missing data
smoothed_obs = model.states_list[0].smooth()
sample_predictive_obs = model.states_list[0].gaussian_states.dot(model.C.T)

plt.figure()
given_data = data.copy()
Example #6
model.add_data(data, inputs=inputs)


# Fit with mean field
def update(model):
    return model.meanfield_coordinate_descent_step()


# Initialize with Gibbs sampling before running mean field
for _ in progprint_xrange(100):
    model.resample_model()

N_steps = 100
vlbs = [update(model) for _ in progprint_xrange(N_steps)]

plt.figure(figsize=(3, 4))
plt.plot([0, N_steps], truemodel.log_likelihood() * np.ones(2), '--k')
plt.plot(vlbs)
plt.xlabel('iteration')
plt.ylabel('variational lower bound')

# Predict forward in time
T_given = 1800
T_predict = 200
given_data = data[:T_given]
given_inputs = inputs[:T_given]

preds = \
    model.sample_predictions(
        given_data, inputs=given_inputs,
        Tpred=T_predict,
        inputs_pred=inputs[T_given:T_given + T_predict])
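# A hypothetical follow-up (not part of the original snippet): plot the sampled
# predictions after the observed data, assuming preds has one row per
# predicted time step.
plt.figure()
plt.plot(np.arange(T_given), given_data, 'k')
plt.plot(T_given + np.arange(T_predict), preds, 'r')
plt.xlabel('time')
plt.ylabel('observed (black) / predicted (red)')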
Example #7
# The variable I represents all our background information

# Let's assume our prior belief in both hypotheses is equal: P(H1|I) = P(H2|I),
# so the log prior odds are log(1) = 0
log_prior_odds = 0

# Calculate log P(D|H,I) by integrating out theta in p(D,theta|H,I) = p(D|theta,H,I) p(theta|H,I)
print('Hypothesis 1')
model = DefaultLDS(D_obs=1, D_latent=2)

log_p_D_given_H1I = []
for _ in range(num_mc_samples):
    model.resample_parameters()
    log_p_D_given_H1I.append(
        np.sum([
            model.log_likelihood(np.expand_dims(data[n], 1))
            for n in range(num_samples)
        ]))
# In the next line, we do a log-sum-exp over our list.
#  - The outer log puts the evidence on log scale
#  - The sum is over the MC samples
#  - The exp cancels the log from model.log_likelihood()
log_p_D_given_H1I = logsumexp(log_p_D_given_H1I) - np.log(num_mc_samples)
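# Sanity check of the log-mean-exp identity used above, with hypothetical
# values (not part of the original snippet):
# log((1/N) * sum_i exp(x_i)) == logsumexp(x) - log(N)
_x = np.array([-10.2, -9.7, -11.4])
assert np.isclose(logsumexp(_x) - np.log(len(_x)), np.log(np.mean(np.exp(_x))))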

# Calculate log P(D|H2I)
print('Hypothesis 2')
model1 = DefaultLDS(D_obs=1, D_latent=2)
model2 = DefaultLDS(D_obs=1, D_latent=2)

log_p_D_given_H2I = []
for i in range(num_mc_samples):
Example #8
C = np.array([[10.,0.]])
sigma_obs = 0.01*np.eye(1)
# C = np.eye(2)
# sigma_obs = 0.01*np.eye(2)

###################
#  generate data  #
###################

truemodel = LDS(
    dynamics_distn=AutoRegression(A=A,sigma=sigma_states),
    emission_distn=Regression(A=C,sigma=sigma_obs))

data, stateseq = truemodel.generate(2000)

###############
#  fit model  #
###############

model = DefaultLDS(n=2,p=data.shape[1]).add_data(data)

likes = []
for _ in progprint_xrange(50):
    model.EM_step()
    likes.append(model.log_likelihood())

plt.plot(likes)
plt.show()
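# A rough sanity check on the EM fit (not part of the original snippet),
# reusing model and the dynamics matrix A defined earlier in the script:
# compare eigenvalue magnitudes of the fitted and true dynamics matrices.
print("true |eigenvalues|:  ", np.sort(np.abs(np.linalg.eigvals(A)))[::-1])
print("fitted |eigenvalues|:", np.sort(np.abs(np.linalg.eigvals(model.A)))[::-1])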