예제 #1
0
def fit_lds_model(Xs, Xtest, N_samples=100):
    """Fit a MultinomialLDS to the training sequences via Gibbs sampling.

    Parameters
    ----------
    Xs : list of arrays
        Training count sequences; each is added to the model as a dataset.
    Xtest : array
        Held-out data, scored by Monte Carlo after every Gibbs sweep.
    N_samples : int, optional
        Number of Gibbs sweeps to run (default 100).

    Returns
    -------
    tuple
        ``(model, lls, test_lls, mc_test_lls, pis, psis, zs, timestamps)``.
        ``test_lls`` is always empty because the exact heldout-likelihood
        call is disabled below; it is kept in the return tuple so existing
        callers keep working.  ``timestamps`` is in seconds, relative to
        the start of sampling.
    """
    # K and D are module-level constants -- assumed to be set before this
    # function is called (TODO confirm against the surrounding script).
    model = MultinomialLDS(K,
                           D,
                           init_dynamics_distn=Gaussian(mu_0=np.zeros(D),
                                                        sigma_0=np.eye(D),
                                                        kappa_0=1.0,
                                                        nu_0=D + 1.0),
                           dynamics_distn=AutoRegression(nu_0=D + 1,
                                                         S_0=np.eye(D),
                                                         M_0=np.zeros((D, D)),
                                                         K_0=np.eye(D)),
                           sigma_C=1)

    for X in Xs:
        model.add_data(X)
    # Per-sweep diagnostics (pi, psi, z) are tracked for the first
    # training sequence only.
    data = model.data_list[0]

    lls = []
    test_lls = []
    mc_test_lls = []
    pis = []
    psis = []
    zs = []
    timestamps = [time.time()]
    for _ in progprint_xrange(N_samples):
        model.resample_model()
        timestamps.append(time.time())

        # NOTE(review): the original also stored model.copy_sample() in a
        # `samples` list that was never read or returned; that dead
        # accumulation of full model copies has been removed to save memory.
        # TODO: Use log_likelihood() to marginalize over z
        lls.append(model.log_likelihood())
        # test_lls.append(model.heldout_log_likelihood(Xtest, M=50)[0])
        mc_test_lls.append(model._mc_heldout_log_likelihood(Xtest, M=1)[0])
        pis.append(model.pi(data))
        psis.append(model.psi(data))
        zs.append(data["states"].stateseq)

    lls = np.array(lls)
    test_lls = np.array(test_lls)
    pis = np.array(pis)
    psis = np.array(psis)
    zs = np.array(zs)
    timestamps = np.array(timestamps)
    # Report elapsed time relative to the pre-sampling timestamp.
    timestamps -= timestamps[0]
    return model, lls, test_lls, mc_test_lls, pis, psis, zs, timestamps
예제 #2
0
def fit_lds_model(Xs, Xtest, N_samples=100):
    """Gibbs-sample a MultinomialLDS on ``Xs``, scoring ``Xtest`` each sweep.

    Returns ``(model, lls, test_lls, mc_test_lls, pis, psis, zs,
    timestamps)``; ``timestamps`` is seconds relative to the start of
    sampling, and ``test_lls`` stays empty (the exact heldout-likelihood
    call is disabled below).
    """
    # K and D are module-level constants set elsewhere in the script.
    init_distn = Gaussian(mu_0=np.zeros(D),
                          sigma_0=np.eye(D),
                          kappa_0=1.0,
                          nu_0=D + 1.0)
    dyn_distn = AutoRegression(nu_0=D + 1,
                               S_0=np.eye(D),
                               M_0=np.zeros((D, D)),
                               K_0=np.eye(D))
    model = MultinomialLDS(K, D,
                           init_dynamics_distn=init_distn,
                           dynamics_distn=dyn_distn,
                           sigma_C=1)

    for X in Xs:
        model.add_data(X)
    # Diagnostics below are tracked for the first training sequence only.
    data = model.data_list[0]

    samples, lls, test_lls, mc_test_lls = [], [], [], []
    pis, psis, zs = [], [], []
    timestamps = [time.time()]
    for _ in progprint_xrange(N_samples):
        model.resample_model()
        timestamps.append(time.time())

        samples.append(model.copy_sample())
        # TODO: Use log_likelihood() to marginalize over z
        lls.append(model.log_likelihood())
        # test_lls.append(model.heldout_log_likelihood(Xtest, M=50)[0])
        mc_test_lls.append(model._mc_heldout_log_likelihood(Xtest, M=1)[0])
        pis.append(model.pi(data))
        psis.append(model.psi(data))
        zs.append(data["states"].stateseq)

    # Elapsed time relative to the pre-sampling timestamp.
    timestamps = np.array(timestamps)
    timestamps -= timestamps[0]
    return (model, np.array(lls), np.array(test_lls), mc_test_lls,
            np.array(pis), np.array(psis), np.array(zs), timestamps)
예제 #3
0
# Latent-state noise covariance for the linear dynamics.
sigma_states = np.eye(D) * 0.1

K = 3
# C = np.hstack((np.ones((K-1, 1)), np.zeros((K-1, D-1))))
C = np.random.randn(K - 1, D)

###################
#  generate data  #
###################

# Build the generative model from the externally defined mu_init,
# sigma_init, and A, then sample a synthetic dataset of length T.
init_distn = Gaussian(mu=mu_init, sigma=sigma_init)
dyn_distn = AutoRegression(A=A, sigma=sigma_states)
model = MultinomialLDS(K, D,
                       init_dynamics_distn=init_distn,
                       dynamics_distn=dyn_distn,
                       C=C)
data = model.generate(T=T, N=N, keep=False)
# data["x"] = np.hstack([np.zeros((T,K-1)), np.ones((T,1))])

# Estimate the held out likelihood using Monte Carlo
M = 10000
xtest = data["x"]
hll_mc, std_mc = model._mc_heldout_log_likelihood(xtest, M=M)

# Estimate the held out log likelihood
# hll_info, std_info = model._info_form_heldout_log_likelihood(data["x"], M=M)
hll_dist, std_dist = model._distn_form_heldout_log_likelihood(xtest, M=M)

print("MC. Model: ", hll_mc, " +- ", std_mc)
# print "AN. Model (info): ", hll_info, " +- ", std_info
print("AN. Model (dist): ", hll_dist, " +- ", std_dist)
예제 #4
0
                   [np.sin(np.pi/24),  np.cos(np.pi/24)]])
sigma_states = 0.1 * np.eye(D)  # latent-state noise covariance

K = 3
# C = np.hstack((np.ones((K-1, 1)), np.zeros((K-1, D-1))))
C = np.random.randn(K - 1, D)

###################
#  generate data  #
###################

# mu_init, sigma_init, A, T, and N come from earlier in the script.
model = MultinomialLDS(
    K,
    D,
    init_dynamics_distn=Gaussian(mu=mu_init, sigma=sigma_init),
    dynamics_distn=AutoRegression(A=A, sigma=sigma_states),
    C=C,
)
data = model.generate(T=T, N=N, keep=False)
# data["x"] = np.hstack([np.zeros((T,K-1)), np.ones((T,1))])

# Estimate the held out likelihood using Monte Carlo
n_mc = 10000
mc_hll, mc_std = model._mc_heldout_log_likelihood(data["x"], M=n_mc)

# Estimate the held out log likelihood
# hll_info, std_info = model._info_form_heldout_log_likelihood(data["x"], M=M)
dist_hll, dist_std = model._distn_form_heldout_log_likelihood(data["x"], M=n_mc)

print("MC. Model: ", mc_hll, " +- ", mc_std)
# print "AN. Model (info): ", hll_info, " +- ", std_info
print("AN. Model (dist): ", dist_hll, " +- ", dist_std)