Example #1
def make_synthetic_data():
    mu_init = np.zeros(D)
    # mu_init[0] = 1.0
    sigma_init = 0.5 * np.eye(D)

    A = np.eye(D)
    # A[:2,:2] = \
    #     0.99*np.array([[np.cos(np.pi/24), -np.sin(np.pi/24)],
    #                    [np.sin(np.pi/24),  np.cos(np.pi/24)]])
    sigma_states = 0.1 * np.eye(D)

    # C = np.hstack((np.ones((K-1, 1)), np.zeros((K-1, D-1))))
    # A zero emission matrix: the observations carry no information
    # about the latent states.
    C = 0. * np.random.randn(K - 1, D)

    truemodel = MultinomialLDS(K,
                               D,
                               init_dynamics_distn=Gaussian(mu=mu_init,
                                                            sigma=sigma_init),
                               dynamics_distn=AutoRegression(
                                   A=A, sigma=sigma_states),
                               C=C)

    data_list = []
    Xs = []
    for _ in range(Ndata):
        data = truemodel.generate(T=T, N=N)
        data_list.append(data)
        Xs.append(data["x"])
    return data_list, Xs
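A minimal usage sketch for the snippet above, assuming the numpy and pgmult imports used by the function are in scope. The values chosen for the module-level constants K, D, T, N, and Ndata are hypothetical stand-ins:

# Hypothetical module-level constants read by make_synthetic_data().
K, D, T, N, Ndata = 10, 2, 1000, 100, 5

data_list, Xs = make_synthetic_data()
print(len(Xs), Xs[0].shape)  # Ndata count matrices, one (T, K) array per sequence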
Example #2
def fit_lds_model(Xs, Xtest, D, N_samples=100):
    model = MultinomialLDS(K,
                           D,
                           init_dynamics_distn=GaussianFixed(mu=np.zeros(D),
                                                             sigma=np.eye(D)),
                           dynamics_distn=AutoRegression(nu_0=D + 1,
                                                         S_0=1 * np.eye(D),
                                                         M_0=np.zeros((D, D)),
                                                         K_0=1 * np.eye(D)),
                           sigma_C=0.01)

    for X in Xs:
        model.add_data(X)

    model.resample_parameters()

    # (elapsed seconds, sample, train ll, heldout ll, predictive ll)
    init_results = (0, model, model.log_likelihood(),
                    model.heldout_log_likelihood(Xtest, M=1),
                    model.predictive_log_likelihood(Xtest, M=1000))

    def resample():
        tic = time.time()
        model.resample_model()
        toc = time.time() - tic

        return toc, None, model.log_likelihood(), \
            model.heldout_log_likelihood(Xtest, M=1), \
            model.predictive_log_likelihood(Xtest, M=1000)

    times, samples, lls, test_lls, pred_lls = \
        map(np.array, zip(*([init_results] + [resample() for _ in progprint_xrange(N_samples)])))
    timestamps = np.cumsum(times)

    return Results(lls, test_lls, pred_lls, samples, timestamps)
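The Results container is not defined in any of these snippets; a minimal sketch consistent with the five values returned above, assuming it is a plain namedtuple:

from collections import namedtuple

# Hypothetical definition matching the return statement above.
Results = namedtuple(
    "Results", ["lls", "test_lls", "pred_lls", "samples", "timestamps"])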
Example #3
def fit_lds_model(Xs, Xtest, D, N_samples=100):
    Nx = len(Xs)
    assert len(Xtest) == Nx

    # Smoothed empirical category frequencies, used as the prior mean mu_pi.
    mus = [X.sum(0) + 0.1 for X in Xs]
    mus = [mu / mu.sum() for mu in mus]
    # mus = [np.ones(K)/float(K) for _ in Xs]

    models = [
        MultinomialLDS(K,
                       D,
                       init_dynamics_distn=GaussianFixed(mu=np.zeros(D),
                                                         sigma=1 * np.eye(D)),
                       dynamics_distn=AutoRegression(nu_0=D + 1,
                                                     S_0=1 * np.eye(D),
                                                     M_0=np.zeros((D, D)),
                                                     K_0=1 * np.eye(D)),
                       sigma_C=1.,
                       mu_pi=mus[i]) for i in range(Nx)
    ]

    for X, model in zip(Xs, models):
        model.add_data(X)

    for model in models:
        model.resample_parameters()

    def compute_pred_ll():
        pred_ll = 0
        for Xt, model in zip(Xtest, models):
            pred_ll += model.predictive_log_likelihood(Xt, M=1)[0]

        return pred_ll

    init_results = (0, models, np.nan, np.nan, compute_pred_ll())

    def resample():
        tic = time.time()
        for model in models:
            model.resample_model()
        toc = time.time() - tic

        return toc, None, np.nan, np.nan, compute_pred_ll()

    times, samples, lls, test_lls, pred_lls = \
        map(np.array, zip(*([init_results] +
            [resample() for _ in progprint_xrange(N_samples, perline=5)])))

    timestamps = np.cumsum(times)

    return Results(lls, test_lls, pred_lls, samples, timestamps)
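Unlike Example #2, this variant fits one independent model per training sequence, seeds each model's prior mean mu_pi with the smoothed empirical frequencies computed above, and tracks only the summed predictive log likelihood; the train and heldout log likelihood slots are filled with np.nan.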
Example #4
def fit_lds_model(Xs, Xtest, D, N_samples=100):
    Nx = len(Xs)
    assert len(Xtest) == Nx

    model = MultinomialLDS(K,
                           D,
                           init_dynamics_distn=GaussianFixed(mu=np.zeros(D),
                                                             sigma=np.eye(D)),
                           dynamics_distn=AutoRegression(nu_0=D + 1,
                                                         S_0=1 * np.eye(D),
                                                         M_0=np.zeros((D, D)),
                                                         K_0=1 * np.eye(D)),
                           sigma_C=1.)

    for X in Xs:
        model.add_data(X)

    model.resample_parameters()

    def compute_pred_ll():
        return sum(
            model.predictive_log_likelihood(Xt, data_index=i, M=10)[0]
            for i, Xt in enumerate(Xtest))

    init_results = (
        0,
        None,
        model.log_likelihood(),
        # model.heldout_log_likelihood(Xtest, M=1),
        np.nan,
        compute_pred_ll())

    def resample():
        tic = time.time()
        model.resample_model()
        toc = time.time() - tic

        return toc, None, model.log_likelihood(), \
            np.nan,\
            compute_pred_ll()

    times, samples, lls, test_lls, pred_lls = \
        map(np.array, zip(*([init_results] +
            [resample() for _ in progprint_xrange(N_samples, perline=5)])))
    timestamps = np.cumsum(times)

    return Results(lls, test_lls, pred_lls, samples, timestamps)
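In contrast to Example #3, a single shared model holds all of the training sequences here, and each test sequence is scored against its corresponding training sequence via the data_index keyword, with M=10 Monte Carlo samples per evaluation instead of 1.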
Example #5
def fit_lds_model(Xs, Xtest, N_samples=100):
    model = MultinomialLDS(K,
                           D,
                           init_dynamics_distn=Gaussian(mu_0=np.zeros(D),
                                                        sigma_0=np.eye(D),
                                                        kappa_0=1.0,
                                                        nu_0=D + 1.0),
                           dynamics_distn=AutoRegression(nu_0=D + 1,
                                                         S_0=np.eye(D),
                                                         M_0=np.zeros((D, D)),
                                                         K_0=np.eye(D)),
                           sigma_C=1)

    for X in Xs:
        model.add_data(X)
    data = model.data_list[0]

    samples = []
    lls = []
    test_lls = []
    mc_test_lls = []
    pis = []
    psis = []
    zs = []
    timestamps = [time.time()]
    for smpl in progprint_xrange(N_samples):
        model.resample_model()
        timestamps.append(time.time())

        samples.append(model.copy_sample())
        # TODO: Use log_likelihood() to marginalize over z
        lls.append(model.log_likelihood())
        # test_lls.append(model.heldout_log_likelihood(Xtest, M=50)[0])
        mc_test_lls.append(model._mc_heldout_log_likelihood(Xtest, M=1)[0])
        pis.append(model.pi(data))
        psis.append(model.psi(data))
        zs.append(data["states"].stateseq)

    lls = np.array(lls)
    test_lls = np.array(test_lls)
    pis = np.array(pis)
    psis = np.array(psis)
    zs = np.array(zs)
    timestamps = np.array(timestamps)
    timestamps -= timestamps[0]
    return model, lls, test_lls, mc_test_lls, pis, psis, zs, timestamps
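A short follow-up sketch for inspecting the returned traces; matplotlib and the Xs/Xtest inputs are assumed to be available:

import matplotlib.pyplot as plt

model, lls, test_lls, mc_test_lls, pis, psis, zs, timestamps = \
    fit_lds_model(Xs, Xtest)

# timestamps has one extra leading entry (the start time), so drop it to
# align with the per-sample log likelihood trace.
plt.plot(timestamps[1:], lls)
plt.xlabel("wall-clock time (s)")
plt.ylabel("train log likelihood")
plt.show()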
Example #6
A = np.eye(D)
A[:2,:2] = \
    0.99*np.array([[np.cos(np.pi/24), -np.sin(np.pi/24)],
                   [np.sin(np.pi/24),  np.cos(np.pi/24)]])
sigma_states = 0.1 * np.eye(D)

K = 3
# C = np.hstack((np.ones((K-1, 1)), np.zeros((K-1, D-1))))
C = np.random.randn(K - 1, D)

###################
#  generate data  #
###################

model = MultinomialLDS(K,
                       D,
                       init_dynamics_distn=Gaussian(mu=mu_init,
                                                    sigma=sigma_init),
                       dynamics_distn=AutoRegression(A=A, sigma=sigma_states),
                       C=C)
data = model.generate(T=T, N=N, keep=False)
# data["x"] = np.hstack([np.zeros((T,K-1)), np.ones((T,1))])

# Estimate the held out likelihood using Monte Carlo
M = 10000
hll_mc, std_mc = model._mc_heldout_log_likelihood(data["x"], M=M)

# Estimate the held out log likelihood
# hll_info, std_info = model._info_form_heldout_log_likelihood(data["x"], M=M)
hll_dist, std_dist = model._distn_form_heldout_log_likelihood(data["x"], M=M)

print("MC. Model: ", hll_mc, " +- ", std_mc)
# print "AN. Model (info): ", hll_info, " +- ", std_info
Example #7
A = np.eye(D)
A[:2, :2] = \
    0.99 * np.array([[np.cos(np.pi / 24), -np.sin(np.pi / 24)],
                     [np.sin(np.pi / 24),  np.cos(np.pi / 24)]])
sigma_states = 0.0001 * np.eye(D)

K = 4
# C = np.hstack((np.ones((K-1, 1)), np.zeros((K-1, D-1))))
C = np.random.randn(K - 1, D)
sigma_obs = 0.01 * np.eye(K)

###################
#  generate data  #
###################

truemodel = MultinomialLDS(K,
                           D,
                           init_dynamics_distn=Gaussian(mu=mu_init,
                                                        sigma=sigma_init),
                           dynamics_distn=AutoRegression(A=A,
                                                         sigma=sigma_states),
                           C=C)

data = truemodel.generate(T=T)

###################
#    inference    #
###################
testmodel = MultinomialLDS(K,
                           D,
                           init_dynamics_distn=Gaussian(mu_0=mu_init,
                                                        sigma_0=sigma_init,
                                                        kappa_0=1.0,
                                                        nu_0=3.0),