def fit_lds_model(Xs, Xtest, N_samples=100):
    """Fit a multinomial LDS to the sequences in ``Xs`` by Gibbs sampling.

    NOTE(review): this function is redefined later in the file with an
    essentially identical body; only the later definition takes effect.

    Relies on module-level globals ``K`` and ``D`` (presumably vocabulary
    size and latent dimension -- TODO confirm against the caller).

    Parameters
    ----------
    Xs : iterable of arrays
        Training sequences; each one is registered with the model via
        ``add_data``.
    Xtest : array
        Held-out sequence scored each sweep with a 1-sample Monte-Carlo
        estimate of the held-out log likelihood.
    N_samples : int, optional
        Number of Gibbs sweeps to run (default 100).

    Returns
    -------
    tuple
        ``(model, lls, test_lls, mc_test_lls, pis, psis, zs, timestamps)``.
        ``test_lls`` is always empty here (the exact held-out computation
        is commented out); ``timestamps`` are seconds elapsed since the
        start of sampling.
    """
    model = MultinomialLDS(
        K, D,
        init_dynamics_distn=Gaussian(
            mu_0=np.zeros(D), sigma_0=np.eye(D), kappa_0=1.0, nu_0=D + 1.0),
        dynamics_distn=AutoRegression(
            nu_0=D + 1, S_0=np.eye(D), M_0=np.zeros((D, D)), K_0=np.eye(D)),
        sigma_C=1)

    for X in Xs:
        model.add_data(X)
    # Diagnostics (pi / psi / state sequences) are tracked for the first
    # dataset only.
    data = model.data_list[0]

    lls = []
    test_lls = []
    mc_test_lls = []
    pis = []
    psis = []
    zs = []
    timestamps = [time.time()]

    for _ in progprint_xrange(N_samples):
        model.resample_model()
        timestamps.append(time.time())

        # Fixed: the original also did samples.append(model.copy_sample())
        # every sweep, but the copies were never read and never returned --
        # a pure memory/time sink, so the accumulation is dropped.

        # TODO: Use log_likelihood() to marginalize over z
        lls.append(model.log_likelihood())

        # test_lls.append(model.heldout_log_likelihood(Xtest, M=50)[0])
        # NOTE(review): _mc_heldout_log_likelihood is a private method of
        # the model class -- consider exposing a public equivalent.
        mc_test_lls.append(model._mc_heldout_log_likelihood(Xtest, M=1)[0])

        pis.append(model.pi(data))
        psis.append(model.psi(data))
        zs.append(data["states"].stateseq)

    lls = np.array(lls)
    test_lls = np.array(test_lls)
    pis = np.array(pis)
    psis = np.array(psis)
    zs = np.array(zs)

    # Report wall-clock time relative to the start of sampling.
    timestamps = np.array(timestamps)
    timestamps -= timestamps[0]

    return model, lls, test_lls, mc_test_lls, pis, psis, zs, timestamps
def fit_lds_model(Xs, Xtest, N_samples=100):
    """Run Gibbs sampling for a MultinomialLDS and record diagnostics.

    Builds the model from the module-level ``K`` and ``D``, adds every
    sequence in ``Xs``, then performs ``N_samples`` resampling sweeps,
    recording after each sweep: a copy of the model state, the training
    log likelihood, a 1-sample Monte-Carlo held-out estimate on
    ``Xtest``, and the pi/psi/state-sequence diagnostics of the first
    dataset.

    Returns ``(model, lls, test_lls, mc_test_lls, pis, psis, zs,
    timestamps)``; ``test_lls`` stays empty because the exact held-out
    computation is commented out, and ``timestamps`` are seconds since
    sampling began.
    """
    init_distn = Gaussian(mu_0=np.zeros(D), sigma_0=np.eye(D),
                          kappa_0=1.0, nu_0=D + 1.0)
    dyn_distn = AutoRegression(nu_0=D + 1, S_0=np.eye(D),
                               M_0=np.zeros((D, D)), K_0=np.eye(D))
    model = MultinomialLDS(K, D,
                           init_dynamics_distn=init_distn,
                           dynamics_distn=dyn_distn,
                           sigma_C=1)

    for seq in Xs:
        model.add_data(seq)
    first_data = model.data_list[0]

    samples, lls, test_lls, mc_test_lls = [], [], [], []
    pis, psis, zs = [], [], []
    timestamps = [time.time()]

    for _ in progprint_xrange(N_samples):
        model.resample_model()
        timestamps.append(time.time())
        samples.append(model.copy_sample())

        # TODO: Use log_likelihood() to marginalize over z
        lls.append(model.log_likelihood())

        # test_lls.append(model.heldout_log_likelihood(Xtest, M=50)[0])
        mc_test_lls.append(model._mc_heldout_log_likelihood(Xtest, M=1)[0])

        pis.append(model.pi(first_data))
        psis.append(model.psi(first_data))
        zs.append(first_data["states"].stateseq)

    lls, test_lls = np.array(lls), np.array(test_lls)
    pis, psis, zs = np.array(pis), np.array(psis), np.array(zs)

    # Make timestamps relative to the start of sampling.
    timestamps = np.array(timestamps)
    timestamps -= timestamps[0]

    return model, lls, test_lls, mc_test_lls, pis, psis, zs, timestamps
# Gibbs sampling diagnostics for the (presumably pre-built) testmodel.
# NOTE(review): the module-level names assigned here (lls, psi_mean,
# z_mean, ...) look like they feed plotting code below this chunk --
# their names must stay intact.
testdata = testmodel.data_list[0]
N_samples = 100
samples = []
lls = []
pis = []
psis = []
zs = []
for smpl in range(N_samples):
    print("Iteration ", smpl)
    testmodel.resample_model()
    samples.append(testmodel.copy_sample())
    lls.append(testmodel.log_likelihood())
    pis.append(testmodel.pi(testdata))
    psis.append(testmodel.psi(testdata))
    zs.append(testdata["states"].stateseq)
lls = np.array(lls)
pis = np.array(pis)
psis = np.array(psis)
# Posterior summaries over the second half of the chain (the first half
# is treated as burn-in).
psi_mean = psis[N_samples // 2:, ...].mean(0)
psi_std = psis[N_samples // 2:, ...].std(0)
zs = np.array(zs)
z_mean = zs[N_samples // 2:, ...].mean(0)
z_std = zs[N_samples // 2:, ...].std(0)

# Plot the true and inferred states
plt.figure()
# Gibbs sampling diagnostics for the (presumably pre-built) testmodel.
# Fixed: the original looped with xrange, which does not exist in
# Python 3; the sibling copy of this script in the same file already
# uses range and the print() function, so the file targets Python 3.
# NOTE(review): the module-level names assigned here (lls, psi_mean,
# z_mean, ...) may feed code below this chunk -- names kept intact.
testdata = testmodel.data_list[0]
N_samples = 100
samples = []
lls = []
pis = []
psis = []
zs = []
for smpl in range(N_samples):
    print("Iteration ", smpl)
    testmodel.resample_model()
    samples.append(testmodel.copy_sample())
    lls.append(testmodel.log_likelihood())
    pis.append(testmodel.pi(testdata))
    psis.append(testmodel.psi(testdata))
    zs.append(testdata["states"].stateseq)
lls = np.array(lls)
pis = np.array(pis)
psis = np.array(psis)
# Posterior summaries over the second half of the chain (the first half
# is treated as burn-in).
psi_mean = psis[N_samples // 2:, ...].mean(0)
psi_std = psis[N_samples // 2:, ...].std(0)
zs = np.array(zs)
z_mean = zs[N_samples // 2:, ...].mean(0)
z_std = zs[N_samples // 2:, ...].std(0)

# Plot the true and inferred states