import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm, probplot

# NOTE: the module paths for the package imports below are assumed; adjust to the local layout.
from pgmult.distributions import PGMultinomial
from pgmult.utils import compute_uniform_mean_psi


def geweke_test(K, N_iter=10000):
    """
    Geweke test for the PG-Multinomial Gibbs sampler: alternately resample
    data given parameters and parameters given data, then check that the
    resulting psi samples are distributed like the Gaussian prior.
    """
    # Create a multinomial distribution
    mu = np.zeros(K-1)
    mu[-1] = 1
    Sigma = np.eye(K-1)
    pgm = PGMultinomial(K, mu=mu, Sigma=Sigma)

    # Run a Geweke test
    xs = []
    samples = []
    for itr in range(N_iter):
        if itr % 10 == 0:
            print("Iteration ", itr)

        # Resample the data
        x = pgm.rvs(10)

        # Resample the PG-Multinomial parameters
        pgm.resample(x)

        # Update our samples
        xs.append(x.copy())
        samples.append(pgm.copy_sample())

    # Check that the PG-Multinomial samples are distributed like the prior
    psi_samples = np.array([s.psi for s in samples])
    psi_mean = psi_samples.mean(0)
    psi_std = psi_samples.std(0)
    print("Mean bias: ", psi_mean - mu, " +- ", psi_std)

    # Make Q-Q plots
    ind = K-2
    fig = plt.figure()
    ax = fig.add_subplot(121)
    psi_dist = norm(mu[ind], np.sqrt(Sigma[ind, ind]))
    probplot(psi_samples[:, ind], dist=psi_dist, plot=ax)

    fig.add_subplot(122)
    _, bins, _ = plt.hist(psi_samples[:, ind], 20, density=True, alpha=0.2)
    bincenters = 0.5 * (bins[1:] + bins[:-1])
    plt.plot(bincenters, psi_dist.pdf(bincenters), 'r--', linewidth=1)
    plt.show()
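# The Geweke test above compares the psi samples directly against their Gaussian
# prior. For reference, the pi values inspected in test_pgm_rvs below come from a
# stick-breaking map of psi onto the simplex. The sketch here assumes PGMultinomial
# uses the logistic stick-breaking parameterization; psi_to_pi_sketch is illustrative
# only and not a library function.
def psi_to_pi_sketch(psi):
    K = psi.shape[-1] + 1
    pi = np.zeros(K)
    stick = 1.0                                   # remaining probability mass
    for k in range(K - 1):
        pi[k] = stick / (1.0 + np.exp(-psi[k]))   # sigmoid(psi_k) times the remaining stick
        stick -= pi[k]
    pi[-1] = stick                                # leftover mass goes to the last component
    return pi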
def test_pgm_rvs():
    """
    Sample pi from the PG-Multinomial prior and inspect its marginals and covariance.
    """
    K = 10
    mu, sig = compute_uniform_mean_psi(K, sigma=2)
    # mu = np.zeros(K-1)
    # sig = np.ones(K-1)
    print("mu: ", mu)
    print("sig: ", sig)
    Sigma = np.diag(sig)

    # Add some covariance
    # Sigma[:5,:5] = 1.0 + 1e-3*np.random.randn(5,5)

    # Sample a bunch of pis and look at the marginals
    pgm = PGMultinomial(K, mu=mu, Sigma=Sigma)
    samples = 10000
    pis = []
    for smpl in range(samples):
        pgm.resample()
        pis.append(pgm.pi)
    pis = np.array(pis)

    print("E[pi]: ", pis.mean(axis=0))
    print("var[pi]: ", pis.var(axis=0))

    plt.figure()
    plt.subplot(121)
    plt.boxplot(pis)
    plt.xlabel("k")
    plt.ylabel(r"$p(\pi_k)$")

    # Plot the covariance
    cov = np.cov(pis.T)
    plt.subplot(122)
    plt.imshow(cov, interpolation="none", cmap="cool")
    plt.colorbar()
    plt.title(r"Cov($\pi$)")
    plt.show()
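# A small sanity check that could be run on the pis collected in test_pgm_rvs:
# every sample should lie on the probability simplex (non-negative entries that
# sum to one). This is a generic helper sketch, not part of the library API.
def check_on_simplex(pis, tol=1e-8):
    nonneg = np.all(pis >= -tol)
    sums_to_one = np.allclose(pis.sum(axis=1), 1.0, atol=1e-6)
    return nonneg and sums_to_one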
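# Minimal driver sketch for running the checks above from the command line; the
# K and N_iter values here are arbitrary choices, not prescribed by the module.
if __name__ == "__main__":
    test_pgm_rvs()
    geweke_test(K=4, N_iter=1000)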