from __future__ import division, print_function import numpy as np import matplotlib.pyplot as pl from plot_setup import setup, SQUARE_FIGSIZE, COLORS, savefig setup() # initialize the plotting styles np.random.seed(42) x = 0.0 chain = np.empty(1e6) for step in range(len(chain)): x += np.random.randn() chain[step] = x # Thin the chain for plotting purposes. chain = chain[::1000] mn, mx = np.min(chain), np.max(chain) mid = 0.5 * (mn + mx) rng = 0.5 * (mx - mn) * 1.1 fig, ax = pl.subplots(1, 1, figsize=SQUARE_FIGSIZE) ax.plot(np.arange(len(chain)) * 1e-2, chain, color=COLORS["DATA"]) ax.yaxis.set_major_locator(pl.MaxNLocator(5)) ax.set_xlabel("mcmc step [$\\times 10^5$]") ax.set_ylabel("$x$") ax.set_ylim(mid - rng, mid + rng) savefig(fig, "improper.pdf")
print("Warning: running naive autocorrelation function method. " "This will be slow!") mu = np.mean(x) r = x - mu C = np.empty(len(r) // 2) for i in range(1, len(C)): C[i] = np.mean(r[i:] * r[:-i]) C /= np.mean(r**2) C[0] = 1.0 return C def autocorr_function(x): n = len(x) f = np.fft.fft(x - np.mean(x), n=2 * n) acf = np.fft.ifft(f * np.conjugate(f))[:n].real return acf / acf[0] if __name__ == "__main__": fig, ax = pl.subplots(1, 1, figsize=SQUARE_FIGSIZE) acf = autocorr_function(chain) ax.plot(acf[:61]) ax.set_xlim(0, 60) ax.set_ylabel("$C_x(\Delta)$") ax.set_xlabel("$\Delta$") savefig(fig, "estimatetau.pdf")
return -0.5 * np.dot(x, alpha) def run_mcmc(log_p, x, prop_sigma=1.0, nsteps=2e4, thin=1): lp = log_p(x) chain = np.empty((nsteps // thin, len(x))) acc = 0 for step in range(len(chain)): for i in range(thin): x_prime = np.array(x) x_prime[np.random.randint( len(x))] += prop_sigma * np.random.randn() lp_prime = log_p(x_prime) if np.random.rand() <= np.exp(lp_prime - lp): acc += 1 x[:] = x_prime lp = lp_prime if i == thin - 1: chain[step] = x return chain, acc / (len(chain) * thin) if __name__ == "__main__": chain, _ = run_mcmc(log_p_gauss, np.array([0.0, 0.0])) fig = corner.corner(chain, labels=["$x$", "$y$"], range=[(-4.5, 4.5), (-4.5, 4.5)], plot_density=False, plot_contours=False) savefig(fig, "twod_a.pdf", dpi=300)
import numpy as np
import matplotlib.pyplot as pl

from plot_setup import setup, SQUARE_FIGSIZE, COLORS, savefig
from itertau import autocorr_function, autocorr_time_iterative
from tuning import log_p_gauss, run_mcmc, sigs

setup()  # initialize the plotting styles
np.random.seed(42)

# Integrated autocorrelation time of both coordinates at each proposal scale.
taus = np.empty((len(sigs), 2))
for i, sig in enumerate(sigs):
    # int(2e5): a float step count would break the integer-shaped array
    # allocation inside run_mcmc.
    chain, acc = run_mcmc(log_p_gauss, np.array([0.0, 0.0]),
                          prop_sigma=sig, nsteps=int(2e5))
    for j in range(2):
        acf = autocorr_function(chain[:, j])
        taus[i, j] = autocorr_time_iterative(acf)
    print(sig, acc, taus[i])

fig, ax = pl.subplots(1, 1, figsize=SQUARE_FIGSIZE)
ax.plot(sigs, taus[:, 0], ".-", color=COLORS["MODEL_1"])
ax.plot(sigs, taus[:, 1], ".-", color=COLORS["MODEL_2"])
ax.set_xscale("log")
ax.set_xlabel(r"$\sigma_q$")
ax.set_ylabel(r"$\tau_x$")
ax.set_xlim(0.2, 40)
ax.set_ylim(0, 450)
ax.yaxis.set_major_locator(pl.MaxNLocator(5))
savefig(fig, "tuningtau.pdf")
if np.random.rand() <= np.exp(lp_prime - lp): x = x_prime lp = lp_prime chain[step] = x if __name__ == "__main__": s = SQUARE_FIGSIZE s[0] *= 2 fig, (ax1, ax2) = pl.subplots(1, 2, figsize=s) ax1.plot(np.arange(len(chain)) / 1e3, chain, color=COLORS["DATA"], rasterized=True) ax1.set_xlim(0, len(chain) / 1e3) ax1.set_ylim(2 - 5.5, 2 + 5.5) ax1.set_ylabel("$x$") ax1.set_xlabel("thousands of steps") for i in range(4): a = i * len(chain) / 4 b = (i + 1) * len(chain) / 4 print(i + 1, np.mean(chain[a:b]), np.var(chain[a:b])) ax2.hist(chain[a:b], 50, histtype="step") ax2.set_xlim(2 - 5.5, 2 + 5.5) ax2.set_yticklabels([]) ax2.set_ylabel("$p(x)$") ax2.set_xlabel("$x$") savefig(fig, "convergence.pdf", dpi=300)
s_skew = np.mean((x - s_mu)**3) / s_var**(3. / 2) s_kurt = np.mean((x - s_mu)**4) / s_var**2 return ( (a_mu, a_var, a_skew, a_kurt), (s_mu, s_var, s_skew, s_kurt), ) K = 4**np.arange(1, 11) a_stats, s_stats = map(np.array, zip(*(compute_stats(k) for k in K))) shape = np.array([2, 1.5]) * SQUARE_FIGSIZE fig, axes = pl.subplots(2, 2, sharex=True, figsize=shape) for i, (ax, title) in enumerate( zip(axes.flat, ("mean", "variance", "skewness", "kurtosis"))): mu = a_stats[0, i] ax.axhline(mu, color=COLORS["DATA"]) ax.plot(np.log2(K), s_stats[:, i], "-o", color=COLORS["MODEL_1"], ms=3) d = np.max(np.abs(np.array(ax.get_ylim()) - mu)) ax.set_ylim(mu + d * np.array([-1, 1])) ax.set_ylabel(title) ax.yaxis.set_label_coords(-0.3, 0.5) ax.yaxis.set_major_locator(pl.MaxNLocator(5)) ax.xaxis.set_major_locator(pl.MaxNLocator(6)) [ax.set_xlabel(r"$\log_2 K$") for ax in axes[1]] savefig(fig, "simple_stats.pdf")
raise RuntimeError("chain too short to estimate tau reliably") if __name__ == "__main__": from estimatetau import chain m = len(chain) N = 2 ** np.arange(2, 19) fig, ax = pl.subplots(1, 1, figsize=SQUARE_FIGSIZE) for s in [slice(0, m//2), slice(m//4, 3*m//4), slice(m//2, m), slice(None)]: acf = autocorr_function(chain[s]) taus = np.array([autocorr_time_simple(acf, n) for n in N]) tau = autocorr_time_iterative(acf) ax.plot(N, taus, "-", color=COLORS["DATA"]) print("tau: {0}".format(tau)) ax.plot(N, taus, ".-", color=COLORS["MODEL_2"]) ax.axhline(tau, color=COLORS["MODEL_2"], ls="dashed") ax.set_title(r"$\tau_x = {0:.2f}$".format(tau)) ax.set_ylim(0, 25) ax.set_xlim(2, 5e5) ax.set_xscale("log") ax.set_ylabel(r"$\tau_x$") ax.set_xlabel("window size") savefig(fig, "itertau.pdf")
np.random.seed(42)


def log_p(x, mean=2.0, variance=2.0):
    """Log density of a Gaussian with the given mean and variance."""
    return -0.5 * (x - mean)**2 / variance - 0.5*np.log(2*np.pi*variance)


def neg_log_p(x):
    # def instead of a named lambda (PEP 8 E731); minimize() minimizes,
    # so hand it the negative log density.
    return -log_p(x)


# Optimize first, then start the Metropolis chain from the optimum.
r = minimize(neg_log_p, [1e3])
x = r.x[0]

lp = log_p(x)
chain = np.empty(6000)
for step in range(len(chain)):
    x_prime = x + np.random.randn()  # unit Gaussian proposal
    lp_prime = log_p(x_prime)
    if np.random.rand() <= np.exp(lp_prime - lp):
        x = x_prime
        lp = lp_prime
    chain[step] = x

fig, ax = pl.subplots(1, 1, figsize=SQUARE_FIGSIZE)
ax.plot(chain, color=COLORS["DATA"])
ax.xaxis.set_major_locator(pl.MaxNLocator(4))
ax.yaxis.set_major_locator(pl.MaxNLocator(4))
ax.set_xlabel("step")
ax.set_ylabel("$x$")
savefig(fig, "optimization.pdf")
from plot_setup import setup, COLORS, SQUARE_FIGSIZE, savefig

setup()  # initialize the plotting styles
np.random.seed(42)

x0 = [-4.0, 5.0]  # deliberately poor starting point

# Copy before scaling: in-place *= on SQUARE_FIGSIZE would mutate the shared
# module-level constant.
s = np.array(SQUARE_FIGSIZE)
s[0] *= 2
s[1] *= 0.8
fig, axes = pl.subplots(1, 3, sharex=True, sharey=True, figsize=s)

# One panel per proposal scale sigma_q = 10**n.
for n, ax in zip([0.0, -1.0, 2.0], axes):
    # int(2e3): np.empty inside run_mcmc needs an integer step count.
    chain, _ = run_mcmc(log_p_gauss, np.array(x0), nsteps=int(2e3),
                        prop_sigma=10**n)
    ax.plot(chain[:, 0], chain[:, 1], "o-", color=COLORS["DATA"], ms=2)
    ax.plot(x0[0], x0[1], "o", color=COLORS["MODEL_1"])
    ax.set_xlim(-6.3, 6.3)
    ax.set_ylim(-6.3, 6.3)
    ax.set_xlabel("$x$")
    ax.annotate(r"$\sigma_q = 10^{{{0:.0f}}}$".format(n), (1, 0),
                xycoords="axes fraction", xytext=(-5, 5),
                textcoords="offset points", ha="right", va="bottom")
    ax.yaxis.set_major_locator(pl.MaxNLocator(5))
    ax.xaxis.set_major_locator(pl.MaxNLocator(5))
axes[0].set_ylabel("$y$")
savefig(fig, "MH_sigma.pdf")
np.random.seed(42)

x0 = [-4.0, 5.0]  # deliberately poor starting point

# Copy before scaling: in-place *= on SQUARE_FIGSIZE would mutate the shared
# module-level constant.
s = np.array(SQUARE_FIGSIZE)
s[0] *= 2
s[1] *= 0.8
fig, axes = pl.subplots(1, 3, sharex=True, sharey=True, figsize=s)

# One panel per proposal scale sigma_q = 10**n.
for n, ax in zip([0.0, -1.0, 2.0], axes):
    # int(2e3): np.empty inside run_mcmc needs an integer step count.
    chain, _ = run_mcmc(log_p_gauss, np.array(x0), nsteps=int(2e3),
                        prop_sigma=10**n)
    ax.plot(chain[:, 0], chain[:, 1], "o-", color=COLORS["DATA"], ms=2)
    ax.plot(x0[0], x0[1], "o", color=COLORS["MODEL_1"])
    ax.set_xlim(-6.3, 6.3)
    ax.set_ylim(-6.3, 6.3)
    ax.set_xlabel("$x$")
    ax.annotate(r"$\sigma_q = 10^{{{0:.0f}}}$".format(n), (1, 0),
                xycoords="axes fraction", xytext=(-5, 5),
                textcoords="offset points", ha="right", va="bottom")
    ax.yaxis.set_major_locator(pl.MaxNLocator(5))
    ax.xaxis.set_major_locator(pl.MaxNLocator(5))
axes[0].set_ylabel("$y$")
savefig(fig, "MH_sigma.pdf")
lp_prime = log_p(x_prime) if np.random.rand() <= np.exp(lp_prime - lp): x = x_prime lp = lp_prime chain[step] = x if __name__ == "__main__": s = SQUARE_FIGSIZE s[0] *= 2 fig, (ax1, ax2) = pl.subplots(1, 2, figsize=s) ax1.plot(np.arange(len(chain))/1e3, chain, color=COLORS["DATA"], rasterized=True) ax1.set_xlim(0, len(chain) / 1e3) ax1.set_ylim(2-5.5, 2+5.5) ax1.set_ylabel("$x$") ax1.set_xlabel("thousands of steps") for i in range(4): a = i*len(chain)/4 b = (i+1)*len(chain)/4 print(i+1, np.mean(chain[a:b]), np.var(chain[a:b])) ax2.hist(chain[a:b], 50, histtype="step") ax2.set_xlim(2-5.5, 2+5.5) ax2.set_yticklabels([]) ax2.set_ylabel("$p(x)$") ax2.set_xlabel("$x$") savefig(fig, "convergence.pdf", dpi=300)
delta = x[ind] - x_prime[ind] r += -0.5 * ((delta - 0.5)**2 - (delta + 0.5)**2) if np.random.rand() <= np.exp(r): x[:] = x_prime lp = lp_prime chain[step] = x return chain x0 = [-4.0, 5.0] s = SQUARE_FIGSIZE s[0] *= 2 fig, axes = pl.subplots(1, 2, sharex=True, sharey=True, figsize=s) for broken, ax in zip([True, False], axes): chain = run_broken_mcmc(log_p_gauss, np.array(x0), broken=broken) chain = np.concatenate(([x0], chain)) ax.plot(chain[:, 0], chain[:, 1], "o-", color=COLORS["DATA"], ms=2) ax.plot(x0[0], x0[1], "o", color=COLORS["MODEL_1"]) ax.set_xlim(-6.3, 6.3) ax.set_ylim(-6.3, 6.3) ax.set_xlabel("$x$") ax.annotate("incorrect acceptance" if broken else "corrected acceptance", (1, 0), xycoords="axes fraction", xytext=(-5, 5), textcoords="offset points", ha="right", va="bottom") axes[0].set_ylabel("$y$") savefig(fig, "prop_mean.pdf")
setup()  # initialize the plotting styles
np.random.seed(42)


def log_p(x, mean=2.0, variance=2.0):
    """Log density of a Gaussian with the given mean and variance."""
    return -0.5 * (x - mean)**2 / variance - 0.5 * np.log(2 * np.pi * variance)


def neg_log_p(x):
    # def instead of a named lambda (PEP 8 E731); minimize() minimizes,
    # so hand it the negative log density.
    return -log_p(x)


# Optimize first, then start the Metropolis chain from the optimum.
r = minimize(neg_log_p, [1e3])
x = r.x[0]

lp = log_p(x)
chain = np.empty(6000)
for step in range(len(chain)):
    x_prime = x + np.random.randn()  # unit Gaussian proposal
    lp_prime = log_p(x_prime)
    if np.random.rand() <= np.exp(lp_prime - lp):
        x = x_prime
        lp = lp_prime
    chain[step] = x

fig, ax = pl.subplots(1, 1, figsize=SQUARE_FIGSIZE)
ax.plot(chain, color=COLORS["DATA"])
ax.xaxis.set_major_locator(pl.MaxNLocator(4))
ax.yaxis.set_major_locator(pl.MaxNLocator(4))
ax.set_xlabel("step")
ax.set_ylabel("$x$")
savefig(fig, "optimization.pdf")
# -*- coding: utf-8 -*- from __future__ import division, print_function import numpy as np import matplotlib.pyplot as pl from plot_setup import setup, SQUARE_FIGSIZE, COLORS, savefig from twod_a import log_p_gauss, run_mcmc setup() # initialize the plotting styles np.random.seed(42) sigs = 2.0 ** np.arange(-2, 6) if __name__ == "__main__": acc_fracs = np.empty_like(sigs) for i, sig in enumerate(sigs): _, acc_fracs[i] = run_mcmc(log_p_gauss, np.array([0.0, 0.0]), prop_sigma=sig) fig, ax = pl.subplots(1, 1, figsize=SQUARE_FIGSIZE) ax.axhline(0.25, color=COLORS["DATA"]) ax.plot(sigs, acc_fracs, ".-", color=COLORS["MODEL_2"]) ax.set_xscale("log") ax.set_xlabel(r"$\sigma_q$") ax.set_ylabel("acceptance fraction") ax.set_xlim(0.2, 40) savefig(fig, "tuning.pdf")
setup()  # initialize the plotting styles
np.random.seed(42)


def log_p(x, mean=2.0, variance=2.0):
    """Log density of a Gaussian with the given mean and variance."""
    return -0.5 * (x - mean)**2 / variance - 0.5*np.log(2*np.pi*variance)


x = 0.0
lp = log_p(x)
chain = np.empty(int(2e4))  # np.empty requires an integer size, not 2e4
for step in range(len(chain)):
    x_prime = x + np.random.randn()  # unit Gaussian proposal
    lp_prime = log_p(x_prime)
    if np.random.rand() <= np.exp(lp_prime - lp):
        x = x_prime
        lp = lp_prime
    chain[step] = x

fig, ax = pl.subplots(1, 1, figsize=SQUARE_FIGSIZE)
# density=True: the "normed" keyword was removed from matplotlib's hist.
ax.hist(chain, 100, histtype="step", color=COLORS["DATA"], density=True)
x = 2 + np.linspace(-4.5, 4.5, 5000)
ax.plot(x, np.exp(log_p(x)), color=COLORS["MODEL_1"])
ax.set_xlim(x.min(), x.max())
ax.yaxis.set_major_locator(pl.MaxNLocator(4))
ax.set_xlabel("$x$")
ax.set_ylabel("$p(x)$")
savefig(fig, "MH.pdf")
setup()  # initialize the plotting styles
np.random.seed(42)


def log_p(x, mean=2.0, variance=2.0):
    """Log density of a Gaussian with the given mean and variance."""
    return -0.5 * (x - mean)**2 / variance - 0.5 * np.log(2 * np.pi * variance)


x = 0.0
lp = log_p(x)
chain = np.empty(int(2e4))  # np.empty requires an integer size, not 2e4
for step in range(len(chain)):
    x_prime = x + np.random.randn()  # unit Gaussian proposal
    lp_prime = log_p(x_prime)
    if np.random.rand() <= np.exp(lp_prime - lp):
        x = x_prime
        lp = lp_prime
    chain[step] = x

fig, ax = pl.subplots(1, 1, figsize=SQUARE_FIGSIZE)
# density=True: the "normed" keyword was removed from matplotlib's hist.
ax.hist(chain, 100, histtype="step", color=COLORS["DATA"], density=True)
x = 2 + np.linspace(-4.5, 4.5, 5000)
ax.plot(x, np.exp(log_p(x)), color=COLORS["MODEL_1"])
ax.set_xlim(x.min(), x.max())
ax.yaxis.set_major_locator(pl.MaxNLocator(4))
ax.set_xlabel("$x$")
ax.set_ylabel("$p(x)$")
savefig(fig, "MH.pdf")
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import division, print_function import corner import numpy as np from twod_a import run_mcmc from plot_setup import setup, savefig setup() # initialize the plotting styles np.random.seed(42) def log_p_uniform(x): if 3 <= x[0] <= 7 and 1 <= x[1] <= 9: return 0.0 return -np.inf chain, _ = run_mcmc(log_p_uniform, np.array([5.0, 5.0]), nsteps=1e5) fig = corner.corner(chain, labels=["$x$", "$y$"], range=[(2.5, 7.5), (0.5, 9.5)], plot_density=False, plot_contours=False) savefig(fig, "twod_b.pdf", dpi=300)
import numpy as np
import matplotlib.pyplot as pl

from plot_setup import setup, SQUARE_FIGSIZE, COLORS, savefig

setup()  # initialize the plotting styles
np.random.seed(42)

# A pure random walk: each step adds a unit Gaussian deviate, so the "chain"
# samples an improper (non-normalizable) target and never converges.
x = 0.0
chain = np.empty(int(1e6))  # np.empty requires an integer size, not the float 1e6
for step in range(len(chain)):
    x += np.random.randn()
    chain[step] = x

# Thin the chain for plotting purposes.
chain = chain[::1000]

mn, mx = np.min(chain), np.max(chain)
mid = 0.5 * (mn + mx)
rng = 0.5 * (mx - mn) * 1.1  # pad the y-range by 10%

fig, ax = pl.subplots(1, 1, figsize=SQUARE_FIGSIZE)
# After thinning by 1000, one plotted point = 1000 raw steps; the extra 1e-2
# factor puts the x axis in units of 1e5 MCMC steps (see the xlabel).
ax.plot(np.arange(len(chain)) * 1e-2, chain, color=COLORS["DATA"])
ax.yaxis.set_major_locator(pl.MaxNLocator(5))
ax.set_xlabel("mcmc step [$\\times 10^5$]")
ax.set_ylabel("$x$")
ax.set_ylim(mid-rng, mid+rng)
savefig(fig, "improper.pdf")
from plot_setup import setup, SQUARE_FIGSIZE, COLORS, savefig

setup()  # initialize the plotting styles
np.random.seed(42)


def log_p(x, mean=2.0, variance=2.0):
    """Log density of a Gaussian with the given mean and variance."""
    return -0.5 * (x - mean)**2 / variance - 0.5 * np.log(2 * np.pi * variance)


# Deliberately terrible starting point, far from the target's support,
# to illustrate the burn-in transient.
x = 1e3
lp = log_p(x)

chain = np.empty(6000)
for step in range(len(chain)):
    # Unit Gaussian proposal followed by the Metropolis accept/reject rule.
    x_prime = x + np.random.randn()
    lp_prime = log_p(x_prime)
    if np.random.rand() <= np.exp(lp_prime - lp):
        x, lp = x_prime, lp_prime
    chain[step] = x

fig, ax = pl.subplots(1, 1, figsize=SQUARE_FIGSIZE)
ax.plot(chain, color=COLORS["DATA"])
ax.xaxis.set_major_locator(pl.MaxNLocator(4))
ax.yaxis.set_major_locator(pl.MaxNLocator(4))
ax.set_xlabel("step")
ax.set_ylabel("$x$")
savefig(fig, "initialization.pdf")
def log_p(x, a=3.0, b=7.0):
    """Log density of a uniform distribution on [a, b]."""
    if a <= x <= b:
        return -np.log(b - a)
    return -np.inf


x = 5.0
lp = log_p(x)
chain = np.empty(int(2e4))  # np.empty requires an integer size, not 2e4
for step in range(len(chain)):
    x_prime = x + np.random.randn()  # unit Gaussian proposal
    lp_prime = log_p(x_prime)
    if np.random.rand() <= np.exp(lp_prime - lp):
        x = x_prime
        lp = lp_prime
    chain[step] = x

fig, ax = pl.subplots(1, 1, figsize=SQUARE_FIGSIZE)
ax.axhline(0.0, color="k")
# density=True: the "normed" keyword was removed from matplotlib's hist.
ax.hist(chain, 50, histtype="step", color=COLORS["DATA"], density=True)
x = np.linspace(2.5, 7.5, 5000)
y = np.exp([log_p(_) for _ in x])
ax.plot(x, y, color=COLORS["MODEL_1"])
ax.set_xlim(x.min(), x.max())
ax.set_ylim(-0.01, y.max() + 0.05)
ax.set_xlabel("$x$")
ax.set_ylabel("$p(x)$")
savefig(fig, "MH2.pdf")
from plot_setup import setup, SQUARE_FIGSIZE, COLORS, savefig

setup()  # initialize the plotting styles
np.random.seed(42)


def log_p(x, mean=2.0, variance=2.0):
    """Log density of a Gaussian with the given mean and variance."""
    return -0.5 * (x - mean)**2 / variance - 0.5*np.log(2*np.pi*variance)


# Deliberately terrible starting point, far from the target's support,
# to illustrate the burn-in transient.
x = 1e3
lp = log_p(x)

chain = np.empty(6000)
for step in range(len(chain)):
    # Unit Gaussian proposal followed by the Metropolis accept/reject rule.
    x_prime = x + np.random.randn()
    lp_prime = log_p(x_prime)
    if np.random.rand() <= np.exp(lp_prime - lp):
        x, lp = x_prime, lp_prime
    chain[step] = x

fig, ax = pl.subplots(1, 1, figsize=SQUARE_FIGSIZE)
ax.plot(chain, color=COLORS["DATA"])
ax.xaxis.set_major_locator(pl.MaxNLocator(4))
ax.yaxis.set_major_locator(pl.MaxNLocator(4))
ax.set_xlabel("step")
ax.set_ylabel("$x$")
savefig(fig, "initialization.pdf")
setup()  # initialize the plotting styles
np.random.seed(42)


def log_p(x, mean=2.0, variance=2.0):
    """Log density of a Gaussian (applied here in log-x space)."""
    return -0.5 * (x - mean)**2 / variance - 0.5 * np.log(2 * np.pi * variance)


# Sample in log(x) but record x = exp(log_x): the histogram then shows the
# Jacobian factor induced by the change of variables.
log_x = 0.0
lp = log_p(log_x)
chain = np.empty(int(2e6))  # np.empty requires an integer size, not 2e6
for step in range(len(chain)):
    log_x_prime = log_x + np.random.randn()  # unit Gaussian proposal in log-x
    lp_prime = log_p(log_x_prime)
    if np.random.rand() <= np.exp(lp_prime - lp):
        log_x = log_x_prime
        lp = lp_prime
    chain[step] = np.exp(log_x)

fig, ax = pl.subplots(1, 1, figsize=SQUARE_FIGSIZE)
x = np.linspace(0, 20, 5000)
# Analytic density after the transform: p(x) = p(log x) / x. x[0] == 0
# intentionally yields a non-finite value that matplotlib simply clips.
ax.plot(x, np.exp(log_p(np.log(x))) / x, color=COLORS["MODEL_1"])
# density=True: the "normed" keyword was removed from matplotlib's hist.
ax.hist(chain, 10000, histtype="step", color=COLORS["DATA"], density=True)
ax.set_xlim(x.min(), x.max())
ax.yaxis.set_major_locator(pl.MaxNLocator(4))
ax.set_xlabel("$x$")
ax.set_ylabel("$p(x)$")
savefig(fig, "logprior.pdf")
print("Warning: running naive autocorrelation function method. " "This will be slow!") mu = np.mean(x) r = x - mu C = np.empty(len(r) // 2) for i in range(1, len(C)): C[i] = np.mean(r[i:] * r[:-i]) C /= np.mean(r ** 2) C[0] = 1.0 return C def autocorr_function(x): n = len(x) f = np.fft.fft(x - np.mean(x), n=2*n) acf = np.fft.ifft(f * np.conjugate(f))[:n].real return acf / acf[0] if __name__ == "__main__": fig, ax = pl.subplots(1, 1, figsize=SQUARE_FIGSIZE) acf = autocorr_function(chain) ax.plot(acf[:61]) ax.set_xlim(0, 60) ax.set_ylabel("$C_x(\Delta)$") ax.set_xlabel("$\Delta$") savefig(fig, "estimatetau.pdf")
def log_p(x, a=3.0, b=7.0):
    """Log density of a uniform distribution on [a, b]."""
    if a <= x <= b:
        return -np.log(b-a)
    return -np.inf


x = 5.0
lp = log_p(x)
chain = np.empty(int(2e4))  # np.empty requires an integer size, not 2e4
for step in range(len(chain)):
    x_prime = x + np.random.randn()  # unit Gaussian proposal
    lp_prime = log_p(x_prime)
    if np.random.rand() <= np.exp(lp_prime - lp):
        x = x_prime
        lp = lp_prime
    chain[step] = x

fig, ax = pl.subplots(1, 1, figsize=SQUARE_FIGSIZE)
ax.axhline(0.0, color="k")
# density=True: the "normed" keyword was removed from matplotlib's hist.
ax.hist(chain, 50, histtype="step", color=COLORS["DATA"], density=True)
x = np.linspace(2.5, 7.5, 5000)
y = np.exp([log_p(_) for _ in x])
ax.plot(x, y, color=COLORS["MODEL_1"])
ax.set_xlim(x.min(), x.max())
ax.set_ylim(-0.01, y.max() + 0.05)
ax.set_xlabel("$x$")
ax.set_ylabel("$p(x)$")
savefig(fig, "MH2.pdf")
x[:] = x_prime lp = lp_prime chain[step] = x return chain x0 = [-4.0, 5.0] s = SQUARE_FIGSIZE s[0] *= 2 fig, axes = pl.subplots(1, 2, sharex=True, sharey=True, figsize=s) for broken, ax in zip([True, False], axes): chain = run_broken_mcmc(log_p_gauss, np.array(x0), broken=broken) chain = np.concatenate(([x0], chain)) ax.plot(chain[:, 0], chain[:, 1], "o-", color=COLORS["DATA"], ms=2) ax.plot(x0[0], x0[1], "o", color=COLORS["MODEL_1"]) ax.set_xlim(-6.3, 6.3) ax.set_ylim(-6.3, 6.3) ax.set_xlabel("$x$") ax.annotate("incorrect acceptance" if broken else "corrected acceptance", (1, 0), xycoords="axes fraction", xytext=(-5, 5), textcoords="offset points", ha="right", va="bottom") axes[0].set_ylabel("$y$") savefig(fig, "prop_mean.pdf")
s_skew = np.mean((x - s_mu)**3) / s_var**(3./2) s_kurt = np.mean((x - s_mu)**4) / s_var**2 return ( (a_mu, a_var, a_skew, a_kurt), (s_mu, s_var, s_skew, s_kurt), ) K = 4**np.arange(1, 11) a_stats, s_stats = map(np.array, zip(*(compute_stats(k) for k in K))) shape = np.array([2, 1.5])*SQUARE_FIGSIZE fig, axes = pl.subplots(2, 2, sharex=True, figsize=shape) for i, (ax, title) in enumerate(zip( axes.flat, ("mean", "variance", "skewness", "kurtosis"))): mu = a_stats[0, i] ax.axhline(mu, color=COLORS["DATA"]) ax.plot(np.log2(K), s_stats[:, i], "-o", color=COLORS["MODEL_1"], ms=3) d = np.max(np.abs(np.array(ax.get_ylim()) - mu)) ax.set_ylim(mu + d * np.array([-1, 1])) ax.set_ylabel(title) ax.yaxis.set_label_coords(-0.3, 0.5) ax.yaxis.set_major_locator(pl.MaxNLocator(5)) ax.xaxis.set_major_locator(pl.MaxNLocator(6)) [ax.set_xlabel(r"$\log_2 K$") for ax in axes[1]] savefig(fig, "simple_stats.pdf")
import matplotlib.pyplot as pl

from plot_setup import setup, SQUARE_FIGSIZE, COLORS, savefig
from itertau import autocorr_function, autocorr_time_iterative
from tuning import log_p_gauss, run_mcmc, sigs

setup()  # initialize the plotting styles
np.random.seed(42)

# Integrated autocorrelation time of both coordinates at each proposal scale.
taus = np.empty((len(sigs), 2))
for i, sig in enumerate(sigs):
    # int(2e5): a float step count would break the integer-shaped array
    # allocation inside run_mcmc.
    chain, acc = run_mcmc(log_p_gauss, np.array([0.0, 0.0]),
                          prop_sigma=sig, nsteps=int(2e5))
    for j in range(2):
        acf = autocorr_function(chain[:, j])
        taus[i, j] = autocorr_time_iterative(acf)
    print(sig, acc, taus[i])

fig, ax = pl.subplots(1, 1, figsize=SQUARE_FIGSIZE)
ax.plot(sigs, taus[:, 0], ".-", color=COLORS["MODEL_1"])
ax.plot(sigs, taus[:, 1], ".-", color=COLORS["MODEL_2"])
ax.set_xscale("log")
ax.set_xlabel(r"$\sigma_q$")
ax.set_ylabel(r"$\tau_x$")
ax.set_xlim(0.2, 40)
ax.set_ylim(0, 450)
ax.yaxis.set_major_locator(pl.MaxNLocator(5))
savefig(fig, "tuningtau.pdf")