def test_mixture_1d():
    neg_log_probs = [neg_log_normal(1.0, 1.0), neg_log_normal(-1.0, 1.0)]
    probs = [0.2, 0.8]
    neg_log_p = mixture(neg_log_probs, probs)

    true_rvs = [st.norm(1.0, 1.0), st.norm(-1.0, 1.0)]
    true_log_p = lambda x: -np.log(
        sum(p * rv.pdf(x) for p, rv in zip(probs, true_rvs))
    )

    for x in np.random.randn(10):
        assert_almost_equal(neg_log_p(x), true_log_p(x))
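# For reference, a minimal sketch of what a `mixture` factory could look like,
# assuming it only needs to satisfy the test above (the actual minimc
# implementation may differ): given per-component negative log densities and
# mixture weights, return the negative log of the weighted sum of densities.
def mixture_sketch(neg_log_probs, probs):
    def neg_log_p(x):
        return -np.log(
            sum(p * np.exp(-nlp(x)) for p, nlp in zip(probs, neg_log_probs))
        )

    return neg_log_p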
def test_hamiltonian_monte_carlo():
    # This mostly tests consistency. Tolerance chosen by experiment.
    # Do statistical tests on your own time.
    np.random.seed(1)
    neg_log_p = neg_log_normal(2, 0.1)
    samples, *_ = hamiltonian_monte_carlo(100, neg_log_p, np.array(0.0))
    assert_allclose(2.0, np.mean(samples), atol=0.006)
    assert_allclose(0.1, np.std(samples), atol=0.025)
def test_leapfrog():
    neg_log_p = neg_log_normal(2, 0.1)
    dlogp = grad(neg_log_p)
    q, p = np.array(0.0), np.array(2.0)
    path_len, step_size = 1, 0.1

    # Should be reversible
    q_new, p_new, *_ = leapfrog(q, p, dlogp, path_len, step_size)
    q_new, p_new, *_ = leapfrog(q_new, p_new, dlogp, path_len, step_size)
    assert_almost_equal(q_new, q)
    assert_almost_equal(p_new, p)
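# A minimal leapfrog sketch consistent with the reversibility check above (the
# name and the trailing momentum flip are assumptions, not necessarily minimc's
# exact implementation): half-step the momentum, alternate full steps in
# position and momentum, finish with a half momentum step, then negate the
# momentum so that applying the integrator twice returns to the start.
def leapfrog_sketch(q, p, dVdq, path_len, step_size):
    q, p = np.copy(q), np.copy(p)

    p -= step_size * dVdq(q) / 2  # half step in momentum
    for _ in range(int(path_len / step_size) - 1):
        q += step_size * p  # full step in position
        p -= step_size * dVdq(q)  # full step in momentum
    q += step_size * p  # full step in position
    p -= step_size * dVdq(q) / 2  # half step in momentum

    # Negating the momentum makes the update its own inverse, which is what the
    # round-trip assertions in test_leapfrog rely on.
    return q, -p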
def test_hamiltonian_monte_carlo(integrator):
    # This mostly tests consistency. Tolerance chosen by experiment.
    # Do statistical tests on your own time.
    np.random.seed(1)
    neg_log_p = AutogradPotential(neg_log_normal(2, 0.1))
    samples = hamiltonian_monte_carlo(
        100, neg_log_p, np.array(0.0), integrator=integrator
    )
    assert samples.shape[0] == 100
    assert_allclose(2.0, np.mean(samples), atol=0.1)
    assert_allclose(0.1, np.std(samples), atol=0.1)
def test_leapfrog():
    neg_log_p = AutogradPotential(neg_log_normal(2, 0.1))
    q, p = np.array(0.0), np.array(2.0)
    path_len, step_size = 1, 0.1
    V, dVdq = neg_log_p(q)

    # Should be reversible
    q_new, p_new, _, dVdq = leapfrog(q, p, dVdq, neg_log_p, path_len, step_size)
    q_new, p_new, _, _ = leapfrog(q_new, p_new, dVdq, neg_log_p, path_len, step_size)
    assert_almost_equal(q_new, q)
    assert_almost_equal(p_new, p)
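# The tests above assume a potential object that, when called at a position,
# returns both the value and the gradient of the negative log density. This is
# a sketch of that interface using autograd's value_and_grad; the class name
# and attribute are assumptions made here for illustration.
from autograd import value_and_grad


class AutogradPotentialSketch:
    def __init__(self, neg_log_p):
        self._value_and_grad = value_and_grad(neg_log_p)

    def __call__(self, q):
        # Returns (V, dVdq) at q, as unpacked in test_leapfrog above.
        return self._value_and_grad(q)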
"font.family": "serif", "figure.facecolor": "#fffff8", "axes.facecolor": "#fffff8", "figure.constrained_layout.use": True, "font.size": 14.0, "hist.bins": "auto", "lines.linewidth": 3.0, "lines.markeredgewidth": 2.0, "lines.markerfacecolor": "none", "lines.markersize": 8.0, } ) ### Example 1 ### samples = hamiltonian_monte_carlo( 2000, AutogradPotential(neg_log_normal(0, 0.1)), initial_position=0.0 ) ### Plot 1 ### fig, ax = plt.subplots(figsize=FIGSIZE) ax.hist(samples, bins="auto") ax.axvline(0, color="C1", linestyle="--") ax.set_title("1D Gaussians!") plt.savefig(os.path.join(HERE, "plot1.png")) ### Example 2 ### samples, positions, momentums, accepted, p_accepts = hmc_slow( 50, AutogradPotential(neg_log_normal(0, 0.1)), 0.0, step_size=0.01 ) ### Plot 2 ###
def test_neg_log_normal():
    neg_log_p = neg_log_normal(2, 0.1)
    true_rv = st.norm(2, 0.1)
    for x in np.random.randn(10):
        assert_almost_equal(neg_log_p(x), -true_rv.logpdf(x))
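# A minimal sketch of a neg_log_normal factory that would satisfy the test
# above (an assumed implementation, not necessarily minimc's): the returned
# callable is the negative log pdf of a normal with the given mean and standard
# deviation, written with autograd's numpy so it stays differentiable.
def neg_log_normal_sketch(mu, sigma):
    def neg_log_p(x):
        return 0.5 * np.log(2 * np.pi * sigma ** 2) + 0.5 * ((x - mu) / sigma) ** 2

    return neg_log_p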
import os

import autograd.numpy as np
from autograd import grad
from minimc import neg_log_normal, mixture, hamiltonian_monte_carlo, neg_log_mvnormal
from minimc.minimc_slow import hamiltonian_monte_carlo as hmc_slow
import matplotlib.pyplot as plt

HERE = os.path.dirname(os.path.abspath(__file__))
FIGSIZE = (10, 7)

if __name__ == "__main__":
    plt.style.use("tufte")

    ### Example 1 ###
    samples = hamiltonian_monte_carlo(
        2000, neg_log_normal(0, 0.1), initial_position=0.0
    )

    ### Plot 1 ###
    fig, ax = plt.subplots(figsize=FIGSIZE)
    ax.hist(samples, bins="auto")
    ax.axvline(0, color="C1", linestyle="--")
    ax.set_title("1D Gaussians!")
    plt.savefig(os.path.join(HERE, "plot1.png"))

    ### Example 2 ###
    samples, positions, momentums, accepted = hmc_slow(
        50, neg_log_normal(0, 0.1), 0.0, step_size=0.01
    )