    # Conjugate update: a Gamma(a, b) prior on the precision of a Normal
    # with known mean `mu` yields a Gamma(an, bn) posterior given `data`.
    # NOTE(review): the enclosing def (presumably get_stat_ig(data, mu, a, b),
    # given the call below) starts before this chunk — confirm its signature.
    an = a + len(data) / 2
    bn = b + 0.5 * np.sum((data - mu)**2)
    print(an, bn)
    # Frozen posterior distribution over the precision.
    g = st.gamma(a=an, loc=0, scale=1 / bn)

    def comp(sig):
        # Posterior density of the precision evaluated at sig.
        # (nonlocal is not strictly needed for a read-only closure, but is
        # kept as written.)
        nonlocal g
        return g.pdf(sig)

    return comp


if __name__ == "__main__":
    # Synthetic data: n draws from Normal(mu, 1/precision).
    mu = 0
    precision = 0.1111  # true precision used to generate the data
    n = 1000
    data = dist.normal(mu, (1 / precision)**0.5, n)

    # Gamma(2, 1) prior on the precision.
    a = 2
    b = 1
    stat = get_stat_ig(data, mu, a, b)

    # Gaussian random-walk proposal with sd 0.1 (symmetric).
    sd = 0.1
    prop = lambda x, xi: norm(xi, sd).pdf(x)
    prop_sampler = lambda x: dist.normal(x, sd)

    # NOTE(review): mh is called here without an explicit initial state,
    # unlike other call sites in this project — confirm against the mcmc
    # module's signature.
    samples = mh(stat, prop, prop_sampler, 2000)
    print(np.mean(samples))
import numpy as np
import numpy.random as dist
import matplotlib.pyplot as plt
from scipy.stats import multivariate_normal as norm
import scipy.stats as st
from mcmc import metropolis_hastings as mh

# Demo: sample a 2-D Gaussian target with Metropolis-Hastings and scatter
# the resulting chain.

# Target: 2-D Gaussian with independent components (variances 1 and 10).
# np.matrix is deprecated in NumPy; plain ndarrays work everywhere
# multivariate_normal accepts a covariance.
cov = np.array([[1, 0], [0, 10]])
mean = np.array([0, 0])
N_stat = norm(mean, cov)
stat = lambda x: N_stat.pdf(x)

# Gaussian proposal centred on the current state xi.
cov_sampler = np.array([[3, 0], [0, 10]])
prop = lambda x, xi: norm(xi, cov_sampler).pdf(x)
prop_sampler = lambda x: norm(x, cov_sampler).rvs()

# Draw 10000 samples starting from the origin.
# (An unused `k = 5` from the original was removed.)
samples = mh(stat, prop, prop_sampler, 10000, np.array([0, 0]))

# Scatter the chain to visualise the target density.
mu0 = [x[0] for x in samples]
mu1 = [x[1] for x in samples]
plt.plot(mu0, mu1, 'ro')
axes = plt.gca()
axes.set_xlim([-15, 15])
axes.set_ylim([-15, 15])
plt.show()
# Proposal covariance for a 3-parameter chain (intercept, slope, third
# parameter — presumably a noise term; confirm against the earlier part
# of this file). NOTE(review): `stat`, `normal`, `dataX`, and `Y` are
# defined before this chunk.
k1 = 0.05
k2 = 0.05
k3 = 5
cov_sampler = np.array([[k1, 0, 0], [0, k2, 0], [0, 0, k3]])
prop = lambda x, xi: normal(xi, cov_sampler).pdf(x)
prop_sampler = lambda x: normal(x, cov_sampler).rvs()

# Wider, weakly correlated proposal — apparently intended for every k-th step.
cov_sampler2 = np.array([[20, 0.5, 0], [0.5, 20, 0.5], [0, 0.5, 20]])
prop2 = lambda x, xi: normal(xi, cov_sampler2).pdf(x)
prop_sampler2 = lambda x: normal(x, cov_sampler2).rvs()

k = 5
# NOTE(review): prop2/prop_sampler2 are defined but never used — the
# kth-step arguments below reuse prop/prop_sampler. Likely a bug; verify
# whether the wide proposal was meant to be passed here.
samples, failures = mh(stat, prop, prop_sampler, k, prop, prop_sampler, 2000,
                       np.array([5, 1, 10]))

# Posterior means and 95% credible intervals for intercept (b0) and slope (b1).
b0 = np.mean(samples[:, 0])
b1 = np.mean(samples[:, 1])
b0confidence_95 = np.percentile(samples[:, 0], [2.5, 97.5])
b1confidence_95 = np.percentile(samples[:, 1], [2.5, 97.5])

mindataX = np.min(dataX)
maxdataX = np.max(dataX)

# Plot the data, the fitted line, and interval lines.
# (The final plt.plot statement continues past the end of this chunk.)
plt.figure(1)
plt.subplot(221)
plt.plot(dataX, Y, 'ro')
plt.plot([mindataX, maxdataX], [b0 + mindataX * b1, b0 + maxdataX * b1])
plt.plot([mindataX, maxdataX], [
    b0confidence_95[0] + mindataX * b1confidence_95[0],
import numpy as np
import numpy.random as dist
import matplotlib.pyplot as plt
from scipy.stats import norm
import scipy.stats as st
from mcmc import metropolis_hastings_kth as mh

if __name__ == "__main__":
    # Target: a bimodal mixture of two well-separated Gaussians, for which
    # a plain narrow random walk tends to get stuck in one mode.
    n1 = norm(-50, 1)
    n2 = norm(50, 2)

    def stat(x):
        """Unnormalised target density: sum of the two components."""
        return n1.pdf(x) + n2.pdf(x)

    # Narrow Gaussian proposal used on ordinary steps.
    sd1 = 3

    def prop(x, xi):
        return norm(xi, sd1).pdf(x)

    def prop_sampler(x):
        return dist.normal(x, sd1)

    # Wide proposal applied on every k-th step so the chain can hop between
    # the two modes.
    k = 5
    sd2 = 100

    def prop_kth(x, xi):
        return norm(xi, sd2).pdf(x)

    def prop_sampler_kth(x):
        return dist.normal(x, sd2)

    # 2000 samples, started at x0 = 50 (inside the right-hand mode).
    samples = mh(stat, prop, prop_sampler, k, prop_kth, prop_sampler_kth,
                 2000, 50)
# Tail of a bounds tuple that starts before this chunk — each pair reads as
# a (lower, upper) bound with None meaning unbounded; confirm against the
# call that consumes it.
 (None, None), (None, None), (0, None), (None, None), (0, None))

# Results already found
# Best-fit parameter vector from a previous run, used as the chain's start.
pre_found_best = np.array([
    3.40658319e-01, 5.68558786e-01, 1.34469382e-01, 3.84466029e-02,
    8.67848219e-01, 3.05861386e+00, -1.59939791e-02, 5.04364259e-03,
    -1.90515806e+01, 1.17007078e-02
])

import matplotlib.pyplot as pl
from matplotlib.ticker import MaxNLocator

import mcmc as m

# NOTE(review): m2loglike is defined before this chunk; `[0]` is whatever
# m.mh takes as its second argument — verify against the mcmc module.
mcmc_obj = m.mh(m2loglike, [0])
# Per-parameter step sizes scaled to each parameter's magnitude.
param_step_stds = np.sqrt(abs(pre_found_best)) / 400.
mcmc_obj.run_d_mc(pre_found_best, param_step_stds, numsteps=25000,
                  burn_in=1000)
p, p_uncertainty, chains, accepted_fraction, loglike_history = mcmc_obj.get_params(
)
## chains = size(numsteps,length(p0))
# LaTeX axis labels for the chain plots (the list continues past this chunk).
labels = [
    r"$\Omega_M$", r"$\Omega_{\Lambda}$", r"$ \alpha $", r"$x_0$",
    r"$ \sigma^2_{x_0} $", r"$ \beta $", r"$c_0$", r"$ \sigma^2_{c_0} $",
def sample(self, n):
    """Draw n Metropolis-Hastings samples and store them on self.samples.

    Uses the instance's stationary density, proposal density, and proposal
    sampler; the chain starts from the fixed initial state [0, 0, 3].
    """
    initial_state = [0, 0, 3]
    self.samples = mh(
        self.stationary,
        self.proposal_distribution,
        self.proposal_sampler,
        n,
        initial_state,
    )