def plot_all_no_weight(folder, output):
    """ Plot all chains merged into one, without weights applied """
    print("Plotting all as one, without weights")
    chain, posterior, t, p, f, l, w, ow = load_stan_from_folder(folder, merge=True)
    c = ChainConsumer()
    c.add_chain(chain, posterior=posterior, walkers=l)
    c.plot(filename=output, truth=t, figsize=0.75)
def test_bic_fail_no_posterior():
    d = norm.rvs(size=1000)
    c = ChainConsumer()
    c.add_chain(d, num_eff_data_points=1000, num_free_params=1)
    bics = c.comparison.bic()
    assert len(bics) == 1
    assert bics[0] is None
def plot_separate(folder, output):
    """ Plot separate cosmologies """
    print("Plotting all cosmologies separately")
    res = load_stan_from_folder(folder, merge=False)
    c = ChainConsumer()
    for i, (chain, posterior, t, p, f, l, w, ow) in enumerate(res):
        c.add_chain(chain, weights=w, posterior=posterior, walkers=l, name="%d" % i)
    c.plot(filename=output, truth=t, figsize=0.75)
def test_aic_0():
    d = norm.rvs(size=1000)
    p = norm.logpdf(d)
    c = ChainConsumer()
    c.add_chain(d, posterior=p, num_free_params=1, num_eff_data_points=1000)
    aics = c.comparison.aic()
    assert len(aics) == 1
    assert aics[0] == 0
def test_dic_0():
    d = norm.rvs(size=1000)
    p = norm.logpdf(d)
    c = ChainConsumer()
    c.add_chain(d, posterior=p)
    dics = c.comparison.dic()
    assert len(dics) == 1
    assert dics[0] == 0
def test_bic_fail_no_num_params():
    d = norm.rvs(size=1000)
    p = norm.logpdf(d)
    c = ChainConsumer()
    c.add_chain(d, posterior=p, num_eff_data_points=1000)
    bics = c.comparison.bic()
    assert len(bics) == 1
    assert bics[0] is None
def test_get_chain_via_object(self):
    c = ChainConsumer()
    c.add_chain(self.data, name="A")
    c.add_chain(self.data, name="B")
    assert c._get_chain(c.chains[0])[0] == 0
    assert c._get_chain(c.chains[1])[0] == 1
    assert len(c._get_chain(c.chains[0])) == 1
    assert len(c._get_chain(c.chains[1])) == 1
def is_unconstrained(chain, param):
    c = ChainConsumer()
    c.add_chain(chain, parameters=param)
    constraints = c.get_summary()[0]
    # A summary bound of None means ChainConsumer could not place a lower or
    # upper limit on that parameter, i.e. it is unconstrained.
    for key in constraints:
        val = constraints[key]
        if val[0] is None or val[2] is None:
            return True
    return False
def test_shade_alpha_algorithm2(self):
    consumer = ChainConsumer()
    consumer.add_chain(self.data)
    consumer.add_chain(self.data)
    consumer.configure()
    alpha0 = consumer.chains[0].config["shade_alpha"]
    alpha1 = consumer.chains[1].config["shade_alpha"]
    assert alpha0 == 1.0 / 2.0
    assert alpha1 == 1.0 / 2.0
def debug_plots(std):
    print(std)
    res = load_stan_from_folder(std, merge=True, cut=False)
    chain, posterior, t, p, f, l, w, ow = res
    print(w.mean(), np.std(w), np.mean(np.log(w)), np.std(np.log(w)))
    # import matplotlib.pyplot as plt
    # plt.hist(np.log(w), 100)
    # plt.show()
    # exit()

    # Clip log-weights at mean + 3 sigma so a handful of extreme importance
    # weights cannot dominate the corrected contours.
    logw = np.log(w)
    m = np.mean(logw)
    s = np.std(logw)
    print(m, s)
    logw -= (m + 3 * s)
    good = logw < 0
    logw *= good
    w = np.exp(logw)

    # Sort samples by weight so heavily weighted points are plotted last.
    sorti = np.argsort(w)
    for key in chain.keys():
        chain[key] = chain[key][sorti]
    w = w[sorti]
    ow = ow[sorti]
    posterior = posterior[sorti]

    c = ChainConsumer()
    truth = [0.3, 0.14, 3.1, -19.365, 0, 0, 0.1, 1.0, 0.1, 0, 0, 0, 0, 0, 0]
    c.add_chain(chain, name="uncorrected", posterior=posterior)
    c.add_chain(chain, weights=w, name="corrected", posterior=posterior)
    c.plot(filename="output.png", parameters=9, truth=truth, figsize=1.3)
    # c = ChainConsumer()
    # c.add_chain(chain, weights=w, name="corrected")
    c.plot_walks(chains="corrected", filename="walks.png", truth=truth)
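# The weight correction in debug_plots above amounts to clipping the
# log-weights at their mean plus three standard deviations. A minimal
# standalone sketch of the same operation (the function name is illustrative,
# not part of the original module):

import numpy as np

def clip_log_weights(w, nsigma=3):
    # Shift the log-weights so the cut sits at zero, then clip everything
    # above it -- equivalent to the shift/mask/multiply steps in debug_plots.
    logw = np.log(w)
    cut = logw.mean() + nsigma * logw.std()
    return np.exp(np.minimum(logw - cut, 0.0))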
def test_aic_data_dependence():
    d = norm.rvs(size=1000)
    p = norm.logpdf(d)
    c = ChainConsumer()
    c.add_chain(d, posterior=p, num_free_params=1, num_eff_data_points=1000)
    c.add_chain(d, posterior=p, num_free_params=1, num_eff_data_points=500)
    aics = c.comparison.aic()
    assert len(aics) == 2
    assert aics[0] == 0
    # Only the small-sample correction term 2k(k+1)/(n-k-1) differs between the chains.
    expected = (2.0 * 1 * 2 / (500 - 1 - 1)) - (2.0 * 1 * 2 / (1000 - 1 - 1))
    assert np.isclose(aics[1], expected, atol=1e-3)
def test_bic_data_dependence2():
    d = norm.rvs(size=1000)
    p = norm.logpdf(d)
    c = ChainConsumer()
    c.add_chain(d, posterior=p, num_free_params=2, num_eff_data_points=1000)
    c.add_chain(d, posterior=p, num_free_params=3, num_eff_data_points=500)
    bics = c.comparison.bic()
    assert len(bics) == 2
    assert bics[0] == 0
    # Only the k*ln(n) penalty term differs between the chains.
    expected = 3 * np.log(500) - 2 * np.log(1000)
    assert np.isclose(bics[1], expected, atol=1e-3)
def get_instance():
    np.random.seed(0)
    c = ChainConsumer()
    parameters = ["$x$", r"$\Omega_\epsilon$", "$r^2(x_0)$"]
    for name in ["Ref. model", "Test A", "Test B", "Test C"]:
        # Add some random data
        mean = np.random.normal(loc=0, scale=3, size=3)
        sigma = np.random.uniform(low=1, high=3, size=3)
        data = np.random.multivariate_normal(mean=mean, cov=np.diag(sigma**2), size=100000)
        c.add_chain(data, parameters=parameters, name=name)
    return c
def _get_consumer(self, results, chain_consumer=None, include_latent=False):
    if chain_consumer is None:
        from chainconsumer import ChainConsumer
        chain_consumer = ChainConsumer()
    n = len(self._theta_labels) if include_latent else self._num_actual
    chain_consumer.add_chain(results["chain"],
                             weights=results.get("weights"),
                             posterior=results.get("posterior"),
                             parameters=self._theta_labels[:n],
                             name=self.model_name)
    return chain_consumer
def plot_results(chain, param, chainf, chainf2, chainf3, paramf, t0, x0, x1, c, temp_dir, seed, interped):
    cc = ChainConsumer()
    cc.add_chain(chain, parameters=param, name="Posterior")
    cc.add_chain(chainf, parameters=paramf, name="Minuit")
    cc.add_chain(chainf2, parameters=paramf, name="Emcee")
    cc.add_chain(chainf3, parameters=paramf, name="Nestle")
    truth = {"$t_0$": t0, "$x_0$": x0, "$x_1$": x1, "$c$": c,
             r"$\mu$": get_mu(interped, x0, x1, c)}
    cc.plot(filename=temp_dir + "/surfaces_%d.png" % seed, truth=truth)
def test_aic_posterior_dependence():
    d = norm.rvs(size=1000)
    p = norm.logpdf(d)
    p2 = norm.logpdf(d, scale=2)
    c = ChainConsumer()
    c.add_chain(d, posterior=p, num_free_params=1, num_eff_data_points=1000)
    c.add_chain(d, posterior=p2, num_free_params=1, num_eff_data_points=1000)
    aics = c.comparison.aic()
    assert len(aics) == 2
    assert aics[0] == 0
    # Doubling the scale halves the maximum posterior, shifting -2 ln(P) by 2 ln(2).
    expected = 2 * np.log(2)
    assert np.isclose(aics[1], expected, atol=1e-3)
def test_dic_posterior_dependence():
    d = norm.rvs(size=1000000)
    p = norm.logpdf(d)
    p2 = norm.logpdf(d, scale=2)
    c = ChainConsumer()
    c.add_chain(d, posterior=p)
    c.add_chain(d, posterior=p2)
    dics = c.comparison.dic()
    assert len(dics) == 2
    assert dics[1] == 0
    dic1 = 2 * np.mean(-2 * p) + 2 * norm.logpdf(0)
    dic2 = 2 * np.mean(-2 * p2) + 2 * norm.logpdf(0, scale=2)
    assert np.isclose(dics[0], dic1 - dic2, atol=1e-3)
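# Read together, the comparison tests above pin down the conventions being
# tested: each criterion is reported relative to the best chain, so the
# minimum value is always zero, and the expected values in the assertions
# imply the standard forms (a reading of the tests, not a quote of the
# library internals). For k free parameters, n effective data points, and
# maximum posterior sample P_max:
#
#     \mathrm{AICc} = -2\ln P_{\max} + 2k + \frac{2k(k+1)}{n - k - 1}
#
#     \mathrm{BIC} = -2\ln P_{\max} + k \ln n
#
#     \mathrm{DIC} = 2\,\overline{D(\theta)} - D(\bar{\theta}),
#     \qquad D(\theta) \equiv -2 \ln P(\theta)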
def plot_separate_weight(folder, output):
    """ Plot separate cosmologies, with and without weights applied """
    print("Plotting all cosmologies separately, with old and new weights")
    res = load_stan_from_folder(folder, merge=False)
    c = ChainConsumer()
    ls = []
    for i, (chain, posterior, t, p, f, l, w, ow) in enumerate(res):
        c.add_chain(chain, posterior=posterior, walkers=l, name="Uncorrected %d" % i)
        c.add_chain(chain, weights=w, posterior=posterior, walkers=l, name="Corrected %d" % i)
        ls += ["-", "--"]
    c.configure_general(linestyles=ls)
    c.plot(filename=output, truth=t, figsize=0.75)
def test_remove_chain_by_name(self):
    tolerance = 5e-2
    consumer = ChainConsumer()
    consumer.add_chain(self.data * 2, name="a")
    consumer.add_chain(self.data, name="b")
    consumer.remove_chain(chain="a")
    consumer.configure()
    summary = consumer.analysis.get_summary()
    assert isinstance(summary, dict)
    actual = np.array(list(summary.values())[0])
    expected = np.array([3.5, 5.0, 6.5])
    diff = np.abs(expected - actual)
    assert np.all(diff < tolerance)
def plot_single_cosmology_weight(folder, output, i=0):
    print("Plotting cosmology realisation %d" % i)
    res = load_stan_from_folder(folder, merge=False)
    c = ChainConsumer()
    chain, posterior, t, p, f, l, w, ow = res[i]
    c.add_chain(chain, posterior=posterior, walkers=l, name="Uncorrected %d" % i)
    c.add_chain(chain, weights=w, posterior=posterior, walkers=l, name="Corrected %d" % i)
    c.plot(filename=output, truth=t, figsize=0.75)
def plot_all(folder, output, output_walk=None):
    """ Plot all chains as one """
    print("Plotting all as one")
    chain, posterior, t, p, f, l, w, ow = load_stan_from_folder(folder, merge=True)
    c = ChainConsumer()
    c.add_chain(chain, weights=w, posterior=posterior, walkers=l)
    c.plot(filename=output, truth=t, figsize=0.75)
    if output_walk is not None:
        c.plot_walks(filename=output_walk)
def test_shade_alpha_algorithm3(self):
    consumer = ChainConsumer()
    consumer.add_chain(self.data)
    consumer.add_chain(self.data)
    consumer.add_chain(self.data)
    consumer.configure()
    alphas = [c.config["shade_alpha"] for c in consumer.chains]
    assert len(alphas) == 3
    assert alphas[0] == 1.0 / 3.0
    assert alphas[1] == 1.0 / 3.0
    assert alphas[2] == 1.0 / 3.0
# Median and 16th/84th percentiles of the wavelength-scale offset; for small
# offsets exp(scale_a) - 1 ~ dlambda/lambda, so multiplying by c ~ 3e5 km/s
# converts it to a velocity.
dum = numpy.percentile(numpy.exp(ans['scale_a']) - 1, (50, 50 - 34, 50 + 34))
dum = dum * 3e5
print(r'${:8.0f}_{{-{:8.0f}}}^{{+{:8.0f}}}$'.format(dum[0], dum[0] - dum[1], dum[2] - dum[0]))

# plt.plot(ans['flux'].mean(axis=0)[10:-10])
# plt.show()
# plt.plot(ans['scale_a'])
# plt.show()
# plt.plot(ans['sigma2'])
# plt.show()

c = ChainConsumer()
# print(ans['scale_a'].shape)
# print(ans['norm'].shape)
c.add_chain([ans['scale_a'],
             ans['norm'][:, 0],
             ans['norm'][:, 1],
             numpy.sqrt(ans['sigma2'])],
            parameters=["$a_2-a_1$", "$n_1$", "$n_2$", r"$\sigma$"])
fig = c.plotter.plot()
fig.tight_layout()
fig.savefig('corner.png')

# plt.hist((numpy.exp(ans['scale_a'])-1)*3e5)
# plt.xlabel(r'$(\lambda_2/\lambda_1 -1)*3e5$')
# plt.savefig('scale.png')
out_lines = np.zeros(mc.nburnin)
for ii in range(20, mc.nburnin):
    out_lines[ii] = GelmanRubin(chain_T[:ii, :, nd])

# plt.ylim(0.95, 2.3)
plt.plot(out_absc[20:], out_lines[20:], '-', color='k')
plt.axhline(1.01)
plt.savefig(dir_output + 'GRtrace_pam_' + repr(nd) + '.png', bbox_inches='tight')
plt.close()

print()
print('*************************************************************')
print()

if args.cc != 'False':
    cc = ChainConsumer()
    for nd in range(0, mc.ndim):
        cc.add_chain(chain[:, :, nd].flatten(), walkers=mc.nwalkers)
    # print(cc.get_latex_table())
    print(cc.get_summary())
    print(cc.diagnostic_gelman_rubin(threshold=0.05))
    print(cc.diagnostic_geweke())
    print()
    print('*************************************************************')
    print()

x0 = 1. / 150
M_star1_rand = np.random.normal(M_star1, M_star1_err, n_kept)
Note that to use dictionary input with the grid method you *must* also pass
the full flattened arrays: we cannot construct the meshgrid from a dictionary,
because the order of the parameters is not preserved.
"""

import numpy as np
from chainconsumer import ChainConsumer
from scipy.stats import multivariate_normal

x, y = np.linspace(-3, 3, 50), np.linspace(-7, 7, 100)
xx, yy = np.meshgrid(x, y, indexing='ij')
pdf = np.exp(-0.5 * (xx * xx + yy * yy / 4 + np.abs(xx * yy)))

c = ChainConsumer()
c.add_chain([x, y], parameters=["$x$", "$y$"], weights=pdf, grid=True)
fig = c.plotter.plot()
fig.set_size_inches(4.5 + fig.get_size_inches())  # Resize fig for doco. You don't need this.

###############################################################################
# If you have the flattened array already, you can also pass this

# Turning 2D data to flat data.
xs, ys = xx.flatten(), yy.flatten()
coords = np.vstack((xs, ys)).T
pdf_flat = multivariate_normal.pdf(coords, mean=[0.0, 0.0], cov=[[1.0, 0.7], [0.7, 3.5]])
c = ChainConsumer()
c.add_chain([xs, ys], parameters=["$x$", "$y$"], weights=pdf_flat, grid=True)
c.configure(smooth=1)  # Notice how smoothing changes the results!
fig = c.plotter.plot()
# Start by loading the main catalog file processed from GBMCMC outputs
catPath = "../../tutorial/data/ucb"
catalogs = GWCatalogs.create(GWCatalogType.UCB, catPath, "cat15728640_v2.h5")
final_catalog = catalogs.get_last_catalog()
detections_attr = final_catalog.get_attr_detections()
detections = final_catalog.get_detections(detections_attr)

# Sort table by SNR and select highest SNR source
detections.sort_values(by="SNR", ascending=False, inplace=True)
sourceId = detections.index[0]
samples = final_catalog.get_source_samples(sourceId)

# Reject chain samples with negative fdot (enforce GR-driven prior)
samples_GR = samples[(samples["Frequency Derivative"] > 0)]

# Add distance and chirpmass to samples
get_DL(samples_GR)
get_Mchirp(samples_GR)

# Make corner plot
parameters = ["Chirp Mass", "Luminosity Distance"]
parameter_symbols = [r"$\mathcal{M}\ [{\rm M}_\odot]$", r"$D_L\ [{\rm kpc}]$"]
df = samples_GR[parameters].values

c = ChainConsumer().add_chain(df, parameters=parameter_symbols, cloud=True)
c.configure(flip=False)
fig = c.plotter.plot(figsize=1.5)
plt.show()
                             color=cs[2])

fitter.set_sampler(sampler)
fitter.set_num_walkers(10)
fitter.fit(file)

if fitter.should_plot():
    import logging
    logging.info("Creating plots")
    res = fitter.load()

    from chainconsumer import ChainConsumer
    import copy

    c = ChainConsumer()
    for posterior, weight, chain, evidence, model, data, extra in res:
        chain_conv = copy.deepcopy(chain)
        chain_conv[:, 0], chain_conv[:, 2] = model.get_alphas(chain[:, 0], chain[:, 2])
        parameters = model.get_labels()
        parameters[0] = r"$\alpha_{par}$"
        parameters[2] = r"$\alpha_{perp}$"
        c.add_chain(chain_conv, weights=weight, parameters=parameters, **extra)
        max_post = posterior.argmax()
        ps = chain_conv[max_post, :]
        for l, p in zip(parameters, ps):
            print(l, p)
def test_gelman_rubin_index():
    data = np.vstack((np.random.normal(loc=0.0, size=100000),
                      np.random.normal(loc=1.0, size=100000))).T
    consumer = ChainConsumer()
    consumer.add_chain(data, walkers=4)
    assert consumer.diagnostic.gelman_rubin(chain=0)
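# For reference, the quantity behind this diagnostic is the Gelman-Rubin
# potential scale reduction factor (ChainConsumer's internal implementation
# may differ in detail). For walkers of length n, with W the mean
# within-walker variance and B the between-walker variance, the standard
# definition is
#
#     \hat{R} = \sqrt{ \frac{ \frac{n-1}{n} W + \frac{1}{n} B }{ W } }
#
# and the chain passes when \hat{R} is within the configured threshold of unity.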
""" ############################################################################### # You can specify truth values using a list (in the same order as the # declared parameters). import numpy as np from numpy.random import normal, multivariate_normal from chainconsumer import ChainConsumer np.random.seed(2) cov = 0.2 * normal(size=(3, 3)) + np.identity(3) truth = normal(size=3) data = multivariate_normal(truth, 0.5 * (cov + cov.T), size=100000) c = ChainConsumer().add_chain(data, parameters=["$x$", "$y$", r"$\beta$"]) fig = c.plotter.plot(truth=truth) fig.set_size_inches( 4.5 + fig.get_size_inches()) # Resize fig for doco. You don't need this. ############################################################################### # Or you can specify truth values using a dictionary. This allows you to specify # truth values for only some parameters. You can also customise the look # of your truth lines. c.configure_truth(color='w', ls=":", alpha=0.8) fig2 = c.plotter.plot(truth={"$x$": truth[0], "$y$": truth[1]}) fig2.set_size_inches( 4.5 + fig2.get_size_inches()) # Resize fig for doco. You don't need this.
                                lnprob,
                                args=(zhel, zcmb, mb, x1, color, thirdvar, Cmu, blind_values),
                                threads=4)

# run the sampler
# how many steps (will have nSteps*nwalkers of samples)
sampler.run_mcmc(pos, nSteps)

if options.plot:
    burnin = 0  # change
    data = np.genfromtxt(options.chains + '/my_params_JLA_FlatwCDM_CPL_uncorrected_PV_blind_%s.txt' % date,
                         delimiter=',')
    data = data[burnin:]
    c = ChainConsumer()
    c.add_chain(data, parameters=[r'$\Omega_m$', r'$w_0$', r'$\alpha$',
                                  r'$\beta$', r'$M_B$', r'$\Delta_M$'])
    params = c.get_summary()
    print(params.keys())
    figw = c.plot_walks()
    figw.show()
    fig = c.plot()
    figw.savefig(options.chains + '/walks.png')
    fig.savefig(options.chains + '/marginals.png')
    fig.show()
def do_analysis(self):
    # run_id = "chains_1808/test1"

    # constants:
    c = 2.9979e10
    G = 6.67428e-8

    # -------------------------------------------------------------------------#
    # load in sampler:
    reader = emcee.backends.HDFBackend(filename=self.run_id + ".h5")
    # sampler = emcee.EnsembleSampler(self.nwalkers, self.ndim, self.lnprob,
    #                                 args=(self.x, self.y, self.yerr), backend=reader)
    # tau = 20
    # Using tol=0 means we'll always get an estimate, even if it isn't trustworthy.
    tau = reader.get_autocorr_time(tol=0)
    burnin = int(2 * np.max(tau))
    thin = int(0.5 * np.min(tau))
    samples = reader.get_chain(flat=True, discard=burnin)
    sampler = reader.get_chain(flat=False)
    blobs = reader.get_blobs(flat=True)
    # samples = reader.get_chain(discard=burnin, flat=True, thin=thin)
    # log_prob_samples = reader.get_log_prob(discard=burnin, flat=True, thin=thin)
    # blobs = reader.get_blobs(discard=burnin, flat=True, thin=thin)

    # The blobs store each model realisation as a string; eval reconstructs the dict.
    data = []
    for i in range(len(blobs["model"])):
        data.append(eval(blobs["model"][i].decode('ASCII', 'replace')))

    # -------------------------------------------------------------------------#
    # get the acceptance fraction:
    # accept = reader.acceptance_fraction / nsteps  # acceptance fraction for each walker
    # print(f"The average acceptance fraction of the walkers is: {np.mean(accept)}")

    # get the autocorrelation times:
    # print("burn-in: {0}".format(burnin))
    # print("thin: {0}".format(thin))
    # print("flat chain shape: {0}".format(samples.shape))
    # print("flat log prob shape: {0}".format(log_prob_samples.shape))
    # print("flat log prior shape: {0}".format(log_prior_samples.shape))

    # alternate method of checking if the chains are converged:
    # This code is from https://dfm.io/posts/autocorr/

    # get autocorrelation time:
    def next_pow_two(n):
        i = 1
        while i < n:
            i = i << 1
        return i

    def autocorr_func_1d(x, norm=True):
        x = np.atleast_1d(x)
        if len(x.shape) != 1:
            raise ValueError("invalid dimensions for 1D autocorrelation function")
        n = next_pow_two(len(x))
        # Compute the FFT and then (from that) the auto-correlation function
        f = np.fft.fft(x - np.mean(x), n=2 * n)
        acf = np.fft.ifft(f * np.conjugate(f))[:len(x)].real
        acf /= 4 * n
        # Optionally normalize
        if norm:
            acf /= acf[0]
        return acf

    # Automated windowing procedure following Sokal (1989)
    def auto_window(taus, c):
        m = np.arange(len(taus)) < c * taus
        if np.any(m):
            return np.argmin(m)
        return len(taus) - 1

    # Following the suggestion from Goodman & Weare (2010)
    def autocorr_gw2010(y, c=5.0):
        f = autocorr_func_1d(np.mean(y, axis=0))
        taus = 2.0 * np.cumsum(f) - 1.0
        window = auto_window(taus, c)
        return taus[window]

    def autocorr_new(y, c=5.0):
        f = np.zeros(y.shape[1])
        for yy in y:
            f += autocorr_func_1d(yy)
        f /= len(y)
        taus = 2.0 * np.cumsum(f) - 1.0
        window = auto_window(taus, c)
        return taus[window]

    # Compute the estimators for a few different chain lengths,
    # looping through the 10 parameters:
    f = plt.figure(figsize=(8, 5))
    param = [r"$X$", r"$Z$", r"$Q_{\mathrm{b}}$", r"$f_{\mathrm{a}}$", r"$f_{\mathrm{E}}$",
             r"$r{\mathrm{1}}$", r"$r{\mathrm{2}}$", r"$r{\mathrm{3}}$", r"$M$", r"$R$"]
    for j in range(10):
        chain = sampler[:, :, j].T
        print(np.shape(sampler))
        N = np.exp(np.linspace(np.log(100), np.log(chain.shape[1]), 10)).astype(int)
        print(N)
        gw2010 = np.empty(len(N))
        new = np.empty(len(N))
        for i, n in enumerate(N):
            gw2010[i] = autocorr_gw2010(chain[:, :n])
            new[i] = autocorr_new(chain[:, :n])
        # Plot the comparisons
        # plt.loglog(N, gw2010, "o-", label="G\&W 2010")
        plt.loglog(N, new, "o-", label=f"{param[j]}")
        plt.loglog(N, gw2010, "o-", label=None, color='grey')
    ylim = plt.gca().get_ylim()
    # plt.ylim(ylim)
    plt.xlabel("Number of samples, $N$", fontsize='xx-large')
    plt.ylabel(r"$\tau$ estimates", fontsize='xx-large')
    plt.plot(N, np.array(N) / 50.0, "--k")  # label=r"$\tau = N/50$"
    plt.legend(fontsize='large', loc='best', ncol=2)  # bbox_to_anchor=(0.99, 1.02)
    plt.xticks(fontsize=14)
    plt.yticks(fontsize=14)
    plt.savefig('{}_autocorrelationtimes.pdf'.format(self.run_id))
    plt.show()
    print(f"The autocorrelation time for each parameter as calculated by emcee is: {tau}")

    # -------------------------------------------------------------------------#
    # Get parameters for each model run from the blobs structure:

    # get each individual parameter:
    time = [data[i]['time'] for i in range(len(data))]
    e_b = [data[i]['e_b'] for i in range(len(data))]
    alpha = [data[i]['alpha'] for i in range(len(data))]
    X = [data[i]['x_0'] for i in range(len(data))]
    Z = [data[i]['z'] for i in range(len(data))]
    base = [data[i]['base'] for i in range(len(data))]
    mdot = [data[i]['mdot'] for i in range(len(data))]
    r1 = np.array([data[i]['r1'] for i in range(len(data))])
    r2 = np.array([data[i]['r2'] for i in range(len(data))])
    r3 = np.array([data[i]['r3'] for i in range(len(data))])
    mass = np.array([data[i]['mass'] for i in range(len(data))])
    radius = np.array([data[i]['radius'] for i in range(len(data))])

    # calculate redshift and gravity from mass and radius:
    R = np.array(radius) * 1e5  # cgs
    M = np.array(mass) * 1.989e33  # cgs
    redshift = np.power((1 - (2 * G * M / (R * c**2))), -0.5)
    gravity = M * redshift * G / R**2  # cgs

    # calculate distance and inclination from scaling factors:
    r1 = np.array(r1)
    r2 = np.array(r2)
    r3 = np.array(r3)
    print(np.min(r1))
    print(np.min(r2))
    print(np.min(r3))
    print(np.min(mass))
    print(np.min(X))
    sqrt = (r1 * r2 * r3 * 1e3) / (63.23 * 0.74816)
    xip = np.power(sqrt, 0.5)
    xib = (0.74816 * xip) / r2
    distance = 10 * np.power((r1 / xip), 0.5)  # kpc
    cosi_2 = 1 / (2 * xip)
    cosi = 0.5 / (2 * (xip / xib) - 1)

    # To get the parameter middle values and uncertainty, use the functions
    # get_param_uncert_obs and get_param_uncert_pred, e.g.
    # t1, t2, t3, t4, t5, t6, t7 = get_param_uncert_obs1(time, self.numburstssim+1)
    # times = [list(t1), list(t2), list(t3), list(t4), list(t5), list(t6), list(t7)]
    times = get_param_uncert_obs(time, self.numburstssim * 2 + 1)
    timepred = [x[0] for x in times]
    timepred_errup = [x[1] for x in times]
    timepred_errlow = [x[2] for x in times]
    ebs = get_param_uncert_obs(e_b, self.numburstssim * 2)
    ebpred = [x[0] for x in ebs]
    ebpred_errup = [x[1] for x in ebs]
    ebpred_errlow = [x[2] for x in ebs]
    alphas = get_param_uncert_obs(alpha, self.numburstssim * 2)
    Xpred = np.array(list(get_param_uncert(X))[0])
    Zpred = np.array(list(get_param_uncert(Z))[0])
    basepred = np.array(list(get_param_uncert(base))[0])
    dpred = np.array(list(get_param_uncert(distance))[0])
    cosipred = np.array(list(get_param_uncert(cosi))[0])
    xippred = np.array(list(get_param_uncert(xip))[0])
    xibpred = np.array(list(get_param_uncert(xib))[0])
    masspred = np.array(list(get_param_uncert(mass))[0])
    radiuspred = np.array(list(get_param_uncert(radius))[0])
    gravitypred = np.array(list(get_param_uncert(gravity))[0])
    redshiftpred = np.array(list(get_param_uncert(redshift))[0])
    r1pred = np.array(list(get_param_uncert(r1))[0])
    r2pred = np.array(list(get_param_uncert(r2))[0])
    r3pred = np.array(list(get_param_uncert(r3))[0])

    # scale fluences by scaling factor:
    ebpred = np.array(ebpred) * np.array(r3pred[0])
    ebpred_errup = np.array(ebpred_errup) * np.array(r3pred[0])
    ebpred_errlow = np.array(ebpred_errlow) * np.array(r3pred[0])

    # save to text file with columns: paramname, value, upper uncertainty, lower uncertainty
    np.savetxt(f'{self.run_id}_parameterconstraints_pred.txt',
               (Xpred, Zpred, basepred, dpred, cosipred, xippred, xibpred,
                masspred, radiuspred, gravitypred, redshiftpred, r1pred, r2pred, r3pred),
               header='Xpred, Zpred, basepred, dpred, cosipred, xippred, xibpred, masspred, '
                      'radiuspred, gravitypred, redshiftpred, r1pred, r2pred, r3pred \n'
                      'value, upper uncertainty, lower uncertainty')

    # -------------------------------------------------------------------------#
    # PLOTS
    # -------------------------------------------------------------------------#

    # make plot of posterior distributions of your parameters:
    c = ChainConsumer()
    c.add_chain(samples, parameters=["X", "Z", "Qb", "fa", "fE", "r1", "r2", "r3", "M", "R"])
    c.plotter.plot(filename=self.run_id + "_posteriors.pdf", figsize="column")

    # make plot of posterior distributions of the mass, radius, surface gravity, and redshift:
    # stack data for input to chainconsumer:
    mass = mass.ravel()
    radius = radius.ravel()
    gravity = gravity.ravel()
    redshift = redshift.ravel()
    mrgr = np.column_stack((mass, radius, gravity, redshift))

    # plot with chainconsumer:
    c = ChainConsumer()
    c.add_chain(mrgr, parameters=["M", "R", "g", "1+z"])
    c.plotter.plot(filename=self.run_id + "_massradius.pdf", figsize="column")

    # make plot of observed burst comparison with predicted bursts:
    # get the observed bursts for comparison:
    tobs = self.bstart
    ebobs = self.fluen

    plt.figure(figsize=(10, 7))
    plt.scatter(tobs, ebobs, color='black', marker='.', label='Observed', s=200)
    # plt.scatter(time_pred_35, e_b_pred_35, marker='*', color='cyan', s=200,
    #             label=r'2 M$_{\odot}$, R = 11.2 km')
    plt.scatter(timepred[1:], ebpred, marker='*', color='darkgrey', s=100, label='Predicted')
    # plt.scatter(time_pred_18, e_b_pred_18, marker='*', color='orange', s=200,
    #             label=r'1.4 M$_{\odot}$, R = 10 km')
    plt.errorbar(timepred[1:], ebpred,
                 yerr=[ebpred_errup, ebpred_errlow],
                 xerr=[timepred_errup[1:], timepred_errlow[1:]],
                 fmt='.', color='darkgrey')
    plt.errorbar(tobs, ebobs, fmt='.', color='black')
    plt.xlabel("Time (days after start of outburst)")
    plt.ylabel("Fluence (1e-9 erg/cm$^2$)")
    plt.legend(loc=2)
    plt.savefig(f'{self.run_id}_predictedburstscomparison.pdf')
    plt.show()

    # plot the chains:
    ndim = 10
    labels = ["$X$", "$Z$", "$Q_b$", "$f_a$", "$f_E$", "$r1$", "$r2$", "$r3$", "$M$", "$R$"]
    plt.clf()
    fig, axes = plt.subplots(ndim, 1, sharex=True, figsize=(8, 9))
    for i in range(ndim):
        axes[i].plot(sampler[:, :, i].T, color="k", alpha=0.4)
        axes[i].yaxis.set_major_locator(MaxNLocator(5))
        axes[i].set_ylabel(labels[i])
    axes[ndim - 1].set_xlabel("step number")
    plt.tight_layout(h_pad=0.0)
    plt.savefig(self.run_id + 'chain-plot.pdf')
    plt.show()
""" ====================== Cloud and Sigma Levels ====================== Choose custom sigma levels and display point cloud. In this example we display more sigma levels, turn on the point cloud, and disable the parameter summaries on the top of the marginalised distributions. Note that because of the very highly correlated distribution we have, it is useful to increase the number of bins the plots are generated with, to capture the thinness of the correlation. """ import numpy as np from numpy.random import normal, multivariate_normal from chainconsumer import ChainConsumer np.random.seed(1) cov = normal(size=(3, 3)) data = multivariate_normal(normal(size=3), np.dot(cov, cov.T), size=100000) c = ChainConsumer().add_chain(data, parameters=["$x$", "$y$", "$z$"]) c.configure(summary=False, bins=1.4, cloud=True, sigmas=np.linspace(0, 2, 10)) fig = c.plotter.plot() fig.set_size_inches(4.5 + fig.get_size_inches()) # Resize fig for doco. You don't need this.
def test_marker(self):
    loc = [0, 1, 2]
    c = ChainConsumer()
    c.add_marker(loc)
    assert np.all(np.equal(loc, c.chains[0].chain[0, :]))
def test_shade_alpha_algorithm1(self):
    consumer = ChainConsumer()
    consumer.add_chain(self.data)
    consumer.configure()
    alpha = consumer.chains[0].config["shade_alpha"]
    assert alpha == 1.0
def test_get_chain_name(self):
    c = ChainConsumer()
    c.add_chain(self.data, name="A")
    assert c._get_chain_name(0) == "A"
# -*- coding: utf-8 -*-
"""
==================
Plot Distributions
==================

If you want a fast check of your distributions for high dimensional spaces
(such that you can only generate surfaces for a subset of parameters), you
can simply plot all of the marginalised distributions using this method.
"""

import numpy as np
from numpy.random import random, multivariate_normal
from chainconsumer import ChainConsumer

np.random.seed(0)
means, cov = np.arange(8), random(size=(8, 8))
data = multivariate_normal(means, np.dot(cov, cov.T), size=1000000)

params = ["$x$", "$y$", "$z$", "a", "b", "c", "d", "e"]
c = ChainConsumer().add_chain(data, parameters=params)
fig = c.plotter.plot_distributions(truth=means)

fig.set_size_inches(3 + fig.get_size_inches())  # Resize fig for doco. You don't need this.
""" #plot this initial posterior/new prior c = ChainConsumer() c.add_chain(sampler0.flatchain, parameters=[r"$\Omega_m$", r"$\sigma_8$"]) c.configure(kde=1.7) #fig = c.plotter.plot_walks(display=True,truth=[0.3,0.8]) fig = c.plotter.plot(display=True,truth=[0.3,0.8], filename = outfile + "_iterate_0.png") plt.close() """ #iterate, using posterior from previous iteration as new prior for i in range(1,nReal): sampler = emcee.EnsembleSampler(nwalkers, ndim, post_new, args=(y_arr[:,0], prior_new, cov, length, c_raw)) pos, prob, state = sampler.run_mcmc(p0, burn) sampler.reset() sampler.run_mcmc(p0, steps) prior_new = sampler print("iteration " + str(i+1) + " of " + str(nReal) + " complete") #plot initial and final posteriors using chainconsumer c = ChainConsumer() c.add_chain(sampler.flatchain, parameters=[r"$\Omega_m$", r"$\sigma_8$"], name = str(nReal)+"th") c.add_chain(sampler0.flatchain, parameters=[r"$\Omega_m$", r"$\sigma_8$"], name = "First") c.configure(kde=1.7) fig = c.plotter.plot(display=True,truth=[0.3,0.8], filename = outfile + "_" + str(nReal) + "_iterate.png") fig = c.plotter.plot_summary(display=True,truth=[0.3,0.8], filename = outfile + "_" + str(nReal) + "_iterate_summary.png") #timekeeping print("My program took", time.time() - start_time, "s to run")
cs = ["#262232", "#116A71", "#48AB75", "#b7c742"] for r in [True]: t = "Recon" if r else "Prerecon" datae = PowerSpectrum_SDSS_DR12_Z061_NGC(recon=r, min_k=0.03, max_k=0.30, postprocess=p) for ls, n in zip(["-", ":"], ["", " (No Poly)"]): if n: fix = ["om", "f", "a1", "a2", "a3", "a4", "a5"] else: fix = ["om", "f"] fitter.add_model_and_dataset(PowerBeutler2017(postprocess=p, recon=r, fix_params=fix), datae, name=f"Beutler 2017{n}", linestyle=ls, color=cs[0]) fitter.add_model_and_dataset(PowerSeo2016(postprocess=p, recon=r, fix_params=fix), datae, name=f"Seo 2016{n}", linestyle=ls, color=cs[1]) fitter.add_model_and_dataset(PowerDing2018(postprocess=p, recon=r, fix_params=fix), datae, name=f"Ding 2018{n}", linestyle=ls, color=cs[2]) fitter.add_model_and_dataset(PowerNoda2019(postprocess=p, recon=r), datae, name=f"Noda 2019", color=cs[3]) sampler = DynestySampler(temp_dir=dir_name, nlive=300) fitter.set_sampler(sampler) fitter.set_num_walkers(10) fitter.fit(file) if fitter.should_plot(): from chainconsumer import ChainConsumer c = ChainConsumer() for posterior, weight, chain, evidence, model, data, extra in fitter.load(): c.add_chain(chain, weights=weight, parameters=model.get_labels(), **extra) c.configure(shade=True, bins=30, legend_artists=True) c.analysis.get_latex_table(filename=pfn + "_params.txt") c.plotter.plot_summary(filename=pfn + "_summary.png", errorbar=True, truth={"$\\Omega_m$": 0.31, "$\\alpha$": 0.9982}) c.plotter.plot(filename=pfn + "_contour.png", truth={"$\\Omega_m$": 0.31, "$\\alpha$": 0.9982})
""" ======================== Gaussian KDE and Extents ======================== Smooth marginalised distributions with a Gaussian KDE, and pick custom extents. Note that invoking the KDE on large data sets will significantly increase rendering time when you have a large number of points. You can also pass a float to your KDE to modify the width of the bandpass by that factor! You can see the increase in contour smoothness (without broadening) for when you have a low number of samples in your chains! """ import numpy as np from chainconsumer import ChainConsumer np.random.seed(0) data = np.random.multivariate_normal([0.0, 4.0], [[1.0, -0.7], [-0.7, 1.5]], size=3000) c = ChainConsumer() c.add_chain(data, name="KDE on") c.add_chain(data + 1, name="KDE off") c.add_chain(data + 2, name="KDE x2!") c.configure(kde=[True, False, 2.0], shade_alpha=0.1, flip=False) fig = c.plotter.plot(extents=[(-2, 4), (0, 9)]) fig.set_size_inches(4.5 + fig.get_size_inches()) # Resize fig for doco. You don't need this.
""" ============== Shade Gradient ============== Control contour contrast! To help make your confidence levels more obvious, you can play with the gradient steepness and resulting contrast in your contours. """ import numpy as np from numpy.random import multivariate_normal from chainconsumer import ChainConsumer np.random.seed(0) data1 = multivariate_normal([0, 0], [[1, 0], [0, 1]], size=1000000) data2 = multivariate_normal([4, -4], [[1, 0], [0, 1]], size=1000000) c = ChainConsumer() c.add_chain(data1, parameters=["$x$", "$y$"]) c.add_chain(data2, parameters=["$x$", "$y$"]) c.configure(shade_gradient=[0.1, 3.0], colors=['o', 'k'], sigmas=[0, 1, 2, 3], shade_alpha=1.0) fig = c.plotter.plot() fig.set_size_inches(4.5 + fig.get_size_inches()) # Resize fig for doco. You don't need this.
pfn, dir_name, file = setup(__file__)

model = TestModel()
data = TestDataset()
sampler = DynestySampler(temp_dir=dir_name, max_iter=None)

fitter = Fitter(dir_name)
fitter.add_model_and_dataset(model, data)
fitter.set_sampler(sampler)
fitter.set_num_walkers(1)
fitter.fit(file)

if fitter.should_plot():
    res, = fitter.load()
    posterior, weight, chain, evidence, model, data, extra = res

    import matplotlib.pyplot as plt
    fig, ax = plt.subplots(nrows=2)
    ax[0].plot(weight)
    ax[1].plot(evidence)
    plt.show()

    from chainconsumer import ChainConsumer
    c = ChainConsumer()
    c.add_chain(chain, weights=weight, parameters=model.get_labels())
    c.plotter.plot(filename=pfn + "_contour.png")
# Everything below is nasty plotting code
###########################################################

if fitter.should_plot():
    import logging
    logging.info("Creating plots")

    from chainconsumer import ChainConsumer

    output = {}
    print(allnames)
    for name in allnames:
        fitname = " ".join(name.split()[:8])
        output[fitname] = []

    c = ChainConsumer()
    counter = 0
    truth = {"$\\Omega_m$": 0.3121,
             "$\\alpha$": 1.0,
             "$\\epsilon$": 0,
             "$\\alpha_\\perp$": 1.0,
             "$\\alpha_\\parallel$": 1.0}
    for posterior, weight, chain, evidence, model, data, extra in fitter.load():
        kmax = extra["name"].split(" ")[-1][9:-1]
        fitname = " ".join(extra["name"].split()[:8])
        color = plt.colors.rgb2hex(cmap(float(counter) / (len(kmaxs) - 1)))
import numpy as np
import os
import sys
from chainconsumer import ChainConsumer
import matplotlib.pyplot as plt

true = [0.295, .8344]
true = [0.3156, .831]

c = ChainConsumer()
names = ['$\\Omega_m$', '$h$', '$w$', '$S_8$']  # ,'$h_0$','$\\Omega_b$','$n_s$','$A_s$'

dfile = '2pt_NG.fits_d_w_chain.txt'  # DES Y1 data
gwfile = 'maria_o3_sim_test.txt'  # GW simulation O3
# gwfile = 'gw170817-holz_joint.txt'
# dfile = 'chains/2pt_sim_1110_baseline_Y3cov.fits_chain_d_w.txt'
# gwfile = 'marcelle_Y3_forcast.txt'

print()

r1 = 0 * np.random.random()
r2 = 0 * np.random.random()
r3 = 0 * np.random.random()

chaindir = '/Users/maria/current-work/des-gw/MainPaper/'
# chaindir = '/data/des41.a/data/alyssag/cosmosis/gw/'

## DES Y3
print("--- Using file: ", dfile)
file = chaindir + dfile
f = open(file)
for i, line in enumerate(f):
def test_geweke_index():
    data = np.vstack((np.random.normal(loc=0.0, size=100000),
                      np.random.normal(loc=1.0, size=100000))).T
    consumer = ChainConsumer()
    consumer.add_chain(data, walkers=20, name="c1")
    assert consumer.diagnostic.geweke(chain=0)
""" =================== Change Font Options =================== Control tick rotation and font sizes. Here the tick rotation has been turned off, ticks made smaller, more ticks added, and label size increased! """ import numpy as np from numpy.random import multivariate_normal from chainconsumer import ChainConsumer np.random.seed(0) data = multivariate_normal([0, 1, 2], np.eye(3) + 0.2, size=100000) # If you pass in parameter labels and only one chain, you can also get parameter bounds c = ChainConsumer() c.add_chain(data, parameters=["$x$", "$y^2$", r"$\Omega_\beta$"], name="Example") c.configure(diagonal_tick_labels=False, tick_font_size=8, label_font_size=25, max_ticks=8) fig = c.plotter.plot(figsize="column", legend=True) fig.set_size_inches(4.5 + fig.get_size_inches()) # Resize fig for doco. You don't need this.
""" =================== Two Disjoint Chains =================== You can plot multiple chains. They can even have different parameters! """ import numpy as np from numpy.random import normal, multivariate_normal from chainconsumer import ChainConsumer np.random.seed(0) cov = normal(size=(3, 3)) cov2 = normal(size=(4, 4)) data = multivariate_normal(normal(size=3), np.dot(cov, cov.T), size=100000) data2 = multivariate_normal(normal(size=4), np.dot(cov2, cov2.T), size=100000) c = ChainConsumer() c.add_chain(data, parameters=["$x$", "$y$", r"$\alpha$"]) c.add_chain(data2, parameters=["$x$", "$y$", r"$\alpha$", r"$\gamma$"]) fig = c.plotter.plot() fig.set_size_inches( 4.5 + fig.get_size_inches()) # Resize fig for doco. You don't need this.
converged and mixed. Below is an example walk from a Metropolis-Hastings run,
where we have set the optional parameters for the weights and posteriors,
giving the top two subplots.

.. figure:: ../../examples/resources/exampleWalk.png
   :align: center

"""

###############################################################################
# To generate your own walk, with a 100 point smoothed walk overplotting,
# you can use the following code:

import numpy as np
from chainconsumer import ChainConsumer

if __name__ == "__main__":
    np.random.seed(0)
    data1 = np.random.randn(100000, 2)
    data2 = np.random.randn(100000, 2) - 2
    data1[:, 1] += 1

    c = ChainConsumer()
    c.add_chain(data1, parameters=["$x$", "$y$"])
    c.add_chain(data2, parameters=["$x$", "$z$"])

    fig = c.plotter.plot_walks(truth={"$x$": -1, "$y$": 1, "$z$": -2}, convolve=100)

    fig.set_size_inches(4.5 + fig.get_size_inches())  # Resize fig for doco. You don't need this.
def test_remove_multiple_chains3(self):
    tolerance = 5e-2
    consumer = ChainConsumer()
    consumer.add_chain(self.data * 2, parameters=["p1"], name="a")
    consumer.add_chain(self.data, parameters=["p2"], name="b")
    consumer.add_chain(self.data * 3, parameters=["p3"], name="c")
    consumer.remove_chain(chain=["a", 2])
    consumer.configure()
    summary = consumer.analysis.get_summary()
    assert isinstance(summary, dict)
    assert "p2" in summary
    assert "p1" not in summary
    assert "p3" not in summary
    actual = np.array(list(summary.values())[0])
    expected = np.array([3.5, 5.0, 6.5])
    diff = np.abs(expected - actual)
    assert np.all(diff < tolerance)
""" ############################################################################### # You can specify truth values using a list (in the same order as the # declared parameters). import numpy as np from numpy.random import normal, multivariate_normal from chainconsumer import ChainConsumer np.random.seed(2) cov = 0.2 * normal(size=(3, 3)) + np.identity(3) truth = normal(size=3) data = multivariate_normal(truth, 0.5 * (cov + cov.T), size=100000) c = ChainConsumer().add_chain(data, parameters=["$x$", "$y$", r"$\beta$"]) fig = c.plot(truth=truth) fig.set_size_inches( 2.5 + fig.get_size_inches()) # Resize fig for doco. You don't need this. ############################################################################### # Or you can specify truth values using a dictionary. This allows you to specify # truth values for only some parameters. You can also customise the look # of your truth lines. c.configure_truth(color='w', ls=":", alpha=0.8) fig2 = c.plot(truth={"$x$": truth[0], "$y$": truth[1]}) fig2.set_size_inches( 2.5 + fig2.get_size_inches()) # Resize fig for doco. You don't need this.
def test_remove_multiple_chains_fails(self):
    with pytest.raises(AssertionError):
        ChainConsumer().add_chain(self.data).remove_chain(chain=[0, 0])
context to other surfaces, we can select that point to give a colour mapped
scatter plot. We can *also* display this as a posterior surface by setting
`plot_colour_params=True`, if we wanted.
"""

import numpy as np
from numpy.random import normal, multivariate_normal
from chainconsumer import ChainConsumer

np.random.seed(1)
cov = normal(size=(3, 3))
data = multivariate_normal(normal(size=3), np.dot(cov, cov.T), size=100000)

c = ChainConsumer().add_chain(data, parameters=["$x$", "$y$", "$z$"])
c.configure(color_params="$z$")
fig = c.plotter.plot(figsize=1.0)
fig.set_size_inches(4.5 + fig.get_size_inches())  # Resize fig for doco. You don't need this.

###############################################################################
# You can also plot the weights or posterior if they are specified. Showing weights here.

weights = 1 / (1 + data[:, 0]**2 + data[:, 1]**2)
c = ChainConsumer().add_chain(data[:, :2], parameters=["$x$", "$y$"], weights=weights)
c.configure(color_params="weights")
fig = c.plotter.plot(figsize=3.0)
fig.set_size_inches(4.5 + fig.get_size_inches())  # Resize fig for doco. You don't need this.
def test_get_names(self):
    c = ChainConsumer()
    c.add_chain(self.data, name="A")
    c.add_chain(self.data, name="B")
    assert c._all_names() == ["A", "B"]
# -*- coding: utf-8 -*-
"""
===================
Change Font Options
===================

Control tick rotation and font sizes.

Here the tick rotation has been turned off, ticks made smaller,
more ticks added, and label size increased!
"""

import numpy as np
from numpy.random import multivariate_normal
from chainconsumer import ChainConsumer

np.random.seed(0)
data = multivariate_normal([0, 1, 2], np.eye(3) + 0.2, size=100000)

# If you pass in parameter labels and only one chain, you can also get parameter bounds
c = ChainConsumer()
c.add_chain(data, parameters=["$x$", "$y^2$", r"$\Omega_\beta$"], name="Example")
c.configure(diagonal_tick_labels=False, tick_font_size=8, label_font_size=25, max_ticks=8)
fig = c.plotter.plot(figsize="column", legend=True)

fig.set_size_inches(3 + fig.get_size_inches())  # Resize fig for doco. You don't need this.
def test_summary_bad_input5(self):
    with pytest.raises(AssertionError):
        ChainConsumer().add_chain(self.data).configure(summary_area=-0.2)
                  c1.samples['intrinsic_alignment_parameters--a1bin1'],
                  c1.samples['intrinsic_alignment_parameters--a1bin2'],
                  c1.samples['intrinsic_alignment_parameters--a1bin3'],
                  c1.samples['intrinsic_alignment_parameters--a1bin4']])
samp2 = np.array([c2.samples['cosmological_parameters--s8'],
                  c2.samples['intrinsic_alignment_parameters--a1bin1'],
                  c2.samples['intrinsic_alignment_parameters--a1bin2'],
                  c2.samples['intrinsic_alignment_parameters--a1bin3'],
                  c2.samples['intrinsic_alignment_parameters--a1bin4']])

print('Making cornerplot...')
cc = ChainConsumer()
names = ['$S_8$', r'$A^{1}_1$', r'$A^{2}_1$', r'$A^{3}_1$', r'$A^{4}_1$']
cc.add_chain(samp1.T, parameters=names, weights=c1.weight, kde=True, name=r'$1\times2$pt')
cc.add_chain(samp2.T, parameters=names, weights=c2.weight, kde=True, name=r'$2\times2$pt')
# In[27]:

params = ["Glucose", "BMI"]
m = diabetesDF["Outcome"] == 1
diabetic = diabetesDF.loc[m, params].values
non_diabetic = diabetesDF.loc[~m, params].values  # ~ inverts the boolean mask
non_diabetic.shape

# In[28]:

from chainconsumer import ChainConsumer

c = ChainConsumer()
c.add_chain(diabetic, parameters=params, name="Diabetic", kde=1.0, color="b")
c.add_chain(non_diabetic, parameters=params, name="Non Diabetic", kde=1.0, color="r")
c.configure(contour_labels="confidence", usetex=False)
c.plotter.plot(figsize=3.0);

# * It gives you two contours for each chain: one for the 68% confidence
#   interval and the other for the 95% confidence interval.
#
# * This is useful when doing hypothesis testing.
#
# * For example, if you randomly pick a diabetic person from a different data
#   sample, you would expect their Glucose and BMI to lie within the 68%
#   contour about 68% of the time, and within the 95% contour about 95% of
#   the time.
#
# * This is useful when you would like to check whether a data point comes
#   from this distribution or not.
#
# * For example, you can check where the data point lies and estimate the
#   chance of it being from a diabetic or non-diabetic person.
""" import numpy as np from numpy.random import normal, multivariate_normal from chainconsumer import ChainConsumer if __name__ == "__main__": np.random.seed(2) cov = normal(size=(2, 2)) + np.identity(2) d1 = multivariate_normal(normal(size=2), 0.5 * (cov + cov.T), size=100000) cov = normal(size=(2, 2)) + np.identity(2) d2 = multivariate_normal(normal(size=2), 0.5 * (cov + cov.T), size=100000) cov = normal(size=(2, 2)) + np.identity(2) d3 = multivariate_normal(normal(size=2), 0.5 * (cov + cov.T), size=1000000) c = ChainConsumer() c.add_chain(d1, parameters=["$x$", "$y$"]) c.add_chain(d2) c.add_chain(d3) c.configure(linestyles=["-", "--", "-"], linewidths=[1.0, 3.0, 1.0], bins=[3.0, 1.0, 1.0], colors=["#1E88E5", "#D32F2F", "#111111"], smooth=[0, 1, 2], shade=[True, True, False], shade_alpha=[0.2, 0.1, 0.0], bar_shade=[True, False, False]) fig = c.plotter.plot() fig.set_size_inches(
    hs = h * H * ez[-1]
    rs_fid = get_r_s([0.273])[0]
    daval = (alpha / (1 + epsilon)) * da / rs_fid
    hrc = hs * rs_fid / (alpha * (1 + epsilon) * (1 + epsilon)) / c
    res = np.vstack((omch2, daval, z / hrc)).T
    return res

p1 = [r"$\Omega_c h^2$", r"$\alpha$", r"$\epsilon$"]
p2 = [r"$\Omega_c h^2$", r"$D_A(z)/r_s$", r"$cz/H(z)/r_s $"]

if False:
    consumer = ChainConsumer()
    consumer.configure_contour(sigmas=[0, 1.3])
    consumer.add_chain(load_directory("../bWigMpBin/bWigMpBin_z0"), parameters=p1, name="$0.2<z<0.6$")
    consumer.add_chain(load_directory("../bWigMpBin/bWigMpBin_z1"), parameters=p1, name="$0.4<z<0.8$")
    consumer.add_chain(load_directory("../bWigMpBin/bWigMpBin_z2"), parameters=p1, name="$0.6<z<1.0$")
    consumer.plot(figsize="column", filename="wigglez_multipole_alphaepsilon.pdf",
                  truth=[0.113, 1.0, 0.0])
    print(consumer.get_latex_table())

if True:
    c = ChainConsumer()
    c.configure_contour(sigmas=[0, 1, 2])
    c.add_chain(convert_directory("../bWigMpBin/bWigMpBin_z0", 0.44), parameters=p2, name="$0.2<z<0.6$")
    c.add_chain(convert_directory("../bWigMpBin/bWigMpBin_z1", 0.60), parameters=p2, name="$0.4<z<0.8$")
    c.add_chain(convert_directory("../bWigMpBin/bWigMpBin_z2", 0.73), parameters=p2, name="$0.6<z<1.0$")
    print(c.get_latex_table())
    # c.plot(figsize="column", filename="wigglez_multipole_dah.pdf")
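# The function tail above follows the standard BAO alpha-epsilon
# parameterisation (a reading of the code, not a new convention), in which
# the isotropic dilation alpha and anisotropy epsilon map onto transverse
# and line-of-sight scalings:
#
#     \alpha_\perp = \frac{\alpha}{1+\epsilon}, \qquad
#     \alpha_\parallel = \alpha (1+\epsilon)^2
#
# so D_A(z)/r_s is scaled by \alpha_\perp (the `daval` line) and
# cz/H(z)/r_s by \alpha_\parallel (the `z / hrc` line).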
# 'out_redmagic_high_mock_baseline-gp1-gg1-pp1-multinest-fidcov-ta-y3fid-6rpmin_200rpmax-rsd1-lens0-mag0-phot1.txt'
samp1 = np.array([c1.samples['cosmological_parameters--w'],
                  c1.samples['cosmological_parameters--omega_m'],
                  c1.samples['cosmological_parameters--s8'],
                  (np.random.rand(len(c1.samples['cosmological_parameters--s8'])) - 0.5) * 10.5,
                  (np.random.rand(len(c1.samples['cosmological_parameters--s8'])) - 0.5) * 10.5])
samp2 = np.array([c2.samples['cosmological_parameters--w'],
                  c2.samples['cosmological_parameters--omega_m'],
                  c2.samples['cosmological_parameters--s8'],
                  (np.random.rand(len(c2.samples['cosmological_parameters--s8'])) - 0.5) * 10.5,
                  (np.random.rand(len(c2.samples['cosmological_parameters--s8'])) - 0.5) * 10.5])
samp3 = np.array([c3.samples['cosmological_parameters--w'],
                  c3.samples['cosmological_parameters--omega_m'],
                  c3.samples['cosmological_parameters--s8'],
                  c3.samples['intrinsic_alignment_parameters--a1'],
                  c3.samples['intrinsic_alignment_parameters--a2']])
# samp4 = np.array([c4.samples['cosmological_parameters--w0'],
#                   c4.samples['cosmological_parameters--omega_m'],
#                   c4.samples['cosmological_parameters--s8'],
#                   c4.samples['intrinsic_alignment_parameters--a1'],
#                   c4.samples['intrinsic_alignment_parameters--a2']])

print('Making cornerplot...')
cc = ChainConsumer()
names = ['$w$', r'$\Omega_{\rm m}$', '$S_8$', r'$A_1$', r'$A_2$']
cc.add_chain(samp1.T, parameters=names, weights=c1.weight, kde=True,
             name=r'Low-z (Pantheon SN + eBOSS BAO)')
# cc.add_chain(samp2.T, parameters=names, weights=c2.weight, kde=True, name=r'Planck 15')
cc.add_chain(samp3.T, parameters=names, weights=c3.weight, kde=True, name=r'DES Y3 $1 \times 2$pt')
# cc.add_chain(samp4.T, parameters=names, weights=c4.weight, kde=True, name=r'')
cc.configure(colors=['#5C1F84', '#F9ACCC', '#000000', '#4682b4'],
             shade=[True, True, True, False] * 3,
             shade_alpha=[0.65, 0.65, 0.5, 0.5],
             kde=[2] * 4,
             legend_kwargs={"loc": "upper right", "fontsize": 16},
             label_font_size=16, tick_font_size=14)
# cc.configure(colors=['#800080', '#800080', '#FF1493', '#000000'],
#              shade=[False, True, True, False], shade_alpha=[0.25, 0.25, 0.25],
#              max_ticks=4, kde=[6]*5, linestyles=["-", "-", '-.', '--'],
#              legend_kwargs={"loc": "upper right", "fontsize": 14},
#              label_font_size=14, tick_font_size=14)
plt.close()