def get_overall_best_fit(self, pars, **specargs):
    """Returns the overall best fit, the chi-square and the N.d.o.f."""
    if isinstance(pars, str):
        pars = [pars]

    for s, v in enumerate(self.p.get("data_vectors")):
        d = DataManager(self.p, v, self.cosmo, all_data=False)
        self.d = d
        lik = Likelihood(self.p.get('params'),
                         d.data_vector, d.covar, self._th,
                         template=d.templates)
        sam = Sampler(lik.lnprob, lik.p0, lik.p_free_names,
                      self.p.get_sampler_prefix(v['name']),
                      self.p.get('mcmc'))

        # Best-fit parameters from the highest-probability sample
        sam.get_chain()
        sam.update_p0(sam.chain[np.argmax(sam.probs)])
        kwargs = lik.build_kwargs(sam.p0)

        # Mean redshift of the tomographic bin
        w = kwargs["width"]
        zz, NN = self._get_dndz(d.tracers[0][0].dndz, w)
        zmean = np.average(zz, weights=NN)
        kwargs["z"] = zmean

        # Goodness of fit: chi^2, N.d.o.f. and probability-to-exceed
        kwargs["chi2"] = lik.chi2(sam.p0)
        all_pars = self.p.get("params")
        n_free = np.sum([param["vary"] for param in all_pars
                         if "vary" in param])
        kwargs["dof"] = len(lik.dv) - n_free
        kwargs["PTE"] = 1 - chi2.cdf(kwargs["chi2"], kwargs["dof"])

        # Stack the per-bin results into a tomographic dictionary
        if s == 0:
            keys = ["z", "chi2", "dof", "PTE"] + pars
            OV_BF = {k: kwargs[k] for k in keys}
        else:
            for k in keys:
                OV_BF[k] = np.vstack((OV_BF[k], kwargs[k]))
    return OV_BF
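# A minimal standalone sketch of the goodness-of-fit statistic computed
# above: the probability-to-exceed (PTE) is one minus the chi-square CDF
# evaluated at the best-fit chi^2, with
# N.d.o.f. = (data-vector length) - (number of free parameters).
# The numbers below are illustrative placeholders, not from any real fit.
import numpy as np
from scipy.stats import chi2

chi2_bf = 95.3   # hypothetical best-fit chi-square
n_data = 100     # hypothetical data-vector length
n_free = 6       # hypothetical number of free parameters

dof = n_data - n_free
pte = 1 - chi2.cdf(chi2_bf, dof)   # equivalently: chi2.sf(chi2_bf, dof)
print("chi2/dof = %.1f/%d, PTE = %.3f" % (chi2_bf, dof, pte))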
        if vv['name'] == sample:
            v = vv

    dat = DataManager(p, v, cosmo, all_data=False)
    gat = DataManager(p, v, cosmo, all_data=True)

    def th(pars, d):
        """Theory prediction for a given data manager and parameter set."""
        if not cosmo_vary:
            cosmo_fid = cosmo
            hmc_fid = hmc
        else:
            cosmo_fid = COSMO_ARGS(pars)
            hmc_fid = get_hmcalc(cosmo_fid, **pars)
        return get_theory(p, d, cosmo_fid, hmc_fid,
                          hm_correction=hm_correction,
                          selection=None, **pars)

    likd = Likelihood(p.get('params'), dat.data_vector, dat.covar, th,
                      template=dat.templates)
    likg = Likelihood(p.get('params'), gat.data_vector, gat.covar, th,
                      template=gat.templates)
    sam = Sampler(likd.lnprob, likd.p0, likd.p_free_names,
                  p.get_sampler_prefix(v['name']), p.get('mcmc'))

    # Best-fit parameters and the corresponding theory curve
    sam.get_chain()
    sam.update_p0(sam.chain[np.argmax(sam.probs)])
    params = likd.build_kwargs(sam.p0)
    clth = th(params, gat)[:len(ls)]

    # Smooth the theory curve onto a finer multipole grid
    # (quadratic interpolation in log-log space)
    l = np.geomspace(ls[0], ls[-1], 1024)
    clthp = np.exp(interp1d(np.log(ls), np.log(clth),
                            kind='quadratic')(np.log(l)))

    # grey boundaries
    z, nz = np.loadtxt(dat.tracers[0][0].dndz, unpack=True)
    zmean = np.average(z, weights=nz)
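# A minimal sketch of the smoothing step used above: a theory curve
# sampled at the measured bandpowers `ls` is interpolated quadratically
# in log-log space onto a finer multipole grid, which keeps the
# interpolant positive. The arrays here are synthetic placeholders.
import numpy as np
from scipy.interpolate import interp1d

ls = np.array([10., 30., 90., 270., 810.])   # hypothetical bandpowers
clth = 1e-5 * (ls / 100.) ** -1.5            # hypothetical theory C_ell

l_fine = np.geomspace(ls[0], ls[-1], 1024)
clth_fine = np.exp(interp1d(np.log(ls), np.log(clth),
                            kind='quadratic')(np.log(l_fine)))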
def get_chains(self, pars, **specargs):
    """Returns a dictionary containing the chains of `pars`."""

    def bias_one(p0, num):
        """Calculates the halo model bias for a set of parameters."""
        bb = hm_bias(self.cosmo, 1 / (1 + zarr),
                     d.tracers[num][1].profile,
                     **lik.build_kwargs(p0))
        return bb

    def bias_avg(num, skip):
        """Calculates the halo model bias of a profile, from a chain."""
        # from pathos.multiprocessing import ProcessingPool as Pool
        # with Pool() as pool:
        #     bb = pool.map(lambda p0: bias_one(p0, num),
        #                   sam.chain[::skip])
        bb = list(map(lambda p0: bias_one(p0, num), sam.chain[::skip]))
        bb = np.mean(np.array(bb), axis=1)
        return bb

    # path to chain
    fname = lambda s: self.p.get("global")["output_dir"] + "/sampler_" + \
                      self.p.get("mcmc")["run_name"] + "_" + s + "_chain"

    if isinstance(pars, str):
        pars = [pars]

    # Load any chains that have already been computed and saved
    preCHAINS = {}
    fid_pars = pars.copy()
    for par in pars:
        try:
            preCHAINS[par] = np.load(fname(par) + ".npy")
            fid_pars.remove(par)
            print("Found saved chains for %s." % par)
        except FileNotFoundError:
            continue

    if ("bg" in fid_pars) or ("by" in fid_pars):
        # skip samples (hm_bias is computationally expensive)
        b_skip = specargs.get("reduce_by_factor")
        if b_skip is None:
            print("'reduce_by_factor' not given. Defaulting to 100.")
            b_skip = 100

    for s, v in enumerate(self.p.get("data_vectors")):
        d = DataManager(self.p, v, self.cosmo, all_data=False)
        self.d = d
        lik = Likelihood(self.p.get('params'),
                         d.data_vector, d.covar, self._th,
                         template=d.templates)
        sam = Sampler(lik.lnprob, lik.p0, lik.p_free_names,
                      self.p.get_sampler_prefix(v['name']),
                      self.p.get('mcmc'))

        sam.get_chain()
        chains = lik.build_kwargs(sam.chain.T)

        sam.update_p0(sam.chain[np.argmax(sam.probs)])
        kwargs = lik.build_kwargs(sam.p0)
        w = kwargs["width"]
        zz, NN = self._get_dndz(d.tracers[0][0].dndz, w)
        zmean = np.average(zz, weights=NN)
        chains["z"] = zmean

        if "probs" in pars:
            chains["probs"] = sam.probs

        if ("by" in fid_pars) or ("bg" in fid_pars):
            sigz = np.sqrt(np.sum(NN * (zz - zmean)**2) / np.sum(NN))
            zarr = np.linspace(zmean - sigz, zmean + sigz, 10)
            if "bg" in pars:
                chains["bg"] = bias_avg(num=0, skip=b_skip)
            if "by" in pars:
                chains["by"] = bias_avg(num=1, skip=b_skip)

        # Construct tomographic dictionary
        if s == 0:
            keys = ["z"] + fid_pars
            CHAINS = {k: chains[k] for k in keys}
        else:
            for k in keys:
                CHAINS[k] = np.vstack((CHAINS[k], chains[k]))

    # save bias chains to save time if not already saved
    if "bg" in fid_pars:
        np.save(fname("bg"), CHAINS["bg"])
    if "by" in fid_pars:
        np.save(fname("by"), CHAINS["by"])

    return {**preCHAINS, **CHAINS}
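# A minimal sketch of the load-or-compute caching pattern used above for
# expensive derived chains: try to read a saved .npy file and only
# recompute (then save) on a FileNotFoundError. The helper name and the
# file name below are hypothetical stand-ins.
import numpy as np

def load_or_compute(fname, compute):
    """Return cached array if present, otherwise compute and cache it."""
    try:
        arr = np.load(fname + ".npy")
        print("Found saved chains in %s.npy" % fname)
    except FileNotFoundError:
        arr = compute()
        np.save(fname, arr)
    return arr

# bg = load_or_compute("output/sampler_run_bg_chain",   # hypothetical path
#                      lambda: compute_bias_chain())     # hypothetical helper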
    # Theory predictor wrapper
    def th(kwargs):
        """Theory for free cosmology."""
        cosmo_use = p.get_cosmo(pars=kwargs)  # optimized internally
        return get_theory(p, d, cosmo_use, hmc,
                          hm_correction=hm_correction,
                          **kwargs)

    # Set up likelihood
    lik = Likelihood(p.get('params'), d.data_vector, d.covar, th,
                     template=d.templates, debug=p.get('mcmc')['debug'])
    # Set up sampler
    p0 = p.get_map_p0(lik.p_free_names)  # p0 for particular map
    sam = Sampler(lik.lnprob, p0, lik.p_free_names,
                  p.get_sampler_prefix(v['name']), p.get('mcmc'))

    # print(dict(zip(lik.p_free_names, p0)))
    # print("chisq:", lik.chi2(p0))
    # exit(1)

    # Compute best fit and covariance
    if not sam.read_properties():
        print(" Computing best-fit and covariance")
        sam.get_best_fit(update_p0=True)
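# A minimal sketch of what a best-fit-and-covariance step like
# `sam.get_best_fit` might do internally (the Sampler class itself is
# not shown in these excerpts, so this is an assumption): minimize the
# negative log-posterior with BFGS and take the inverse Hessian as a
# Gaussian estimate of the parameter covariance.
import numpy as np
from scipy.optimize import minimize

def lnprob(p):
    """Hypothetical 2-parameter Gaussian log-posterior."""
    return -0.5 * np.sum((p - np.array([1.0, 2.0])) ** 2)

res = minimize(lambda p: -lnprob(p), x0=np.zeros(2), method="BFGS")
p_best = res.x        # best-fit parameters
covar = res.hess_inv  # inverse Hessian ~ parameter covariance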
        if not cosmo_vary:
            pars_fid = pars
            cosmo_fid = cosmo
            hmc_fid = hmc
        else:
            pars_fid = {**p.get_cosmo_pars(), **pars}
            cosmo_fid = COSMO_ARGS(pars_fid)
            hmc_fid = get_hmcalc(cosmo_fid, **pars_fid)
        return get_theory(p, d, cosmo_fid, hmc_fid,
                          return_separated=False,
                          hm_correction=None,
                          include_1h=False, include_2h=True,
                          **pars_fid)

    # Set up likelihood
    lik = Likelihood(p.get('params'), d.data_vector, d.covar, th,
                     debug=p.get('mcmc')['debug'])
    # Set up sampler
    sam = Sampler(lik.lnprob, lik.p0, lik.p_free_names,
                  p.get_sampler_prefix(v['name']), p.get('mcmc'))
    # Read chains and best-fit
    sam.get_chain()
    sam.update_p0(sam.chain[np.argmax(sam.probs)])

    # Compute galaxy bias
    # zarr = np.linspace(zmean - sigz, zmean + sigz, 10)
    # bgchain = np.array([hm_bias(cosmo, 1. / (1 + zarr), d.tracers[0][0],
    #                             **(lik.build_kwargs(p0)))
    #                     for p0 in sam.chain[::100]])
    # bychain = np.array([hm_bias(cosmo, 1. / (1 + zarr), d.tracers[1][1],
    #                             **(lik.build_kwargs(p0)))
    #                     for p0 in sam.chain[::100]])
for s, v in enumerate(p.get("data_vectors")):
    # Construct data vector and covariance
    d = DataManager(p, v, cosmo, all_data=False)
    g = DataManager(p, v, cosmo, all_data=True)
    thd = thr(d)
    thg = thr(g)

    # Mean redshift of the bin
    z, nz = np.loadtxt(d.tracers[0][0].dndz, unpack=True)
    zmean = np.average(z, weights=nz)

    # Set up likelihood
    likd = Likelihood(p.get('params'), d.data_vector, d.covar, thd.th,
                      template=d.templates)
    likg = Likelihood(p.get('params'), g.data_vector, g.covar, thg.th,
                      template=g.templates)
    # Set up sampler
    sam = Sampler(likd.lnprob, likd.p0, likd.p_free_names,
                  p.get_sampler_prefix(v['name']), p.get('mcmc'))
    # Read chains and best-fit
    sam.get_chain()
    sam.update_p0(sam.chain[np.argmax(sam.probs)])
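# A minimal sketch of the redshift-distribution summary statistics used
# throughout these excerpts: the mean and width of a tomographic bin are
# the N(z)-weighted first and second moments. The file path is a
# hypothetical placeholder; a toy Gaussian N(z) stands in for real data.
import numpy as np

# z, nz = np.loadtxt("data/dndz/bin1.txt", unpack=True)  # hypothetical path
z = np.linspace(0.0, 0.5, 64)
nz = np.exp(-0.5 * ((z - 0.15) / 0.05) ** 2)             # toy N(z)

zmean = np.average(z, weights=nz)
sigz = np.sqrt(np.sum(nz * (z - zmean) ** 2) / np.sum(nz))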
def get_chains(self, pars=None, **specargs):
    """Returns a dictionary containing the chains of `pars`."""
    # if `pars` is not set, collect chains for all free parameters
    if pars is None:
        pars = [par["name"] for par in self.p.get("params")
                if par.get("vary")]

    def bias_one(p0, num):
        """Calculates the halo model bias for a set of parameters."""
        if self.cosmo_vary:
            # rebuild the cosmology from this sample's parameters
            cosmo = COSMO_ARGS(lik.build_kwargs(p0))
            hmc = get_hmcalc(cosmo,
                             **{"mass_function": self.p.get_massfunc(),
                                "halo_bias": self.p.get_halobias()})
        else:
            cosmo = self.cosmo
            hmc = self.hmc
        bb = hm_bias(cosmo, hmc, 1 / (1 + zarr),
                     d.tracers[num][1],
                     **lik.build_kwargs(p0))
        return bb

    def bias_avg(num, skip):
        """Calculates the halo model bias of a profile, from a chain."""
        from pathos.multiprocessing import ProcessingPool as Pool
        with Pool() as pool:
            bb = pool.map(lambda p0: bias_one(p0, num),
                          sam.chain[::skip])
        # bb = list(map(lambda p0: bias_one(p0, num), sam.chain[::skip]))
        bb = np.mean(np.array(bb), axis=1)
        return bb

    # path to chain
    fname = lambda s: self.p.get("global")["output_dir"] + "/sampler_" + \
                      self.p.get("mcmc")["run_name"] + "_" + s + "_chain"

    if isinstance(pars, str):
        pars = [pars]

    # Load any chains that have already been computed and saved
    preCHAINS = {}
    fid_pars = pars.copy()
    for par in pars:
        try:
            preCHAINS[par] = np.load(fname(par) + ".npy")
            fid_pars.remove(par)
            print("Found saved chains for %s." % par)
        except FileNotFoundError:
            continue

    if ("bg" in fid_pars) or ("by" in fid_pars) or ("bk" in fid_pars):
        # thin sample (for computationally expensive hm_bias)
        b_skip = specargs.get("thin")
        if b_skip is None:
            print("Chain 'thin' factor not given. Defaulting to 100.")
            b_skip = 100

    for s, v in enumerate(self.p.get("data_vectors")):
        print(v["name"])
        d = DataManager(self.p, v, all_data=False)
        self.d = d
        lik = Likelihood(self.p.get('params'),
                         d.data_vector, d.covar, self._th,
                         template=d.templates)
        sam = Sampler(lik.lnprob, lik.p0, lik.p_free_names,
                      self.p.get_sampler_prefix(v['name']),
                      self.p.get('mcmc'))

        sam.get_chain()
        chains = lik.build_kwargs(sam.chain.T)

        sam.update_p0(sam.chain[np.argmax(sam.probs)])
        kwargs = lik.build_kwargs(sam.p0)
        w = kwargs["width"]
        zz, NN = self._get_dndz(d.tracers[0][0].dndz, w)
        zmean = np.average(zz, weights=NN)
        chains["z"] = zmean

        if "probs" in pars:
            chains["probs"] = sam.probs

        if ("bg" in fid_pars) or ("by" in fid_pars) or ("bk" in fid_pars):
            sigz = np.sqrt(np.sum(NN * (zz - zmean)**2) / np.sum(NN))
            zarr = np.linspace(zmean - sigz, zmean + sigz, 10)
            if "bg" in pars:
                chains["bg"] = bias_avg(num=0, skip=b_skip)
            if "by" in pars:
                chains["by"] = bias_avg(num=1, skip=b_skip)
            if "bk" in pars:
                chains["bk"] = bias_avg(num=2, skip=b_skip)

        # Construct tomographic dictionary
        if s == 0:
            keys = ["z"] + fid_pars
            CHAINS = {k: [chains[k]] for k in keys}
        else:
            for k in keys:
                CHAINS[k].append(chains[k])

    # save bias chains to save time if not already saved
    if "bg" in fid_pars:
        np.save(fname("bg"), CHAINS["bg"])
    if "by" in fid_pars:
        np.save(fname("by"), CHAINS["by"])
    if "bk" in fid_pars:
        np.save(fname("bk"), CHAINS["bk"])

    return {**preCHAINS, **CHAINS}
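# A minimal sketch of the thinned, parallel evaluation of an expensive
# derived quantity over a chain, as in `bias_avg` above. `expensive_bias`
# is a hypothetical stand-in for `hm_bias`; note that the standard
# library Pool needs a picklable module-level function, which is why the
# code above reaches for pathos when mapping a lambda.
import numpy as np
from multiprocessing import Pool

def expensive_bias(p0):
    """Stand-in for an expensive per-sample computation over 10 z's."""
    return np.full(10, p0.sum())

if __name__ == "__main__":
    chain = np.random.rand(5000, 3)     # hypothetical (n_samples, n_pars)
    skip = 100                          # thinning factor
    with Pool() as pool:
        bb = pool.map(expensive_bias, chain[::skip])
    bb = np.mean(np.array(bb), axis=1)  # average over the z-grid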
    # Construct data vector and covariance
    d = DataManager(p, v, cosmo, jk_region=jk_region)

    # Theory predictor wrapper
    def th(pars):
        return get_theory(p, d, cosmo,
                          hm_correction=hm_correction,
                          selection=sel, **pars)

    # Set up likelihood
    lik = Likelihood(p.get('params'), d.data_vector, d.covar, th,
                     template=d.templates, debug=p.get('mcmc')['debug'])
    # Set up sampler
    sam = Sampler(lik.lnprob, lik.p0, lik.p_free_names,
                  p.get_sampler_prefix(v['name']) + "jk%d" % jk_region,
                  p.get('mcmc'))

    # Compute best fit and covariance around it
    if not sam.read_properties():
        print(" Computing best-fit and covariance")
        sam.get_best_fit(update_p0=True)
        cov0 = sam.get_covariance(update_cov=True)
        sam.save_properties()
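# A minimal sketch of how per-region best fits like the ones computed
# above are typically combined into a jackknife covariance estimate:
# C = (N-1)/N * sum_i (p_i - pbar)(p_i - pbar)^T over the N
# leave-one-region-out best fits. The best-fit array here is synthetic;
# this is the standard estimator, not code from these excerpts.
import numpy as np

p_jk = np.random.rand(20, 3)   # hypothetical: 20 regions, 3 parameters
n_jk = p_jk.shape[0]
p_mean = p_jk.mean(axis=0)
dp = p_jk - p_mean
cov_jk = (n_jk - 1) / n_jk * dp.T @ dp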
    def th2h(pars):
        """Theory prediction with the 2-halo term only."""
        return get_theory(p, d, cosmo, return_separated=False,
                          hm_correction=hm_correction, selection=sel,
                          include_2h=True, include_1h=False,
                          **pars)

    # Set up likelihood
    lik = Likelihood(p.get('params'), d.data_vector, d.covar, th,
                     debug=p.get('mcmc')['debug'])
    # Set up sampler
    sam = Sampler(lik.lnprob, lik.p0, lik.p_free_names,
                  p.get_sampler_prefix(v['name']), p.get('mcmc'))
    # Read chains and best-fit
    sam.get_chain()
    sam.update_p0(sam.chain[np.argmax(sam.probs)])

    # Compute galaxy bias over a thinned chain
    zarr = np.linspace(zmean - sigz, zmean + sigz, 10)
    bgchain = np.array([hm_bias(cosmo, 1. / (1 + zarr),
                                d.tracers[0][0].profile,
                                **lik.build_kwargs(p0))
                        for p0 in sam.chain[::100]])
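# A minimal sketch of summarizing a derived-parameter chain like
# `bgchain` above into a central value and uncertainty: average each
# sample over the redshift grid first, then take percentiles across
# samples. The chain below is a synthetic placeholder.
import numpy as np

bgchain = np.random.normal(1.2, 0.1, size=(40, 10))  # (samples, z-grid)
bg_samples = bgchain.mean(axis=1)                    # average over z
bg_best = np.median(bg_samples)
bg_lo, bg_hi = np.percentile(bg_samples, [16, 84])   # 1-sigma interval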
hm_correction = None

for v in p.get('data_vectors'):
    if v['name'] == bin_name:
        d = DataManager(p, v, cosmo)

        # Mean redshift and width of the bin
        z, nz = np.loadtxt(d.tracers[0][0].dndz, unpack=True)
        zmean = np.average(z, weights=nz)
        sigz = np.sqrt(np.sum(nz * (z - zmean)**2) / np.sum(nz))

        # Theory predictor wrapper
        def th(pars):
            return get_theory(p, d, cosmo, return_separated=False,
                              hm_correction=hm_correction,
                              selection=sel, **pars)

        lik = Likelihood(p.get('params'), d.data_vector, d.covar, th,
                         debug=p.get('mcmc')['debug'])
        sam = Sampler(lik.lnprob, lik.p0, lik.p_free_names,
                      p.get_sampler_prefix(v['name']), p.get('mcmc'))
        sam.get_chain()

        figs_ch = lik.plot_chain(sam.chain, save_figure=True,
                                 prefix='notes/paper/')
        plt.show()
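# A minimal sketch of a chain trace plot, a quick convergence check to
# complement the figure produced by `lik.plot_chain` above (whose
# internals are not shown in these excerpts). The chain and parameter
# names below are synthetic placeholders.
import numpy as np
import matplotlib.pyplot as plt

chain = np.random.randn(4000, 3).cumsum(axis=0) * 1e-2  # toy chain
names = ["M1", "Mmin", "width"]                         # hypothetical

fig, axes = plt.subplots(len(names), 1, sharex=True)
for ax, samples, name in zip(axes, chain.T, names):
    ax.plot(samples, lw=0.5)
    ax.set_ylabel(name)
axes[-1].set_xlabel("step")
plt.show()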