def get_overall_best_fit(self, pars, **specargs):
    """Returns the overall best-fit, the chi-square and the N.d.o.f."""
    if isinstance(pars, str):
        pars = [pars]

    for s, v in enumerate(self.p.get("data_vectors")):
        d = DataManager(self.p, v, self.cosmo, all_data=False)
        self.d = d
        lik = Likelihood(self.p.get('params'),
                         d.data_vector, d.covar, self._th,
                         template=d.templates)
        sam = Sampler(lik.lnprob, lik.p0, lik.p_free_names,
                      self.p.get_sampler_prefix(v['name']),
                      self.p.get('mcmc'))

        sam.get_chain()
        sam.update_p0(sam.chain[np.argmax(sam.probs)])
        kwargs = lik.build_kwargs(sam.p0)

        w = kwargs["width"]
        zz, NN = self._get_dndz(d.tracers[0][0].dndz, w)
        zmean = np.average(zz, weights=NN)
        kwargs["z"] = zmean

        kwargs["chi2"] = lik.chi2(sam.p0)
        all_pars = self.p.p.get("params")
        # number of free (varying) parameters
        n_free = np.sum([param["vary"] for param in all_pars
                         if "vary" in param])
        kwargs["dof"] = len(lik.dv) - n_free
        kwargs["PTE"] = 1 - chi2.cdf(kwargs["chi2"], kwargs["dof"])

        if s == 0:
            keys = ["z", "chi2", "dof", "PTE"] + pars
            OV_BF = {k: kwargs[k] for k in keys}
        else:
            for k in keys:
                OV_BF[k] = np.vstack((OV_BF[k], kwargs[k]))

    return OV_BF
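# Minimal standalone sketch (not part of the pipeline) of the goodness-of-fit
# summary computed above: chi^2, degrees of freedom, and the probability to
# exceed (PTE). `n_data`, `n_free` and `chi2_val` are hypothetical placeholder
# values; scipy.stats.chi2 is the same distribution used in the method.
import numpy as np
from scipy.stats import chi2 as chi2_dist

n_data, n_free = 40, 5                    # hypothetical data-vector size and free parameters
chi2_val = 42.0                           # hypothetical best-fit chi^2
dof = n_data - n_free
pte = 1 - chi2_dist.cdf(chi2_val, dof)    # small PTE flags a poor fit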
def get_chains(self, pars, **specargs):
    """Returns a dictionary containing the chains of `pars`."""

    def bias_one(p0, num):
        """Calculates the halo model bias for a set of parameters."""
        bb = hm_bias(self.cosmo, 1 / (1 + zarr),
                     d.tracers[num][1].profile,
                     **lik.build_kwargs(p0))
        return bb

    def bias_avg(num, skip):
        """Calculates the halo model bias of a profile, from a chain."""
        # from pathos.multiprocessing import ProcessingPool as Pool
        # with Pool() as pool:
        #     bb = pool.map(lambda p0: bias_one(p0, num),
        #                   sam.chain[::skip])
        bb = list(map(lambda p0: bias_one(p0, num), sam.chain[::skip]))
        bb = np.mean(np.array(bb), axis=1)
        return bb

    # path to chain
    fname = lambda s: self.p.get("global")["output_dir"] + "/sampler_" + \
                      self.p.get("mcmc")["run_name"] + "_" + s + "_chain"

    if isinstance(pars, str):
        pars = [pars]

    preCHAINS = {}
    fid_pars = pars.copy()
    for par in pars:
        try:
            preCHAINS[par] = np.load(fname(par) + ".npy")
            fid_pars.remove(par)
            print("Found saved chains for %s." % par)
        except FileNotFoundError:
            continue

    if ("bg" in fid_pars) or ("by" in fid_pars):
        # thin the chain (hm_bias is computationally expensive)
        b_skip = specargs.get("reduce_by_factor")
        if b_skip is None:
            print("'reduce_by_factor' not given. Defaulting to 100.")
            b_skip = 100

    for s, v in enumerate(self.p.get("data_vectors")):
        d = DataManager(self.p, v, self.cosmo, all_data=False)
        self.d = d
        lik = Likelihood(self.p.get('params'),
                         d.data_vector, d.covar, self._th,
                         template=d.templates)
        sam = Sampler(lik.lnprob, lik.p0, lik.p_free_names,
                      self.p.get_sampler_prefix(v['name']),
                      self.p.get('mcmc'))

        sam.get_chain()
        chains = lik.build_kwargs(sam.chain.T)

        sam.update_p0(sam.chain[np.argmax(sam.probs)])
        kwargs = lik.build_kwargs(sam.p0)
        w = kwargs["width"]
        zz, NN = self._get_dndz(d.tracers[0][0].dndz, w)
        zmean = np.average(zz, weights=NN)
        chains["z"] = zmean

        if "probs" in pars:
            chains["probs"] = sam.probs

        if ("bg" in fid_pars) or ("by" in fid_pars):
            sigz = np.sqrt(np.sum(NN * (zz - zmean)**2) / np.sum(NN))
            zarr = np.linspace(zmean - sigz, zmean + sigz, 10)
            if "bg" in pars:
                chains["bg"] = bias_avg(num=0, skip=b_skip)
            if "by" in pars:
                chains["by"] = bias_avg(num=1, skip=b_skip)

        # Construct tomographic dictionary
        if s == 0:
            keys = ["z"] + fid_pars
            CHAINS = {k: chains[k] for k in keys}
        else:
            for k in keys:
                CHAINS[k] = np.vstack((CHAINS[k], chains[k]))

    # save bias chains to save time if not already saved
    if "bg" in fid_pars:
        np.save(fname("bg"), CHAINS["bg"])
    if "by" in fid_pars:
        np.save(fname("by"), CHAINS["by"])

    return {**preCHAINS, **CHAINS}
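# Minimal sketch of the thinned-chain averaging performed by bias_avg above:
# every `skip`-th sample is mapped through a per-sample bias function and the
# result is averaged over the redshift grid (axis 1). `bias_stub` is a
# hypothetical stand-in for bias_one, which is too expensive to evaluate on
# the full chain.
import numpy as np

rng = np.random.default_rng(0)
chain = rng.normal(size=(10000, 3))           # hypothetical MCMC chain
skip = 100                                    # the "reduce_by_factor" thinning
bias_stub = lambda p0: np.full(10, p0.sum())  # one bias value per z in zarr
bb = np.array([bias_stub(p0) for p0 in chain[::skip]])
bb = np.mean(bb, axis=1)                      # one bias value per thinned sample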
        hmc_fid = hmc
    else:
        cosmo_fid = COSMO_ARGS(kwargs)
        hmc_fid = get_hmcalc(cosmo_fid, **kwargs)
    return get_theory(p, d, cosmo_fid, hmc_fid,
                      hm_correction=hm_correction,
                      selection=None, **pars)

likd = Likelihood(p.get('params'), dat.data_vector, dat.covar, th,
                  template=dat.templates)
likg = Likelihood(p.get('params'), gat.data_vector, gat.covar, th,
                  template=gat.templates)
sam = Sampler(likd.lnprob, likd.p0, likd.p_free_names,
              p.get_sampler_prefix(v['name']), p.get('mcmc'))
sam.get_chain()
sam.update_p0(sam.chain[np.argmax(sam.probs)])
params = likd.build_kwargs(sam.p0)

clth = th(params, gat)[:len(ls)]
l = np.geomspace(ls[0], ls[-1], 1024)
clthp = np.exp(interp1d(np.log(ls), np.log(clth),
                        kind='quadratic')(np.log(l)))

# grey boundaries
z, nz = np.loadtxt(dat.tracers[0][0].dndz, unpack=True)
zmean = np.average(z, weights=nz)
lmin = v["twopoints"][0]["lmin"]
chi = ccl.comoving_radial_distance(cosmo, 1 / (1 + zmean))
kmax = p.get("mcmc")["kmax"]
lmax = kmax * chi - 0.5

plate_template = window_plates(ls, 5.)
plate_template_hi = window_plates(l, 5.)
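# Minimal sketch of the log-log quadratic interpolation used above to upsample
# the best-fit C_ell onto a finer multipole grid; the power-law spectrum is a
# hypothetical stand-in for the theory output.
import numpy as np
from scipy.interpolate import interp1d

ls = np.geomspace(10, 2000, 30)        # hypothetical bandpower multipoles
clth = 1e-3 * (ls / 100.) ** -1.5      # hypothetical smooth spectrum
l = np.geomspace(ls[0], ls[-1], 1024)
clthp = np.exp(interp1d(np.log(ls), np.log(clth),
                        kind='quadratic')(np.log(l)))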
def get_chains(self, pars=None, **specargs):
    """Returns a dictionary containing the chains of `pars`."""
    # if `pars` is not set, collect chains for all free parameters
    if pars is None:
        pars = [par["name"] for par in self.p.get("params")
                if par.get("vary")]

    def bias_one(p0, num):
        """Calculates the halo model bias for a set of parameters."""
        kw = lik.build_kwargs(p0)
        if self.cosmo_vary:
            # rebuild cosmology & halo-model calculator from the sampled point
            cosmo = COSMO_ARGS(kw)
            hmc = get_hmcalc(cosmo,
                             **{"mass_function": self.p.get_massfunc(),
                                "halo_bias": self.p.get_halobias()})
        else:
            cosmo = self.cosmo
            hmc = self.hmc
        bb = hm_bias(cosmo, hmc, 1 / (1 + zarr),
                     d.tracers[num][1], **kw)
        return bb

    def bias_avg(num, skip):
        """Calculates the halo model bias of a profile, from a chain."""
        from pathos.multiprocessing import ProcessingPool as Pool
        with Pool() as pool:
            bb = pool.map(lambda p0: bias_one(p0, num),
                          sam.chain[::skip])
        # bb = list(map(lambda p0: bias_one(p0, num), sam.chain[::skip]))
        bb = np.mean(np.array(bb), axis=1)
        return bb

    # path to chain
    fname = lambda s: self.p.get("global")["output_dir"] + "/sampler_" + \
                      self.p.get("mcmc")["run_name"] + "_" + s + "_chain"

    if isinstance(pars, str):
        pars = [pars]

    preCHAINS = {}
    fid_pars = pars.copy()
    for par in pars:
        try:
            preCHAINS[par] = np.load(fname(par) + ".npy")
            fid_pars.remove(par)
            print("Found saved chains for %s." % par)
        except FileNotFoundError:
            continue

    if ("bg" in fid_pars) or ("by" in fid_pars) or ("bk" in fid_pars):
        # thin sample (for computationally expensive hm_bias)
        b_skip = specargs.get("thin")
        if b_skip is None:
            print("Chain 'thin' factor not given. Defaulting to 100.")
            b_skip = 100

    for s, v in enumerate(self.p.get("data_vectors")):
        print(v["name"])
        d = DataManager(self.p, v, all_data=False)
        self.d = d
        lik = Likelihood(self.p.get('params'),
                         d.data_vector, d.covar, self._th,
                         template=d.templates)
        sam = Sampler(lik.lnprob, lik.p0, lik.p_free_names,
                      self.p.get_sampler_prefix(v['name']),
                      self.p.get('mcmc'))

        sam.get_chain()
        chains = lik.build_kwargs(sam.chain.T)

        sam.update_p0(sam.chain[np.argmax(sam.probs)])
        kwargs = lik.build_kwargs(sam.p0)
        w = kwargs["width"]
        zz, NN = self._get_dndz(d.tracers[0][0].dndz, w)
        zmean = np.average(zz, weights=NN)
        chains["z"] = zmean

        if "probs" in pars:
            chains["probs"] = sam.probs

        if ("bg" in fid_pars) or ("by" in fid_pars) or ("bk" in fid_pars):
            sigz = np.sqrt(np.sum(NN * (zz - zmean)**2) / np.sum(NN))
            zarr = np.linspace(zmean - sigz, zmean + sigz, 10)
            if "bg" in pars:
                chains["bg"] = bias_avg(num=0, skip=b_skip)
            if "by" in pars:
                chains["by"] = bias_avg(num=1, skip=b_skip)
            if "bk" in pars:
                chains["bk"] = bias_avg(num=2, skip=b_skip)

        # Construct tomographic dictionary
        if s == 0:
            keys = ["z"] + fid_pars
            CHAINS = {k: [chains[k]] for k in keys}
        else:
            for k in keys:
                CHAINS[k].append(chains[k])

    # save bias chains to save time if not already saved
    if "bg" in fid_pars:
        np.save(fname("bg"), CHAINS["bg"])
    if "by" in fid_pars:
        np.save(fname("by"), CHAINS["by"])
    if "bk" in fid_pars:
        np.save(fname("bk"), CHAINS["bk"])

    return {**preCHAINS, **CHAINS}
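# Minimal sketch of the parallel map inside bias_avg above, using the standard
# library instead of pathos (the code uses pathos because it can pickle the
# closure over `num`; multiprocessing needs a module-level function instead).
# `bias_stub` is a hypothetical stand-in for bias_one.
import numpy as np
from multiprocessing import Pool

def bias_stub(p0):
    """Hypothetical per-sample bias: one value per redshift in the z-grid."""
    return np.full(10, np.sum(p0))

if __name__ == "__main__":
    chain = np.random.default_rng(1).normal(size=(1000, 4))
    with Pool() as pool:
        bb = pool.map(bias_stub, chain[::100])
    bb = np.mean(np.array(bb), axis=1)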
cov0 = sam.get_covariance(update_cov=True)
sam.save_properties()

print(" Best-fit parameters:")
for nam, val, sig in zip(sam.parnames, sam.p0,
                         np.sqrt(np.diag(sam.covar))):
    param_jk_values[nam] = val
    print(" " + nam + " : %.3lE +- %.3lE" % (val, sig))
    if nam == p.get("mcmc")["save_par"]:
        par.append(val)

z, nz = np.loadtxt(d.tracers[0][0].dndz, unpack=True)
zmean = np.average(z, weights=nz)
sigz = np.sqrt(np.sum(nz * (z - zmean)**2) / np.sum(nz))
zarr = np.linspace(zmean - sigz, zmean + sigz, 10)
bg = np.mean(hm_bias(cosmo, 1. / (1 + zarr),
                     d.tracers[0][0].profile,
                     **(lik.build_kwargs(sam.p0))))
by = np.mean(hm_bias(cosmo, 1. / (1 + zarr),
                     d.tracers[1][1].profile,
                     **(lik.build_kwargs(sam.p0))))
chi2 = lik.chi2(sam.p0)
print(" b_g : %.3lE" % bg)
print(" b_y : %.3lE" % by)
print(" chi^2 = %lf" % chi2)
print(" n_data = %d" % (len(d.data_vector)))

param_jk_values['b_g'] = bg
param_jk_values['b_y'] = by
param_jk_values['chi2'] = chi2
np.savez(p.get_sampler_prefix(v['name']) + "jk%d_vals" % jk_region,
         M1=np.array(param_jk_values['M1']),
         Mmin=np.array(param_jk_values['Mmin']),
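# Minimal sketch of the N(z) moments computed above: the mean redshift, the
# width, and the z-grid over which the halo-model bias is averaged. The
# Gaussian N(z) is hypothetical; the pipeline reads it from a dndz file.
import numpy as np

z = np.linspace(0., 1., 200)
nz = np.exp(-0.5 * ((z - 0.4) / 0.1) ** 2)           # hypothetical N(z)
zmean = np.average(z, weights=nz)
sigz = np.sqrt(np.sum(nz * (z - zmean) ** 2) / np.sum(nz))
zarr = np.linspace(zmean - sigz, zmean + sigz, 10)   # +-1 sigma around the mean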
                 th, debug=p.get('mcmc')['debug'])

# Set up sampler
sam = Sampler(lik.lnprob, lik.p0, lik.p_free_names,
              p.get_sampler_prefix(v['name']), p.get('mcmc'))

# Read chains and best-fit
sam.get_chain()
sam.update_p0(sam.chain[np.argmax(sam.probs)])

# Compute galaxy and tSZ halo-model biases from a thinned chain
zarr = np.linspace(zmean - sigz, zmean + sigz, 10)
bgchain = np.array([hm_bias(cosmo, 1. / (1 + zarr),
                            d.tracers[0][0].profile,
                            **(lik.build_kwargs(p0)))
                    for p0 in sam.chain[::100]])
bychain = np.array([hm_bias(cosmo, 1. / (1 + zarr),
                            d.tracers[1][1].profile,
                            **(lik.build_kwargs(p0)))
                    for p0 in sam.chain[::100]])
bgmin, bg, bgmax = np.percentile(bgchain, [16, 50, 84])
bymin, by, bymax = np.percentile(bychain, [16, 50, 84])

# Plot power spectra
figs_cl = lik.plot_data(sam.p0, d, save_figures=True, save_data=True,
                        prefix=p.get_sampler_prefix(v['name']),
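# Minimal sketch of the 16/50/84 percentile summary used above to turn a bias
# chain into a median and a 68% credible interval; the chain is synthetic.
import numpy as np

bgchain = np.random.default_rng(2).normal(1.2, 0.1, size=5000)
bgmin, bg, bgmax = np.percentile(bgchain, [16, 50, 84])
print("b_g = %.3f +%.3f -%.3f" % (bg, bgmax - bg, bg - bgmin))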