def get_new_tracers(add_random=True, n_svd=4):
    # Note: relies on module-level `s_d` (SACC data) and `pz_codes`.
    tr = []
    for i in range(4):
        # Normalized N(z) from the fiducial column and from each photo-z code
        zs = s_d.tracers[i].z
        nzs = [s_d.tracers[i].Nz / np.sum(s_d.tracers[i].Nz)]
        for pn in pz_codes:
            n = s_d.tracers[i].extra_cols[pn]
            nzs.append(n / np.sum(n))
        nzs = np.array(nzs)
        nz_mean = np.mean(nzs, axis=0)
        nz_new = nz_mean.copy()
        if add_random:
            # Covariance of the N(z) estimates across codes
            cov_nzs = np.mean(np.array([(n - nz_mean)[:, None] *
                                        (n - nz_mean)[None, :]
                                        for n in nzs]), axis=0)
            # Perturb the mean along the n_svd leading eigenmodes
            w, ev = np.linalg.eigh(cov_nzs)
            sigs = np.sqrt(w[-n_svd:])
            evs = ev[:, -n_svd:]
            nz_new += np.sum((np.random.randn(n_svd) * sigs)[None, :] * evs,
                             axis=1)
        T = sacc.Tracer('bin_%d' % i, 'point', zs, nz_new,
                        exp_sample='HSC_DESC')
        tr.append(T)
    return tr
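# Illustrative sketch of the eigenmode-sampling step above, using synthetic
# Gaussian N(z) curves in place of the SACC photo-z columns: build the
# covariance of several N(z) estimates, keep the n_svd leading eigenmodes,
# and draw one random realization along them.
import numpy as np

rng = np.random.default_rng(0)
zs_demo = np.linspace(0., 2., 100)
nzs_demo = np.array([np.exp(-0.5 * ((zs_demo - 0.8 - 0.02 * k) / 0.3)**2)
                     for k in range(4)])
nzs_demo /= nzs_demo.sum(axis=1, keepdims=True)     # normalize each estimate
nz_mean_demo = nzs_demo.mean(axis=0)
cov_demo = np.mean([(n - nz_mean_demo)[:, None] * (n - nz_mean_demo)[None, :]
                    for n in nzs_demo], axis=0)
n_svd = 4
w, ev = np.linalg.eigh(cov_demo)                    # eigenvalues ascending
sigs = np.sqrt(np.clip(w[-n_svd:], 0., None))       # guard tiny negatives
nz_new_demo = nz_mean_demo + ev[:, -n_svd:] @ (rng.standard_normal(n_svd) * sigs)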
def get_tracer_from_name(name, exp_sample=None):
    # Delta-like bandpass at the channel frequency
    if name == 'A90':
        nu = [90.]
    elif name == 'A150':
        nu = [150.]
    else:
        raise ValueError("Unknown tracer name: %s" % name)
    bandpass = [1.]
    return sacc.Tracer(name, "spin2", np.asarray(nu), np.asarray(bandpass),
                       exp_sample)
def get_mean_cov(s_data, Ntr, Nztr, noi_fac):
    # photo-z codes
    pz_codes = ['nz_demp', 'nz_ephor', 'nz_ephor_ab', 'nz_frankenz']
    # store new tracers and noise covariance
    cov_all = np.zeros((Nztr * Ntr, Nztr * Ntr))
    tr = []
    for i in range(Ntr):
        # get nz for all pz codes
        zs = s_data.tracers[i].z
        nzs = [s_data.tracers[i].Nz / np.sum(s_data.tracers[i].Nz)]
        for pn in pz_codes:
            n = s_data.tracers[i].extra_cols[pn]
            nzs.append(n / np.sum(n))
        nzs = np.array(nzs)
        # get mean and variance
        nz_mean = np.mean(nzs, axis=0)
        nz_var = np.var(nzs, axis=0)
        nz_var_mean = nz_var[nz_mean > 0].mean()
        # single white-noise level per tracer, on the diagonal of its block
        cov = np.diag(noi_fac * nz_var_mean * np.ones(len(zs)))
        cov_all[i * len(zs):(i + 1) * len(zs),
                i * len(zs):(i + 1) * len(zs)] = cov
        # store new tracers
        T = sacc.Tracer('bin_%d' % i, 'point', zs, nz_mean,
                        exp_sample='HSC_DESC')
        tr.append(T)
    s_m = sacc.SACC(tr, s_data.binning, s_data.mean)
    return s_m, cov_all
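# Illustrative sketch (toy inputs) of the noise model above: one white-noise
# level per tracer, set by the mean scatter of the pz-code N(z)s over
# populated bins, placed on the diagonal of that tracer's covariance block.
import numpy as np

nzs_toy = np.array([[0.0, 0.20, 0.50, 0.30, 0.0],
                    [0.0, 0.25, 0.45, 0.30, 0.0],
                    [0.0, 0.15, 0.55, 0.30, 0.0]])   # toy normalized N(z)s
nz_mean_toy = nzs_toy.mean(axis=0)
nz_var_toy = nzs_toy.var(axis=0)
noi_fac = 4.
nz_var_mean_toy = nz_var_toy[nz_mean_toy > 0].mean()
cov_toy = np.diag(noi_fac * nz_var_mean_toy * np.ones(nzs_toy.shape[1]))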
def get_mean_cov(s_data, Ntr, Nztr, noi_fac, upsample):
    ## THIS IMPLEMENTS UPSAMPLING + SECTION 2.2.1
    # photo-z codes
    pz_codes = ['nz_demp', 'nz_ephor', 'nz_ephor_ab', 'nz_frankenz']
    # store new tracers and noise covariance
    myNztr = Nztr * upsample
    cov_all = np.zeros((myNztr * Ntr, myNztr * Ntr))
    tr = []
    for i in range(Ntr):
        # get nz for all pz codes
        zs = s_data.tracers[i].z
        nz0 = s_data.tracers[i].Nz / np.sum(s_data.tracers[i].Nz)  # copy; don't mutate the tracer
        if upsample >= 1:
            # cubic upsampling onto a grid `upsample` times finer;
            # points beyond the native z range are left at zero
            minz, maxz = zs[0], zs[-1]
            newzs = zs[0] + np.arange(myNztr) * (maxz - minz) / (Nztr - 1) / upsample
            newnzs = np.zeros(myNztr)
            w = np.where(newzs <= maxz)
            newnzs[w] = interp1d(zs, nz0, kind='cubic')(newzs[w])
            nzs = [newnzs]
        else:
            newzs = zs
            w = np.arange(len(zs))  # all points (no upsampling)
            nzs = [nz0]
        for pn in pz_codes:
            n = s_data.tracers[i].extra_cols[pn]
            n = n / np.sum(n)
            newn = np.zeros(myNztr)
            newn[w] = interp1d(zs, n, kind='cubic')(newzs[w])
            nzs.append(newn)
        nzs = np.array(nzs)
        # get mean and variance
        #nz_mean = np.mean(nzs, axis=0)
        # TESTING: use the fiducial N(z) (interpolated) instead of the pz-code mean
        nz_mean = np.zeros(myNztr)
        nz_mean[w] = interp1d(zs, s_data.tracers[i].Nz, kind='cubic')(newzs[w])
        nz_var = np.var(nzs, axis=0)
        # TODO: is this necessary?
        nz_var = gaussian_filter(nz_var, 2.5 * upsample)
        # TODO: is this correct?
        # used to be
        #corr = np.eye(Nztr)
        #sqrtnz_var = np.sqrt(nz_var)
        #cov = noi_fac*np.outer(sqrtnz_var,sqrtnz_var)*corr
        # I think it should be
        cov = noi_fac * np.diag(nz_var)
        cov_all[i * myNztr:(i + 1) * myNztr,
                i * myNztr:(i + 1) * myNztr] = cov
        # store new tracers
        T = sacc.Tracer('bin_%d' % i, 'point', newzs, nz_mean,
                        exp_sample='HSC_DESC')
        tr.append(T)
    s_m = sacc.SACC(tr, s_data.binning, s_data.mean)
    return s_m, cov_all
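# Illustrative, self-contained sketch of the cubic-upsampling step above:
# interpolate a normalized N(z) onto a grid `upsample` times finer, filling
# only the points inside the native z range and leaving the rest at zero.
import numpy as np
from scipy.interpolate import interp1d

zs_c = np.linspace(0., 2., 30)                     # native grid (Nztr points)
nz_c = np.exp(-0.5 * ((zs_c - 0.8) / 0.3)**2)
nz_c /= nz_c.sum()
upsample = 4
newzs_c = zs_c[0] + np.arange(len(zs_c) * upsample) * (zs_c[-1] - zs_c[0]) / (len(zs_c) - 1) / upsample
newnz_c = np.zeros_like(newzs_c)
w_c = newzs_c <= zs_c[-1]                          # guard the interpolation range
newnz_c[w_c] = interp1d(zs_c, nz_c, kind='cubic')(newzs_c[w_c])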
def get_smooth_s_and_prior(s_data, cosmo, want_prior, A_smooth=0.25, noi_fac=4.):
    # number of tracers and bins
    Nz_per_tracer = len(s_data.tracers[0].z)
    N_tracers = len(s_data.tracers)
    Nz_total = N_tracers * Nz_per_tracer
    zs = s_data.tracers[0].z
    # obtain the mean of the 4 pz codes with their noise
    s_mean, cov_noise = get_mean_cov(s_data, N_tracers, Nz_per_tracer, noi_fac)
    s0 = NzVec(s_data)
    # compute the CV
    if os.path.isfile("cov_CV.npy"):
        print("!!!!! Loading cached CV covariance matrix !!!!!")
        cov_CV = np.load("cov_CV.npy")
    else:
        # compute cv covmat
        cov_CV = np.zeros((Nz_total, Nz_total))
        for i in range(N_tracers):
            # cosmic variance covmat for each tracer
            cov_CV_per_tracer = compute_covmat_cv(cosmo, s_mean.tracers[i].z,
                                                  s_mean.tracers[i].Nz)
            cov_CV[i * Nz_per_tracer:(i + 1) * Nz_per_tracer,
                   i * Nz_per_tracer:(i + 1) * Nz_per_tracer] = cov_CV_per_tracer
        np.save("cov_CV.npy", cov_CV)
    # impose smoothness of first and second derivative
    D = A_smooth**2 * obtain_smoothing_D(s_mean, first=True, second=True)
    # compute total covariance of noise
    cov_total = cov_noise + cov_CV
    # compute precision with and without the smoothing matrix D
    P0 = np.linalg.inv(cov_total)
    P = P0 + D
    # get the smoothed N(z) for all tracers
    s_smooth = np.dot(np.dot(np.linalg.inv(P), P0), s0)
    print(s0[:10], s_smooth[:10])  # debug
    tr = []
    for i in range(N_tracers):
        T = sacc.Tracer('bin_%d' % i, 'point', zs,
                        s_smooth[i * Nz_per_tracer:(i + 1) * Nz_per_tracer],
                        exp_sample='HSC_DESC')
        tr.append(T)
    s = sacc.SACC(tr, s_data.binning, s_data.mean)
    # return smooth s (and smooth prior)
    if want_prior:
        return s, P
    else:
        return s
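# The smoothing above is a Gaussian posterior mean: with data precision P0
# and a smoothness prior of precision D, the estimate is (P0 + D)^-1 P0 s0.
# Illustrative sketch with a simple second-difference penalty standing in
# for obtain_smoothing_D (the prior strength here is arbitrary).
import numpy as np

rng = np.random.default_rng(1)
n = 50
z_demo = np.linspace(0., 2., n)
sigma = 0.05
s0_demo = np.exp(-0.5 * ((z_demo - 0.8) / 0.3)**2) + sigma * rng.standard_normal(n)
P0_demo = np.eye(n) / sigma**2                 # white-noise precision
D2 = np.diff(np.eye(n), n=2, axis=0)           # second-difference operator
A_smooth_demo = 25.                            # illustrative prior strength
D_demo = A_smooth_demo**2 * (D2.T @ D2)
s_smooth_demo = np.linalg.solve(P0_demo + D_demo, P0_demo @ s0_demo)  # == inv(P0+D) @ P0 @ s0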
def get_sacc_tracers(self):
    sacc_t = []
    for b in range(self.n_bpss):
        bpss = self.bpss['band%d' % (b + 1)]
        for s in range(self.nsplits):
            T = sacc.Tracer(self.get_map_label(b, s), 'CMBP',
                            bpss['nu'], bpss['bnu'], exp_sample='SO_SAT')
            T.addColumns({'dnu': bpss['dnu']})
            sacc_t.append(T)
    return sacc_t
def get_tracers(self, s):
    """
    Gets two arrays of tracers: one for coadd SACC files,
    one for null SACC files.
    """
    tracers_bands = {}
    for t in s.tracers:
        band, split = t.name[2:-1].split('_', 2)
        if split == 'split1':
            T = sacc.Tracer(band, t.type, t.z, t.Nz,
                            exp_sample=t.exp_sample)
            T.addColumns({'dnu': t.extra_cols['dnu']})
            tracers_bands[band] = T
    self.t_coadd = []
    for i in range(self.nbands):
        self.t_coadd.append(tracers_bands['band%d' % (i + 1)])
    self.t_nulls = []
    self.ind_nulls = {}
    ind_null = 0
    for b in range(self.nbands):
        t = tracers_bands['band%d' % (b + 1)]
        for i in range(self.nsplits):
            # Loop over unique pairs
            for j in range(i, self.nsplits):
                name = 'band%d_null%dm%d' % (b + 1, i + 1, j + 1)
                self.ind_nulls[name] = ind_null
                T = sacc.Tracer(name, t.type, t.z, t.Nz,
                                exp_sample=t.exp_sample)
                T.addColumns({'dnu': t.extra_cols['dnu']})
                self.t_nulls.append(T)
                ind_null += 1
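# Quick illustration of the unique-pair enumeration used for the null
# tracers: nsplits * (nsplits + 1) // 2 ordered pairs per band.
nsplits_demo = 4
pairs_demo = [(i + 1, j + 1)
              for i in range(nsplits_demo)
              for j in range(i, nsplits_demo)]
# -> [(1, 1), (1, 2), (1, 3), (1, 4), (2, 2), ..., (4, 4)]: 10 pairs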
def get_sacc_tracers(self, tracers):
    """
    Generate a list of SACC tracers from the input Tracers.
    """
    sacc_tracers = []
    for i_t, t in enumerate(tracers):
        # Bin centers and fiducial (COSMOS) N(z)
        z = (t.nz_data['z_i'] + t.nz_data['z_f']) * 0.5
        nz = t.nz_data['nz_cosmos']
        T = sacc.Tracer('bin_%d' % i_t, 'point', z, nz,
                        exp_sample="HSC_DESC")
        T.addColumns({'nz_' + c: t.nz_data['nz_' + c]
                      for c in ['demp', 'ephor', 'ephor_ab',
                                'frankenz', 'nnpz']})
        sacc_tracers.append(T)
    return sacc_tracers
def getTracers(cosmo, dic_par):
    # Create SACC tracers and corresponding CCL tracers.
    # Note: relies on module-level `zbins` and `zbin_size`.
    tracers = []
    cltracers = []
    for i, z in enumerate(zbins):
        # Gaussian N(z) spanning +-3 sigma around the bin center
        zar = np.arange(z - 3 * zbin_size, z + 3 * zbin_size, 0.001)
        Nz = np.exp(-(z - zar)**2 / (2 * zbin_size**2))
        T = sacc.Tracer("des_gals_" + str(i), "point", zar, Nz,
                        exp_sample="gals",
                        Nz_sigma_logmean=0.01, Nz_sigma_logwidth=0.1)
        bias = np.ones_like(zar)
        T.addColumns({'b': bias})
        tracers.append(T)
        # Number-counts tracer (legacy pyccl API)
        cltracers.append(ccl.ClTracerNumberCounts(cosmo,
                                                  dic_par['has_rsd'],
                                                  dic_par['has_magnification'],
                                                  zar, Nz, zar, bias))
    return tracers, cltracers
# (snippet begins mid-statement: the tail of the comprehension building `wins`)
                         for f in fields]), axis=0) / area)
        for i_w, ww in enumerate(sc['GAMA09H'].binning.windows)]

# Bins
s_bn = sc['GAMA09H'].binning
s_bn.windows = wins

# Tracers
s_tr = []
for i_t in range(4):
    T = sacc.Tracer('bin_%d' % i_t, 'point', zs[i_t], Nz[i_t],
                    exp_sample="HSC_DESC")
    T.addColumns({pn: ec[pn][i_t] for pn in pz_codes})
    s_tr.append(T)

# Signal spectra
s_mean = sacc.MeanVec(mean)
s_prec = sacc.Precision(cov, "dense", is_covariance=True, binning=s_bn)
s_meta = {'Area_rad': area}
s = sacc.SACC(s_tr, s_bn, s_mean, precision=s_prec, meta=s_meta)
s.saveToHDF("COADD/power_spectra_wdpj.sacc")

# Noise spectra
s_mean = sacc.MeanVec(mean_n)
s_bn.windows = None
s = sacc.SACC(s_tr, s_bn, s_mean, meta=s_meta)
# Accumulate the model bandpowers: each component's EE/BB template scaled
# by its SED at bands b1 and b2 (these lines sit inside loops over b1, b2)
bpw_model[b1, 0, b2, 0, :] += bpw_cmb_ee * seds[b1, 0] * seds[b2, 0]
bpw_model[b1, 1, b2, 1, :] += bpw_cmb_bb * seds[b1, 0] * seds[b2, 0]
bpw_model[b1, 0, b2, 0, :] += bpw_sync_ee * seds[b1, 1] * seds[b2, 1]
bpw_model[b1, 1, b2, 1, :] += bpw_sync_bb * seds[b1, 1] * seds[b2, 1]
bpw_model[b1, 0, b2, 0, :] += bpw_dust_ee * seds[b1, 2] * seds[b2, 2]
bpw_model[b1, 1, b2, 1, :] += bpw_dust_bb * seds[b1, 2] * seds[b2, 2]
np.savez("c_ells_sky",
         ls=ells_bpw,
         cls_ee=bpw_model[:, 0, :, 0, :],
         cls_bb=bpw_model[:, 1, :, 1, :])
exit(1)  # early exit after saving the model spectra

tracers = []
for b in range(nfreqs):
    T = sacc.Tracer("band%d" % (b + 1), 'CMBP', bpss[b].nu, bpss[b].bnu,
                    exp_sample='SO_SAT')
    T.addColumns({'dnu': bpss[b].dnu})
    tracers.append(T)

typ, ell, t1, q1, t2, q2 = [], [], [], [], [], []
pol_names = ['E', 'B']
for i1 in range(2 * nfreqs):
    b1 = i1 // 2
    p1 = i1 % 2
    for i2 in range(i1, 2 * nfreqs):
        b2 = i2 // 2
        p2 = i2 % 2
        ty = pol_names[p1] + pol_names[p2]
        for il, ll in enumerate(ells_bpw):
            ell.append(ll)
    #Initialize covariance
    covar = np.zeros([n_cross * n_ell, n_cross * n_ell])
    '''  # closes a commented-out block that begins above this excerpt
else:
    print("Unknown covariance option " + o.covar_opt + ", no covariance computed")
    covar = None

#Save to SACC format
print("Saving to SACC")

#Tracers
sacc_tracers = []
for i_t, t in enumerate(tracers):
    z = (t.nz_data['z_i'] + t.nz_data['z_f']) * 0.5
    nz = t.nz_data['n_z']
    T = sacc.Tracer('bin_%d' % i_t, 'point', z, nz, exp_sample=o.hsc_field)
    T.addColumns({'ndens': t.ndens_perad * np.ones_like(nz)})
    sacc_tracers.append(T)

#Binning and mean
typ, ell, dell, t1, q1, t2, q2 = [], [], [], [], [], [], []
for t1i in range(nbins):
    for t2i in range(t1i, nbins):
        for i_l, l in enumerate(ell_eff):
            typ.append('F')  # Fourier-space
            ell.append(l)
            dell.append(lend[i_l] - lini[i_l])
            t1.append(t1i)
            q1.append('P')
            t2.append(t2i)
            q2.append('P')
sacc_binning = sacc.Binning(typ, ell, t1, q1, t2, q2, deltaLS=dell)
def get_tracer_from_name(name, exp_sample=None):
    # Bandpass files are two-column text: frequency and transmission
    d = np.loadtxt("BK15_cosmomc/data/BK15/bandpass_" + name + ".txt",
                   unpack=True)
    return sacc.Tracer(name, "spin2", d[0], d[1], exp_sample)
## We have some DES galaxies and we also have some LSST galaxies and the
## CMB kappa map.
##
## We start by defining tracers.
##
tracers = []

## First DES galaxies with 4 tomographic bins:
for i, z in enumerate([0.3, 0.5, 0.7, 0.9]):
    zar = np.arange(z - 0.1, z + 0.1, 0.001)
    Nz = np.exp(-(z - zar)**2 / (2 * 0.03**2))
    bias = np.ones(len(zar)) * (i + 0.5)
    T = sacc.Tracer("des_gals_%i" % i, "spin0", zar, Nz,
                    exp_sample="des_gals",
                    Nz_sigma_logmean=0.01, Nz_sigma_logwidth=0.1)
    T.addColumns({'b': bias})
    tracers.append(T)

## Next LSST galaxies with 4 different tomographic bins.
## Here the PZ modelling got more advanced, so we have some PZ shapes
## to marginalise over.
for i, z in enumerate([0.5, 0.7, 0.9, 1.1]):
    zar = np.arange(z - 0.1, z + 0.1, 0.001)
    Nz = np.exp(-(z - zar)**2 / (2 * 0.025**2))
    DNz = np.zeros((len(Nz), 2))
    ## some random shapes of Nz to marginalise over
    DNz[:, 0] = (z - zar)**2 * 0.01
def process_catalog(o):
    # Read z-binning
    print("Bins")
    z0_bins, zf_bins, lmax_bins = np.loadtxt(o.fname_bins_z, unpack=True)
    nbins = len(z0_bins)
    cat = fc.Catalog(read_from=o.fname_in)

    # Get weights, compute binary mask based on weights, and apodize it if needed
    print("Window")
    mask = Mask(cat, o.nside, o.theta_apo)
    nside = mask.nside

    # Get contaminant templates
    # TODO: check resolution
    if o.templates_fname != "none":
        templates = [[t] for t in hp.read_map(o.templates_fname, field=None)]
        ntemp = len(templates)
    else:
        templates = None
        ntemp = 0

    # Generate bandpowers binning scheme
    # (we're assuming all maps will use the same bandpowers!)
    print("Bandpowers")
    bpw = nmt.NmtBin(nside, nlb=o.delta_ell)
    ell_eff = bpw.get_effective_ells()

    # Generate tracers
    # TODO: pass extra sampling parameters
    tracers = []
    zs, nzs, mps = bin_catalog(cat, z0_bins, zf_bins, mask)
    if mrank != 0:
        return
    for zar, nzar, mp, lmax in zip(zs, nzs, mps, lmax_bins):
        zav = np.average(zar, weights=nzar)
        print("-- z-bin: %3.2f " % zav)
        tracers.append(Tracer(mp, zar, nzar, lmax, mask, templates=templates))
        if o.save_map:
            hp.write_map("map_%3.2f.fits" % zav, mp)
        cat.rewind()

    print("Compute power spectra")
    # Compute coupling matrix
    # TODO: (only done once, assuming all maps have the same mask!)
    print(" Computing coupling matrix")
    w = nmt.NmtWorkspace()
    if not os.path.isfile(o.nmt_workspace):
        w.compute_coupling_matrix(tracers[0].field, tracers[0].field, bpw)
        if o.nmt_workspace != "none":
            w.write_to(o.nmt_workspace)
    else:
        w.read_from(o.nmt_workspace)

    # Compute all cross-correlations
    def compute_master(fa, fb, wsp, clb):
        cl_coupled = nmt.compute_coupled_cell(fa, fb)
        cl_decoupled = wsp.decouple_cell(cl_coupled, cl_bias=clb)
        return cl_decoupled

    # If attempting to deproject contaminant templates, we need an estimate
    # of the true power spectra. This can be done iteratively from a first
    # guess using cl_bias=0, but I haven't coded that up yet.
    # For the moment we will use cl_guess=0.
    cl_guess = np.zeros(3 * nside)

    t1 = time()
    print(" Computing power spectrum")
    cls_all = {}
    for b1 in np.arange(nbins):
        f1 = tracers[b1].field
        for b2 in np.arange(b1, nbins):
            f2 = tracers[b2].field
            if ntemp > 0:
                cl_bias = nmt.deprojection_bias(f1, f2, w, cl_guess)
            else:
                cl_bias = None
            cls_all[(b1, b2)] = compute_master(f1, f2, w, clb=cl_bias)[0]
            print('Computed bin: ', b1, b2, ' in ', time() - t1, ' s')
            if debug:
                plt.figure()
                plt.plot(ell_eff, cls_all[(b1, b1)])
                plt.xscale('log')
                plt.yscale('log')
                plt.xlabel(r'$l$')
                plt.ylabel(r'$C_{l}$')
                plt.show()

    print("Translating into SACC")
    # Transform everything into SACC format
    # 1- Generate SACC tracers
    stracers = [sacc.Tracer("tr_b%d" % i, "point", t.zarr, t.nzarr,
                            exp_sample="gals")
                for i, t in enumerate(tracers)]
    # 2- Define SACC binning
    typ, ell, t1, q1, t2, q2 = [], [], [], [], [], []
    for i1 in np.arange(nbins):
        for i2 in np.arange(i1, nbins):
            lmax = min(tracers[i1].lmax, tracers[i2].lmax)
            for l in ell_eff[ell_eff < lmax]:
                typ.append('F')
                ell.append(l)
                t1.append(i1)
                t2.append(i2)
                q1.append('P')
                q2.append('P')
    sbin = sacc.Binning(typ, ell, t1, q1, t2, q2)
    ssbin = sacc.SACC(stracers, sbin)
    # 3- Arrange power spectra into SACC mean vector
    vec = np.zeros((ssbin.size(),))
    for t1i, t2i, ells, ndx in ssbin.sortTracers():
        lmax = min(tracers[t1i].lmax, tracers[t2i].lmax)
        vec[ndx] = cls_all[(t1i, t2i)][np.where(ell_eff < lmax)[0]]
    svec = sacc.MeanVec(vec)
    # 4- Create SACC file and write to file
    csacc = sacc.SACC(stracers, sbin, svec)
    csacc.saveToHDF(o.fname_out)
def get_smooth_s_and_prior(s_data, cosmo, noi_fac=4., A_smooth=1.,
                           dz_thr=0.04, upsample=1, cov_cv=True):
    # number of tracers and bins
    Nz_per_tracer = len(s_data.tracers[0].z)
    N_tracers = len(s_data.tracers)
    Nz_total = N_tracers * Nz_per_tracer
    zs_data = s_data.tracers[0].z

    # obtain the mean of the 4 pz codes with their noise
    s_mean, cov_noise = get_mean_cov(s_data, N_tracers, Nz_per_tracer,
                                     noi_fac, upsample)
    zs_mean = s_mean.tracers[0].z
    s0 = NzVec(s_mean)

    if cov_cv:
        # compute the CV
        covfn = "cov_CV_%i.npy" % upsample
        if os.path.isfile(covfn):
            print("!!!!! Loading cached CV covariance matrix !!!!!")
            cov_CV = np.load(covfn)
        else:
            # compute cv covmat
            cov_CV = np.zeros((Nz_total * upsample, Nz_total * upsample))
            for i in range(N_tracers):
                print("Tracer = %i out of %i" % (i, N_tracers - 1))
                # cosmic variance covmat for each tracer
                cov_CV_per_tracer = compute_covmat_cv(cosmo,
                                                      s_data.tracers[i].z,
                                                      s_data.tracers[i].Nz)
                if upsample > 1:
                    # upsample the covariance: interpolate rows first,
                    # then columns
                    new_Nz_per_tracer = Nz_per_tracer * upsample
                    assert new_Nz_per_tracer == len(zs_mean)
                    cov_CV_up_per_tracer = np.zeros((new_Nz_per_tracer,
                                                     new_Nz_per_tracer))
                    intermediate_cov = np.zeros((Nz_per_tracer,
                                                 new_Nz_per_tracer))
                    """
                    ## Equivalent in-place variant. It looks wrong (it
                    ## overwrites the array it reads from), but each column
                    ## is fully read to build the interpolator before it is
                    ## overwritten, so it gives the same result.
                    for row in range(Nz_per_tracer):
                        fun = interpolate.interp1d(zs_data,
                                                   cov_CV_per_tracer[row, :],
                                                   fill_value="extrapolate")
                        bor_cov_CV_up_per_tracer[row, :] = fun(zs_mean)
                    for col in range(Nz_per_tracer * upsample):
                        fun = interpolate.interp1d(zs_data,
                                                   bor_cov_CV_up_per_tracer[:len(zs_data), col],
                                                   fill_value="extrapolate")
                        bor_cov_CV_up_per_tracer[:, col] = fun(zs_mean)
                    """
                    for row in range(Nz_per_tracer):
                        fun = interpolate.interp1d(zs_data,
                                                   cov_CV_per_tracer[row, :],
                                                   fill_value="extrapolate")
                        intermediate_cov[row, :] = fun(zs_mean)
                    for col in range(Nz_per_tracer * upsample):
                        fun = interpolate.interp1d(zs_data,
                                                   intermediate_cov[:, col],
                                                   fill_value="extrapolate")
                        cov_CV_up_per_tracer[:, col] = fun(zs_mean)
                    cov_CV[i * len(zs_mean):(i + 1) * len(zs_mean),
                           i * len(zs_mean):(i + 1) * len(zs_mean)] = cov_CV_up_per_tracer
                else:
                    cov_CV[i * len(zs_mean):(i + 1) * len(zs_mean),
                           i * len(zs_mean):(i + 1) * len(zs_mean)] = cov_CV_per_tracer
            np.save(covfn, cov_CV)
    else:
        cov_CV = 0

    # impose smoothness
    D = obtain_generalized_D(s_mean, A_smooth, dz_thr)
    # compute total covariance of noise
    cov_total = cov_noise + cov_CV
    # compute precision with and without the smoothing matrix D
    P0 = np.linalg.inv(cov_total)
    P = P0 + D
    # get the smoothed N(z) for all tracers
    s_smooth = np.dot(np.dot(np.linalg.inv(P), P0), s0)
    #s_smooth = s0

    tr = []
    for i in range(N_tracers):
        T = sacc.Tracer('bin_%d' % i, 'point', zs_mean,
                        s_smooth[i * Nz_per_tracer * upsample:
                                 (i + 1) * Nz_per_tracer * upsample],
                        exp_sample='HSC_DESC')
        tr.append(T)
    s = sacc.SACC(tr, s_data.binning, s_data.mean)

    # return smooth s and smoothing prior
    return s, P
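# Illustrative standalone sketch (toy covariance) of the two-pass
# interpolation used above to upsample a per-tracer covariance matrix:
# interpolate each row onto the fine grid, then each resulting column.
import numpy as np
from scipy import interpolate

zs_t = np.linspace(0., 2., 10)
cov_t = np.exp(-0.5 * (zs_t[:, None] - zs_t[None, :])**2 / 0.1)  # toy covariance
z_up = np.linspace(0., 2., 40)
tmp = np.zeros((len(zs_t), len(z_up)))
for row in range(len(zs_t)):
    tmp[row] = interpolate.interp1d(zs_t, cov_t[row],
                                    fill_value="extrapolate")(z_up)
cov_up = np.zeros((len(z_up), len(z_up)))
for col in range(len(z_up)):
    cov_up[:, col] = interpolate.interp1d(zs_t, tmp[:, col],
                                          fill_value="extrapolate")(z_up)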
def get_tracer_from_Bpass(b):
    return sacc.Tracer(b.name, "spin2", b.nu, b.bnu, exp_sample='SO_SAT')
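# Hypothetical usage sketch, assuming a Bpass-like object exposing .name,
# .nu and .bnu, and the legacy sacc v0 API used throughout these snippets:
import numpy as np
from types import SimpleNamespace

b_demo = SimpleNamespace(name="band1",
                         nu=np.array([25., 27., 29.]),    # frequency sampling
                         bnu=np.array([0.5, 1.0, 0.5]))   # transmission
T_demo = get_tracer_from_Bpass(b_demo)  # -> sacc.Tracer("band1", "spin2", ...)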