def quick_analytic_cov(l, Clth_dict, window, binning_file, lmax):
    """Quick Knox-style analytic covariance for the TT, TE, ET and EE spectra
    of the cross a x b with c x d; only the diagonal is filled."""
    bin_lo, bin_hi, bin_c, bin_size = pspy_utils.read_binning_file(binning_file, lmax)
    nbins = len(bin_size)
    fsky = steve_effective_fsky(window)
    # Knox prefactor: 1 / ((2 l_b + 1) fsky Delta_l_b)
    prefac = 1 / ((2 * bin_c + 1) * fsky * bin_size)

    cov = {}
    cov["TT"] = Clth_dict["TaTc"] * Clth_dict["TbTd"] + Clth_dict["TaTd"] * Clth_dict["TbTc"]
    cov["TE"] = Clth_dict["TaTc"] * Clth_dict["EbEd"] + Clth_dict["TaEd"] * Clth_dict["EbTc"]
    cov["ET"] = Clth_dict["EaEc"] * Clth_dict["TbTd"] + Clth_dict["EaTd"] * Clth_dict["TbEc"]
    cov["EE"] = Clth_dict["EaEc"] * Clth_dict["EbEd"] + Clth_dict["EaEd"] * Clth_dict["EbEc"]

    # bin each term and fill the diagonal of the (4 nbins) x (4 nbins) block matrix
    mat_diag = np.zeros((4 * nbins, 4 * nbins))
    for count, spec in enumerate(["TT", "TE", "ET", "EE"]):
        lb, cov[spec] = pspy_utils.naive_binning(l, cov[spec], binning_file, lmax)
        cov[spec] *= prefac
        for i in range(nbins):
            mat_diag[i + count * nbins, i + count * nbins] = cov[spec][i]
    return fsky, mat_diag
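# For orientation: a minimal standalone sketch of the Knox-style diagonal
# variance that quick_analytic_cov implements, here for a single TT
# auto-spectrum. The toy spectrum, fsky = 0.3 and uniform Delta_l = 40
# binning are illustrative assumptions; the real pipeline takes fsky from
# steve_effective_fsky and the bins from pspy_utils.read_binning_file.
import numpy as np

lmax, dl, fsky = 2000, 40, 0.3
l = np.arange(2, lmax)
cl_tt = 1e3 / l ** 2                              # toy TT power spectrum
bin_lo = np.arange(2, lmax - dl, dl)
bin_c = bin_lo + dl / 2                           # bin centers
# Knox formula: Var(C_b) = 2 <C_l^2>_b / ((2 l_b + 1) fsky Delta_l)
clsq_b = np.array([np.mean(cl_tt[(l >= lo) & (l < lo + dl)] ** 2) for lo in bin_lo])
var_b = 2 * clsq_b / ((2 * bin_c + 1) * fsky * dl)
print(np.sqrt(var_b[:5]))                         # toy binned TT error bars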
def theory_for_covariance(ps_dict, spec_name_list, spectra, lmax,
                          beam=None, binning_file=None, force_positive=True):
    """Prepare a spectra dictionary for the analytic covariance: optionally
    force the auto spectra positive and re-apply the beam."""
    ps_dict_for_cov = deepcopy(ps_dict)
    if force_positive:
        for name in spec_name_list:
            m1, m2 = name.split("x")
            for spec in spectra:
                X, Y = spec
                # force the auto spectra of both arrays to be positive
                ps_dict_for_cov["%sx%s" % (m1, m1)][X + X] = np.abs(ps_dict_for_cov["%sx%s" % (m1, m1)][X + X])
                ps_dict_for_cov["%sx%s" % (m2, m2)][Y + Y] = np.abs(ps_dict_for_cov["%sx%s" % (m2, m2)][Y + Y])
    if beam is not None:
        beam_data = np.loadtxt(beam)
        l, bl = beam_data[:, 0], beam_data[:, 1]
        lb, bb = pspy_utils.naive_binning(l, bl, binning_file, lmax)
        for name in spec_name_list:
            for spec in spectra:
                ps_dict_for_cov[name][spec] *= bb ** 2
    return ps_dict_for_cov
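# A short usage sketch for theory_for_covariance, assuming the function above
# is in scope. The toy ps_dict (array names "arA"/"arB" and three bins) is a
# placeholder. With beam=None only the force_positive branch runs, so neither
# pspy nor a binning file is needed here.
import numpy as np

spectra_toy = ["TT", "TE", "ET", "EE"]
ps_toy = {name: {s: np.array([1.0, -2.0, 3.0]) for s in spectra_toy}
          for name in ["arAxarA", "arAxarB", "arBxarB"]}
ps_cov = theory_for_covariance(ps_toy, ["arAxarB"], spectra_toy, lmax=None)
print(ps_cov["arAxarA"]["TT"])  # auto spectra are now forced positive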
surveys = d["surveys"]
lmax = d["lmax"]
type = d["type"]
binning_file = d["binning_file"]
lth = np.arange(2, lmax + 2)

for sv in surveys:
    arrays = d["arrays_%s" % sv]
    for id_ar1, ar1 in enumerate(arrays):
        for id_ar2, ar2 in enumerate(arrays):
            # only consider each array pair once
            if id_ar1 > id_ar2:
                continue

            # read and bin the beam of each array
            l, bl_ar1 = pspy_utils.read_beam_file(d["beam_%s_%s" % (sv, ar1)])
            l, bl_ar2 = pspy_utils.read_beam_file(d["beam_%s_%s" % (sv, ar2)])
            lb, bb_ar1 = pspy_utils.naive_binning(l, bl_ar1, binning_file, lmax)
            lb, bb_ar2 = pspy_utils.naive_binning(l, bl_ar2, binning_file, lmax)

            nsplits = len(d["maps_%s_%s" % (sv, ar1)])

            # read the measured noise power spectrum of this array pair
            spec_name_noise = "%s_%s_%sx%s_%s_noise" % (type, sv, ar1, sv, ar2)
            print(spec_name_noise)
            lb, nbs = so_spectra.read_ps("%s/%s.dat" % (spectra_dir, spec_name_noise), spectra=spectra)

            nl_dict = {}
            for spec in spectra:
                # re-convolve the noise spectrum with both binned beams
                nbs_mean = nbs[spec] * bb_ar1 * bb_ar2
                plt.figure(figsize=(12, 12))
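# Standalone sketch of the re-beaming step in the loop above: the binned
# noise spectrum is multiplied by both binned beams. All numbers here are
# toy placeholders, not pipeline outputs.
import numpy as np

nb = 50
bb_ar1 = np.exp(-np.arange(nb) / nb)   # toy binned beam transfer functions
bb_ar2 = np.exp(-np.arange(nb) / nb)
nbs_tt = np.full(nb, 1e-5)             # toy binned TT noise spectrum
nbs_mean = nbs_tt * bb_ar1 * bb_ar2    # noise model with the beams re-applied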
print("use kspace tf from file %s" % ks_f["tf"])
_, _, kf_tf, _ = np.loadtxt(ks_f["tf"], unpack=True)
tf_survey *= np.sqrt(np.abs(kf_tf[:len(lb)]))

if deconvolve_pixwin:
    # extra pixel window function deconvolution for healpix and planck projected on CAR
    pixwin_l = np.ones(2 * lmax)
    if sv == "Planck":
        print("Deconvolve Planck pixel window function")
        pixwin_l = hp.pixwin(2048)
    if template.pixel == "HEALPIX":
        pixwin_l = hp.pixwin(template.nside)
    # this should be checked with simulations since maybe this should be done at the mcm level
    _, pw = pspy_utils.naive_binning(np.arange(len(pixwin_l)), pixwin_l, binning_file, lmax)
    tf_survey *= pw

for id_ar, ar in enumerate(d["arrays_%s" % sv]):
    tf_array[sv, ar] = tf_survey.copy()
    if d["deconvolve_map_maker_tf_%s" % sv]:
        print("deconvolve map maker tf %s %s" % (sv, ar))
        _, mm_tf = np.loadtxt("mm_tf_%s_%s.dat" % (sv, ar), unpack=True)
        tf_array[sv, ar] *= mm_tf[:len(lb)]
    np.savetxt(spec_dir + "/tf_%s_%s.dat" % (sv, ar), np.transpose([lb, tf_array[sv, ar]]))

# compute the power spectra
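# A sketch of the pixel-window step above in isolation, assuming healpy is
# available: bin the nside=2048 pixel window into uniform Delta_l = 40 bins
# (a stand-in for pspy_utils.naive_binning) and fold it into the survey
# transfer function.
import numpy as np
import healpy as hp

lmax, dl = 2000, 40
pixwin_l = hp.pixwin(2048)                      # temperature pixel window
bin_lo = np.arange(2, lmax - dl, dl)
pw = np.array([pixwin_l[lo:lo + dl].mean() for lo in bin_lo])
tf_survey = np.ones(len(pw))
tf_survey *= pw                                 # deconvolved from the spectra later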
arrays_1 = d["arrays_%s" % sv1]
nsplits_1 = nsplit[sv1]

if d["tf_%s" % sv1] is not None:
    print("will deconvolve tf of %s" % sv1)
    _, _, tf1, _ = np.loadtxt(d["tf_%s" % sv1], unpack=True)
    tf1 = tf1[:len(lb)]
else:
    tf1 = np.ones(len(lb))

if deconvolve_pixwin:
    # we have an extra correction for the 1d healpix pixel window function
    # this should be checked with simulations since maybe this
    # step should be done at the mcm level
    l_pw = np.arange(len(pixwin_l[sv1]))
    _, pw1 = pspy_utils.naive_binning(l_pw, pixwin_l[sv1], binning_file, lmax)
    tf1 *= pw1

for id_ar1, ar1 in enumerate(arrays_1):
    for id_sv2, sv2 in enumerate(surveys):
        arrays_2 = d["arrays_%s" % sv2]
        nsplits_2 = nsplit[sv2]

        if d["tf_%s" % sv2] is not None:
            print("will deconvolve tf of %s" % sv2)
            _, _, tf2, _ = np.loadtxt(d["tf_%s" % sv2], unpack=True)
            tf2 = tf2[:len(lb)]
        else:
            tf2 = np.ones(len(lb))
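# Sketch of how tf1 and tf2 would then be applied (an assumed convention,
# based on the loading code above: each binned cross-spectrum between sv1
# and sv2 is divided by the product of the two transfer functions).
import numpy as np

nb = 50
tf1 = np.linspace(0.8, 1.0, nb)   # stand-ins for the loaded tf columns
tf2 = np.linspace(0.9, 1.0, nb)
ps_b = np.ones(nb)                # toy binned cross-spectrum
ps_b /= tf1 * tf2                 # deconvolve both surveys' transfer functions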