def get_N0_iter(qe_key, nlev_t, nlev_p, beam_fwhm, cls_unl, lmin_ivf, lmax_ivf, itermax, lmax_qlm=None):
    r"""Iterative lensing-N0 estimate

        Calculates iteratively partially lensed spectra and lensing noise levels.
        This uses the python camb package to get the partially lensed spectra.

        This makes no assumption on response = 1 / noise, hence is about twice as slow as it could be in standard cases.

        Args:
            qe_key: QE estimator key
            nlev_t: temperature noise level (in :math:`\mu `K-arcmin)
            nlev_p: polarisation noise level (in :math:`\mu `K-arcmin)
            beam_fwhm: Gaussian beam full width half maximum in arcmin
            cls_unl(dict): unlensed CMB power spectra
            lmin_ivf: minimal CMB multipole used in the QE
            lmax_ivf: maximal CMB multipole used in the QE
            itermax: number of iterations to perform
            lmax_qlm(optional): maximum lensing multipole to consider. Defaults to :math:`2 lmax_ivf`

        Returns:
            Array of shape (itermax + 1, lmax_qlm + 1) with all iterated N0s. First entry is standard N0.

        #FIXME: this requires the full camb python package for the lensed spectra calc.

     """
    assert qe_key in ['p_p', 'p', 'ptt'], qe_key
    try:
        from camb.correlations import lensed_cls
    except ImportError:
        assert 0, "could not import camb.correlations.lensed_cls"

    def cls2dls(cls):
        """Turns cls dict. into camb cl array format"""
        keys = ['tt', 'ee', 'bb', 'te']
        lmax = np.max([len(cl) for cl in cls.values()]) - 1
        dls = np.zeros((lmax + 1, 4), dtype=float)
        refac = np.arange(lmax + 1) * np.arange(1, lmax + 2, dtype=float) / (2. * np.pi)
        for i, k in enumerate(keys):
            cl = cls.get(k, np.zeros(lmax + 1, dtype=float))
            sli = slice(0, min(len(cl), lmax + 1))
            dls[sli, i] = cl[sli] * refac[sli]
        cldd = np.copy(cls.get('pp', None))
        if cldd is not None:
            cldd *= np.arange(len(cldd)) ** 2 * np.arange(1, len(cldd) + 1, dtype=float) ** 2 / (2. * np.pi)
        return dls, cldd

    def dls2cls(dls):
        """Inverse operation to cls2dls"""
        assert dls.shape[1] == 4
        lmax = dls.shape[0] - 1
        cls = {}
        refac = 2. * np.pi * utils.cli(np.arange(lmax + 1) * np.arange(1, lmax + 2, dtype=float))
        for i, k in enumerate(['tt', 'ee', 'bb', 'te']):
            cls[k] = dls[:, i] * refac
        return cls

    if lmax_qlm is None:
        lmax_qlm = 2 * lmax_ivf
    lmax_qlm = min(lmax_qlm, 2 * lmax_ivf)
    lmin_ivf = max(lmin_ivf, 1)
    transfi2 = utils.cli(hp.gauss_beam(beam_fwhm / 180. / 60. * np.pi, lmax=lmax_ivf)) ** 2
    llp2 = np.arange(lmax_qlm + 1, dtype=float) ** 2 * np.arange(1, lmax_qlm + 2, dtype=float) ** 2 / (2. * np.pi)
    N0s = []
    N0 = np.inf
    for irr, it in utils.enumerate_progress(range(itermax + 1)):
        dls_unl, cldd = cls2dls(cls_unl)
        clwf = 0. if it == 0 else cldd[:lmax_qlm + 1] * utils.cli(cldd[:lmax_qlm + 1] + llp2 * N0[:lmax_qlm + 1])
        cldd[:lmax_qlm + 1] *= (1. - clwf)
        cls_plen = dls2cls(lensed_cls(dls_unl, cldd))
        cls_ivfs = {}
        if qe_key in ['ptt', 'p_p', 'p']:
            cls_ivfs['tt'] = cls_plen['tt'][:lmax_ivf + 1] + (nlev_t * np.pi / 180. / 60.) ** 2 * transfi2
        if qe_key in ['p_p', 'p']:
            cls_ivfs['ee'] = cls_plen['ee'][:lmax_ivf + 1] + (nlev_p * np.pi / 180. / 60.) ** 2 * transfi2
            cls_ivfs['bb'] = cls_plen['bb'][:lmax_ivf + 1] + (nlev_p * np.pi / 180. / 60.) ** 2 * transfi2
        if qe_key in ['p']:
            cls_ivfs['te'] = np.copy(cls_plen['te'][:lmax_ivf + 1])
        cls_ivfs = utils.cl_inverse(cls_ivfs)
        for cl in cls_ivfs.values():
            cl[:lmin_ivf] *= 0.
        fal = cls_ivfs
        n_gg = get_nhl(qe_key, qe_key, cls_plen, cls_ivfs, lmax_ivf, lmax_ivf, lmax_out=lmax_qlm)[0]
        r_gg = qresp.get_response(qe_key, lmax_ivf, 'p', cls_plen, cls_plen, fal, lmax_qlm=lmax_qlm)[0]
        N0 = n_gg * utils.cli(r_gg ** 2)
        N0s.append(N0)
    return np.array(N0s)
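# Illustrative usage sketch (not part of the original module): a minimal call of the
# iterative N0 above for a polarisation-only estimator. The helper name and the settings
# are hypothetical; the unlensed-spectra file name is an assumption (the FFP10
# lens-potential Cls shipped with plancklens), and this assumes os, numpy as np,
# plancklens and utils are already imported as in the rest of this module.
def _example_N0_iter_polarisation():
    cls_path = os.path.join(os.path.dirname(os.path.abspath(plancklens.__file__)), 'data', 'cls')
    cls_unl = utils.camb_clfile(os.path.join(cls_path, 'FFP10_wdipole_lenspotentialCls.dat'))  # assumed file name
    # 1 uK-arcmin T noise, sqrt(2) uK-arcmin P noise, 3 arcmin beam, CMB multipoles 10-2048, 5 iterations
    N0s = get_N0_iter('p_p', 1., np.sqrt(2.), 3., cls_unl, 10, 2048, 5)
    return N0s[0], N0s[-1]  # standard QE N0 and the iterated (delensed) N0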
def get_N0_iter(qe_key: str, nlev_t: float, nlev_p: float, beam_fwhm: float, cls_unl_fid: dict, lmin_ivf, lmax_ivf, itermax,
                cls_unl_dat=None, lmax_qlm=None, ret_delcls=False, datnoise_cls: dict or None = None, unlQE=False, version='1'):
    r"""Iterative lensing-N0 estimate

        Calculates iteratively partially lensed spectra and lensing noise levels.
        This uses the python camb package to get the partially lensed spectra.

        This makes no assumption on response = 1 / noise, hence is about twice as slow as it could be in standard cases.

        Args:
            qe_key: QE estimator key
            nlev_t: temperature noise level (in :math:`\mu `K-arcmin)
            nlev_p: polarisation noise level (in :math:`\mu `K-arcmin)
            beam_fwhm: Gaussian beam full width half maximum in arcmin
            cls_unl_fid(dict): unlensed CMB power spectra
            lmin_ivf: minimal CMB multipole used in the QE
            lmax_ivf: maximal CMB multipole used in the QE
            itermax: number of iterations to perform
            cls_unl_dat(optional): true unlensed CMB power spectra of the data, if different from the fiducial ones
            lmax_qlm(optional): maximum lensing multipole to consider. Defaults to :math:`2 lmax_ivf`
            ret_delcls(optional): returns the partially delensed CMB cls as well if set
            datnoise_cls(optional): feeds in custom noise spectra to the data. The nlevs and beam only apply to the filtering in this case
            unlQE(optional): uses the unlensed spectra in the QE weights and filters if set (instead of the partially lensed ones)
            version(optional): string of optional flags; 'wE' models imperfect knowledge of E in the iterations, 'wN1' includes the N1 bias (requires the lensitbiases package)

        Returns:
            Array of shape (itermax + 1, lmax_qlm + 1) with all iterated N0s. First entry is standard N0.

        Note:
            This assumes the unlensed spectra are known

        #FIXME: this requires the full camb python package for the lensed spectra calc.

     """
    assert qe_key in ['p_p', 'p', 'ptt'], qe_key
    try:
        from camb.correlations import lensed_cls
    except ImportError:
        assert 0, "could not import camb.correlations.lensed_cls"

    if lmax_qlm is None:
        lmax_qlm = 2 * lmax_ivf
    lmax_qlm = min(lmax_qlm, 2 * lmax_ivf)
    lmin_ivf = max(lmin_ivf, 1)
    transfi2 = utils.cli(hp.gauss_beam(beam_fwhm / 180. / 60. * np.pi, lmax=lmax_ivf)) ** 2
    llp2 = np.arange(lmax_qlm + 1, dtype=float) ** 2 * np.arange(1, lmax_qlm + 2, dtype=float) ** 2 / (2. * np.pi)

    if datnoise_cls is None:
        datnoise_cls = dict()
        if qe_key in ['ptt', 'p']:
            datnoise_cls['tt'] = (nlev_t * np.pi / 180. / 60.) ** 2 * transfi2
        if qe_key in ['p_p', 'p']:
            datnoise_cls['ee'] = (nlev_p * np.pi / 180. / 60.) ** 2 * transfi2
            datnoise_cls['bb'] = (nlev_p * np.pi / 180. / 60.) ** 2 * transfi2
    N0s_biased = []
    N0s_unbiased = []
    N1s_biased = []
    N1s_unbiased = []
    delcls_fid = []
    delcls_true = []

    N0_unbiased = np.inf
    N1_unbiased = np.inf
    dls_unl_fid, cldd_fid = cls2dls(cls_unl_fid)
    cls_len_fid = dls2cls(lensed_cls(dls_unl_fid, cldd_fid))
    if cls_unl_dat is None:
        cls_unl_dat = cls_unl_fid
        cls_len_true = cls_len_fid
    else:
        dls_unl_true, cldd_true = cls2dls(cls_unl_dat)
        cls_len_true = dls2cls(lensed_cls(dls_unl_true, cldd_true))
    cls_plen_true = cls_len_true
    for irr, it in utils.enumerate_progress(range(itermax + 1)):
        dls_unl_true, cldd_true = cls2dls(cls_unl_dat)
        dls_unl_fid, cldd_fid = cls2dls(cls_unl_fid)
        if it == 0:
            rho_sqd_phi = 0.
        else:
            # The cross-correlation coefficient is identical for the Rfid-biased QE or the rescaled one
            rho_sqd_phi = np.zeros(len(cldd_true))
            rho_sqd_phi[:lmax_qlm + 1] = cldd_true[:lmax_qlm + 1] * utils.cli(cldd_true[:lmax_qlm + 1] + llp2 * (N0_unbiased[:lmax_qlm + 1] + N1_unbiased[:lmax_qlm + 1]))

        if 'wE' in version:
            assert qe_key in ['p_p']
            if it == 0:
                print('including imperfect knowledge of E in iterations')
            slic = slice(lmin_ivf, lmax_ivf + 1)
            rho_sqd_E = np.zeros(len(dls_unl_true[:, 1]))
            rho_sqd_E[slic] = cls_unl_dat['ee'][slic] * utils.cli(cls_plen_true['ee'][slic] + datnoise_cls['ee'][slic])
            dls_unl_fid[:, 1] *= rho_sqd_E
            dls_unl_true[:, 1] *= rho_sqd_E
            cldd_fid *= rho_sqd_phi
            cldd_true *= rho_sqd_phi

            cls_plen_fid_resolved = dls2cls(lensed_cls(dls_unl_fid, cldd_fid))
            cls_plen_true_resolved = dls2cls(lensed_cls(dls_unl_true, cldd_true))
            cls_plen_fid = {ck: cls_len_fid[ck] - (cls_plen_fid_resolved[ck] - cls_unl_fid[ck][:len(cls_len_fid[ck])])
                            for ck in cls_len_fid.keys()}
            cls_plen_true = {ck: cls_len_true[ck] - (cls_plen_true_resolved[ck] - cls_unl_dat[ck][:len(cls_len_true[ck])])
                             for ck in cls_len_true.keys()}
        else:
            cldd_true *= (1. - rho_sqd_phi)  # The true residual lensing spec.
            cldd_fid *= (1. - rho_sqd_phi)   # What I think the residual lensing spec is
            cls_plen_fid = dls2cls(lensed_cls(dls_unl_fid, cldd_fid))
            cls_plen_true = dls2cls(lensed_cls(dls_unl_true, cldd_true))

        cls_filt = cls_plen_fid if not unlQE else cls_unl_fid
        cls_w = cls_plen_fid if not unlQE else cls_unl_fid
        cls_f = cls_plen_true
        fal = {}
        dat_delcls = {}
        if qe_key in ['ptt', 'p']:
            fal['tt'] = cls_filt['tt'][:lmax_ivf + 1] + (nlev_t * np.pi / 180. / 60.) ** 2 * transfi2
            dat_delcls['tt'] = cls_plen_true['tt'][:lmax_ivf + 1] + datnoise_cls['tt']
        if qe_key in ['p_p', 'p']:
            fal['ee'] = cls_filt['ee'][:lmax_ivf + 1] + (nlev_p * np.pi / 180. / 60.) ** 2 * transfi2
            fal['bb'] = cls_filt['bb'][:lmax_ivf + 1] + (nlev_p * np.pi / 180. / 60.) ** 2 * transfi2
            dat_delcls['ee'] = cls_plen_true['ee'][:lmax_ivf + 1] + datnoise_cls['ee']
            dat_delcls['bb'] = cls_plen_true['bb'][:lmax_ivf + 1] + datnoise_cls['bb']
        if qe_key in ['p']:
            fal['te'] = np.copy(cls_filt['te'][:lmax_ivf + 1])
            dat_delcls['te'] = np.copy(cls_plen_true['te'][:lmax_ivf + 1])
        fal = utils.cl_inverse(fal)
        for cl in fal.values():
            cl[:lmin_ivf] *= 0.
        for cl in dat_delcls.values():
            cl[:lmin_ivf] *= 0.
        cls_ivfs_arr = utils.cls_dot([fal, dat_delcls, fal])
        cls_ivfs = dict()
        for i, a in enumerate(['t', 'e', 'b']):
            for j, b in enumerate(['t', 'e', 'b'][i:]):
                if np.any(cls_ivfs_arr[i, j + i]):
                    cls_ivfs[a + b] = cls_ivfs_arr[i, j + i]

        n_gg = get_nhl(qe_key, qe_key, cls_w, cls_ivfs, lmax_ivf, lmax_ivf, lmax_out=lmax_qlm)[0]
        r_gg_true = qresp.get_response(qe_key, lmax_ivf, 'p', cls_w, cls_f, fal, lmax_qlm=lmax_qlm)[0]
        r_gg_fid = qresp.get_response(qe_key, lmax_ivf, 'p', cls_w, cls_w, fal, lmax_qlm=lmax_qlm)[0] if cls_f is not cls_w else r_gg_true
        N0_biased = n_gg * utils.cli(r_gg_fid ** 2)  # N0 of possibly biased (by Rtrue / Rfid) QE estimator
        N0_unbiased = n_gg * utils.cli(r_gg_true ** 2)  # N0 of QE estimator after rescaling by Rfid / Rtrue to make it unbiased
        N0s_biased.append(N0_biased)
        N0s_unbiased.append(N0_unbiased)
        cls_plen_true['pp'] = cldd_true * utils.cli(np.arange(len(cldd_true)) ** 2 * np.arange(1, len(cldd_true) + 1, dtype=float) ** 2 / (2. * np.pi))
        cls_plen_fid['pp'] = cldd_fid * utils.cli(np.arange(len(cldd_fid)) ** 2 * np.arange(1, len(cldd_fid) + 1, dtype=float) ** 2 / (2. * np.pi))

        if 'wN1' in version:
            if it == 0:
                print('Adding n1 in iterations')
            from lensitbiases import n1_fft
            from scipy.interpolate import UnivariateSpline as spl
            lib = n1_fft.n1_fft(fal, cls_w, cls_f, np.copy(cls_plen_true['pp']), lminbox=50, lmaxbox=5000, k2l=None)
            n1_Ls = np.arange(50, lmax_qlm + 1, 50)
            if lmax_qlm not in n1_Ls:
                n1_Ls = np.append(n1_Ls, lmax_qlm)
            n1 = np.array([lib.get_n1(qe_key, L, do_n1mat=False) for L in n1_Ls])
            N1_biased = spl(n1_Ls, n1_Ls ** 2 * (n1_Ls * 1. + 1) ** 2 * n1 / r_gg_fid[n1_Ls] ** 2, k=2, s=0, ext='zeros')(np.arange(len(N0_unbiased)))
            N1_biased *= utils.cli(np.arange(lmax_qlm + 1) ** 2 * np.arange(1, lmax_qlm + 2, dtype=float) ** 2)
            N1_unbiased = N1_biased * (r_gg_fid * utils.cli(r_gg_true)) ** 2
        else:
            N1_biased = np.zeros(lmax_qlm + 1, dtype=float)
            N1_unbiased = np.zeros(lmax_qlm + 1, dtype=float)

        delcls_fid.append(cls_plen_fid)
        delcls_true.append(cls_plen_true)

        N1s_biased.append(N1_biased)
        N1s_unbiased.append(N1_unbiased)

    return (np.array(N0s_biased), np.array(N0s_unbiased)) if not ret_delcls else ((np.array(N0s_biased), np.array(N0s_unbiased), delcls_fid, delcls_true))
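# Illustrative usage sketch (not part of the original module): hedged example of the
# extended interface above, returning the partially delensed spectra and switching on
# the optional N1 term through the `version` flag (this needs the lensitbiases package).
# The helper name, noise levels and the unlensed-spectra file name are assumptions.
def _example_N0_iter_with_delensed_cls():
    cls_path = os.path.join(os.path.dirname(os.path.abspath(plancklens.__file__)), 'data', 'cls')
    cls_unl_fid = utils.camb_clfile(os.path.join(cls_path, 'FFP10_wdipole_lenspotentialCls.dat'))  # assumed file name
    N0s_biased, N0s_unbiased, delcls_fid, delcls_true = get_N0_iter(
        'p_p', 1., np.sqrt(2.), 3., cls_unl_fid, 10, 2048, 5,
        ret_delcls=True, version='wN1')  # 'wN1' adds the N1 bias in the iterations
    # delcls_true[-1]['bb'] is the residual (partially delensed) B-mode spectrum after the last iteration
    return N0s_unbiased[-1], delcls_true[-1]['bb']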
fal_sepTP = {'tt': utils.cli(cls_len['tt'][:lmax_ivf + 1] + (nlev_t / 60. / 180. * np.pi) ** 2 / transf ** 2),
             'ee': utils.cli(cls_len['ee'][:lmax_ivf + 1] + (nlev_p / 60. / 180. * np.pi) ** 2 / transf ** 2),
             'bb': utils.cli(cls_len['bb'][:lmax_ivf + 1] + (nlev_p / 60. / 180. * np.pi) ** 2 / transf ** 2)}

cls_ivfs_sepTP = {'tt': fal_sepTP['tt'].copy(),
                  'ee': fal_sepTP['ee'].copy(),
                  'bb': fal_sepTP['bb'].copy(),
                  'te': cls_len['te'][:lmax_ivf + 1] * fal_sepTP['tt'] * fal_sepTP['ee']}

cls_dat = {'tt': (cls_len['tt'][:lmax_ivf + 1] + (nlev_t / 60. / 180. * np.pi) ** 2 / transf ** 2),
           'ee': (cls_len['ee'][:lmax_ivf + 1] + (nlev_p / 60. / 180. * np.pi) ** 2 / transf ** 2),
           'bb': (cls_len['bb'][:lmax_ivf + 1] + (nlev_p / 60. / 180. * np.pi) ** 2 / transf ** 2),
           'te': np.copy(cls_len['te'][:lmax_ivf + 1])}

fal_jtTP = utils.cl_inverse(cls_dat)
cls_ivfs_jtTP = utils.cl_inverse(cls_dat)

for cls in [fal_sepTP, fal_jtTP, cls_ivfs_sepTP, cls_ivfs_jtTP]:
    for cl in cls.values():
        cl[:max(1, lmin_ivf)] *= 0.

if ksource == 'p':
    w = lambda ell: ell ** 2 * (ell + 1) ** 2 * 1e7 * 0.5 / np.pi
    ylabel = r'$10^7\cdot L^2(L + 1)^2 C_L^{\phi\phi} / 2\pi$'
elif ksource == 'f':
    w = lambda ell: 1.
    ylabel = r'$C_L^{ff}$'
elif ksource == 'a':
    w = lambda ell: 1.
    ylabel = r'$C_L^{\alpha\alpha}$'
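# Hedged cross-check (not part of the original script): the hand-built cls_ivfs_sepTP
# above is the general rule used in get_N0 below, cls_ivfs = fal . C_dat . fal^t
# (a matrix product in T, E, B space), evaluated for the diagonal separate-T/P filter
# fal = 1/(C + N). This assumes utils.cls_dot with ret_dict=True returns the same
# spectrum keys ('tt', 'ee', 'bb', 'te') as the hand-built dictionary.
cls_ivfs_check = utils.cls_dot([fal_sepTP, cls_dat, fal_sepTP], ret_dict=True)
for k in cls_ivfs_sepTP:
    assert np.allclose(cls_ivfs_sepTP[k], cls_ivfs_check[k]), k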
def test_w():
    cls_path = os.path.join(os.path.dirname(os.path.abspath(plancklens.__file__)), 'data', 'cls')
    lmax_ivf = 500
    lmin_ivf = 100
    nlev_t = 35.
    nlev_p = 35. * np.sqrt(2.)
    beam_fwhm = 6.
    lmax_qlm = lmax_ivf
    for ksource in ['p', 'f']:
        if ksource in ['p', 'f']:
            qe_keys = [ksource + 'tt', ksource + '_p', ksource]
        elif ksource in ['a', 'a_p', 'stt']:
            qe_keys = [ksource]
        else:
            assert 0

        transf = hp.gauss_beam(beam_fwhm / 60. / 180. * np.pi, lmax=lmax_ivf)
        cls_len = utils.camb_clfile(os.path.join(cls_path, 'FFP10_wdipole_lensedCls.dat'))
        cls_weight = utils.camb_clfile(os.path.join(cls_path, 'FFP10_wdipole_lensedCls.dat'))

        fal_sepTP = {'tt': utils.cli(cls_len['tt'][:lmax_ivf + 1] + (nlev_t / 60. / 180. * np.pi) ** 2 / transf ** 2),
                     'ee': utils.cli(cls_len['ee'][:lmax_ivf + 1] + (nlev_p / 60. / 180. * np.pi) ** 2 / transf ** 2),
                     'bb': utils.cli(cls_len['bb'][:lmax_ivf + 1] + (nlev_p / 60. / 180. * np.pi) ** 2 / transf ** 2)}

        cls_ivfs_sepTP = {'tt': fal_sepTP['tt'].copy(),
                          'ee': fal_sepTP['ee'].copy(),
                          'bb': fal_sepTP['bb'].copy(),
                          'te': cls_len['te'][:lmax_ivf + 1] * fal_sepTP['tt'] * fal_sepTP['ee']}

        cls_dat = {'tt': (cls_len['tt'][:lmax_ivf + 1] + (nlev_t / 60. / 180. * np.pi) ** 2 / transf ** 2),
                   'ee': (cls_len['ee'][:lmax_ivf + 1] + (nlev_p / 60. / 180. * np.pi) ** 2 / transf ** 2),
                   'bb': (cls_len['bb'][:lmax_ivf + 1] + (nlev_p / 60. / 180. * np.pi) ** 2 / transf ** 2),
                   'te': np.copy(cls_len['te'][:lmax_ivf + 1])}

        fal_jtTP = utils.cl_inverse(cls_dat)
        cls_ivfs_jtTP = utils.cl_inverse(cls_dat)

        for cls in [fal_sepTP, fal_jtTP, cls_ivfs_sepTP, cls_ivfs_jtTP]:
            for cl in cls.values():
                cl[:max(1, lmin_ivf)] *= 0.

        for qe_key in qe_keys:
            NG, NC, NGC, NCG = nhl.get_nhl(qe_key, qe_key, cls_weight, cls_ivfs_sepTP, lmax_ivf, lmax_ivf, lmax_out=lmax_qlm)
            RG, RC, RGC, RCG = qresp.get_response(qe_key, lmax_ivf, ksource, cls_weight, cls_len, fal_sepTP, lmax_qlm=lmax_qlm)
            if qe_key[1:] in ['tt', '_p']:
                assert np.allclose(NG[1:], RG[1:], rtol=1e-6), qe_key
                assert np.allclose(NC[2:], RC[2:], rtol=1e-6), qe_key
                assert np.all(NCG == 0.) and np.all(NGC == 0.)  # for these keys
                assert np.all(RCG == 0.) and np.all(RGC == 0.)

        NG, NC, NGC, NCG = nhl.get_nhl(ksource, ksource, cls_weight, cls_ivfs_jtTP, lmax_ivf, lmax_ivf, lmax_out=lmax_qlm)
        RG, RC, RGC, RCG = qresp.get_response(ksource, lmax_ivf, ksource, cls_weight, cls_len, fal_jtTP, lmax_qlm=lmax_qlm)
        assert np.allclose(NG[1:], RG[1:], rtol=1e-6), ksource
        assert np.allclose(NC[2:], RC[2:], rtol=1e-6), ksource
        assert np.all(NCG == 0.) and np.all(NGC == 0.)  # for these keys
        assert np.all(RCG == 0.) and np.all(RGC == 0.)
def get_N0(beam_fwhm=1.4, nlev_t: float or np.ndarray = 5., nlev_p=None, lmax_CMB: dict or int = 3000,
           lmin_CMB=100, lmax_out=None, cls_len: dict or None = None, cls_weight: dict or None = None,
           joint_TP=True, ksource='p'):
    r"""Example function to calculate reconstruction noise levels for a bunch of quadratic estimators

        Args:
            beam_fwhm: beam fwhm in arcmin
            nlev_t: T white noise level in uK-arcmin (an array of size lmax_CMB can be passed for scale-dependent noise level)
            nlev_p: P white noise level in uK-arcmin (defaults to sqrt(2) nlev_t) (can also be an array)
            lmax_CMB: max. CMB multipole used in the QE (use a dict with 't', 'e', 'b' keys instead of int to set different CMB lmaxes)
            lmin_CMB: min. CMB multipole used in the QE
            lmax_out: max lensing 'L' multipole calculated
            cls_len: CMB spectra entering the sky response to the anisotropy (defaults to FFP10 lensed CMB spectra)
            cls_weight: CMB spectra entering the QE weights (defaults to FFP10 lensed CMB spectra)
            joint_TP: if True include calculation of the N0s for the GMV estimator (incl. joint T and P filtering)
            ksource: anisotropy source to consider (defaults to 'p', lensing)

        Returns:
            N0s array for the lensing gradient and curl modes for the T-only, P-only and (G)MV estimators

        Prompted by AL
    """
    if nlev_p is None:
        nlev_p = nlev_t * np.sqrt(2)
    if not isinstance(lmax_CMB, dict):
        lmaxs_CMB = {s: lmax_CMB for s in ['t', 'e', 'b']}
    else:
        lmaxs_CMB = lmax_CMB
        print("Seeing lmax's:")
        for s in lmaxs_CMB.keys():
            print(s + ': ' + str(lmaxs_CMB[s]))

    lmax_ivf = np.max(list(lmaxs_CMB.values()))
    lmin_ivf = lmin_CMB
    lmax_qlm = lmax_out or lmax_ivf
    cls_path = os.path.join(os.path.dirname(os.path.abspath(plancklens.__file__)), 'data', 'cls')
    cls_len = cls_len or utils.camb_clfile(os.path.join(cls_path, 'FFP10_wdipole_lensedCls.dat'))
    cls_weight = cls_weight or utils.camb_clfile(os.path.join(cls_path, 'FFP10_wdipole_lensedCls.dat'))

    # We consider here TT, Pol-only and the GMV comb if joint_TP is set
    qe_keys = [ksource + 'tt', ksource + '_p']
    if not joint_TP:
        qe_keys.append(ksource)

    # Simple white noise model. Can feed here something more fancy if desired
    transf = hp.gauss_beam(beam_fwhm / 60. / 180. * np.pi, lmax=lmax_ivf)
    Noise_L_T = (nlev_t / 60. / 180. * np.pi) ** 2 / transf ** 2
    Noise_L_P = (nlev_p / 60. / 180. * np.pi) ** 2 / transf ** 2

    # Data power spectra
    cls_dat = {'tt': (cls_len['tt'][:lmax_ivf + 1] + Noise_L_T),
               'ee': (cls_len['ee'][:lmax_ivf + 1] + Noise_L_P),
               'bb': (cls_len['bb'][:lmax_ivf + 1] + Noise_L_P),
               'te': np.copy(cls_len['te'][:lmax_ivf + 1])}

    for s in cls_dat.keys():
        cls_dat[s][min(lmaxs_CMB[s[0]], lmaxs_CMB[s[1]]) + 1:] *= 0.

    # (C+N)^{-1} filter spectra
    # For independent T and P filtering, this is really just 1/ (C+N), diagonal in T, E, B space
    fal_sepTP = {spec: utils.cli(cls_dat[spec]) for spec in ['tt', 'ee', 'bb']}

    # Spectra of the inverse-variance filtered maps
    # In general cls_ivfs = fal * dat_cls * fal^t, with a matrix product in T, E, B space
    cls_ivfs_sepTP = utils.cls_dot([fal_sepTP, cls_dat, fal_sepTP], ret_dict=True)

    # For joint TP filtering, fals is matrix inverse
    fal_jtTP = utils.cl_inverse(cls_dat)
    # since cls_dat = fals, cls_ivfs = fals. If the data spectra do not match the filter, this must be changed:
    cls_ivfs_jtTP = utils.cls_dot([fal_jtTP, cls_dat, fal_jtTP], ret_dict=True)

    for cls in [fal_sepTP, fal_jtTP, cls_ivfs_sepTP, cls_ivfs_jtTP]:
        for cl in cls.values():
            cl[:max(1, lmin_ivf)] *= 0.

    N0s = {}
    N0_curls = {}
    for qe_key in qe_keys:
        # This calculates the unnormalized QE gradient (G) and curl (C) variances and covariances:
        # (GC and CG are zero for most estimators)
        NG, NC, NGC, NCG = nhl.get_nhl(qe_key, qe_key, cls_weight, cls_ivfs_sepTP, lmax_ivf, lmax_ivf, lmax_out=lmax_qlm)
        # Calculation of the G to G, C to C, G to C and C to G QE responses (again, cross-terms are typically zero)
        RG, RC, RGC, RCG = qresp.get_response(qe_key, lmax_ivf, ksource, cls_weight, cls_len, fal_sepTP, lmax_qlm=lmax_qlm)

        # Gradient and curl noise terms
        N0s[qe_key] = utils.cli(RG ** 2) * NG
        N0_curls[qe_key] = utils.cli(RC ** 2) * NC

    if joint_TP:
        NG, NC, NGC, NCG = nhl.get_nhl(ksource, ksource, cls_weight, cls_ivfs_jtTP, lmax_ivf, lmax_ivf, lmax_out=lmax_qlm)
        RG, RC, RGC, RCG = qresp.get_response(ksource, lmax_ivf, ksource, cls_weight, cls_len, fal_jtTP, lmax_qlm=lmax_qlm)
        N0s[ksource] = utils.cli(RG ** 2) * NG
        N0_curls[ksource] = utils.cli(RC ** 2) * NC

    return N0s, N0_curls
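# Illustrative usage sketch (not part of the original module): a minimal call of get_N0
# above with its documented defaults (Planck-like white noise, lensing source 'p'), with
# the GMV (joint T/P filtering) N0 included. The helper name and settings are assumptions.
def _example_get_N0():
    N0s, N0_curls = get_N0(beam_fwhm=1.4, nlev_t=5., lmax_CMB=3000, lmin_CMB=100, lmax_out=2048,
                           joint_TP=True, ksource='p')
    # N0s['ptt'], N0s['p_p'] and N0s['p'] are the T-only, P-only and GMV gradient-mode N0s
    return N0s, N0_curls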
def get_N0_iter(qe_key: str, nlev_t: float or np.ndarray, nlev_p: float or np.ndarray, beam_fwhm: float,
                cls_unl_fid: dict, lmin_cmb, lmax_cmb, itermax, cls_unl_dat=None,
                lmax_qlm=None, ret_delcls=False, datnoise_cls: dict or None = None):
    r"""Iterative lensing-N0 estimate

        Calculates iteratively partially lensed spectra and lensing noise levels.
        This uses the python camb package to get the partially lensed spectra.

        At each iteration this takes out the resolved part of the lenses and recomputes a N0

        Args:
            qe_key: QE estimator key
            nlev_t: temperature noise level (in :math:`\mu `K-arcmin) (an array can be passed for scale-dependent noise level)
            nlev_p: polarisation noise level (in :math:`\mu `K-arcmin) (an array can be passed for scale-dependent noise level)
            beam_fwhm: Gaussian beam full width half maximum in arcmin
            cls_unl_fid(dict): unlensed CMB power spectra
            lmin_cmb: minimal CMB multipole used in the QE
            lmax_cmb: maximal CMB multipole used in the QE
            itermax: number of iterations to perform
            lmax_qlm(optional): maximum lensing multipole to consider. Defaults to 2 lmax_ivf
            ret_delcls(optional): returns the partially delensed CMB cls as well if set
            datnoise_cls(optional): feeds in custom noise spectra to the data. The nlevs and beam only apply to the filtering in this case

        Returns:
            Array of shape (itermax + 1, lmax_qlm + 1) with all iterated N0s. First entry is standard N0.

        Note:
            this requires the camb python package for the lensed spectra calc.

     """
    assert qe_key in ['p_p', 'p', 'ptt'], qe_key
    try:
        from camb.correlations import lensed_cls
    except ImportError:
        assert 0, "could not import camb.correlations.lensed_cls"

    if isinstance(lmax_cmb, dict):
        lmaxs_ivf = lmax_cmb
        print("Seeing lmax's:")
        for s in lmaxs_ivf.keys():
            print(s + ': ' + str(lmaxs_ivf[s]))
    else:
        lmaxs_ivf = {s: lmax_cmb for s in ['t', 'e', 'b']}
    lmin_ivf = lmin_cmb
    lmax_ivf = np.max(list(lmaxs_ivf.values()))
    if lmax_qlm is None:
        lmax_qlm = 2 * lmax_ivf
    lmax_qlm = min(lmax_qlm, 2 * lmax_ivf)
    lmin_ivf = max(lmin_ivf, 1)
    transfi2 = utils.cli(hp.gauss_beam(beam_fwhm / 180. / 60. * np.pi, lmax=lmax_ivf)) ** 2
    llp2 = np.arange(lmax_qlm + 1, dtype=float) ** 2 * np.arange(1, lmax_qlm + 2, dtype=float) ** 2 / (2. * np.pi)

    if datnoise_cls is None:
        datnoise_cls = dict()
        if qe_key in ['ptt', 'p']:
            datnoise_cls['tt'] = (nlev_t * np.pi / 180. / 60.) ** 2 * transfi2
        if qe_key in ['p_p', 'p']:
            datnoise_cls['ee'] = (nlev_p * np.pi / 180. / 60.) ** 2 * transfi2
            datnoise_cls['bb'] = (nlev_p * np.pi / 180. / 60.) ** 2 * transfi2
    N0s_biased = []
    N0s_unbiased = []
    delcls_fid = []
    delcls_true = []

    N0_unbiased = np.inf
    if cls_unl_dat is None:
        cls_unl_dat = cls_unl_fid

    for irr, it in utils.enumerate_progress(range(itermax + 1)):
        dls_unl_true, cldd_true = cls2dls(cls_unl_dat)
        dls_unl_fid, cldd_fid = cls2dls(cls_unl_fid)
        if it == 0:
            rho_sqd_phi = 0.
        else:
            # The cross-correlation coefficient is identical for the Rfid-biased QE or the rescaled one
            rho_sqd_phi = np.zeros(len(cldd_true))
            rho_sqd_phi[:lmax_qlm + 1] = cldd_true[:lmax_qlm + 1] * utils.cli(cldd_true[:lmax_qlm + 1] + llp2 * N0_unbiased[:lmax_qlm + 1])
        cldd_true *= (1. - rho_sqd_phi)  # The true residual lensing spec.
        cldd_fid *= (1. - rho_sqd_phi)   # What I think the residual lensing spec is
        cls_plen_fid = dls2cls(lensed_cls(dls_unl_fid, cldd_fid))
        cls_plen_true = dls2cls(lensed_cls(dls_unl_true, cldd_true))

        cls_filt = cls_plen_fid
        cls_f = cls_plen_true
        fal = {}
        dat_delcls = {}
        if qe_key in ['ptt', 'p']:
            fal['tt'] = cls_filt['tt'][:lmax_ivf + 1] + (nlev_t * np.pi / 180. / 60.) ** 2 * transfi2
            dat_delcls['tt'] = cls_plen_true['tt'][:lmax_ivf + 1] + datnoise_cls['tt']
        if qe_key in ['p_p', 'p']:
            fal['ee'] = cls_filt['ee'][:lmax_ivf + 1] + (nlev_p * np.pi / 180. / 60.) ** 2 * transfi2
            fal['bb'] = cls_filt['bb'][:lmax_ivf + 1] + (nlev_p * np.pi / 180. / 60.) ** 2 * transfi2
            dat_delcls['ee'] = cls_plen_true['ee'][:lmax_ivf + 1] + datnoise_cls['ee']
            dat_delcls['bb'] = cls_plen_true['bb'][:lmax_ivf + 1] + datnoise_cls['bb']
        if qe_key in ['p']:
            fal['te'] = np.copy(cls_filt['te'][:lmax_ivf + 1])
            dat_delcls['te'] = np.copy(cls_plen_true['te'][:lmax_ivf + 1])
        for spec in fal.keys():
            fal[spec][min(lmaxs_ivf[spec[0]], lmaxs_ivf[spec[1]]) + 1:] *= 0
        for spec in dat_delcls.keys():
            dat_delcls[spec][min(lmaxs_ivf[spec[0]], lmaxs_ivf[spec[1]]) + 1:] *= 0
        fal = utils.cl_inverse(fal)
        for cl in fal.values():
            cl[:lmin_ivf] *= 0.
        for cl in dat_delcls.values():
            cl[:lmin_ivf] *= 0.
        cls_ivfs = utils.cls_dot([fal, dat_delcls, fal], ret_dict=True)
        cls_w = deepcopy(cls_plen_fid)
        for spec in cls_w.keys():  # in principle not necessary
            cls_w[spec][:lmin_ivf] *= 0.
            cls_w[spec][min(lmaxs_ivf[spec[0]], lmaxs_ivf[spec[1]]) + 1:] *= 0

        n_gg = nhl.get_nhl(qe_key, qe_key, cls_w, cls_ivfs, lmax_ivf, lmax_ivf, lmax_out=lmax_qlm)[0]
        r_gg_true = qresp.get_response(qe_key, lmax_ivf, 'p', cls_w, cls_f, fal, lmax_qlm=lmax_qlm)[0]
        r_gg_fid = qresp.get_response(qe_key, lmax_ivf, 'p', cls_w, cls_w, fal, lmax_qlm=lmax_qlm)[0] if cls_f is not cls_w else r_gg_true
        N0_biased = n_gg * utils.cli(r_gg_fid ** 2)  # N0 of possibly biased (by Rtrue / Rfid) QE estimator
        N0_unbiased = n_gg * utils.cli(r_gg_true ** 2)  # N0 of QE estimator after rescaling by Rfid / Rtrue to make it unbiased
        N0s_biased.append(N0_biased)
        N0s_unbiased.append(N0_unbiased)
        cls_plen_true['pp'] = cldd_true * utils.cli(np.arange(len(cldd_true)) ** 2 * np.arange(1, len(cldd_true) + 1, dtype=float) ** 2 / (2. * np.pi))
        cls_plen_fid['pp'] = cldd_fid * utils.cli(np.arange(len(cldd_fid)) ** 2 * np.arange(1, len(cldd_fid) + 1, dtype=float) ** 2 / (2. * np.pi))

        delcls_fid.append(cls_plen_fid)
        delcls_true.append(cls_plen_true)

    return (np.array(N0s_biased), np.array(N0s_unbiased)) if not ret_delcls else ((np.array(N0s_biased), np.array(N0s_unbiased), delcls_fid, delcls_true))
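# Illustrative usage sketch (not part of the original module): hedged example of the
# variant above with a different CMB lmax per field passed as a dict, returning the
# partially delensed spectra as well. The helper name, settings and the unlensed-spectra
# file name are assumptions.
def _example_N0_iter_lmax_dict():
    cls_path = os.path.join(os.path.dirname(os.path.abspath(plancklens.__file__)), 'data', 'cls')
    cls_unl_fid = utils.camb_clfile(os.path.join(cls_path, 'FFP10_wdipole_lenspotentialCls.dat'))  # assumed file name
    lmaxs_cmb = {'t': 2000, 'e': 2048, 'b': 2048}  # lower T lmax than pol lmax
    N0s_biased, N0s_unbiased, delcls_fid, delcls_true = get_N0_iter(
        'p_p', 1., np.sqrt(2.), 3., cls_unl_fid, 30, lmaxs_cmb, 5, ret_delcls=True)
    return N0s_unbiased[-1], delcls_true[-1]['bb']  # iterated N0 and residual B-mode spectrum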