Example No. 1
def vmaps2vmap_P(pix_vmaps, weights_e, weights_b, nside):
    """From individual Q and U freq pixel variance maps and weights create expected pixel variance map

        Args:
            pix_vmaps: list of pixel variance maps
            weights_e: weights for E-mode freq. weighting (as applied onto the noise maps)
            weights_b: weights for B-mode freq. weighting (as applied onto the noise maps)
            nside: desired output map resolution

       Note:
           the pix_vmaps in pol in this routine are expected to be ~ 1/2 (s2_Q + s2_U)

           See Planck 2018 gravitational lensing paper Eqs 16-17


    """
    assert len(pix_vmaps) == len(weights_e), (len(pix_vmaps), len(weights_e))
    assert len(pix_vmaps) == len(weights_b), (len(pix_vmaps), len(weights_b))

    nf, lmaxp1_e = weights_e.shape
    nf, lmaxp1_b = weights_b.shape

    lmax_out = min(2 * max(lmaxp1_e, lmaxp1_b) - 2, 3 * nside - 1)
    ret_lm = np.zeros(hp.Alm.getsize(lmax_out), dtype=complex)
    for i, (pix_vmap, wle, wlb) in enumerate_progress(
            list(zip(pix_vmaps, weights_e, weights_b))):
        m = read_map(pix_vmap)
        vpix = hp.nside2pixarea(hp.npix2nside(m.size), degrees=False)
        this_s2lm = hp.map2alm(m, iter=0, lmax=lmax_out)
        wl2 = 0.25 * vpix * _w2wsq(wle + wlb, 2, 2, lmax_out)
        wl2 += 0.25 * vpix * _w2wsq(wle - wlb, 2, -2, lmax_out)
        hp.almxfl(this_s2lm, wl2, inplace=True)
        ret_lm += this_s2lm
    return hp.alm2map(ret_lm, nside, verbose=False)
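
A minimal usage sketch for the routine above; the variance-map paths and the harmonic weight arrays are placeholders, only the call signature is taken from the code:

import numpy as np

lmax = 2048
pix_vmaps = ['vmap_100GHz_P.fits', 'vmap_143GHz_P.fits']          # placeholder paths
weights_e = np.ones((len(pix_vmaps), lmax + 1)) / len(pix_vmaps)  # placeholder E-mode weights
weights_b = np.ones((len(pix_vmaps), lmax + 1)) / len(pix_vmaps)  # placeholder B-mode weights

# expected polarization pixel variance map at the desired resolution
vmap_P = vmaps2vmap_P(pix_vmaps, weights_e, weights_b, nside=2048)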
Example No. 2
    def get_bamc(self):
        """Binned additive MC correction, with crude error bars.

            This compares the reconstruction on the simulations to the FFP10 input lensing spectrum.

            Note:
                the approximate error corrections to the additive MC correction variance follow Appendix C of
                https://arxiv.org/abs/1807.06210; see this reference for more details on their validity.

        """
        assert self.k1[0] == 'p' and self.k2[
            0] == 'p' and self.ksource == 'p', (self.k1, self.k2, self.ksource)
        ss2 = 2 * self.parfile.qcls_ss.get_sim_stats_qcl(
            self.k1, self.parfile.mc_sims_var, k2=self.k2).mean()
        cl_pred = utils.camb_clfile(
            os.path.join(
                self.cls_path,
                'FFP10_wdipole_lenspotentialCls.dat'))['pp'][:len(ss2)]
        qc_norm = utils.cli(
            self.parfile.qresp_dd.get_response(self.k1, self.ksource) *
            self.parfile.qresp_dd.get_response(self.k2, self.ksource))
        bp_stats = utils.stats(self.nbins)
        bp_n1 = self.get_n1()
        for i, idx in utils.enumerate_progress(self.parfile.mc_sims_var,
                                               label='collecting BP stats'):
            dd = self.parfile.qcls_dd.get_sim_qcl(self.k1, idx, k2=self.k2)
            bp_stats.add(
                self._get_binnedcl(qc_norm * (dd - ss2) - cl_pred) - bp_n1)
        NMF = len(self.parfile.qcls_dd.mc_sims_mf)
        NB = len(self.parfile.mc_sims_var)
        return bp_stats.mean(), bp_stats.sigmas_on_mean() * np.sqrt(
            (1. + 1. + 2. / NMF + 2 * NB / (float(NMF * NMF))))
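
A one-line usage sketch, assuming bplib is an instance of the band-power class this method belongs to (the instance name is a placeholder):

bamc, bamc_err = bplib.get_bamc()  # binned additive MC correction and crude error bars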
Example No. 3
    def get_sim_stats_qcl(self,
                          k1,
                          mc_sims,
                          k2=None,
                          recache=False,
                          lmax=None):
        """Returns the sim-average of the input *plancklens.qecl* list QE power spectra average

            Args:
                k1: QE anisotropy key 1
                mc_sims: the simulation indices to average the spectra over
                k2: QE anisotropy key 2 (defaults to k1)
                recache: recomputes and overwrites the cached result if set
                lmax: maximum multipole of the returned spectra (defaults to the lmax of the qcl library)

            Returns:
                *plancklens.utils.stats* instance

        """
        if k2 is None: k2 = k1
        if lmax is None: lmax = self.get_lmaxqcl(k1, k2)
        tfname = os.path.join(
            self.lib_dir, 'sim_qcl_stats_%s_%s_%s_%s.pk' %
            (k1, k2, lmax, utils.mchash(mc_sims)))
        if not os.path.exists(tfname) or recache:
            stats_qcl = utils.stats(lmax + 1, docov=False)
            for i, idx in utils.enumerate_progress(
                    mc_sims,
                    label='building sim_stats qcl (k1,k2)=' + str((k1, k2))):
                stats_qcl.add(self.get_sim_qcl(k1, idx, k2=k2, lmax=lmax))
            pk.dump(stats_qcl, open(tfname, 'wb'), protocol=2)
        return pk.load(open(tfname, 'rb'))
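
A usage sketch under stated assumptions: qcls_dd stands for a *plancklens.qecl* library instance (e.g. built in a parameter file), and the QE key and simulation indices are placeholders:

import numpy as np

mc_sims = np.arange(300)                         # placeholder simulation indices
stats = qcls_dd.get_sim_stats_qcl('p', mc_sims)  # 'p' taken here as the lensing QE key
mean_qcl = stats.mean()                          # sim-averaged raw QE power spectrum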
Example No. 4
    def __init__(self, n_inv, b_transf,
                 marge_monopole=False, marge_dipole=False, marge_uptolmin=-1, marge_maps=(), nlev_ftl=None):
        if isinstance(n_inv, list):
            n_inv_prod = util.load_map(n_inv[0])
            if len(n_inv) > 1:
                for n in n_inv[1:]:
                    n_inv_prod = n_inv_prod * util.load_map(n)
            n_inv = n_inv_prod
        else:
            n_inv = util.load_map(n_inv)
        print("opfilt_tt: inverse noise map std dev / av = %.3e" % (
                    np.std(n_inv[np.where(n_inv != 0.0)]) / np.average(n_inv[np.where(n_inv != 0.0)])))
        templates = []
        templates_hash = []
        for tmap in [util.load_map(m) for m in marge_maps]:
            assert (len(n_inv) == len(tmap))
            templates.append(template_removal.template_map(tmap))
            templates_hash.append(hashlib.sha1(tmap.view(np.uint8)).hexdigest())

        if marge_uptolmin >= 0:
            templates.append(template_removal.template_uptolmin(marge_uptolmin))
        else:
            if marge_monopole: templates.append(template_removal.template_monopole())
            if marge_dipole: templates.append(template_removal.template_dipole())

        if len(templates) != 0:
            nmodes = int(np.sum([t.nmodes for t in templates]))
            modes_idx_t = np.concatenate(([t.nmodes * [int(im)] for im, t in enumerate(templates)]))
            modes_idx_i = np.concatenate(([range(0, t.nmodes) for t in templates]))
            Pt_Nn1_P = np.zeros((nmodes, nmodes))
            for i, ir in enumerate_progress(range(nmodes), label='filling template (%s) projection matrix'%nmodes):
                tmap = np.copy(n_inv)
                templates[modes_idx_t[ir]].apply_mode(tmap, int(modes_idx_i[ir]))

                ic = 0
                for tc in templates[0:modes_idx_t[ir] + 1]:
                    Pt_Nn1_P[ir, ic:(ic + tc.nmodes)] = tc.dot(tmap)
                    Pt_Nn1_P[ic:(ic + tc.nmodes), ir] = Pt_Nn1_P[ir, ic:(ic + tc.nmodes)]
                    ic += tc.nmodes
            eigv, eigw = np.linalg.eigh(Pt_Nn1_P)
            eigv_inv = 1.0 / eigv
            self.Pt_Nn1_P_inv = np.dot(np.dot(eigw, np.diag(eigv_inv)), np.transpose(eigw))

        self.n_inv = n_inv
        self.b_transf = b_transf
        self.npix = len(self.n_inv)

        self.nside = hp.npix2nside(self.npix)
        self.marge_monopole = marge_monopole
        self.marge_dipole = marge_dipole
        self.marge_uptolmin = marge_uptolmin
        self.templates = templates
        self.templates_hash = templates_hash

        if nlev_ftl is None:
            nlev_ftl =  10800. / np.sqrt(np.sum(self.n_inv) / (4.0 * np.pi)) / np.pi
        self.nlev_ftl = nlev_ftl
        print("ninv_ftl: using %.2f uK-amin noise Cl"%self.nlev_ftl)
Example No. 5
    def get_mcn0_cov(self, mc_sims_dd=None):
        """Covariance matrix obtained from the realization-independent debiaser.

        """
        if mc_sims_dd is None: mc_sims_dd = self.parfile.mc_sims_var
        mcn0_cov = utils.stats(self.nbins)
        qc_norm = utils.cli(
            self.parfile.qresp_dd.get_response(self.k1, self.ksource) *
            self.parfile.qresp_dd.get_response(self.k2, self.ksource))
        for i, idx in utils.enumerate_progress(mc_sims_dd):
            dd = self.parfile.qcls_dd.get_sim_qcl(self.k1, idx, k2=self.k2)
            mcn0_cov.add(self._get_binnedcl(qc_norm * dd))
        return mcn0_cov.cov()
Example No. 6
    def get_nhl_cov(self, mc_sims_dd=None):
        """Covariance matrix obtained from the semi-analytical N0 debiaser.

        """
        if mc_sims_dd is None: mc_sims_dd = self.parfile.mc_sims_var
        nhl_cov = utils.stats(self.nbins)
        qc_norm = utils.cli(
            self.parfile.qresp_dd.get_response(self.k1, self.ksource) *
            self.parfile.qresp_dd.get_response(self.k2, self.ksource))
        for i, idx in utils.enumerate_progress(mc_sims_dd):
            dd = self.parfile.qcls_dd.get_sim_qcl(self.k1, idx, k2=self.k2)
            nhl_cov.add(
                self._get_binnedcl(qc_norm *
                                   (dd - self.parfile.nhl_dd.get_sim_nhl(
                                       int(idx), self.k1, self.k2))))
        return nhl_cov.cov()
Example No. 7
    def get_sim_qlm_mf(self, k, mc_sims, lmax=None):
        """Returns a QE mean-field estimate, by averaging QE estimates from a set simulations (caches the result).

            Args:
                k: quadratic estimator key
                mc_sims: simulation indices to use for the estimate.
                lmax: optionally reduces the lmax of the output healpy array.

        """
        if lmax is None:
            lmax = self.get_lmax_qlm(k)
        assert lmax <= self.get_lmax_qlm(k)
        if k in ['p_tp', 'x_tp']:
            return (self.get_sim_qlm_mf('%stt' % k[0], mc_sims, lmax=lmax) +
                    self.get_sim_qlm_mf('%s_p' % k[0], mc_sims, lmax=lmax))
        if k in ['p_te', 'p_tb', 'p_eb', 'x_te', 'x_tb', 'x_eb']:
            return  self.get_sim_qlm_mf(k[0] + k[2] + k[3], mc_sims, lmax=lmax)  \
                    + self.get_sim_qlm_mf(k[0] + k[3] + k[2], mc_sims, lmax=lmax)
        if '_bh_' in k:  # Bias-hardening
            assert self.resplib is not None, 'resplib arg necessary for this'
            kQE, ksource = k.split('_bh_')
            assert len(ksource) == 1 and ksource + kQE[1:] in self.keys, (
                ksource, kQE)
            assert self.get_lmax_qlm(kQE) == self.get_lmax_qlm(
                ksource + kQE[1:]), 'fix this (easy)'
            lmax = self.get_lmax_qlm(kQE)
            wL = self.resplib.get_response(kQE, ksource) * ut.cli(
                self.resplib.get_response(ksource + kQE[1:], ksource))
            ret = self.get_sim_qlm_mf(kQE, mc_sims, lmax=lmax)
            return ret - hp.almxfl(
                self.get_sim_qlm_mf(ksource + kQE[1:], mc_sims, lmax=lmax), wL)

        assert k in self.keys_fund, (k, self.keys_fund)
        fname = os.path.join(self.lib_dir,
                             'simMF_k1%s_%s.fits' % (k, ut.mchash(mc_sims)))
        if not os.path.exists(fname):
            this_mcs = np.unique(mc_sims)
            MF = np.zeros(hp.Alm.getsize(lmax), dtype=complex)
            if len(this_mcs) == 0: return MF
            for i, idx in ut.enumerate_progress(this_mcs,
                                                label='calculating %s MF' % k):
                MF += self.get_sim_qlm(k, idx, lmax=lmax)
            MF /= len(this_mcs)
            _write_alm(fname, MF)
            print("Cached ", fname)
        return ut.alm_copy(hp.read_alm(fname), lmax=lmax)
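
A usage sketch, assuming qlms_dd is an instance of the QE library this method belongs to (the instance name, key and simulation set are placeholders):

import numpy as np

mc_sims_mf = np.arange(100)                       # placeholder simulations for the mean field
qlm_mf = qlms_dd.get_sim_qlm_mf('p', mc_sims_mf)  # cached mean-field alm for QE key 'p'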
Example No. 8
    def compute_minv(self, lmax, fwd_op, cache_fname=None):
        if cache_fname is not None:
            assert not os.path.exists(cache_fname)

        nrlm = 2 * (lmax + 1)**2
        trlm = np.zeros(nrlm)
        tmat = np.zeros((nrlm, nrlm))

        ntmpl = 0
        if getattr(fwd_op.n_inv_filt, 'templates_p', None) is None:
            print("dense: did not find templates_p attribute")
        else:
            for t in fwd_op.n_inv_filt.templates_p:
                ntmpl += t.nmodes
        ntmpl += 8  # (1 mono + 3 dip) * (e+b)

        print("computing dense preconditioner:")
        print("     lmax  =", lmax)
        print("     ntmpl =", ntmpl)

        for j, i in enumerate_progress(np.arange(0, nrlm),
                                       label='filling matrix'):
            trlm[i] = 1.0
            tmat[:, i] = self.alm2rlm(fwd_op(self.rlm2alm(trlm)))
            trlm[i] = 0.0

        print("   inverting M...")
        eigv, eigw = np.linalg.eigh(tmat)

        assert np.all(eigv[ntmpl:] > 0.)
        eigv_inv = np.zeros_like(eigv)
        eigv_inv[ntmpl:] = 1.0 / eigv[ntmpl:]

        if ntmpl > 0:
            # do nothing to the ntmpl eigenmodes
            # with the lowest eigenvalues.
            print("     eigv[ntmpl-1] = ", eigv[ntmpl - 1])
            print("     eigv[ntmpl]   = ", eigv[ntmpl])
            eigv_inv[0:ntmpl] = 1.0

        self.minv = np.dot(np.dot(eigw, np.diag(eigv_inv)), np.transpose(eigw))

        if cache_fname is not None:
            pk.dump([lmax, self.hashdict(lmax, fwd_op), self.minv],
                    open(cache_fname, 'wb'))
Example No. 9
    def get_ampl_x_input(self, mc_sims=None):
        """Returns cross-correlation of phi-maps to input lensing maps.

            Uses qlms_x_i library of parfile

        """
        qlmi = self.parfile.qlms_x_in
        if mc_sims is None:
            mc_sims = np.unique(
                np.concatenate(
                    [self.parfile.mc_sims_var, self.parfile.mc_sims_bias]))
        xin = utils.stats(self.nbins)
        qnorm = utils.cli(
            self.parfile.qresp_dd.get_response(self.k1, self.ksource))
        for i, idx in utils.enumerate_progress(mc_sims):
            qi = qlmi.get_sim_qcl(self.k1, idx)
            xin.add(self._get_binnedcl(qnorm * qi) / self.fid_bandpowers)
        return xin
Example No. 10
    def compute_minv(self, lmax, fwd_op, cache_fname=None):
        if cache_fname is not None:
            assert not os.path.exists(cache_fname)

        nrlm = (lmax + 1)**2
        trlm = np.zeros(nrlm)
        tmat = np.zeros((nrlm, nrlm))

        ntmpl = 0
        for t in fwd_op.n_inv_filt.templates:
            ntmpl += t.nmodes

        print("computing dense preconditioner:")
        print("     lmax  =", lmax)
        print("     ntmpl =", ntmpl)

        if cache_fname is not None: print(" will cache minv in " + cache_fname)

        for j, i in enumerate_progress(np.arange(0, nrlm),
                                       label='filling matrix'):
            trlm[i] = 1.0
            tmat[:, i] = alm2rlm(fwd_op(rlm2alm(trlm)))
            trlm[i] = 0.0

        print("   inverting M...")
        eigv, eigw = np.linalg.eigh(tmat)

        assert np.all(eigv[ntmpl:] > 0.)
        eigv_inv = np.zeros_like(eigv)
        eigv_inv[ntmpl:] = 1.0 / eigv[ntmpl:]

        if ntmpl > 0:
            print("     eigv[ntmpl-1] = ", eigv[ntmpl - 1])
            print("     eigv[ntmpl]   = ", eigv[ntmpl])
            eigv_inv[0:ntmpl] = 1.0

        self.minv = np.dot(np.dot(eigw, np.diag(eigv_inv)), np.transpose(eigw))

        if cache_fname is not None:
            pk.dump([lmax, self.hashdict(lmax, fwd_op), self.minv],
                    open(cache_fname, 'wb'))
Example No. 11
def vmaps2vmap_I(pix_vmaps, weights, nside):
    """From individual freq pixel variance maps and weights create expected pixel variance map


       Args:
            pix_vmaps: list of pixel variance maps
            weights: weights for intensity freq. weighting (as applied onto the noise maps)
            nside: desired output map resolution

       See Planck 2018 gravitational lensing paper Eqs 16-17

    """
    assert len(pix_vmaps) == len(weights), (len(pix_vmaps), len(weights))
    nf, lmaxp1 = weights.shape
    lmax_out = min(2 * lmaxp1 - 2, 3 * nside - 1)
    ret_lm = np.zeros(hp.Alm.getsize(lmax_out), dtype=complex)
    for i, (pix_vmap, wl) in enumerate_progress(list(zip(pix_vmaps, weights))):
        m = read_map(pix_vmap)
        vpix = hp.nside2pixarea(hp.npix2nside(m.size), degrees=False)
        this_s2lm = hp.map2alm(m, iter=0, lmax=lmax_out)
        wl2 = _w2wsq(wl, 0, 0, lmax_out) * vpix
        hp.almxfl(this_s2lm, wl2, inplace=True)
        ret_lm += this_s2lm
    return hp.alm2map(ret_lm, nside, verbose=False)
Example No. 12
def get_nhls(qe_key1,
             qe_key2,
             cls_cmb_dat,
             cls_cmb_filt,
             cls_weight,
             lmin,
             lmax,
             lmax_qlm,
             transf,
             nlevts_filt,
             nlevts_map,
             nlevps_filt,
             nlevps_map,
             joint_TP=False,
             cacher=cachers.cacher_mem()):
    """Collects unnormalized estimator noise levels for a list of filtering noise levels and data map noise levels


        Args:
            qe_key1: first QE estimator key
            qe_key2: second QE estimator key
            cls_cmb_dat: CMB cls of the data maps
            cls_cmb_filt: CMB cls used for the filtering
            cls_weight: CMB cls in the QE weights
            lmin: minimum CMB multipole considered
            lmax: maximum CMB multipole considered
            lmax_qlm: QE output lmax
            transf: CMB transfer function
            nlevts_filt: list or array of filtering temperature noise levels
            nlevts_map: list or array of data map temperature noise levels
            nlevps_filt: list or array of filtering polarization noise levels
            nlevps_map: list or array of data map polarization noise levels
            joint_TP: uses joint temperature and polarization filtering if set, separate if not
            cacher: can be used to store results

        Returns:
            lists of reconstruction noise levels (GG, CC, GC, CG for spin-weight QE)

        Note:
            Results may be stored with the cacher, but only the filtering and data noise levels, the QE keys and joint_TP enter the cache file name

    """
    Nhls = []
    for i, (nlevt_f, nlevt_m, nlevp_f, nlevp_m) in utils.enumerate_progress(
            list(zip(nlevts_filt, nlevts_map, nlevps_filt, nlevps_map)),
            'collecting nhls'):
        fname = 'vmapnhl%s_%s_%s' % (
            'jTP' * joint_TP, qe_key1, qe_key2) + utils.clhash(
                np.array([nlevt_f, nlevt_m, nlevp_f, nlevp_m]))
        if not cacher.is_cached(fname):
            ivf_cls = get_ivf_cls(cls_cmb_dat,
                                  cls_cmb_filt,
                                  lmin,
                                  lmax,
                                  nlevt_f,
                                  nlevp_f,
                                  nlevt_m,
                                  nlevp_m,
                                  transf,
                                  jt_tp=joint_TP)[0]
            this_nhl = nhl.get_nhl(qe_key1,
                                   qe_key2,
                                   cls_weight,
                                   ivf_cls,
                                   lmax,
                                   lmax,
                                   lmax_out=lmax_qlm)
            cacher.cache(fname, this_nhl)
        Nhls.append(cacher.load(fname))
    return Nhls
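
A call sketch under stated assumptions: the spectra are read with the plancklens camb_clfile helper from a placeholder CAMB output file, the import path is assumed, and the noise levels are illustrative values in uK-arcmin:

import numpy as np
import healpy as hp
from plancklens import utils  # assumed plancklens-style import

lmin, lmax, lmax_qlm = 100, 2048, 4096
cls_len = utils.camb_clfile('FFP10_wdipole_lensedCls.dat')  # placeholder lensed-cls path
transf = hp.gauss_beam(np.radians(5. / 60.), lmax=lmax)     # 5-arcmin beam transfer
nlevts = [10., 20., 30.]                                    # temperature noise levels
nlevps = [np.sqrt(2.) * n for n in nlevts]                  # polarization noise levels

nhls = get_nhls('p', 'p', cls_len, cls_len, cls_len, lmin, lmax, lmax_qlm,
                transf, nlevts, nlevts, nlevps, nlevps, joint_TP=False)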
Example No. 13
def get_responses(qe_key,
                  cls_cmb_dat,
                  cls_cmb_filt,
                  cls_weight,
                  lmin,
                  lmax,
                  lmax_qlm,
                  transf,
                  nlevts_filt,
                  nlevps_filt,
                  joint_TP=False,
                  cacher=cachers.cacher_mem(),
                  source='p'):
    """Collects estimator responses for a list of filtering noise levels


        Args:
            qe_key: QE estimator key
            cls_cmb_dat: CMB cls of the data maps
            cls_cmb_filt: CMB cls used for the filtering
            cls_weight: CMB cls in the QE weights
            lmin: minimum CMB multipole considered
            lmax: maximum CMB multipole considered
            lmax_qlm: QE output lmax
            transf: CMB transfer function
            nlevts_filt: list or array of filtering temperature noise levels
            nlevps_filt: list or array of filtering polarization noise levels
            joint_TP: uses joint temperature and polarization filtering if set, separate if not
            cacher: can be used to store results
            source: QE response anisotropy source (defaults to lensing)

        Returns:
            lists of responses (GG, CC, GC, CG for spin-weight QE)

        Note:
            Results may be stored with the cacher, but only the filtering noise levels, the QE keys and joint_TP enter the cache file name

    """
    resps = []
    for i, (nlevt_f, nlevp_f) in utils.enumerate_progress(
            list(zip(nlevts_filt, nlevps_filt)), 'collecting responses'):
        fname = 'vmapresps%s_%s_%s' % ('jTP' * joint_TP, qe_key,
                                       qe_key) + utils.clhash(
                                           np.array([nlevt_f, nlevp_f]))
        if not cacher.is_cached(fname):
            cls_filt_i = get_ivf_cls(cls_cmb_dat,
                                     cls_cmb_filt,
                                     lmin,
                                     lmax,
                                     nlevt_f,
                                     nlevp_f,
                                     nlevt_f,
                                     nlevp_f,
                                     transf,
                                     jt_tp=joint_TP)[1]
            this_resp = qresp.get_response(qe_key,
                                           lmax,
                                           source,
                                           cls_weight,
                                           cls_cmb_dat,
                                           cls_filt_i,
                                           lmax_qlm=lmax_qlm)
            cacher.cache(fname, this_resp)
        resps.append(cacher.load(fname))
    return resps
Example No. 14
def get_N0_iter(qe_key: str,
                nlev_t: float or np.ndarray,
                nlev_p: float or np.ndarray,
                beam_fwhm: float,
                cls_unl_fid: dict,
                lmin_cmb,
                lmax_cmb,
                itermax,
                cls_unl_dat=None,
                lmax_qlm=None,
                ret_delcls=False,
                datnoise_cls: dict or None = None):
    r"""Iterative lensing-N0 estimate

        Calculates iteratively partially lensed spectra and lensing noise levels.
        This uses the python camb package to get the partially lensed spectra.

        At each iteration this takes out the resolved part of the lenses and recomputes a N0

        Args:
            qe_key: QE estimator key
            nlev_t: temperature noise level (in :math:`\mu `K-arcmin) (an array can be passed for scale-dependent noise level)
            nlev_p: polarisation noise level (in :math:`\mu `K-arcmin)(an array can be passed for scale-dependent noise level)
            beam_fwhm: Gaussian beam full width half maximum in arcmin
            cls_unl_fid(dict): unlensed CMB power spectra
            lmin_cmb: minimal CMB multipole used in the QE
            lmax_cmb: maximal CMB multipole used in the QE (can also be a dict with keys 't', 'e', 'b' for spectrum-dependent lmax's)
            itermax: number of iterations to perform
            cls_unl_dat(optional): unlensed CMB power spectra of the data (defaults to cls_unl_fid)
            lmax_qlm(optional): maximum lensing multipole to consider. Defaults to 2 lmax_ivf
            ret_delcls(optional): returns the partially delensed CMB cls as well if set
            datnoise_cls(optional): feeds in custom noise spectra to the data. The nlevs and beam only apply to the filtering in this case

        Returns:
            A tuple (N0s_biased, N0s_unbiased) of arrays of shape (itermax + 1, lmax_qlm + 1) with all iterated N0s (the first entry is the standard N0); if ret_delcls is set, the fiducial and true partially delensed cls are appended to the tuple.


        Note:
            this requires the camb python package for the lensed spectra calculation.

     """
    assert qe_key in ['p_p', 'p', 'ptt'], qe_key
    try:
        from camb.correlations import lensed_cls
    except ImportError:
        assert 0, "could not import camb.correlations.lensed_cls"

    if isinstance(lmax_cmb, dict):
        lmaxs_ivf = lmax_cmb
        print("Seeing lmax's:")
        for s in lmaxs_ivf.keys():
            print(s + ': ' + str(lmaxs_ivf[s]))
    else:
        lmaxs_ivf = {s: lmax_cmb for s in ['t', 'e', 'b']}
    lmin_ivf = lmin_cmb
    lmax_ivf = np.max(list(lmaxs_ivf.values()))
    if lmax_qlm is None:
        lmax_qlm = 2 * lmax_ivf
    lmax_qlm = min(lmax_qlm, 2 * lmax_ivf)
    lmin_ivf = max(lmin_ivf, 1)
    transfi2 = utils.cli(
        hp.gauss_beam(beam_fwhm / 180. / 60. * np.pi, lmax=lmax_ivf))**2
    llp2 = np.arange(lmax_qlm + 1, dtype=float)**2 * np.arange(
        1, lmax_qlm + 2, dtype=float)**2 / (2. * np.pi)
    if datnoise_cls is None:
        datnoise_cls = dict()
        if qe_key in ['ptt', 'p']:
            datnoise_cls['tt'] = (nlev_t * np.pi / 180. / 60.)**2 * transfi2
        if qe_key in ['p_p', 'p']:
            datnoise_cls['ee'] = (nlev_p * np.pi / 180. / 60.)**2 * transfi2
            datnoise_cls['bb'] = (nlev_p * np.pi / 180. / 60.)**2 * transfi2
    N0s_biased = []
    N0s_unbiased = []
    delcls_fid = []
    delcls_true = []

    N0_unbiased = np.inf
    if cls_unl_dat is None:
        cls_unl_dat = cls_unl_fid

    for irr, it in utils.enumerate_progress(range(itermax + 1)):
        dls_unl_true, cldd_true = cls2dls(cls_unl_dat)
        dls_unl_fid, cldd_fid = cls2dls(cls_unl_fid)
        if it == 0:
            rho_sqd_phi = 0.
        else:
            # The cross-correlation coefficient is identical for the Rfid-biased QE or the rescaled one
            rho_sqd_phi = np.zeros(len(cldd_true))
            rho_sqd_phi[:lmax_qlm + 1] = cldd_true[:lmax_qlm + 1] * utils.cli(
                cldd_true[:lmax_qlm + 1] + llp2 * N0_unbiased[:lmax_qlm + 1])

        cldd_true *= (1. - rho_sqd_phi)  # The true residual lensing spec.
        cldd_fid *= (1. - rho_sqd_phi
                     )  # What I think the residual lensing spec is
        cls_plen_fid = dls2cls(lensed_cls(dls_unl_fid, cldd_fid))
        cls_plen_true = dls2cls(lensed_cls(dls_unl_true, cldd_true))

        cls_filt = cls_plen_fid
        cls_f = cls_plen_true
        fal = {}
        dat_delcls = {}
        if qe_key in ['ptt', 'p']:
            fal['tt'] = cls_filt['tt'][:lmax_ivf + 1] + (
                nlev_t * np.pi / 180. / 60.)**2 * transfi2
            dat_delcls['tt'] = cls_plen_true['tt'][:lmax_ivf +
                                                   1] + datnoise_cls['tt']
        if qe_key in ['p_p', 'p']:
            fal['ee'] = cls_filt['ee'][:lmax_ivf + 1] + (
                nlev_p * np.pi / 180. / 60.)**2 * transfi2
            fal['bb'] = cls_filt['bb'][:lmax_ivf + 1] + (
                nlev_p * np.pi / 180. / 60.)**2 * transfi2
            dat_delcls['ee'] = cls_plen_true['ee'][:lmax_ivf +
                                                   1] + datnoise_cls['ee']
            dat_delcls['bb'] = cls_plen_true['bb'][:lmax_ivf +
                                                   1] + datnoise_cls['bb']
        if qe_key in ['p']:
            fal['te'] = np.copy(cls_filt['te'][:lmax_ivf + 1])
            dat_delcls['te'] = np.copy(cls_plen_true['te'][:lmax_ivf + 1])
        for spec in fal.keys():
            fal[spec][min(lmaxs_ivf[spec[0]], lmaxs_ivf[spec[1]]) + 1:] *= 0
        for spec in dat_delcls.keys():
            dat_delcls[spec][min(lmaxs_ivf[spec[0]], lmaxs_ivf[spec[1]]) +
                             1:] *= 0

        fal = utils.cl_inverse(fal)
        for cl in fal.values():
            cl[:lmin_ivf] *= 0.
        for cl in dat_delcls.values():
            cl[:lmin_ivf] *= 0.
        cls_ivfs = utils.cls_dot([fal, dat_delcls, fal], ret_dict=True)
        cls_w = deepcopy(cls_plen_fid)
        for spec in cls_w.keys():  # in principle not necessary
            cls_w[spec][:lmin_ivf] *= 0.
            cls_w[spec][min(lmaxs_ivf[spec[0]], lmaxs_ivf[spec[1]]) + 1:] *= 0

        n_gg = nhl.get_nhl(qe_key,
                           qe_key,
                           cls_w,
                           cls_ivfs,
                           lmax_ivf,
                           lmax_ivf,
                           lmax_out=lmax_qlm)[0]
        r_gg_true = qresp.get_response(qe_key,
                                       lmax_ivf,
                                       'p',
                                       cls_w,
                                       cls_f,
                                       fal,
                                       lmax_qlm=lmax_qlm)[0]
        r_gg_fid = qresp.get_response(
            qe_key, lmax_ivf, 'p', cls_w, cls_w, fal,
            lmax_qlm=lmax_qlm)[0] if cls_f is not cls_w else r_gg_true
        N0_biased = n_gg * utils.cli(
            r_gg_fid**
            2)  # N0 of possibly biased (by Rtrue / Rfid) QE estimator
        N0_unbiased = n_gg * utils.cli(
            r_gg_true**2
        )  # N0 of QE estimator after rescaling by Rfid / Rtrue to make it unbiased
        N0s_biased.append(N0_biased)
        N0s_unbiased.append(N0_unbiased)
        cls_plen_true['pp'] = cldd_true * utils.cli(
            np.arange(len(cldd_true))**2 *
            np.arange(1, len(cldd_true) + 1, dtype=float)**2 / (2. * np.pi))
        cls_plen_fid['pp'] = cldd_fid * utils.cli(
            np.arange(len(cldd_fid))**2 *
            np.arange(1, len(cldd_fid) + 1, dtype=float)**2 / (2. * np.pi))

        delcls_fid.append(cls_plen_fid)
        delcls_true.append(cls_plen_true)

    return (np.array(N0s_biased),
            np.array(N0s_unbiased)) if not ret_delcls else (
                (np.array(N0s_biased), np.array(N0s_unbiased), delcls_fid,
                 delcls_true))
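
A call sketch under stated assumptions: the fiducial unlensed spectra are read with the plancklens camb_clfile helper from a placeholder CAMB lens-potential file, the import path is assumed, and the noise and beam values are illustrative:

from plancklens import utils  # assumed plancklens-style import

cls_unl_fid = utils.camb_clfile('FFP10_wdipole_lenspotentialCls.dat')  # placeholder path
nlev_t, nlev_p, beam_fwhm = 1.5, 1.5 * 2 ** 0.5, 1.  # uK-arcmin noise levels, 1-arcmin beam
N0s_biased, N0s_unbiased = get_N0_iter('p_p', nlev_t, nlev_p, beam_fwhm, cls_unl_fid,
                                       lmin_cmb=30, lmax_cmb=4000, itermax=5, lmax_qlm=4096)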
Example No. 15
    def get_ps_data(self,
                    lmin_ss_s4=100,
                    lmax_ss_s4=2048,
                    mc_sims_ss=None,
                    mc_sims_ds=None):
        ks4 = 'stt'
        twolpo = 2 * np.arange(lmax_ss_s4 + 1) + 1.
        filt = np.ones(lmax_ss_s4 + 1, dtype=float)
        filt[:lmax_ss_s4] *= 0.

        dd_ptsrc = self.parfile.qcls_dd.get_sim_stats_qcl(
            ks4, self.parfile.mc_sims_var).mean()[:lmax_ss_s4 + 1]
        ds_ptsrc = self.parfile.qcls_ds.get_sim_stats_qcl(
            ks4, self.parfile.mc_sims_bias
            if mc_sims_ds is None else mc_sims_ds).mean()[:lmax_ss_s4 + 1]
        ss_ptsrc = self.parfile.qcls_ss.get_sim_stats_qcl(
            ks4, self.parfile.mc_sims_bias
            if mc_sims_ss is None else mc_sims_ss).mean()[:lmax_ss_s4 + 1]
        dat_ptsrc = self.parfile.qcls_dd.get_sim_qcl(ks4, -1)[:lmax_ss_s4 + 1]

        # This simple PS implementation accepts only identical filtering on each four legs.
        assert np.all(self.parfile.qcls_dd.qeA.f2map1.ivfs.get_ftl() ==
                      self.parfile.qcls_dd.qeA.f2map2.ivfs.get_ftl())
        assert np.all(self.parfile.qcls_dd.qeB.f2map1.ivfs.get_ftl() ==
                      self.parfile.qcls_dd.qeB.f2map2.ivfs.get_ftl())
        assert np.all(self.parfile.qcls_dd.qeA.f2map1.ivfs.get_ftl() ==
                      self.parfile.qcls_dd.qeB.f2map1.ivfs.get_ftl())
        ftl = self.parfile.qcls_dd.qeA.f2map1.ivfs.get_ftl()
        qc_resp_ptsrc = nhl.get_nhl(ks4,
                                    ks4, {}, {'tt': ftl},
                                    len(ftl) - 1,
                                    len(ftl) - 1,
                                    lmax_out=lmax_ss_s4)[0]**2

        s4_band_norm = 4.0 / np.sum(4.0 *
                                    (twolpo[lmin_ss_s4:lmax_ss_s4 + 1] *
                                     qc_resp_ptsrc[lmin_ss_s4:lmax_ss_s4 + 1]))
        s4_cl_dat = s4_band_norm * twolpo * (dat_ptsrc - 4. * ds_ptsrc +
                                             2. * ss_ptsrc)
        s4_cl_check = s4_band_norm * twolpo * (dd_ptsrc - 2. * ss_ptsrc)
        s4_cl_systs = s4_band_norm * twolpo * (4. * ds_ptsrc - 4. * ss_ptsrc)
        # phi-induced PS estimator N1
        clpp_fid = utils.camb_clfile(
            os.path.join(self.cls_path,
                         'FFP10_wdipole_lenspotentialCls.dat'))['pp']
        s4_cl_clpp_n1 = s4_band_norm * twolpo * self.get_n1(
            k1=ks4, k2=ks4, unnormed=True)[:lmax_ss_s4 + 1]

        s4_cl_clpp_prim = s4_band_norm * twolpo * self.parfile.qresp_dd.get_response(
            ks4, self.ksource)[:lmax_ss_s4 + 1]**2 * clpp_fid[:lmax_ss_s4 + 1]

        s4_band_dat = np.sum((s4_cl_dat - s4_cl_clpp_prim -
                              s4_cl_clpp_n1)[lmin_ss_s4:lmax_ss_s4 + 1])
        s4_band_check = np.sum((s4_cl_check - s4_cl_clpp_prim -
                                s4_cl_clpp_n1)[lmin_ss_s4:lmax_ss_s4 + 1])
        s4_band_syst = np.abs(np.sum(s4_cl_systs[lmin_ss_s4:lmax_ss_s4 + 1]))

        Cs2s2 = (s4_cl_dat - s4_cl_clpp_prim -
                 s4_cl_clpp_n1) * utils.cli(twolpo) / s4_band_norm
        Cs2s2 *= utils.cli(qc_resp_ptsrc[:lmax_ss_s4 + 1])
        # reconstructed PS power (with correct normalization)
        s4_band_sim_stats = []

        for i, idx in utils.enumerate_progress(self.parfile.mc_sims_var):
            ts4_cl = s4_band_norm * twolpo[: lmax_ss_s4 + 1] * \
                     (self.parfile.qcls_dd.get_sim_qcl(ks4, idx)[:lmax_ss_s4 + 1] - 2. * ss_ptsrc)
            s4_band_sim_stats.append(
                np.sum((ts4_cl - s4_cl_clpp_prim -
                        s4_cl_clpp_n1)[lmin_ss_s4:lmax_ss_s4 + 1]))

        print("ptsrc stats:")
        print('   fit range = [' + str(lmin_ss_s4) + ', ' + str(lmax_ss_s4) +
              ']')
        print('   sim avg has amplitude of ' +
              ('%.3g +- %0.3g (stat), discrepant from zero at %.3f sigma.' %
               (s4_band_check, np.std(s4_band_sim_stats) /
                np.sqrt(len(self.parfile.mc_sims_var)),
                s4_band_check / np.std(s4_band_sim_stats) *
                np.sqrt(len(self.parfile.mc_sims_var)))))
        print('   dat has amplitude of ' +
              ('%.3g +- %0.3g (stat), signif of %.3f sigma.' %
               (s4_band_dat, np.std(s4_band_sim_stats),
                s4_band_dat / np.sqrt(np.var(s4_band_sim_stats)))))
        qc_resp =   self.parfile.qresp_dd.get_response(self.k1, self.ksource) \
                  * self.parfile.qresp_dd.get_response(self.k2, self.ksource)
        # PS spectrum response to ks4, using the QE key - source key symmetry of the response functions.
        qlss = self.parfile.qresp_dd.get_response(
            ks4, self.k1[0]) * self.parfile.qresp_dd.get_response(
                ks4, self.k2[0])
        # Correction to apply to estimated spectrum :
        pp_cl_ps = s4_band_dat * utils.cli(qc_resp) * qlss
        return s4_band_dat, s4_band_check, s4_band_syst, s4_band_sim_stats, Cs2s2, pp_cl_ps
Example No. 16
def get_N0_iter(qe_key, nlev_t, nlev_p, beam_fwhm, cls_unl, lmin_ivf, lmax_ivf, itermax, lmax_qlm=None):
    """Iterative lensing-N0 estimate

        Calculates iteratively partially lensed spectra and lensing noise levels.
        This uses the python camb package to get the partially lensed spectra.

        This makes no assumption that the response equals 1 / noise, hence it is about twice as slow as it could be in standard cases.

        Args:
            qe_key: QE estimator key
            nlev_t: temperature noise level (in :math:`\mu `K-arcmin)
            nlev_p: polarisation noise level (in :math:`\mu `K-arcmin)
            beam_fwhm: Gaussian beam full width half maximum in arcmin
            cls_unl(dict): unlensed CMB power spectra
            lmin_ivf: minimal CMB multipole used in the QE
            lmax_ivf: maximal CMB multipole used in the QE
            itermax: number of iterations to perform
            lmax_qlm(optional): maximum lensing multipole to consider. Defaults to :math:`2 lmax_ivf`

        Returns:
            Array of shape (itermax + 1, lmax_qlm + 1) with all iterated N0s. First entry is standard N0.

    #FIXME: this requires the full camb python package for the lensed spectra calculation.

     """

    assert qe_key in ['p_p', 'p', 'ptt'], qe_key
    try:
        from camb.correlations import lensed_cls
    except ImportError:
        assert 0, "could not import camb.correlations.lensed_cls"

    def cls2dls(cls):
        """Turns cls dict. into camb cl array format"""
        keys = ['tt', 'ee', 'bb', 'te']
        lmax = np.max([len(cl) for cl in cls.values()]) - 1
        dls = np.zeros((lmax + 1, 4), dtype=float)
        refac = np.arange(lmax + 1) * np.arange(1, lmax + 2, dtype=float) / (2. * np.pi)
        for i, k in enumerate(keys):
            cl = cls.get(k, np.zeros(lmax + 1, dtype=float))
            sli = slice(0, min(len(cl), lmax + 1))
            dls[sli, i] = cl[sli] * refac[sli]
        cldd = np.copy(cls.get('pp', None))
        if cldd is not None:
            cldd *= np.arange(len(cldd)) ** 2 * np.arange(1, len(cldd) + 1, dtype=float) ** 2 /  (2. * np.pi)
        return dls, cldd

    def dls2cls(dls):
        """Inverse operation to cls2dls"""
        assert dls.shape[1] == 4
        lmax = dls.shape[0] - 1
        cls = {}
        refac = 2. * np.pi * utils.cli( np.arange(lmax + 1) * np.arange(1, lmax + 2, dtype=float))
        for i, k in enumerate(['tt', 'ee', 'bb', 'te']):
            cls[k] = dls[:, i] * refac
        return cls
    if lmax_qlm is None:
        lmax_qlm = 2 * lmax_ivf
    lmax_qlm = min(lmax_qlm, 2 * lmax_ivf)
    lmin_ivf = max(lmin_ivf, 1)
    transfi2 = utils.cli(hp.gauss_beam(beam_fwhm / 180. / 60. * np.pi, lmax=lmax_ivf)) ** 2
    llp2 = np.arange(lmax_qlm + 1, dtype=float) ** 2 * np.arange(1, lmax_qlm + 2, dtype=float) ** 2 / (2. * np.pi)
    N0s = []
    N0 = np.inf
    for irr, it in utils.enumerate_progress(range(itermax + 1)):
        dls_unl, cldd = cls2dls(cls_unl)
        clwf = 0. if it == 0 else cldd[:lmax_qlm + 1] * utils.cli(cldd[:lmax_qlm + 1] + llp2 * N0[:lmax_qlm + 1])
        cldd[:lmax_qlm + 1] *= (1. - clwf)
        cls_plen = dls2cls(lensed_cls(dls_unl, cldd))
        cls_ivfs = {}
        if qe_key in ['ptt', 'p_p', 'p']:
            cls_ivfs['tt'] = cls_plen['tt'][:lmax_ivf + 1] + (nlev_t * np.pi / 180. / 60.) ** 2 * transfi2
        if qe_key in ['p_p', 'p']:
            cls_ivfs['ee'] = cls_plen['ee'][:lmax_ivf + 1] + (nlev_p * np.pi / 180. / 60.) ** 2 * transfi2
            cls_ivfs['bb'] = cls_plen['bb'][:lmax_ivf + 1] + (nlev_p * np.pi / 180. / 60.) ** 2 * transfi2
        if qe_key in ['p']:
            cls_ivfs['te'] = np.copy(cls_plen['te'][:lmax_ivf + 1])
        cls_ivfs = utils.cl_inverse(cls_ivfs)
        for cl in cls_ivfs.values():
            cl[:lmin_ivf] *= 0.
        fal = cls_ivfs
        n_gg = get_nhl(qe_key, qe_key, cls_plen, cls_ivfs, lmax_ivf, lmax_ivf, lmax_out=lmax_qlm)[0]
        r_gg = qresp.get_response(qe_key, lmax_ivf, 'p', cls_plen, cls_plen, fal, lmax_qlm=lmax_qlm)[0]
        N0 = n_gg * utils.cli(r_gg ** 2)
        N0s.append(N0)
    return np.array(N0s)
Example No. 17
def get_N0_iter(qe_key: str,
                nlev_t: float,
                nlev_p: float,
                beam_fwhm: float,
                cls_unl_fid: dict,
                lmin_ivf,
                lmax_ivf,
                itermax,
                cls_unl_dat=None,
                lmax_qlm=None,
                ret_delcls=False,
                datnoise_cls: dict or None = None,
                unlQE=False,
                version='1'):
    """Iterative lensing-N0 estimate

        Calculates iteratively partially lensed spectra and lensing noise levels.
        This uses the python camb package to get the partially lensed spectra.

        This makes no assumption that the response equals 1 / noise, hence it is about twice as slow as it could be in standard cases.

        Args:
            qe_key: QE estimator key
            nlev_t: temperature noise level (in :math:`\mu `K-arcmin)
            nlev_p: polarisation noise level (in :math:`\mu `K-arcmin)
            beam_fwhm: Gaussian beam full width half maximum in arcmin
            cls_unl_fid(dict): unlensed CMB power spectra
            lmin_ivf: minimal CMB multipole used in the QE
            lmax_ivf: maximal CMB multipole used in the QE
            itermax: number of iterations to perform
            cls_unl_dat(optional): unlensed CMB power spectra of the data (defaults to cls_unl_fid)
            lmax_qlm(optional): maximum lensing multipole to consider. Defaults to :math:`2 lmax_ivf`
            ret_delcls(optional): returns the partially delensed CMB cls as well if set
            datnoise_cls(optional): feeds in custom noise spectra to the data. The nlevs and beam only apply to the filtering in this case
            unlQE(optional): uses the unlensed fiducial spectra in the filtering and QE weights if set
            version(optional): string of flags modifying the iterations ('wE' includes imperfect knowledge of E, 'wN1' adds the N1 bias to the iterations)

        Returns:
            A tuple (N0s_biased, N0s_unbiased) of arrays of shape (itermax + 1, lmax_qlm + 1) with all iterated N0s (the first entry is the standard N0); if ret_delcls is set, the fiducial and true partially delensed cls are appended to the tuple.


        Note: This assumes the unlensed spectra are known

    #FIXME: this requires the full camb python package for the lensed spectra calculation.

     """
    assert qe_key in ['p_p', 'p', 'ptt'], qe_key
    try:
        from camb.correlations import lensed_cls
    except ImportError:
        assert 0, "could not import camb.correlations.lensed_cls"

    if lmax_qlm is None:
        lmax_qlm = 2 * lmax_ivf
    lmax_qlm = min(lmax_qlm, 2 * lmax_ivf)
    lmin_ivf = max(lmin_ivf, 1)
    transfi2 = utils.cli(
        hp.gauss_beam(beam_fwhm / 180. / 60. * np.pi, lmax=lmax_ivf))**2
    llp2 = np.arange(lmax_qlm + 1, dtype=float)**2 * np.arange(
        1, lmax_qlm + 2, dtype=float)**2 / (2. * np.pi)
    if datnoise_cls is None:
        datnoise_cls = dict()
        if qe_key in ['ptt', 'p']:
            datnoise_cls['tt'] = (nlev_t * np.pi / 180. / 60.)**2 * transfi2
        if qe_key in ['p_p', 'p']:
            datnoise_cls['ee'] = (nlev_p * np.pi / 180. / 60.)**2 * transfi2
            datnoise_cls['bb'] = (nlev_p * np.pi / 180. / 60.)**2 * transfi2
    N0s_biased = []
    N0s_unbiased = []
    N1s_biased = []
    N1s_unbiased = []
    delcls_fid = []
    delcls_true = []

    N0_unbiased = np.inf
    N1_unbiased = np.inf
    dls_unl_fid, cldd_fid = cls2dls(cls_unl_fid)
    cls_len_fid = dls2cls(lensed_cls(dls_unl_fid, cldd_fid))
    if cls_unl_dat is None:
        cls_unl_dat = cls_unl_fid
        cls_len_true = cls_len_fid
    else:
        dls_unl_true, cldd_true = cls2dls(cls_unl_dat)
        cls_len_true = dls2cls(lensed_cls(dls_unl_true, cldd_true))
    cls_plen_true = cls_len_true
    for irr, it in utils.enumerate_progress(range(itermax + 1)):
        dls_unl_true, cldd_true = cls2dls(cls_unl_dat)
        dls_unl_fid, cldd_fid = cls2dls(cls_unl_fid)
        if it == 0:
            rho_sqd_phi = 0.
        else:
            # The cross-correlation coefficient is identical for the Rfid-biased QE or the rescaled one
            rho_sqd_phi = np.zeros(len(cldd_true))
            rho_sqd_phi[:lmax_qlm + 1] = cldd_true[:lmax_qlm + 1] * utils.cli(
                cldd_true[:lmax_qlm + 1] + llp2 *
                (N0_unbiased[:lmax_qlm + 1] + N1_unbiased[:lmax_qlm + 1]))

        if 'wE' in version:
            assert qe_key in ['p_p']
            if it == 0:
                print('including imperfect knowledge of E in iterations')
            slic = slice(lmin_ivf, lmax_ivf + 1)
            rho_sqd_E = np.zeros(len(dls_unl_true[:, 1]))
            rho_sqd_E[slic] = cls_unl_dat['ee'][slic] * utils.cli(
                cls_plen_true['ee'][slic] + datnoise_cls['ee'][slic])
            dls_unl_fid[:, 1] *= rho_sqd_E
            dls_unl_true[:, 1] *= rho_sqd_E
            cldd_fid *= rho_sqd_phi
            cldd_true *= rho_sqd_phi

            cls_plen_fid_resolved = dls2cls(lensed_cls(dls_unl_fid, cldd_fid))
            cls_plen_true_resolved = dls2cls(
                lensed_cls(dls_unl_true, cldd_true))
            cls_plen_fid = {
                ck: cls_len_fid[ck] - (cls_plen_fid_resolved[ck] -
                                       cls_unl_fid[ck][:len(cls_len_fid[ck])])
                for ck in cls_len_fid.keys()
            }
            cls_plen_true = {
                ck:
                cls_len_true[ck] - (cls_plen_true_resolved[ck] -
                                    cls_unl_dat[ck][:len(cls_len_true[ck])])
                for ck in cls_len_true.keys()
            }

        else:
            cldd_true *= (1. - rho_sqd_phi)  # The true residual lensing spec.
            cldd_fid *= (1. - rho_sqd_phi
                         )  # What I think the residual lensing spec is
            cls_plen_fid = dls2cls(lensed_cls(dls_unl_fid, cldd_fid))
            cls_plen_true = dls2cls(lensed_cls(dls_unl_true, cldd_true))

        cls_filt = cls_plen_fid if not unlQE else cls_unl_fid
        cls_w = cls_plen_fid if not unlQE else cls_unl_fid
        cls_f = cls_plen_true
        fal = {}
        dat_delcls = {}
        if qe_key in ['ptt', 'p']:
            fal['tt'] = cls_filt['tt'][:lmax_ivf + 1] + (
                nlev_t * np.pi / 180. / 60.)**2 * transfi2
            dat_delcls['tt'] = cls_plen_true['tt'][:lmax_ivf +
                                                   1] + datnoise_cls['tt']
        if qe_key in ['p_p', 'p']:
            fal['ee'] = cls_filt['ee'][:lmax_ivf + 1] + (
                nlev_p * np.pi / 180. / 60.)**2 * transfi2
            fal['bb'] = cls_filt['bb'][:lmax_ivf + 1] + (
                nlev_p * np.pi / 180. / 60.)**2 * transfi2
            dat_delcls['ee'] = cls_plen_true['ee'][:lmax_ivf +
                                                   1] + datnoise_cls['ee']
            dat_delcls['bb'] = cls_plen_true['bb'][:lmax_ivf +
                                                   1] + datnoise_cls['bb']
        if qe_key in ['p']:
            fal['te'] = np.copy(cls_filt['te'][:lmax_ivf + 1])
            dat_delcls['te'] = np.copy(cls_plen_true['te'][:lmax_ivf + 1])
        fal = utils.cl_inverse(fal)
        for cl in fal.values():
            cl[:lmin_ivf] *= 0.
        for cl in dat_delcls.values():
            cl[:lmin_ivf] *= 0.
        cls_ivfs_arr = utils.cls_dot([fal, dat_delcls, fal])
        cls_ivfs = dict()
        for i, a in enumerate(['t', 'e', 'b']):
            for j, b in enumerate(['t', 'e', 'b'][i:]):
                if np.any(cls_ivfs_arr[i, j + i]):
                    cls_ivfs[a + b] = cls_ivfs_arr[i, j + i]

        n_gg = get_nhl(qe_key,
                       qe_key,
                       cls_w,
                       cls_ivfs,
                       lmax_ivf,
                       lmax_ivf,
                       lmax_out=lmax_qlm)[0]
        r_gg_true = qresp.get_response(qe_key,
                                       lmax_ivf,
                                       'p',
                                       cls_w,
                                       cls_f,
                                       fal,
                                       lmax_qlm=lmax_qlm)[0]
        r_gg_fid = qresp.get_response(
            qe_key, lmax_ivf, 'p', cls_w, cls_w, fal,
            lmax_qlm=lmax_qlm)[0] if cls_f is not cls_w else r_gg_true
        N0_biased = n_gg * utils.cli(
            r_gg_fid**
            2)  # N0 of possibly biased (by Rtrue / Rfid) QE estimator
        N0_unbiased = n_gg * utils.cli(
            r_gg_true**2
        )  # N0 of QE estimator after rescaling by Rfid / Rtrue to make it unbiased
        N0s_biased.append(N0_biased)
        N0s_unbiased.append(N0_unbiased)
        cls_plen_true['pp'] = cldd_true * utils.cli(
            np.arange(len(cldd_true))**2 *
            np.arange(1, len(cldd_true) + 1, dtype=float)**2 / (2. * np.pi))
        cls_plen_fid['pp'] = cldd_fid * utils.cli(
            np.arange(len(cldd_fid))**2 *
            np.arange(1, len(cldd_fid) + 1, dtype=float)**2 / (2. * np.pi))

        if 'wN1' in version:
            if it == 0: print('Adding n1 in iterations')
            from lensitbiases import n1_fft
            from scipy.interpolate import UnivariateSpline as spl
            lib = n1_fft.n1_fft(fal,
                                cls_w,
                                cls_f,
                                np.copy(cls_plen_true['pp']),
                                lminbox=50,
                                lmaxbox=5000,
                                k2l=None)
            n1_Ls = np.arange(50, lmax_qlm + 1, 50)
            if lmax_qlm not in n1_Ls: n1_Ls = np.append(n1_Ls, lmax_qlm)
            n1 = np.array(
                [lib.get_n1(qe_key, L, do_n1mat=False) for L in n1_Ls])
            N1_biased = spl(n1_Ls,
                            n1_Ls**2 * (n1_Ls * 1. + 1)**2 * n1 /
                            r_gg_fid[n1_Ls]**2,
                            k=2,
                            s=0,
                            ext='zeros')(np.arange(len(N0_unbiased)))
            N1_biased *= utils.cli(
                np.arange(lmax_qlm + 1)**2 *
                np.arange(1, lmax_qlm + 2, dtype=float)**2)
            N1_unbiased = N1_biased * (r_gg_fid * utils.cli(r_gg_true))**2
        else:
            N1_biased = np.zeros(lmax_qlm + 1, dtype=float)
            N1_unbiased = np.zeros(lmax_qlm + 1, dtype=float)

        delcls_fid.append(cls_plen_fid)
        delcls_true.append(cls_plen_true)

        N1s_biased.append(N1_biased)
        N1s_unbiased.append(N1_unbiased)

    return (np.array(N0s_biased),
            np.array(N0s_unbiased)) if not ret_delcls else (
                (np.array(N0s_biased), np.array(N0s_unbiased), delcls_fid,
                 delcls_true))