Example #1
    def _calc_febl(self):
        assert 'eb' not in self.chain.s_cls.keys()

        if len(self.chain.n_inv_filt.n_inv) == 1:
            ninv = self.chain.n_inv_filt.n_inv[0]
            npix = len(ninv)
            NlevP_uKamin = np.sqrt(4. * np.pi / npix / np.sum(ninv) * len(
                np.where(ninv != 0.0)[0])) * 180. * 60. / np.pi
        else:
            assert len(self.chain.n_inv_filt.n_inv) == 3
            ninv = self.chain.n_inv_filt.n_inv
            NlevP_uKamin = 0.5 * np.sqrt(
                4. * np.pi / len(ninv[0]) / np.sum(ninv[0]) *
                len(np.where(ninv[0] != 0.0)[0])) * 180. * 60. / np.pi
            NlevP_uKamin += 0.5 * np.sqrt(
                4. * np.pi / len(ninv[2]) / np.sum(ninv[2]) *
                len(np.where(ninv[2] != 0.0)[0])) * 180. * 60. / np.pi

        print("cinv_p::noiseP_uk_arcmin = %.3f" % NlevP_uKamin)

        s_cls = self.chain.s_cls
        b_transf = self.chain.n_inv_filt.b_transf
        fel = utils.cli(s_cls['ee'][:self.lmax + 1] +
                        (NlevP_uKamin * np.pi / 180. / 60.)**2 /
                        b_transf[0:self.lmax + 1]**2)
        fbl = utils.cli(s_cls['bb'][:self.lmax + 1] +
                        (NlevP_uKamin * np.pi / 180. / 60.)**2 /
                        b_transf[0:self.lmax + 1]**2)

        fel[0:2] *= 0.0
        fbl[0:2] *= 0.0

        return fel, fbl
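The square-root expression above is simply the average noise level of the inverse-variance map converted to uK-arcmin. A minimal sanity-check sketch (standalone numpy/healpy, with a hypothetical nside and per-pixel noise): for a homogeneous map it reduces to the per-pixel noise times the square root of the pixel area in arcmin^2.

import numpy as np
import healpy as hp

nside, sigma_pix = 256, 10.  # hypothetical resolution and per-pixel noise in uK
ninv = np.full(hp.nside2npix(nside), 1. / sigma_pix ** 2)  # homogeneous inverse pixel variance
npix = len(ninv)
nlev = np.sqrt(4. * np.pi / npix / np.sum(ninv) * len(np.where(ninv != 0.0)[0])) * 180. * 60. / np.pi
assert np.allclose(nlev, sigma_pix * np.sqrt(hp.nside2pixarea(nside, degrees=True)) * 60.)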
Example #2
 def _apply_ivf_p(self, pmap, soltn=None):
     assert len(pmap[0]) == hp.nside2npix(self.nside) and len(
         pmap[0]) == len(pmap[1])
     elm, blm = hp.map2alm_spin([m for m in pmap], 2, lmax=self.lmax_fl)
     elm = hp.almxfl(
         elm,
         self.get_fel() * utils.cli(self.transf[:len(self.fel)]))
     blm = hp.almxfl(
         blm,
         self.get_fbl() * utils.cli(self.transf[:len(self.fbl)]))
     return elm, blm
Example #3
    def get_bmmc(self, mc_sims_dd=None, mc_sims_ss=None):
        """Binned multiplicative MC correction.

            This compares the reconstruction on the simulations to the FFP10 input lensing spectrum.

        """
        assert self.k1[0] == 'p' and self.k2[
            0] == 'p' and self.ksource == 'p', (self.k1, self.k2, self.ksource)
        if mc_sims_dd is None: mc_sims_dd = self.parfile.mc_sims_var
        if mc_sims_ss is None: mc_sims_ss = self.parfile.mc_sims_var
        dd = self.parfile.qcls_dd.get_sim_stats_qcl(self.k1,
                                                    mc_sims_dd,
                                                    k2=self.k2).mean()
        ss = self.parfile.qcls_ss.get_sim_stats_qcl(self.k1,
                                                    mc_sims_ss,
                                                    k2=self.k2).mean()
        cl_pred = utils.camb_clfile(
            os.path.join(self.cls_path,
                         'FFP10_wdipole_lenspotentialCls.dat'))['pp']
        qc_resp = self.parfile.qresp_dd.get_response(
            self.k1, self.ksource) * self.parfile.qresp_dd.get_response(
                self.k2, self.ksource)
        bps = self._get_binnedcl(
            utils.cli(qc_resp) *
            (dd - 2 * ss) - cl_pred[:len(dd)]) - self.get_n1()
        return 1. / (1 + bps / self.fid_bandpowers)
Example #4
 def _apply_ivf_t(self, tmap, soltn=None):
     assert len(tmap) == hp.nside2npix(self.nside), (hp.npix2nside(
         tmap.size), self.nside)
     alm = hp.map2alm(tmap * self.get_fmask(), lmax=self.lmax_fl, iter=0)
     return hp.almxfl(
         alm,
         self.get_ftl() * utils.cli(self.transf[:len(self.ftl)]))
Example #5
    def get_sim_qlm(self, k, idx, lmax=None):
        """Returns a QE estimate, by computing and caching it if not done previously.

            Args:
                k: quadratic estimator key
                idx: simulation index
                lmax: optionally reduces the lmax of the output healpy array.

        """
        assert k in self.keys, (k, self.keys)
        if lmax is None:
            lmax = self.get_lmax_qlm(k)
        assert lmax <= self.get_lmax_qlm(k)
        if k in ['p_tp', 'x_tp', 'f_tp', 's_tp']:
            return self.get_sim_qlm('%stt' % k[0], idx,
                                    lmax=lmax) + self.get_sim_qlm(
                                        '%s_p' % k[0], idx, lmax=lmax)
        if k in ['p_te', 'p_tb', 'p_eb', 'x_te', 'x_tb', 'x_eb']:
            return self.get_sim_qlm(k[0] + k[2] + k[3], idx,
                                    lmax=lmax) + self.get_sim_qlm(
                                        k[0] + k[3] + k[2], idx, lmax=lmax)

        if '_bh_' in k:  # Bias-hardening
            assert self.resplib is not None, 'resplib arg necessary for this'
            kQE, ksource = k.split('_bh_')
            assert len(ksource) == 1 and ksource + kQE[1:] in self.keys, (
                ksource, kQE)
            assert self.get_lmax_qlm(kQE) == self.get_lmax_qlm(
                ksource + kQE[1:]), 'fix this (easy)'
            lmax = self.get_lmax_qlm(kQE)
            wL = self.resplib.get_response(kQE, ksource) * ut.cli(
                self.resplib.get_response(ksource + kQE[1:], ksource))
            ret = self.get_sim_qlm(kQE, idx, lmax=lmax)
            return ret - hp.almxfl(
                self.get_sim_qlm(ksource + kQE[1:], idx, lmax=lmax), wL)

        assert k in self.keys_fund, (k, self.keys_fund)
        fname = os.path.join(
            self.lib_dir,
            'sim_%s_%04d.fits' % (k, idx) if idx != -1 else 'dat_%s.fits' % k)
        if not os.path.exists(fname):
            if k in ['ptt', 'xtt']: self._build_sim_Tgclm(idx)
            elif k in ['p_p', 'x_p']: self._build_sim_Pgclm(idx)
            elif k in ['p', 'x']: self._build_sim_MVgclm(idx)
            elif k in ['f']: self._build_sim_f(idx)
            elif k in ['stt']: self._build_sim_stt(idx)
            elif k in ['ftt']: self._build_sim_ftt(idx)
            elif k in ['f_p']: self._build_sim_f_p(idx)
            elif k in ['ntt']: self._build_sim_ntt(idx)
            elif k in ['a_p']: self._build_sim_a_p(idx)
            elif k in [
                    'ptt', 'pte', 'pet', 'ptb', 'pbt', 'pee', 'peb', 'pbe',
                    'pbb', 'xtt', 'xte', 'xet', 'xtb', 'xbt', 'xee', 'xeb',
                    'xbe', 'xbb'
            ]:
                self._build_sim_xfiltMVgclm(idx, k)
            else:
                assert 0, k

        return ut.alm_copy(hp.read_alm(fname), lmax=lmax)
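A short usage sketch, assuming a plancklens parameter-file module has already been imported as `parfile` and exposes a `qlms_dd` instance of this library (as in the plancklens example parameter files); the keys and indices below are illustrative:

qlm_sim0 = parfile.qlms_dd.get_sim_qlm('p_p', 0)  # polarization-only lensing QE for simulation 0
qlm_dat = parfile.qlms_dd.get_sim_qlm('p', -1)    # MV lensing QE computed on the data map (index -1)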
Example #6
    def get_bamc(self):
        """Binned additive MC correction, with crude error bars.

            This compares the reconstruction on the simulations to the FFP10 input lensing spectrum.

            Note:
                the approximate error corrections to the additive MC correction variance follow Appendix C of
                https://arxiv.org/abs/1807.06210; check this for more details on its validity.

        """
        assert self.k1[0] == 'p' and self.k2[
            0] == 'p' and self.ksource == 'p', (self.k1, self.k2, self.ksource)
        ss2 = 2 * self.parfile.qcls_ss.get_sim_stats_qcl(
            self.k1, self.parfile.mc_sims_var, k2=self.k2).mean()
        cl_pred = utils.camb_clfile(
            os.path.join(
                self.cls_path,
                'FFP10_wdipole_lenspotentialCls.dat'))['pp'][:len(ss2)]
        qc_norm = utils.cli(
            self.parfile.qresp_dd.get_response(self.k1, self.ksource) *
            self.parfile.qresp_dd.get_response(self.k2, self.ksource))
        bp_stats = utils.stats(self.nbins)
        bp_n1 = self.get_n1()
        for i, idx in utils.enumerate_progress(self.parfile.mc_sims_var,
                                               label='collecting BP stats'):
            dd = self.parfile.qcls_dd.get_sim_qcl(self.k1, idx, k2=self.k2)
            bp_stats.add(
                self._get_binnedcl(qc_norm * (dd - ss2) - cl_pred) - bp_n1)
        NMF = len(self.parfile.qcls_dd.mc_sims_mf)
        NB = len(self.parfile.mc_sims_var)
        return bp_stats.mean(), bp_stats.sigmas_on_mean() * np.sqrt(
            (1. + 1. + 2. / NMF + 2 * NB / (float(NMF * NMF))))
Example #7
File: nhl.py  Project: carronj/plancklens
 def _get_qe_derived(self, k):
     if '_bh_' in k:
         kQE, ksource = k.split('_bh_')
         assert len(ksource) == 1.
         wL = self.resplib.get_response(kQE, ksource) * utils.cli(self.resplib.get_response(ksource + kQE[1:], ksource))
         return [(kQE, 1.), (ksource + kQE[1:], -wL)]
     else:
         return [(k, 1.)]
Example #8
def get_fal(a, cl_len, nlev, transf, lmin, lmax):
    """Simple diagonal isotropic filter 

    """
    fal = utils.cli(
        cl_len.get(a + a)[:lmax + 1] +
        (nlev / 60. / 180. * np.pi)**2 / transf[:lmax + 1]**2)
    fal[:lmin] *= 0.
    return fal
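A usage sketch for the temperature filter (the noise level, beam width and spectra file path are hypothetical; assumes healpy and the plancklens `utils` module providing `camb_clfile` and `cli`):

import numpy as np
import healpy as hp
from plancklens import utils

lmax_ivf, lmin_ivf = 2048, 100
nlev_t = 35.  # temperature noise level in uK-arcmin (hypothetical)
transf = hp.gauss_beam(5. / 60. / 180. * np.pi, lmax=lmax_ivf)  # 5 arcmin Gaussian beam
cl_len = utils.camb_clfile('FFP10_wdipole_lensedCls.dat')  # lensed CMB spectra (path is an assumption)
ftl = get_fal('t', cl_len, nlev_t, transf, lmin_ivf, lmax_ivf)  # 1 / (C_l^TT + N_l^TT / b_l^2), zeroed below lmin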
Example #9
File: nhl.py  Project: carronj/plancklens
 def dls2cls(dls):
     """Inverse operation to cls2dls"""
     assert dls.shape[1] == 4
     lmax = dls.shape[0] - 1
     cls = {}
     refac = 2. * np.pi * utils.cli( np.arange(lmax + 1) * np.arange(1, lmax + 2, dtype=float))
     for i, k in enumerate(['tt', 'ee', 'bb', 'te']):
         cls[k] = dls[:, i] * refac
     return cls
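Since D_l = l (l + 1) C_l / (2 pi), this is just the inverse scaling applied column-wise; a tiny round-trip check (assumes numpy and the same plancklens `utils.cli` pseudo-inverse used above):

import numpy as np

lmax = 8
ell = np.arange(lmax + 1, dtype=float)
cls_in = {k: np.ones(lmax + 1) for k in ['tt', 'ee', 'bb', 'te']}
dls = np.zeros((lmax + 1, 4))
for i, k in enumerate(['tt', 'ee', 'bb', 'te']):
    dls[:, i] = cls_in[k] * ell * (ell + 1.) / (2. * np.pi)  # forward D_l scaling
cls_out = dls2cls(dls)
assert np.allclose(cls_out['tt'][1:], cls_in['tt'][1:])  # the l = 0 entry is mapped to 0 by the pseudo-inverse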
Example #10
    def get_dat_bandpowers(self):
        """Returns data raw band-powers, prior to any biases subtraction or correction.

        """
        qc_resp = self.parfile.qresp_dd.get_response(
            self.k1, self.ksource) * self.parfile.qresp_dd.get_response(
                self.k2, self.ksource)
        return self._get_binnedcl(
            utils.cli(qc_resp) *
            self.parfile.qcls_dd.get_sim_qcl(self.k1, -1, k2=self.k2))
Example #11
    def get_mcn0(self):
        """Returns Monte-Carlo N0 lensing bias.

        """
        ss = self.parfile.qcls_ss.get_sim_stats_qcl(self.k1,
                                                    self.parfile.mc_sims_var,
                                                    k2=self.k2).mean()
        qc_resp = self.parfile.qresp_dd.get_response(
            self.k1, self.ksource) * self.parfile.qresp_dd.get_response(
                self.k2, self.ksource)
        return self._get_binnedcl(utils.cli(qc_resp) * (2. * ss))
Example #12
    def _calc_ftl(self):
        ninv = self.chain.n_inv_filt.n_inv
        npix = len(ninv[:])
        NlevT_uKamin = np.sqrt(4. * np.pi / npix / np.sum(ninv) * len(
            np.where(ninv != 0.0)[0])) * 180. * 60. / np.pi
        print("cinv_t::noiseT_uk_arcmin = %.3f" % NlevT_uKamin)

        s_cls = self.chain.s_cls
        b_transf = self.chain.n_inv_filt.b_transf

        if s_cls['tt'][0] == 0.: assert self.chain.n_inv_filt.marge_monopole
        if s_cls['tt'][1] == 0.: assert self.chain.n_inv_filt.marge_dipole

        ftl = utils.cli(s_cls['tt'][0:self.lmax + 1] +
                        (NlevT_uKamin * np.pi / 180. / 60.)**2 *
                        utils.cli(b_transf[0:self.lmax + 1]**2))
        if self.chain.n_inv_filt.marge_monopole: ftl[0] = 0.0
        if self.chain.n_inv_filt.marge_dipole: ftl[1] = 0.0

        return ftl
Example #13
    def get_dat_nhl(self):
        """Returns N0 lensing bias, semi-analytical version.

            This is not highly accurate on the cut-sky

        """
        qc_resp = self.parfile.qresp_dd.get_response(
            self.k1, self.ksource) * self.parfile.qresp_dd.get_response(
                self.k2, self.ksource)
        return self._get_binnedcl(
            utils.cli(qc_resp) *
            self.parfile.nhl_dd.get_sim_nhl(-1, self.k1, self.k2))
Example #14
    def get_mcn0_cov(self, mc_sims_dd=None):
        """Covariance matrix obtained from the realization-independent debiaser.

        """
        if mc_sims_dd is None: mc_sims_dd = self.parfile.mc_sims_var
        mcn0_cov = utils.stats(self.nbins)
        qc_norm = utils.cli(
            self.parfile.qresp_dd.get_response(self.k1, self.ksource) *
            self.parfile.qresp_dd.get_response(self.k2, self.ksource))
        for i, idx in utils.enumerate_progress(mc_sims_dd):
            dd = self.parfile.qcls_dd.get_sim_qcl(self.k1, idx, k2=self.k2)
            mcn0_cov.add(self._get_binnedcl(qc_norm * dd))
        return mcn0_cov.cov()
Example #15
    def get_response(self, k, ksource, recache=False):
        """
            Args:
                k: QE anisotropy key
                ksource: CMB anisotropy source key

            Returns:
                Response array

        """
        if '_bh_' in k:  # bias-hardened estimator
            kQE, bhksource = k.split('_bh_')
            assert len(ksource) == 1, (kQE, ksource)
            wL = self.get_response(kQE, bhksource, recache=recache)
            wL *= ut.cli(
                self.get_response(bhksource + kQE[1:],
                                  bhksource,
                                  recache=recache))
            ret = self.get_response(kQE, ksource, recache=recache)
            ret -= wL * self.get_response(
                bhksource + kQE[1:], ksource, recache=recache)
            return ret
        s, GorC, sins, ksp = qe_spin_data(k)
        assert s >= 0, s
        if s == 0: assert GorC == 'G', (s, GorC)
        fn = 'qe_' + ksp + k[1:] + '_source_%s_' % ksource + GorC + GorC
        if self.npdb.get(fn) is None or recache:
            GG, CC, GC, CG = get_response(k,
                                          self.lmax_qe,
                                          ksource,
                                          self.cls_weight,
                                          self.cls_cmb,
                                          self.fal,
                                          lmax_qlm=self.lmax_qlm)
            if np.any(CG) or np.any(GC):
                print(
                    "Warning: C-G or G-C responses non-zero but not returned")
                # This may happen only if EB and/or TB are relevant and/or strange estimator mix.

            if recache and self.npdb.get(fn) is not None:
                self.npdb.remove('qe_' + ksp + k[1:] + '_source_%s' % ksource +
                                 '_GG')
                if s > 0:
                    self.npdb.remove('qe_' + ksp + k[1:] +
                                     '_source_%s' % ksource + '_CC')
            self.npdb.add('qe_' + ksp + k[1:] + '_source_%s' % ksource + '_GG',
                          GG)
            if s > 0:
                self.npdb.add(
                    'qe_' + ksp + k[1:] + '_source_%s' % ksource + '_CC', CC)
        return self.npdb.get(fn)
Example #16
    def get_n1(self, k1=None, k2=None, unnormed=False):
        """Returns analytical N1 lensing bias.

            This uses the analytical approximation to the QE pair filtering as input.

        """
        k1 = self.k1 if k1 is None else k1
        k2 = self.k2 if k2 is None else k2
        assert k1 == k2, 'check signs for qes of different spins'
        assert self.ksource[0] == 'p', 'check aniso source spectrum'
        # This implementation accepts 2 different qes but pairwise identical filtering on each qe leg.
        assert np.all(self.parfile.qcls_dd.qeA.f2map1.ivfs.get_ftl() ==
                      self.parfile.qcls_dd.qeA.f2map2.ivfs.get_ftl())
        assert np.all(self.parfile.qcls_dd.qeA.f2map1.ivfs.get_fel() ==
                      self.parfile.qcls_dd.qeA.f2map2.ivfs.get_fel())
        assert np.all(self.parfile.qcls_dd.qeA.f2map1.ivfs.get_fbl() ==
                      self.parfile.qcls_dd.qeA.f2map2.ivfs.get_fbl())
        assert np.all(self.parfile.qcls_dd.qeB.f2map1.ivfs.get_ftl() ==
                      self.parfile.qcls_dd.qeB.f2map2.ivfs.get_ftl())
        assert np.all(self.parfile.qcls_dd.qeB.f2map1.ivfs.get_fel() ==
                      self.parfile.qcls_dd.qeB.f2map2.ivfs.get_fel())
        assert np.all(self.parfile.qcls_dd.qeB.f2map1.ivfs.get_fbl() ==
                      self.parfile.qcls_dd.qeB.f2map2.ivfs.get_fbl())

        ivfsA = self.parfile.qcls_dd.qeA.f2map1.ivfs
        ivfsB = self.parfile.qcls_dd.qeB.f2map1.ivfs
        ftlA = ivfsA.get_ftl()
        felA = ivfsA.get_fel()
        fblA = ivfsA.get_fbl()
        ftlB = ivfsB.get_ftl()
        felB = ivfsB.get_fel()
        fblB = ivfsB.get_fbl()
        clpp_fid = utils.camb_clfile(
            os.path.join(self.cls_path,
                         'FFP10_wdipole_lenspotentialCls.dat'))['pp']
        qc_resp = self.parfile.qresp_dd.get_response(
            k1, self.ksource) * self.parfile.qresp_dd.get_response(
                k2, self.ksource)
        n1pp = self.parfile.n1_dd.get_n1(k1,
                                         self.ksource,
                                         clpp_fid,
                                         ftlA,
                                         felA,
                                         fblA,
                                         len(qc_resp) - 1,
                                         kB=k2,
                                         ftlB=ftlB,
                                         felB=felB,
                                         fblB=fblB)
        return self._get_binnedcl(utils.cli(qc_resp) *
                                  n1pp) if not unnormed else n1pp
Example #17
    def get_rdn0(self):
        """Returns realization-dependent N0 lensing bias RDN0.

        """
        ds = self.parfile.qcls_ds.get_sim_stats_qcl(self.k1,
                                                    self.parfile.mc_sims_var,
                                                    k2=self.k2).mean()  # average data x sim QE cross-spectrum
        ss = self.parfile.qcls_ss.get_sim_stats_qcl(self.k1,
                                                    self.parfile.mc_sims_var,
                                                    k2=self.k2).mean()  # average sim x sim' QE cross-spectrum
        qc_resp = self.parfile.qresp_dd.get_response(
            self.k1, self.ksource) * self.parfile.qresp_dd.get_response(
                self.k2, self.ksource)
        return self._get_binnedcl(utils.cli(qc_resp) * (4 * ds - 2. * ss))
Example #18
    def get_nhl_cov(self, mc_sims_dd=None):
        """Covariance matrix obtained from the semi-analytical N0 debiaser.

        """
        if mc_sims_dd is None: mc_sims_dd = self.parfile.mc_sims_var
        nhl_cov = utils.stats(self.nbins)
        qc_norm = utils.cli(
            self.parfile.qresp_dd.get_response(self.k1, self.ksource) *
            self.parfile.qresp_dd.get_response(self.k2, self.ksource))
        for i, idx in utils.enumerate_progress(mc_sims_dd):
            dd = self.parfile.qcls_dd.get_sim_qcl(self.k1, idx, k2=self.k2)
            nhl_cov.add(
                self._get_binnedcl(qc_norm *
                                   (dd - self.parfile.nhl_dd.get_sim_nhl(
                                       int(idx), self.k1, self.k2))))
        return nhl_cov.cov()
Example #19
    def get_sim_qlm_mf(self, k, mc_sims, lmax=None):
        """Returns a QE mean-field estimate, by averaging QE estimates from a set simulations (caches the result).

            Args:
                k: quadratic estimator key
                mc_sims: simulation indices to use for the estimate.
                lmax: optionally reduces the lmax of the output healpy array.

        """
        if lmax is None:
            lmax = self.get_lmax_qlm(k)
        assert lmax <= self.get_lmax_qlm(k)
        if k in ['p_tp', 'x_tp']:
            return (self.get_sim_qlm_mf('%stt' % k[0], mc_sims, lmax=lmax) +
                    self.get_sim_qlm_mf('%s_p' % k[0], mc_sims, lmax=lmax))
        if k in ['p_te', 'p_tb', 'p_eb', 'x_te', 'x_tb', 'x_eb']:
            return  self.get_sim_qlm_mf(k[0] + k[2] + k[3], mc_sims, lmax=lmax)  \
                    + self.get_sim_qlm_mf(k[0] + k[3] + k[2], mc_sims, lmax=lmax)
        if '_bh_' in k:  # Bias-hardening
            assert self.resplib is not None, 'resplib arg necessary for this'
            kQE, ksource = k.split('_bh_')
            assert len(ksource) == 1 and ksource + kQE[1:] in self.keys, (
                ksource, kQE)
            assert self.get_lmax_qlm(kQE) == self.get_lmax_qlm(
                ksource + kQE[1:]), 'fix this (easy)'
            lmax = self.get_lmax_qlm(kQE)
            wL = self.resplib.get_response(kQE, ksource) * ut.cli(
                self.resplib.get_response(ksource + kQE[1:], ksource))
            ret = self.get_sim_qlm_mf(kQE, mc_sims, lmax=lmax)
            return ret - hp.almxfl(
                self.get_sim_qlm_mf(ksource + kQE[1:], mc_sims, lmax=lmax), wL)

        assert k in self.keys_fund, (k, self.keys_fund)
        fname = os.path.join(self.lib_dir,
                             'simMF_k1%s_%s.fits' % (k, ut.mchash(mc_sims)))
        if not os.path.exists(fname):
            this_mcs = np.unique(mc_sims)
            MF = np.zeros(hp.Alm.getsize(lmax), dtype=complex)
            if len(this_mcs) == 0: return MF
            for i, idx in ut.enumerate_progress(this_mcs,
                                                label='calculating %s MF' % k):
                MF += self.get_sim_qlm(k, idx, lmax=lmax)
            MF /= len(this_mcs)
            _write_alm(fname, MF)
            print("Cached ", fname)
        return ut.alm_copy(hp.read_alm(fname), lmax=lmax)
Example #20
    def get_ampl_x_input(self, mc_sims=None):
        """Returns cross-correlation of phi-maps to input lensing maps.

            Uses the qlms_x_in library of the parfile

        """
        qlmi = self.parfile.qlms_x_in
        if mc_sims is None:
            mc_sims = np.unique(
                np.concatenate(
                    [self.parfile.mc_sims_var, self.parfile.mc_sims_bias]))
        xin = utils.stats(self.nbins)
        qnorm = utils.cli(
            self.parfile.qresp_dd.get_response(self.k1, self.ksource))
        for i, idx in utils.enumerate_progress(mc_sims):
            qi = qlmi.get_sim_qcl(self.k1, idx)
            xin.add(self._get_binnedcl(qnorm * qi) / self.fid_bandpowers)
        return xin
Example #21
def get_response(qe_key, lmax_ivf, source, cls_weight, cls_cmb, fal, fal_leg2=None, lmax_ivf2=None, lmax_qlm=None):
    r"""QE response calculation

        Args:
            qe_key: Quadratic estimator key (see this module docstring for descriptions)
            lmax_ivf: max. CMB multipole used in the QE
            source: anisotropy source key
            cls_weight(dict): fiducial spectra entering the QE weights (numerator in Eq. 2 of https://arxiv.org/abs/1807.06210)
            cls_cmb(dict): CMB spectra entering the CMB response (in principle lensed spectra, or grad-lensed spectra)
            fal(dict): (isotropic approximation to the) filtering cls. e.g. fal['tt'] :math:`= \frac {1} {C^{\rm TT}_\ell  +  N^{\rm TT}_\ell / b^2_\ell}` for temperature if filtered independently from polarization.
            fal_leg2(dict): Same as *fal* but for the second leg, if different.
            lmax_ivf2(optional): max. CMB multipole used in the QE on the second leg (if different to lmax_ivf)
            lmax_qlm(optional): responses are calculated up to this multipole. Defaults to lmax_ivf + lmax_ivf2

        Note:
            The result is *not* symmetrized with respect to the 'fals' if these are not the same on the two legs.
            In this case you probably want to run this twice, swapping the fals in the second run.

    """
    if lmax_ivf2 is None: lmax_ivf2 = lmax_ivf
    if lmax_qlm is None : lmax_qlm = lmax_ivf + lmax_ivf2
    if '_bh_' in qe_key: # Bias-hardened estimators:
        k, hsource = qe_key.split('_bh_')# kQE hardened against hsource
        assert len(hsource) == 1, hsource
        h = hsource[0]
        RGG_ks, RCC_ks, RGC_ks, RCG_ks = get_response(k, lmax_ivf, source, cls_weight, cls_cmb, fal,
                                                    fal_leg2=fal_leg2, lmax_ivf2=lmax_ivf2, lmax_qlm=lmax_qlm)
        RGG_hs, RCC_hs, RGC_hs, RCG_hs = get_response(h + k[1:], lmax_ivf, source, cls_weight, cls_cmb, fal,
                                                    fal_leg2=fal_leg2, lmax_ivf2=lmax_ivf2, lmax_qlm=lmax_qlm)
        RGG_kh, RCC_kh, RGC_kh, RCG_kh = get_response(k, lmax_ivf, h, cls_weight, cls_cmb, fal,
                                                    fal_leg2=fal_leg2, lmax_ivf2=lmax_ivf2, lmax_qlm=lmax_qlm)
        RGG_hh, RCC_hh, RGC_hh, RCG_hh = get_response(h + k[1:], lmax_ivf, h, cls_weight, cls_cmb, fal,
                                                    fal_leg2=fal_leg2, lmax_ivf2=lmax_ivf2, lmax_qlm=lmax_qlm)
        RGG = RGG_ks - (RGG_kh * RGG_hs  * ut.cli(RGG_hh) + RGC_kh * RCG_hs  * ut.cli(RCC_hh))
        RCC = RCC_ks - (RCG_kh * RGC_hs  * ut.cli(RGG_hh) + RCC_kh * RCC_hs  * ut.cli(RCC_hh))
        RGC = RGC_ks - (RGG_kh * RGC_hs  * ut.cli(RGG_hh) + RGC_kh * RCC_hs  * ut.cli(RCC_hh))
        RCG = RCG_ks - (RCG_kh * RGG_hs  * ut.cli(RGG_hh) + RCC_kh * RCG_hs  * ut.cli(RCC_hh))
        return RGG, RCC, RGC, RCG

    qes = get_qes(qe_key, lmax_ivf, cls_weight, lmax2=lmax_ivf2)
    return _get_response(qes, source, cls_cmb, fal, lmax_qlm, fal_leg2=fal_leg2)
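A sketch of a standard polarization-only lensing gradient response (a minimal example under assumed inputs: `ftl`, `fel`, `fbl` are isotropic filters as built in the other examples, and `cl_len` is a dict of lensed CMB spectra used here for both the QE weights and the CMB response):

fal = {'tt': ftl, 'ee': fel, 'bb': fbl}  # isotropic approximation to the filtering
RGG, RCC, RGC, RCG = get_response('p_p', lmax_ivf, 'p', cl_len, cl_len, fal, lmax_qlm=2 * lmax_ivf)
# RGG is the gradient (lensing) response; the QE map normalization is utils.cli(RGG),
# and the band-power examples above normalize QE spectra with utils.cli(RGG ** 2).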
Example #22
def mk_patches(Np, pix_ivmap, rvmap_uKamin_data=None, ret_masks=False):
    """Splits the variance maps into equal-area regions with different noise levels

        Args:
            Np: desired number of patches
            pix_ivmap: input inverse pixel variance map used for the filtering
            rvmap_uKamin_data: root variance map in uK amin of the data (if different from pix_ivmap)
            ret_masks: returns the defined series of masks if set


    """

    mask = _read_map(pix_ivmap) > 0
    npix = mask.size
    nside = hp.npix2nside(npix)
    vmap = utils.cli(np.sqrt(_read_map(pix_ivmap))) * np.sqrt(
        hp.nside2pixarea(nside)) / np.pi * 60 * 180.
    edges = np.percentile(vmap[np.where(mask)], np.linspace(0, 100, Np + 1))
    edges[0] = -1.
    edges[-1] = 10000
    nlevs = []  # from filtering variance map
    nlevs_data = []  # from data variance map
    fskies = []
    masks = []
    for i in range(1, Np + 1):
        this_mask = (vmap > edges[i - 1]) & (vmap <= edges[i])
        nlevs.append(np.mean(vmap[mask * this_mask]))
        fskies.append(np.mean(mask * this_mask))
        if rvmap_uKamin_data is not None:
            nlevs_data.append(
                np.mean(_read_map(rvmap_uKamin_data)[mask * this_mask]))
        if ret_masks:
            masks.append(this_mask * mask)
    if rvmap_uKamin_data is None:
        nlevs_data = nlevs
    nlev_fid = np.sqrt(4. * np.pi / npix / np.sum(_read_map(pix_ivmap)) *
                       np.sum(mask)) * 180. * 60. / np.pi
    for nf, nd in zip(nlevs, nlevs_data):
        print('%.2f (ftl)   %.2f (dat) uKamin' % (nf, nd))
    print('%.2f (fid)' % nlev_fid)
    return nlevs, nlevs_data, nlev_fid, fskies, masks
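Usage sketch (the number of patches and the map path are hypothetical; pix_ivmap should be the inverse pixel-variance map used in the filtering):

nlevs, nlevs_data, nlev_fid, fskies, masks = mk_patches(4, 'ninv_p.fits', ret_masks=True)
# nlevs: mean noise level (uK-arcmin) of each of the 4 equal-area patches of the filtering variance map
# fskies: sky fraction covered by each patch; masks: the corresponding boolean patch masks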
Example #23
File: n1.py  Project: markm42/plancklens
    def get_n1(self, kA, k_ind, cl_kind, ftlA, felA, fblA, Lmax, kB=None, ftlB=None, felB=None, fblB=None,
               clttfid=None, cltefid=None, cleefid=None, n1_flat=lambda ell: np.ones(len(ell), dtype=float),
               recache=False, remove_only=False, sglLmode=True):
        r"""Calls a N1 bias

            Args:
                kA: qe_key of QE spectrum first leg
                k_ind: anisotropy source key ('p', for standard lensing N1)
                cl_kind: spectrum of anisotropy source ('p', for standard lensing N1)
                ftlA: first leg T-filtering isotropic approximation
                      (typically :math:`\frac{1}{C_\ell^{TT} + N_\ell^{TT}}`)
                felA: first leg E-filtering isotropic approximation
                      (typically :math:`\frac{1}{C_\ell^{EE} + N_\ell^{EE}}`)
                fblA: first leg B-filtering isotropic approximation
                     (typically :math:`\frac{1}{C_\ell^{BB} + N_\ell^{BB}}`)
                Lmax: maximum multipole of output N1
                kB(optional): qe_key of QE spectrum second leg (if different from the first)
                ftlB(optional): second leg T-filtering isotropic approximation (if different from the first)
                felB(optional): second leg E-filtering isotropic approximation (if different from the first)
                fblB(optional): second leg B-filtering isotropic approximation (if different from the first)
                clttfid(optional): CMB TT spectrum used in QE weights (if different from instance cltt for map-level CMB spectrum)
                cltefid(optional): CMB TE spectrum used in QE weights (if different from instance clte for map-level CMB spectrum)
                cleefid(optional): CMB EE spectrum used in QE weights (if different from instance clee for map-level CMB spectrum)
                n1_flat(optional): function used to flatten the discretized output before splining it onto the full multipole range (the flattening is undone before returning)

            Returns:
                N1 bias in the form of a numpy array of size Lmax + 1

            Note:
                This can be called with MPI using a number of processes; in this case the calculations for each multipole will be distributed among these.

        """
        if kB is None: kB = kA
        # FIXME:
        if kA[0] == 's' or kB[0] == 's':
            assert kA[0] == kB[0], 'point source implemented following the DH gradient convention, you would probably need to pick a sign there'
        if ftlB is None: ftlB = ftlA
        if felB is None: felB = felA
        if fblB is None: fblB = fblA

        clttfid = self.cltt if clttfid is None else clttfid
        cltefid = self.clte if cltefid is None else cltefid
        cleefid = self.clee if cleefid is None else cleefid


        if kA in estimator_keys and kB in estimator_keys:
            if kA < kB:
                return self.get_n1(kB, k_ind, cl_kind, ftlB, felB, fblB, Lmax, ftlB=ftlA, felB=felA, fblB=fblA, kB=kA,
                                   clttfid=clttfid, cltefid=cltefid, cleefid=cleefid, n1_flat=n1_flat, sglLmode=sglLmode)

            idx = 'splined_kA' + kA + '_kB' + kB + '_ind' + k_ind
            idx += '_clpp' + clhash(cl_kind)
            idx += '_ftlA' + clhash(ftlA)
            idx += '_felA' + clhash(felA)
            idx += '_fblA' + clhash(fblA)
            idx += '_ftlB' + clhash(ftlB)
            idx += '_felB' + clhash(felB)
            idx += '_fblB' + clhash(fblB)
            idx += '_clttfid' + clhash(clttfid)
            idx += '_cltefid' + clhash(cltefid)
            idx += '_cleefid' + clhash(cleefid)
            idx += '_Lmax%s' % Lmax

            ret = self.npdb.get(idx)
            if ret is not None:
                if not recache and not remove_only:
                    return ret
                else:
                    self.npdb.remove(idx)
                    if remove_only:
                        return np.zeros_like(ret)
                    ret = None
            if ret is None:
                Ls = np.unique(np.concatenate([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], np.arange(1, Lmax + 1)[::20], [Lmax]]))
                if sglLmode:
                    n1L = np.zeros(len(Ls), dtype=float)
                    for i, L in enumerate(Ls[mpi.rank::mpi.size]):
                        print("n1: doing L %s kA %s kB %s kind %s" % (L, kA, kB, k_ind))
                        n1L[i] = (self._get_n1_L(L, kA, kB, k_ind, cl_kind, ftlA, felA, fblA, ftlB, felB, fblB, clttfid, cltefid, cleefid, remove_only=remove_only))
                    if mpi.size > 1:
                        mpi.barrier()
                        for i, L in enumerate(Ls): # reloading cached n1L's
                            n1L[i] = (self._get_n1_L(L, kA, kB, k_ind, cl_kind, ftlA, felA, fblA, ftlB, felB, fblB, clttfid,
                                             cltefid, cleefid, remove_only=remove_only))

                else: # entire vector from f90 openmp call
                    lmin_ftlA = np.min([np.where(np.abs(fal) > 0.)[0] for fal in [ftlA, felA, fblA]])
                    lmin_ftlB = np.min([np.where(np.abs(fal) > 0.)[0] for fal in [ftlB, felB, fblB]])
                    n1L = n1f.n1(Ls, cl_kind, kA, kB, k_ind, self.cltt, self.clte, self.clee,
                                 clttfid, cltefid, cleefid,  ftlA, felA, fblA, ftlB, felB, fblB,
                                  lmin_ftlA, lmin_ftlB,  self.dL, self.lps)

                ret = np.zeros(Lmax + 1)
                ret[1:] =  spline(Ls, np.array(n1L) * n1_flat(Ls), s=0., ext='raise', k=3)(np.arange(1, Lmax + 1) * 1.)
                ret[1:] *= cli(n1_flat(np.arange(1, Lmax + 1) * 1.))
                self.npdb.add(idx, ret)
                return ret
            return self.npdb.get(idx)

        if (kA in estimator_keys_derived) and (kB in estimator_keys_derived):
            ret = 0.
            for (tk1, cl1) in _get_est_derived(kA, Lmax):
                for (tk2, cl2) in _get_est_derived(kB, Lmax):
                    tret = self.get_n1(tk1, k_ind, cl_kind, ftlA, felA, fblA, Lmax, ftlB=ftlB, felB=felB, fblB=fblB,
                                       clttfid=clttfid, cltefid=cltefid, cleefid=cleefid,
                                       kB=tk2, n1_flat=n1_flat, sglLmode=sglLmode)
                    tret *= cl1[:Lmax + 1]
                    tret *= cl2[:Lmax + 1]
                    ret += tret
            return ret
        elif (kA in estimator_keys_derived) and (kB in estimator_keys):
            ret = 0.
            for (tk1, cl1) in _get_est_derived(kA, Lmax):
                tret = self.get_n1(tk1, k_ind, cl_kind, ftlA, felA, fblA, Lmax, ftlB=ftlB, felB=felB, fblB=fblB, kB=kB,
                                   clttfid=clttfid, cltefid=cltefid, cleefid=cleefid,
                                   n1_flat=n1_flat, sglLmode=sglLmode)
                tret *= cl1[:Lmax + 1]
                ret += tret
            return ret
        elif (kA in estimator_keys) and (kB in estimator_keys_derived):
            ret = 0.
            for (tk2, cl2) in _get_est_derived(kB, Lmax):
                tret = self.get_n1(kA, k_ind, cl_kind, ftlA, felA, fblA, Lmax, ftlB=ftlB, felB=felB, fblB=fblB, kB=tk2,
                                   clttfid=clttfid, cltefid=cltefid, cleefid=cleefid,
                                   n1_flat=n1_flat, sglLmode=sglLmode)
                tret *= cl2[:Lmax + 1]
                ret += tret
            return ret
        assert 0
Example #24
File: nhl.py  Project: carronj/plancklens
def get_N0_iter(qe_key, nlev_t, nlev_p, beam_fwhm, cls_unl, lmin_ivf, lmax_ivf, itermax, lmax_qlm=None):
    """Iterative lensing-N0 estimate

        Iteratively calculates partially lensed spectra and lensing noise levels.
        This uses the python camb package to get the partially lensed spectra.

        This makes no assumption that the response equals 1 / noise, hence it is about twice as slow as it could be in standard cases.

        Args:
            qe_key: QE estimator key
            nlev_t: temperature noise level (in :math:`\mu K`-arcmin)
            nlev_p: polarisation noise level (in :math:`\mu K`-arcmin)
            beam_fwhm: Gaussian beam full width half maximum in arcmin
            cls_unl(dict): unlensed CMB power spectra
            lmin_ivf: minimal CMB multipole used in the QE
            lmax_ivf: maximal CMB multipole used in the QE
            itermax: number of iterations to perform
            lmax_qlm(optional): maximum lensing multipole to consider. Defaults to :math:`2 lmax_ivf`

        Returns:
            Array of shape (itermax + 1, lmax_qlm + 1) with all iterated N0s. First entry is standard N0.

    #FIXME: this requires the full camb python package for the lensed spectra calculation.

     """

    assert qe_key in ['p_p', 'p', 'ptt'], qe_key
    try:
        from camb.correlations import lensed_cls
    except ImportError:
        assert 0, "could not import camb.correlations.lensed_cls"

    def cls2dls(cls):
        """Turns cls dict. into camb cl array format"""
        keys = ['tt', 'ee', 'bb', 'te']
        lmax = np.max([len(cl) for cl in cls.values()]) - 1
        dls = np.zeros((lmax + 1, 4), dtype=float)
        refac = np.arange(lmax + 1) * np.arange(1, lmax + 2, dtype=float) / (2. * np.pi)
        for i, k in enumerate(keys):
            cl = cls.get(k, np.zeros(lmax + 1, dtype=float))
            sli = slice(0, min(len(cl), lmax + 1))
            dls[sli, i] = cl[sli] * refac[sli]
        cldd = np.copy(cls.get('pp', None))
        if cldd is not None:
            cldd *= np.arange(len(cldd)) ** 2 * np.arange(1, len(cldd) + 1, dtype=float) ** 2 /  (2. * np.pi)
        return dls, cldd

    def dls2cls(dls):
        """Inverse operation to cls2dls"""
        assert dls.shape[1] == 4
        lmax = dls.shape[0] - 1
        cls = {}
        refac = 2. * np.pi * utils.cli( np.arange(lmax + 1) * np.arange(1, lmax + 2, dtype=float))
        for i, k in enumerate(['tt', 'ee', 'bb', 'te']):
            cls[k] = dls[:, i] * refac
        return cls
    if lmax_qlm is None:
        lmax_qlm = 2 * lmax_ivf
    lmax_qlm = min(lmax_qlm, 2 * lmax_ivf)
    lmin_ivf = max(lmin_ivf, 1)
    transfi2 = utils.cli(hp.gauss_beam(beam_fwhm / 180. / 60. * np.pi, lmax=lmax_ivf)) ** 2
    llp2 = np.arange(lmax_qlm + 1, dtype=float) ** 2 * np.arange(1, lmax_qlm + 2, dtype=float) ** 2 / (2. * np.pi)
    N0s = []
    N0 = np.inf
    for irr, it in utils.enumerate_progress(range(itermax + 1)):
        dls_unl, cldd = cls2dls(cls_unl)
        clwf = 0. if it == 0 else cldd[:lmax_qlm + 1] * utils.cli(cldd[:lmax_qlm + 1] + llp2 * N0[:lmax_qlm + 1])
        cldd[:lmax_qlm + 1] *= (1. - clwf)
        cls_plen = dls2cls(lensed_cls(dls_unl, cldd))
        cls_ivfs = {}
        if qe_key in ['ptt', 'p_p', 'p']:
            cls_ivfs['tt'] = cls_plen['tt'][:lmax_ivf + 1] + (nlev_t * np.pi / 180. / 60.) ** 2 * transfi2
        if qe_key in ['p_p', 'p']:
            cls_ivfs['ee'] = cls_plen['ee'][:lmax_ivf + 1] + (nlev_p * np.pi / 180. / 60.) ** 2 * transfi2
            cls_ivfs['bb'] = cls_plen['bb'][:lmax_ivf + 1] + (nlev_p * np.pi / 180. / 60.) ** 2 * transfi2
        if qe_key in ['p']:
            cls_ivfs['te'] = np.copy(cls_plen['te'][:lmax_ivf + 1])
        cls_ivfs = utils.cl_inverse(cls_ivfs)
        for cl in cls_ivfs.values():
            cl[:lmin_ivf] *= 0.
        fal = cls_ivfs
        n_gg = get_nhl(qe_key, qe_key, cls_plen, cls_ivfs, lmax_ivf, lmax_ivf, lmax_out=lmax_qlm)[0]
        r_gg = qresp.get_response(qe_key, lmax_ivf, 'p', cls_plen, cls_plen, fal, lmax_qlm=lmax_qlm)[0]
        N0 = n_gg * utils.cli(r_gg ** 2)
        N0s.append(N0)
    return np.array(N0s)
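A usage sketch for an iterative polarization N0 curve (the noise levels, beam and multipole ranges are hypothetical; assumes the camb python package is installed and that the unlensed spectra and lensing potential are read from a CAMB 'lenspotentialCls' file with plancklens' utils.camb_clfile):

import numpy as np
from plancklens import utils

cls_unl = utils.camb_clfile('FFP10_wdipole_lenspotentialCls.dat')  # path is an assumption
N0s = get_N0_iter('p_p', 1.5, 1.5 * np.sqrt(2.), 3., cls_unl, 10, 3000, 5, lmax_qlm=4000)
# N0s[0] is the standard N0; N0s[-1] approximates the iterated (partially delensed) polarization N0.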
Example #25
    def __init__(self,
                 lib_dir,
                 lmax,
                 nside,
                 cl,
                 transf,
                 ninv,
                 marge_maps_t=(),
                 marge_monopole=False,
                 marge_dipole=False,
                 pcf='default',
                 rescal_cl='default',
                 chain_descr=None,
                 transf_p=None):
        """Instance for joint temperature-polarization filtering

            Args:
                lib_dir: a few quantities might get cached there
                lmax: CMB filtering performed up to multipole lmax
                nside: healpy resolution of the input maps
                cl: fiducial CMB spectra used to filter the data (dict with 'tt', 'te', 'ee', 'bb' keys)
                transf: CMB transfer function in temperature
                ninv: list of lists with mask paths and / or inverse pixel noise levels.
                        TT, (QQ + UU) / 2 if len(ninv) == 2, or TT, QQ, QU, UU if == 4
                        e.g. [[iNevT,mask1,mask2,..],[iNevP,mask1,mask2...]]
                marge_maps_t: maps to project out in the filtering (T-part)
                marge_monopole: marginalizes out the T monopole if set
                marge_dipole: marginalizes out the T dipole if set

                chain_descr: preconditioner multigrid chain description (if different from default)
                transf_p: polarization transfer function (if different from temperature)


        """
        assert (lmax >= 1024)
        assert (nside >= 512)
        assert len(
            ninv) == 2 or len(ninv) == 4  # TT, (QQ + UU)/2 or TT,QQ,QU,UU

        if rescal_cl == 'default':
            rescal_cl = {
                a: np.sqrt(
                    np.arange(lmax + 1, dtype=float) *
                    np.arange(1, lmax + 2, dtype=float) / 2. / np.pi)
                for a in ['t', 'e', 'b']
            }
        elif rescal_cl is None:
            rescal_cl = {
                a: np.ones(lmax + 1, dtype=float)
                for a in ['t', 'e', 'b']
            }
        elif rescal_cl == 'tonly':
            rescal_cl = {a: np.ones(lmax + 1, dtype=float) for a in ['e', 'b']}
            rescal_cl['t'] = np.sqrt(
                np.arange(lmax + 1, dtype=float) *
                np.arange(1, lmax + 2, dtype=float) / 2. / np.pi)
        else:
            assert 0
        for k in rescal_cl.keys():
            rescal_cl[k] /= np.mean(
                rescal_cl[k]
            )  # in order not to mess around with the TEB relative weights of the spectra
        dl = {
            k: rescal_cl[k[0]] * rescal_cl[k[1]] * cl[k][:lmax + 1]
            for k in cl.keys()
        }  # rescaled cls (Dls by default)
        if transf_p is None:
            transf_p = transf
        transf_dls = {
            a: transf_p[:lmax + 1] * utils.cli(rescal_cl[a])
            for a in ['e', 'b']
        }
        transf_dls['t'] = transf[:lmax + 1] * utils.cli(rescal_cl['t'])

        self.lmax = lmax
        self.nside = nside
        self.cl = cl
        self.transf_t = transf
        self.transf_p = transf_p
        self.ninv = ninv
        self.marge_maps_t = marge_maps_t
        self.marge_maps_p = []

        self.lib_dir = lib_dir
        self.rescal_cl = rescal_cl

        if chain_descr is None:
            pcf = lib_dir + "/dense_tp.pk" if pcf == 'default' else None
            chain_descr = [[
                3, ["split(dense(" + pcf + "), 64, diag_cl)"], 256, 128, 3,
                0.0, cd_solve.tr_cg,
                cd_solve.cache_mem()
            ],
                           [
                               2, ["split(stage(3),  256, diag_cl)"], 512, 256,
                               3, 0.0, cd_solve.tr_cg,
                               cd_solve.cache_mem()
                           ],
                           [
                               1, ["split(stage(2),  512, diag_cl)"], 1024,
                               512, 3, 0.0, cd_solve.tr_cg,
                               cd_solve.cache_mem()
                           ],
                           [
                               0, ["split(stage(1), 1024, diag_cl)"], lmax,
                               nside, np.inf, 1.0e-5, cd_solve.tr_cg,
                               cd_solve.cache_mem()
                           ]]

        n_inv_filt = util.jit(opfilt_tp.alm_filter_ninv,
                              ninv,
                              transf_dls['t'],
                              b_transf_e=transf_dls['e'],
                              b_transf_b=transf_dls['b'],
                              marge_maps_t=marge_maps_t,
                              marge_monopole=marge_monopole,
                              marge_dipole=marge_dipole)
        self.chain = util.jit(multigrid.multigrid_chain, opfilt_tp,
                              chain_descr, dl, n_inv_filt)

        if mpi.rank == 0:
            if not os.path.exists(lib_dir):
                os.makedirs(lib_dir)

            if not os.path.exists(os.path.join(lib_dir, "filt_hash.pk")):
                pk.dump(self.hashdict(),
                        open(os.path.join(lib_dir, "filt_hash.pk"), 'wb'),
                        protocol=2)

            if not os.path.exists(os.path.join(lib_dir, "fal.pk")):
                pk.dump(self._calc_fal(),
                        open(os.path.join(lib_dir, "fal.pk"), 'wb'),
                        protocol=2)

            if not os.path.exists(os.path.join(self.lib_dir, "fmask.fits.gz")):
                fmask = self.calc_mask()
                hp.write_map(os.path.join(self.lib_dir, "fmask.fits.gz"),
                             fmask)

        mpi.barrier()
        utils.hash_check(
            pk.load(open(os.path.join(lib_dir, "filt_hash.pk"), 'rb')),
            self.hashdict())
Example #26
    def __init__(self,
                 lib_dir,
                 lmax,
                 nside,
                 cl,
                 transf,
                 ninv,
                 marge_maps_t=(),
                 marge_monopole=False,
                 marge_dipole=False,
                 pcf='default',
                 rescal_cl='default'):
        """Instance for joint temperature-polarization filtering

            Here ninv is a list of lists with mask paths and / or inverse pixel noise levels.
            TT, (QQ + UU) / 2 if len(ninv) == 2, or TT, QQ, QU, UU if == 4
            e.g. [[iNevT,mask1,mask2,..],[iNevP,mask1,mask2...]]


        """
        assert (lmax >= 1024)
        assert (nside >= 512)
        assert len(
            ninv) == 2 or len(ninv) == 4  # TT, (QQ + UU)/2 or TT,QQ,QU,UU

        if rescal_cl == 'default':
            rescal_cl = np.sqrt(
                np.arange(lmax + 1, dtype=float) *
                np.arange(1, lmax + 2, dtype=float) / 2. / np.pi)
        elif rescal_cl is None:
            rescal_cl = np.ones(lmax + 1, dtype=float)
        dl = {k: rescal_cl**2 * cl[k][:lmax + 1]
              for k in cl.keys()}  # rescaled cls (Dls by default)
        transf_dl = transf[:lmax + 1] * utils.cli(rescal_cl)

        self.lmax = lmax
        self.nside = nside
        self.cl = cl
        self.transf = transf
        self.ninv = ninv
        self.marge_maps_t = marge_maps_t
        self.marge_maps_p = []

        self.lib_dir = lib_dir
        self.rescal_cl = rescal_cl

        pcf = lib_dir + "/dense_tp.pk" if pcf == 'default' else None
        chain_descr = [[
            3, ["split(dense(" + pcf + "), 64, diag_cl)"], 256, 128, 3, 0.0,
            cd_solve.tr_cg,
            cd_solve.cache_mem()
        ],
                       [
                           2, ["split(stage(3),  256, diag_cl)"], 512, 256, 3,
                           0.0, cd_solve.tr_cg,
                           cd_solve.cache_mem()
                       ],
                       [
                           1, ["split(stage(2),  512, diag_cl)"], 1024, 512, 3,
                           0.0, cd_solve.tr_cg,
                           cd_solve.cache_mem()
                       ],
                       [
                           0, ["split(stage(1), 1024, diag_cl)"], lmax, nside,
                           np.inf, 1.0e-5, cd_solve.tr_cg,
                           cd_solve.cache_mem()
                       ]]

        n_inv_filt = util.jit(opfilt_tp.alm_filter_ninv,
                              ninv,
                              transf_dl,
                              marge_maps_t=marge_maps_t,
                              marge_monopole=marge_monopole,
                              marge_dipole=marge_dipole)
        self.chain = util.jit(multigrid.multigrid_chain, opfilt_tp,
                              chain_descr, dl, n_inv_filt)

        if mpi.rank == 0:
            if not os.path.exists(lib_dir):
                os.makedirs(lib_dir)

            if not os.path.exists(os.path.join(lib_dir, "filt_hash.pk")):
                pk.dump(self.hashdict(),
                        open(os.path.join(lib_dir, "filt_hash.pk"), 'wb'),
                        protocol=2)

            # if (not os.path.exists(self.lib_dir + "/fbl.dat")):
            #    fel, fbl = self.calc_febl()
            #    fel.write(self.lib_dir + "/fel.dat", lambda l: 1.0)
            #    fbl.write(self.lib_dir + "/fbl.dat", lambda l: 1.0)

            # if (not os.path.exists(self.lib_dir + "/tal.dat")):
            #    tal = self.calc_tal()
            #    tal.write(self.lib_dir + "/tal.dat", lambda l: 1.0)

            if not os.path.exists(os.path.join(self.lib_dir, "fmask.fits.gz")):
                fmask = self.calc_mask()
                hp.write_map(os.path.join(self.lib_dir, "fmask.fits.gz"),
                             fmask)

        mpi.barrier()
        utils.hash_check(
            pk.load(open(os.path.join(lib_dir, "filt_hash.pk"), 'rb')),
            self.hashdict())
Example #27
 def _calc_tal(self):
     return utils.cli(self.transf)
Example #28
        def get_n1(self,
                   kA,
                   k_ind,
                   cl_kind,
                   ftlA,
                   felA,
                   fblA,
                   Lmax,
                   kB=None,
                   ftlB=None,
                   felB=None,
                   fblB=None,
                   clttfid=None,
                   cltefid=None,
                   cleefid=None,
                   n1_flat=lambda ell: np.ones(len(ell), dtype=float),
                   sglLmode=True):
            """

            """
            if kB is None: kB = kA
            # FIXME:
            if kA[0] == 's' or kB[0] == 's':
                assert kA[0] == kB[0], 'point source implemented following the DH gradient convention, you would probably need to pick a sign there'
            if ftlB is None: ftlB = ftlA
            if felB is None: felB = felA
            if fblB is None: fblB = fblA

            clttfid = self.cltt if clttfid is None else clttfid
            cltefid = self.clte if cltefid is None else cltefid
            cleefid = self.clee if cleefid is None else cleefid

            if kA in estimator_keys and kB in estimator_keys:
                if kA < kB:
                    return self.get_n1(kB,
                                       k_ind,
                                       cl_kind,
                                       ftlA,
                                       felA,
                                       fblA,
                                       Lmax,
                                       ftlB=ftlB,
                                       felB=felB,
                                       fblB=fblB,
                                       kB=kA,
                                       clttfid=clttfid,
                                       cltefid=cltefid,
                                       cleefid=cleefid,
                                       n1_flat=n1_flat)

                idx = 'splined_kA' + kA + '_kB' + kB + '_ind' + k_ind
                idx += '_clpp' + clhash(cl_kind)
                idx += '_ftlA' + clhash(ftlA)
                idx += '_felA' + clhash(felA)
                idx += '_fblA' + clhash(fblA)
                idx += '_ftlB' + clhash(ftlB)
                idx += '_felB' + clhash(felB)
                idx += '_fblB' + clhash(fblB)
                idx += '_clttfid' + clhash(clttfid)
                idx += '_cltefid' + clhash(cltefid)
                idx += '_cleefid' + clhash(cleefid)
                idx += '_Lmax%s' % Lmax

                if self.npdb.get(idx) is None:
                    Ls = np.unique(
                        np.concatenate([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
                                        np.arange(1, Lmax + 1)[::10], [Lmax]]))
                    if sglLmode:
                        n1L = np.zeros(len(Ls), dtype=float)
                        for i, L in enumerate(Ls):
                            print("n1: doing L %s kA %s kB %s kind %s" %
                                  (L, kA, kB, k_ind))
                            n1L[i] = (self._get_n1_L(L, kA, kB, k_ind, cl_kind,
                                                     ftlA, felA, fblA, ftlB,
                                                     felB, fblB, clttfid,
                                                     cltefid, cleefid))
                    else:  # entire vector from f90 openmp call
                        lmin_ftlA = np.min([
                            np.where(np.abs(fal) > 0.)[0]
                            for fal in [ftlA, felA, fblA]
                        ])
                        lmin_ftlB = np.min([
                            np.where(np.abs(fal) > 0.)[0]
                            for fal in [ftlB, felB, fblB]
                        ])
                        n1L = n1f.n1(Ls, cl_kind, kA, kB, k_ind, self.cltt,
                                     self.clte, self.clee, clttfid, cltefid,
                                     cleefid, ftlA, felA, fblA, ftlB, felB,
                                     fblB, lmin_ftlA, lmin_ftlB, self.dL,
                                     self.lps)
                    ret = np.zeros(Lmax + 1)
                    ret[1:] = spline(Ls,
                                     np.array(n1L) * n1_flat(Ls),
                                     s=0.,
                                     ext='raise',
                                     k=3)(np.arange(1, Lmax + 1) * 1.)
                    ret[1:] *= cli(n1_flat(np.arange(1, Lmax + 1) * 1.))
                    self.npdb.add(idx, ret)
                return self.npdb.get(idx)

            assert  np.all([np.all(ftlA == ftlB), np.all(felA == felB), np.all(fblA == fblB)]), \
                    'check the est. breakdown is OK for non-identical legs'
            if (kA in estimator_keys_derived) and (kB
                                                   in estimator_keys_derived):
                ret = 0.
                for (tk1, cl1) in _get_est_derived(kA, Lmax):
                    for (tk2, cl2) in _get_est_derived(kB, Lmax):
                        tret = self.get_n1(tk1,
                                           k_ind,
                                           cl_kind,
                                           ftlA,
                                           felA,
                                           fblA,
                                           Lmax,
                                           ftlB=ftlB,
                                           felB=felB,
                                           fblB=fblB,
                                           clttfid=clttfid,
                                           cltefid=cltefid,
                                           cleefid=cleefid,
                                           kB=tk2,
                                           n1_flat=n1_flat)
                        tret *= cl1[:Lmax + 1]
                        tret *= cl2[:Lmax + 1]
                        ret += tret
                return ret
            elif (kA in estimator_keys_derived) and (kB in estimator_keys):
                ret = 0.
                for (tk1, cl1) in _get_est_derived(kA, Lmax):
                    tret = self.get_n1(tk1,
                                       k_ind,
                                       cl_kind,
                                       ftlA,
                                       felA,
                                       fblA,
                                       Lmax,
                                       ftlB=ftlB,
                                       felB=felB,
                                       fblB=fblB,
                                       kB=kB,
                                       clttfid=clttfid,
                                       cltefid=cltefid,
                                       cleefid=cleefid,
                                       n1_flat=n1_flat)
                    tret *= cl1[:Lmax + 1]
                    ret += tret
                return ret
            elif (kA in estimator_keys) and (kB in estimator_keys_derived):
                ret = 0.
                for (tk2, cl2) in _get_est_derived(kB, Lmax):
                    tret = self.get_n1(kA,
                                       k_ind,
                                       cl_kind,
                                       ftlA,
                                       felA,
                                       fblA,
                                       Lmax,
                                       ftlB=ftlB,
                                       felB=felB,
                                       fblB=fblB,
                                       kB=tk2,
                                       clttfid=clttfid,
                                       cltefid=cltefid,
                                       cleefid=cleefid,
                                       n1_flat=n1_flat)
                    tret *= cl2[:Lmax + 1]
                    ret += tret
                return ret
            assert 0
Example #29
    maps.cmb_maps_nlev(planck2018_sims.cmb_len_ffp10(),
                       transf,
                       nlev_t,
                       nlev_p,
                       nside,
                       pix_lib_phas=pix_phas),
    {idx: nsims if idx == -1 else idx
     for idx in range(-1, nsims)})
#: Simulation library. Here this combines the ffp10 lensed CMBs together with the transfer function
#  and homogeneous noise as defined by the phase library.
#  A sim library expects the index -1 to point to the data map: the use of 'sim_lib_shuffle' with the funny dictionary
#  in the last argument is just a way to define the data map as one of the simulations (outside of the set used for the analysis).

# --- We turn to the inverse-variance filtering library. In this file we use trivial isotropic filtering,
#     (independent T and Pol. filtering)
ftl = utils.cli(cl_len['tt'][:lmax_ivf + 1] +
                (nlev_t / 60. / 180. * np.pi / transf)**2)
fel = utils.cli(cl_len['ee'][:lmax_ivf + 1] +
                (nlev_p / 60. / 180. * np.pi / transf)**2)
fbl = utils.cli(cl_len['bb'][:lmax_ivf + 1] +
                (nlev_p / 60. / 180. * np.pi / transf)**2)
ftl[:lmin_ivf] *= 0.
fel[:lmin_ivf] *= 0.
fbl[:lmin_ivf] *= 0.
#: Inverse CMB co-variance in T, E and B (neglecting TE coupling).

ivfs = filt_simple.library_fullsky_sepTP(os.path.join(TEMP, 'ivfs'),
                                         sims,
                                         nside,
                                         transf,
                                         cl_len,
                                         ftl,
Example #30
File: n1.py  Project: markm42/plancklens
    def get_n1_jtp(self, kA, k_ind, cl_kind, fAlmat, Lmax, kB=None, fBlmat=None,
            clttfid=None, cltefid=None, cleefid=None, n1_flat=lambda ell: np.ones(len(ell), dtype=float)):

        if kB is None: kB = kA
        # FIXME:
        if kA[0] == 's' or kB[0] == 's':
            assert kA[0] == kB[0], 'point source implemented following the DH gradient convention, you would probably need to pick a sign there'
        if fBlmat is None: fBlmat = fAlmat

        clttfid = self.cltt if clttfid is None else clttfid
        cltefid = self.clte if cltefid is None else cltefid
        cleefid = self.clee if cleefid is None else cleefid


        if kA in estimator_keys and kB in estimator_keys:
            if kA < kB:
                return self.get_n1_jtp(kB, k_ind, cl_kind, fBlmat, Lmax, fBlmat=fAlmat, kB=kA,
                                   clttfid=clttfid, cltefid=cltefid, cleefid=cleefid, n1_flat=n1_flat)


            X, Y = kA[1:]
            I, J = kB[1:]
            assert all(i in ['t', 'e', 'b'] for i in [X, Y, I, J]), [X, Y, I, J]
            ret = 0.
            for Xp in ['t', 'e', 'b']:
                FXXp = fAlmat.get(X + Xp, fAlmat.get(Xp + X, [0.]))
                if np.any(FXXp):
                    for Yp in ['t', 'e', 'b']:
                        FYYp = fAlmat.get(Y + Yp, fAlmat.get(Yp + Y, [0.]))
                        if np.any(FYYp):
                            for Ip in ['t', 'e', 'b']:
                                FIIp = fBlmat.get(I + Ip, fBlmat.get(Ip + I, [0.]))
                                if np.any(FIIp):
                                    for Jp in ['t', 'e', 'b']:
                                        FJJp = fBlmat.get(J + Jp, fBlmat.get(Jp + J, [0.]))
                                        if np.any(FJJp):
                                            idx = 'splined_' + X + Xp + Y + Yp + I + Ip + J + Jp
                                            idx += '_clpp' + clhash(cl_kind)
                                            idx += '_fXXp' + clhash(FXXp)
                                            idx += '_fYYp' + clhash(FYYp)
                                            idx += '_fIIp' + clhash(FIIp)
                                            idx += '_fJJp' + clhash(FJJp)
                                            idx += '_clttfid' + clhash(clttfid)
                                            idx += '_cltefid' + clhash(cltefid)
                                            idx += '_cleefid' + clhash(cleefid)
                                            idx += '_Lmax%s' % Lmax

                                            if self.npdb.get(idx) is None:
                                                Ls = np.unique(np.concatenate([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], np.arange(1, Lmax + 1)[::20], [Lmax]]))
                                                n1L = np.zeros(len(Ls), dtype=float)
                                                for i, L in enumerate(Ls):
                                                    print("n1: doing L %s kA %s kB %s kind %s " % (L, kA, kB, k_ind)  + Xp + Yp + Ip + Jp)
                                                    n1L[i] = (self._get_n1_L_jtp(L, kA, kB, k_ind, cl_kind, Xp, Yp, Ip, Jp, fAlmat, fBlmat, clttfid, cltefid, cleefid))
                                                ret = np.zeros(Lmax + 1)
                                                ret[1:] =  spline(Ls, np.array(n1L) * n1_flat(Ls), s=0., ext='raise', k=3)(np.arange(1, Lmax + 1) * 1.)
                                                ret[1:] *= cli(n1_flat(np.arange(1, Lmax + 1) * 1.))
                                                self.npdb.add(idx, ret)
                                            ret = ret +  self.npdb.get(idx)
            return ret
        if (kA in estimator_keys_derived) or (kB in estimator_keys_derived):
            ret = 0.
            for (tk1, cl1) in _get_est_derived(kA, Lmax):
                for (tk2, cl2) in _get_est_derived(kB, Lmax):
                    tret = self.get_n1_jtp(tk1, k_ind, cl_kind, fAlmat, Lmax, kB=tk2, fBlmat=fBlmat,
                                    clttfid=clttfid, cltefid=cltefid, cleefid=cleefid, n1_flat=n1_flat)
                    ret = ret + tret * cl1[:Lmax + 1] * cl2[:Lmax + 1]
            return ret
        assert 0