Example 1
def get_anisotropic_noise(shape,wcs,rms,lknee,alpha,template_file=None,tmin=0,tmax=100):
    """
    This function reads in an unreddened 2D power spectrum (PS) template and returns a full 2D noise PS.
    It doesn't use the template in the most sensible way though.
    """
    ops = get_anisotropic_noise_template(shape,wcs,template_file,tmin,tmax)
    return rednoise(enmap.modlmap(shape,wcs),rms,lknee,alpha)*ops
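A minimal usage sketch for the function above. The `tilec.covtools` import path, the ability of get_anisotropic_noise_template to handle template_file=None, and the parameter values are all assumptions here:

from pixell import enmap, utils
from tilec import covtools  # assumed module path

shape, wcs = enmap.geometry(pos=(0, 0), shape=(256, 256), res=2.0 * utils.arcmin)
# hypothetical parameters: rms in uK-arcmin; lknee, alpha describe the red (large-scale) noise
n2d = covtools.get_anisotropic_noise(shape, wcs, rms=20.0, lknee=3000., alpha=-4.)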
Example 2
def rect_geometry(width_arcmin=None,width_deg=None,px_res_arcmin=0.5,proj="car",pol=False,height_deg=None,height_arcmin=None,xoffset_degree=0.,yoffset_degree=0.,extra=False,**kwargs):
    """
    Get shape and wcs for a rectangular patch of specified size and coordinate center
    """

    if width_deg is not None:
        width_arcmin = 60.*width_deg
    if height_deg is not None:
        height_arcmin = 60.*height_deg
    
    hwidth = width_arcmin/2.
    if height_arcmin is None:
        vwidth = hwidth
    else:
        vwidth = height_arcmin/2.
    arcmin =  utils.arcmin
    degree =  utils.degree
    pos = [[-vwidth*arcmin+yoffset_degree*degree,-hwidth*arcmin+xoffset_degree*degree],[vwidth*arcmin+yoffset_degree*degree,hwidth*arcmin+xoffset_degree*degree]]
    shape, wcs = enmap.geometry(pos=pos, res=px_res_arcmin*arcmin, proj=proj,**kwargs)
    if pol: shape = (3,)+shape
    if extra:
        modlmap = enmap.modlmap(shape,wcs)
        lmax = modlmap.max()
        ells = np.arange(0,lmax,1.)
        return shape,wcs,modlmap,ells
    else:
        return shape, wcs
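A short usage sketch of rect_geometry (placeholder patch size and resolution; numpy as np and pixell's enmap and utils are assumed imported as in the snippet):

shape, wcs = rect_geometry(width_deg=10., px_res_arcmin=2.0)
# with extra=True the 2D multipole map and a 1D multipole array are also returned
shape, wcs, modlmap, ells = rect_geometry(width_deg=10., px_res_arcmin=2.0, extra=True)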
Example 3
def getspec(f, lmin=50, lmax=4000, deltal=20):
    p2d = enmap.read_map(f)
    shape, wcs = p2d.shape, p2d.wcs
    bin_edges = np.arange(lmin, lmax, deltal)
    modlmap = enmap.modlmap(shape, wcs)
    binner = stats.bin2D(modlmap, bin_edges)
    cents, p1d = binner.bin(p2d)
    return cents, p1d
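A usage sketch; the FITS file name below is a hypothetical placeholder for a map holding a 2D power spectrum, and numpy, pixell's enmap and orphics' stats are assumed importable as in the snippet:

# cents are the bin centers in multipole; p1d is the azimuthally averaged 1D spectrum
cents, p1d = getspec("noise_p2d.fits", lmin=100, lmax=3000, deltal=40)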
Example 4
 def __init__(self,
              shape,
              wcs,
              beams,
              rmss,
              lknees,
              alphas,
              aniss,
              inhoms,
              nsplits,
              plancks,
              response_dict,
              ps_dict,
              ellmin=100):
     """
     TODO: inhomogeneity
     noise cross covariance
     """
     self.fc = maps.FourierCalc(shape, wcs)
     self.narrays = len(beams)
     self.modlmap = enmap.modlmap(shape, wcs)
     self.beams = beams
     self.inhoms = inhoms
     self.nsplits = nsplits
     self.plancks = plancks.astype(bool)
     self.ngens = []
     antemplate = covtools.get_anisotropic_noise_template(shape, wcs)
     for rms, lknee, alpha, anis in zip(rmss, lknees, alphas, aniss):
         if anis:
             template = antemplate.copy()
         else:
             template = 1
         p2d = covtools.rednoise(enmap.modlmap(shape, wcs), rms, lknee,
                                 alpha) * template
         p2d[self.modlmap < ellmin] = 0
         self.ngens.append(maps.MapGen(shape, wcs, p2d[None, None, ...]))
     self.fgens = {}
     assert "cmb" in ps_dict.keys()
     self.components = ps_dict.keys()
     for key in ps_dict.keys():
         self.fgens[key] = maps.MapGen(shape, wcs, ps_dict[key][None, None,
                                                                ...])
     self.shape = shape
     self.wcs = wcs
     self.rdict = response_dict
     self.ellmin = ellmin
Example 5
def compute_ps(map1, map2, mask, beamf1, beamf2):
    """Compute the FFTs, multiply, bin
    """
    kmap1 = enmap.fft(map1 * mask, normalize="phys")
    kmap2 = enmap.fft(map2 * mask, normalize="phys")
    power = (kmap1 * np.conj(kmap2)).real

    bin_edges = np.arange(0, 8000, 40)
    centers = (bin_edges[1:] + bin_edges[:-1]) / 2.
    w2 = np.mean(mask**2.)
    modlmap = enmap.modlmap(map1.shape, map1.wcs)
    binned_power = bin(power / w2 / beamf1(modlmap) / beamf2(modlmap), modlmap,
                       bin_edges)
    return centers, binned_power
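The `bin` helper called above is not shown in this snippet. A minimal sketch of one possible implementation using numpy only (an assumption about what it does, not the original helper; orphics' stats.bin2D, used in other examples here, serves the same purpose):

import numpy as np

def bin(power2d, modlmap, bin_edges):
    """Azimuthally average a 2D Fourier-space array in annular |l| bins."""
    idx = np.digitize(np.asarray(modlmap).reshape(-1), bin_edges, right=True)
    flat = np.asarray(power2d).reshape(-1)
    # bins 1..len(bin_edges)-1 correspond to the intervals between consecutive edges
    return np.array([flat[idx == i].mean() for i in range(1, len(bin_edges))])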
Example 6
def mask_kspace(shape, wcs, lxcut=None, lycut=None, lmin=None, lmax=None):
    # copied from orphics
    output = np.ones(shape[-2:], dtype=int)
    if (lmin is not None) or (lmax is not None):
        modlmap = enmap.modlmap(shape, wcs)
    if (lxcut is not None) or (lycut is not None):
        ly, lx = enmap.laxes(shape, wcs, oversample=1)
    if lmin is not None:
        output[np.where(modlmap <= lmin)] = 0
    if lmax is not None:
        output[np.where(modlmap >= lmax)] = 0
    if lxcut is not None:
        output[:, np.where(np.abs(lx) < lxcut)] = 0
    if lycut is not None:
        output[np.where(np.abs(ly) < lycut), :] = 0
    return output
Example 7
def compute_ps(map1, map2, beamf1, beamf2):
    """Compute the FFTs, multiply, bin.

    Note: relies on module-level `args`, `mask`, `pcalc`, and `bin` being in scope.
    """
    if args.fft:
        kmap1 = enmap.fft(map1*mask, normalize="phys")
        kmap2 = enmap.fft(map2*mask, normalize="phys")
        power = (kmap1*np.conj(kmap2)).real
        bin_edges = np.arange(20,8000,40)
        centers = (bin_edges[1:] + bin_edges[:-1])/2.
        w2 = np.mean(mask**2.)
        modlmap = enmap.modlmap(map1.shape,map1.wcs)
        binned_power = bin(power/w2/beamf1(modlmap)/beamf2(modlmap),modlmap,bin_edges)
        return centers, binned_power
    else:
        ells,cls = pcalc.get_power_scalarXscalar(map1*mask, map2*mask,ret_dl=False)
        return ells,cls/beamf1(ells)/beamf2(ells)
Example 8
def get_n2d(ffts, wmaps, plot_fname=None, coadd_estimator=False, dtype=None):
    assert np.all(np.isfinite(ffts))
    assert np.all(np.isfinite(wmaps))
    shape, wcs = ffts.shape[-2:], ffts.wcs
    modlmap = enmap.modlmap(shape, wcs)
    Ny, Nx = shape[-2:]
    nfreqs = ffts.shape[0]
    npol = ffts.shape[2]
    ncomps = nfreqs * npol

    def ncomp_to_freq_pol(index):
        ifreq = index // 3
        ipol = index % 3
        return ifreq, ipol

    n2d = enmap.zeros((ncomps, ncomps, Ny, Nx), wcs, dtype=dtype)
    pols = ['I', 'Q', 'U']
    for i in range(ncomps):
        for j in range(i, ncomps):
            ifreq, ipol = ncomp_to_freq_pol(i)
            jfreq, jpol = ncomp_to_freq_pol(j)

            isplits = ffts[ifreq, :, ipol]
            iwts = wmaps[ifreq, :, 0]
            if i != j:
                jsplits = ffts[jfreq, :, jpol]
                jwts = wmaps[jfreq, :, 0]
            else:
                jsplits = None
                jwts = None

            n2d[i, j] = noise_power(isplits,
                                    iwts,
                                    jsplits,
                                    jwts,
                                    coadd_estimator=coadd_estimator,
                                    pfunc=naive_power)
            if i != j: n2d[j, i] = n2d[i, j]
            if plot_fname is not None:
                plot("%s_%d_%s_%d_%s" \
                     % (plot_fname,ifreq,pols[ipol],
                        jfreq,pols[jpol]),
                     enmap.enmap(np.arcsinh(np.fft.fftshift(n2d[i,j])),wcs),dg=1,quantile=0)

    return n2d
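The packed component index above maps (frequency, polarization) to a single index i = ifreq * 3 + ipol, with polarizations ordered [I, Q, U]; a quick check of the inverse mapping implemented by ncomp_to_freq_pol:

for i in range(6):
    ifreq, ipol = divmod(i, 3)
    print(i, "->", "freq", ifreq, "pol", ['I', 'Q', 'U'][ipol])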
Example 9
def mask_kspace(shape, wcs, lxcut=None, lycut=None, lmin=None, lmax=None):
    """Produce a Fourier space mask.

    Parameters
    ----------

    shape : tuple
        The shape of the array for the geometry of the footprint. Typically 
        (...,Ny,Nx) for Ny pixels in the y-direction and Nx in the x-direction.
    wcs : :obj:`astropy.wcs.wcs.WCS`
        The wcs object completing the specification of the geometry of the footprint.
    lxcut : int, optional
        The width of a band in number of Fourier pixels to be masked in the lx direction.
        Default is no masking in this band.
    lycut : int, optional
        The width of a band in number of Fourier pixels to be masked in the ly direction.
        Default is no masking in this band.
    lmin : int, optional
        The radial distance in Fourier space below which all Fourier pixels are masked.
        Default is no masking.
    lmax : int, optional
        The radial distance in Fourier space above which all Fourier pixels are masked.
        Default is no masking.

    Returns
    -------

    output : (Ny,Nx) ndarray
        A 2D array containing the Fourier space mask.

    """
    output = np.ones(shape[-2:], dtype=int)
    if (lmin is not None) or (lmax is not None):
        modlmap = enmap.modlmap(shape, wcs)
    if (lxcut is not None) or (lycut is not None):
        ly, lx = enmap.laxes(shape, wcs, oversample=1)
    if lmin is not None:
        output[np.where(modlmap <= lmin)] = 0
    if lmax is not None:
        output[np.where(modlmap >= lmax)] = 0
    if lxcut is not None:
        output[:, np.where(np.abs(lx) < lxcut)] = 0
    if lycut is not None:
        output[np.where(np.abs(ly) < lycut), :] = 0
    return output
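A short usage sketch (assumes pixell; geometry and cuts are placeholders). This keeps 500 <= |l| <= 3000 and additionally zeroes modes with |lx| < 90:

from pixell import enmap, utils

shape, wcs = enmap.geometry(pos=(0, 0), shape=(256, 256), res=2.0 * utils.arcmin)
kmask = mask_kspace(shape, wcs, lmin=500, lmax=3000, lxcut=90)
print(kmask.shape, kmask.sum(), "unmasked Fourier pixels")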
Example 10
      def __init__(self,shape,wcs,theory,freqs,beams,noises,lknees,alphas,ellmins,ellmaxes):

          fgn = fg.fgNoises(cosmology.defaultConstants,ksz_file='/home/msyriac/repos/szar/input/ksz_BBPS.txt',
                    ksz_p_file='/home/msyriac/repos/szar/input/ksz_p_BBPS.txt',
                    tsz_cib_file='/home/msyriac/repos/szar/input/sz_x_cib_template.txt',
                    ksz_battaglia_test_csv=None,
                    tsz_battaglia_template_csv="/home/msyriac/repos/szar/input/sz_template_battaglia.csv",
                    rs_template="/home/msyriac/repos/szar/input/fiducial_scalCls_lensed_5_5.txt",
                    rsx_template="/home/msyriac/repos/szar/input/fiducial_scalCls_lensed_1_5.txt",
                    components=['tsz','cibp','cibc','radps'],lmax=20000)
        
          self.modlmap = enmap.modlmap(shape,wcs)
          modlmap = self.modlmap
          self.fgn = fgn
          lmax = self.modlmap.max()
          ells = np.arange(0,lmax,1)
          ps_cmb = theory.lCl('TT',modlmap).reshape((1,1,shape[-2],shape[-1]))
          self.ps_cmb = ps_cmb
          ps_y = fgn.tsz_template(ells).reshape((1,1,ells.size))*self.fgn.c['A_tsz']*2.*np.pi*np.nan_to_num(1./ells/(ells+1.))
          ps_cibp = (fgn.c['A_cibp'] * ((ells/fgn.c['ell0sec'])) ** 2.0 * 2.*np.pi*np.nan_to_num(1./ells/(ells+1.))).reshape((1,1,ells.size))
          ps_cibc = (fgn.c['A_cibc'] * ((ells/fgn.c['ell0sec'])) ** (2.-fgn.c['n_cib']) * 2.*np.pi*np.nan_to_num(1./ells/(ells+1.))).reshape((1,1,ells.size))
          ps_radps = (fgn.c['A_ps'] * ((ells/fgn.c['ell0sec'])) ** 2 * 2.*np.pi*np.nan_to_num(1./ells/(ells+1.))).reshape((1,1,ells.size))
          self.cgen = maps.MapGen(shape[-2:],wcs,ps_cmb)
          self.tgen = maps.MapGen(shape[-2:],wcs,ps_y)
          self.cibpgen = maps.MapGen(shape[-2:],wcs,ps_cibp)
          self.cibcgen = maps.MapGen(shape[-2:],wcs,ps_cibc)
          self.radpsgen = maps.MapGen(shape[-2:],wcs,ps_radps)
          self.shape = shape ; self.wcs = wcs
          self.freqs = freqs
          self.kbeams = []
          self.ngens = []
          self.n2ds = []
          for ai,nu in enumerate(self.freqs):
              self.kbeams.append(maps.gauss_beam(fwhm=beams[ai],ell=self.modlmap))
              n2d = cosmology.noise_func(self.modlmap,0,noises[ai],lknee=lknees[ai],alpha=alphas[ai],dimensionless=False,TCMB=2.7255e6)
              n2d[modlmap<ellmins[ai]] = 0
              n2d[modlmap>ellmaxes[ai]] = 0
              n2dmod = n2d.copy()
              n2dmod[modlmap>ellmaxes[ai]] =  1e90
              n2dmod[modlmap<ellmins[ai]] =  1e90
              self.n2ds.append(n2dmod.copy())
              ps_noise = n2d.reshape((1,1,shape[-2],shape[-1]))
              self.ngens.append(maps.MapGen(shape[-2:],wcs,ps_noise))
          self.ellmins = ellmins
          self.ellmaxes = ellmaxes
Example 11
def get_feed_dict(shape,
                  wcs,
                  theory,
                  noise_t,
                  noise_p,
                  fwhm,
                  gtfunc=None,
                  split_estimator=False,
                  noise_scale=1.):
    from pixell import enmap
    import symlens
    modlmap = enmap.modlmap(shape, wcs)
    feed_dict = {}
    feed_dict['uC_T_T'] = theory.lCl(
        'TT', modlmap) if (gtfunc is None) else gtfunc(modlmap)
    feed_dict['uC_T_E'] = theory.lCl('TE', modlmap)
    feed_dict['uC_E_E'] = theory.lCl('EE', modlmap)

    feed_dict['tC_T_T'] = theory.lCl(
        'TT', modlmap) + (noise_t * np.pi / 180. /
                          60.)**2. / symlens.gauss_beam(modlmap, fwhm)**2.
    feed_dict['tC_T_E'] = theory.lCl('TE', modlmap)
    feed_dict['tC_E_E'] = theory.lCl(
        'EE', modlmap) + (noise_p * np.pi / 180. /
                          60.)**2. / symlens.gauss_beam(modlmap, fwhm)**2.
    feed_dict['tC_B_B'] = theory.lCl(
        'BB', modlmap) + (noise_p * np.pi / 180. /
                          60.)**2. / symlens.gauss_beam(modlmap, fwhm)**2.

    if split_estimator:
        ntt = 0
        npp = 0
    else:
        ntt = noise_scale * (noise_t * np.pi / 180. /
                             60.)**2. / symlens.gauss_beam(modlmap, fwhm)**2.
        npp = noise_scale * (noise_p * np.pi / 180. /
                             60.)**2. / symlens.gauss_beam(modlmap, fwhm)**2.
    feed_dict['nC_T_T'] = theory.lCl('TT', modlmap) + ntt
    feed_dict['nC_T_E'] = theory.lCl('TE', modlmap)
    feed_dict['nC_E_E'] = theory.lCl('EE', modlmap) + npp
    feed_dict['nC_B_B'] = theory.lCl('BB', modlmap) + npp

    return feed_dict
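A minimal usage sketch (assumes pixell, symlens and orphics.cosmology are installed; the noise levels in uK-arcmin and the beam FWHM in arcmin are placeholder values):

from pixell import enmap, utils
from orphics import cosmology

shape, wcs = enmap.geometry(pos=(0, 0), shape=(256, 256), res=2.0 * utils.arcmin)
theory = cosmology.default_theory()
feed_dict = get_feed_dict(shape, wcs, theory, noise_t=10., noise_p=14., fwhm=1.4)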
Example 12
def util_bin_FFT_CAR(map1, map2, mask, beam1, beam2, lmax=8000):
    """Compute the FFTs, multiply, bin

    Beams are multiplied at bin centers. This is the worst
    job you could do for calculating power spectra.
    """
    # beam_ells = np.arange(lmax+1)

    kmap1 = enmap.fft(map1 * mask, normalize="phys")
    kmap2 = enmap.fft(map2 * mask, normalize="phys")
    power = (kmap1 * np.conj(kmap2)).real

    bin_edges = np.arange(0, lmax, 40)
    centers = (bin_edges[1:] + bin_edges[:-1]) / 2.0
    w2 = np.mean(mask**2.0)
    modlmap = enmap.modlmap(map1.shape, map1.wcs)
    binned_power = util_bin_FFTspec_CAR(power / w2, modlmap, bin_edges)
    binned_power *= beam1[centers.astype(int)]
    binned_power *= beam2[centers.astype(int)]
    return centers, binned_power
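A sketch of a more careful alternative, following the compute_ps examples above (Examples 5 and 7), which divide the 2D power by the beam transfer functions evaluated on the full modlmap before binning instead of applying beam values at bin centers. It assumes beam1/beam2 are 1D transfer functions sampled at integer multipoles and that util_bin_FFTspec_CAR is available as above; the function name is hypothetical:

import numpy as np
from pixell import enmap

def util_bin_FFT_CAR_2dbeam(map1, map2, mask, beam1, beam2, lmax=8000):
    """Hypothetical variant of util_bin_FFT_CAR that deconvolves beams on the 2D modlmap."""
    kmap1 = enmap.fft(map1 * mask, normalize="phys")
    kmap2 = enmap.fft(map2 * mask, normalize="phys")
    power = (kmap1 * np.conj(kmap2)).real
    bin_edges = np.arange(0, lmax, 40)
    centers = (bin_edges[1:] + bin_edges[:-1]) / 2.0
    w2 = np.mean(mask**2.0)
    modlmap = enmap.modlmap(map1.shape, map1.wcs)
    # Interpolate the 1D beams onto every 2D Fourier pixel instead of using bin centers
    beam1_2d = np.interp(modlmap, np.arange(len(beam1)), beam1)
    beam2_2d = np.interp(modlmap, np.arange(len(beam2)), beam2)
    return centers, util_bin_FFTspec_CAR(power / w2 / (beam1_2d * beam2_2d), modlmap, bin_edges)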
Example 13
 def __init__(self,shape,wcs,groups=None):
     # Symbolic
     self.l1x,self.l1y,self.l2x,self.l2y,self.l1,self.l2 = get_ells()
     self.Lx,self.Ly,self.L = get_Ls()
     if groups is None: groups = [self.Lx*self.Lx,self.Ly*self.Ly,self.Lx*self.Ly]
     self._default_groups = groups
     self.integrands = {}
     self.ul1s = {}
     self.ul2s = {}
     self.ogroups = {}
     self.ogroup_weights = {}
     self.ogroup_symbols = {}
     self.l1funcs = []
     self.l2funcs = []
     # Diagnostic
     self.nfft = 0
     self.nifft = 0
     # Numeric
     self.shape,self.wcs = shape,wcs
     self.modlmap = enmap.modlmap(shape,wcs)
     self.lymap,self.lxmap = enmap.lmap(shape,wcs)
     self.pixarea = np.prod(enmap.pixshape(shape,wcs))
Example 14
def test_lens_recon():
    from orphics import lensing, io, cosmology, maps
    from enlib import bench

    deg = 10.
    px = 2.0
    tellmin = 100
    tellmax = 3000
    kellmin = 40
    kellmax = 3000
    grad_cut = None
    bin_width = 80
    beam_arcmin = 0.01
    noise_uk_arcmin = 0.01

    theory = cosmology.default_theory(lpad=30000)
    shape, wcs = s.rect_geometry(width_deg=deg, px_res_arcmin=px)
    flsims = lensing.FlatLensingSims(shape, wcs, theory, beam_arcmin,
                                     noise_uk_arcmin)
    kbeam = flsims.kbeam
    modlmap = enmap.modlmap(shape, wcs)
    fc = maps.FourierCalc(shape, wcs)
    n2d = (noise_uk_arcmin * np.pi / 180. / 60.)**2. / flsims.kbeam**2.
    tmask = s.mask_kspace(shape, wcs, lmin=tellmin, lmax=tellmax)
    kmask = s.mask_kspace(shape, wcs, lmin=kellmin, lmax=kellmax)
    with bench.show("orphics init"):
        qest = lensing.qest(shape,
                            wcs,
                            theory,
                            noise2d=n2d,
                            kmask=tmask,
                            kmask_K=kmask,
                            pol=False,
                            grad_cut=grad_cut,
                            unlensed_equals_lensed=True,
                            bigell=30000)
    bin_edges = np.arange(kellmin, kellmax, bin_width)
    binner = s.bin2D(modlmap, bin_edges)
    i = 0
    unlensed, kappa, lensed, beamed, noise_map, observed = flsims.get_sim(
        seed_cmb=(i, 1),
        seed_kappa=(i, 2),
        seed_noise=(i, 3),
        lens_order=5,
        return_intermediate=True)

    kmap = enmap.fft(observed, normalize="phys")
    # _,kmap,_ = fc.power2d(observed)
    with bench.show("orphics"):
        kkappa = qest.kappa_from_map("TT",
                                     kmap / kbeam,
                                     alreadyFTed=True,
                                     returnFt=True)
    pir2d, kinput = fc.f1power(kappa, kkappa)
    pii2d = fc.f2power(kinput, kinput)
    prr2d = fc.f2power(kkappa, kkappa)
    cents, pir1d = binner.bin(pir2d)
    cents, pii1d = binner.bin(pii2d)
    cents, prr1d = binner.bin(prr2d)

    feed_dict = {}
    cltt = theory.lCl('TT', modlmap)
    feed_dict['uC_T_T'] = theory.lCl('TT', modlmap)
    feed_dict['tC_T_T'] = cltt + n2d
    feed_dict['X'] = kmap / kbeam
    feed_dict['Y'] = kmap / kbeam

    with bench.show("symlens init"):
        Al = s.A_l(shape,
                   wcs,
                   feed_dict,
                   "hdv",
                   "TT",
                   xmask=tmask,
                   ymask=tmask)
    Nl = s.N_l_from_A_l_optimal(shape, wcs, Al)
    with bench.show("symlens"):
        ukappa = s.unnormalized_quadratic_estimator(shape,
                                                    wcs,
                                                    feed_dict,
                                                    "hdv",
                                                    "TT",
                                                    xmask=tmask,
                                                    ymask=tmask)
    nkappa = Al * ukappa

    pir2d2 = fc.f2power(nkappa, kinput)
    cents, pir1d2 = binner.bin(pir2d2)

    cents, Nlkk = binner.bin(qest.N.Nlkk['TT'])
    cents, Nlkk2 = binner.bin(Nl)

    pl = io.Plotter(xyscale='linlog')
    pl.add(cents, pii1d, color='k', lw=3)
    pl.add(cents, pir1d, label='orphics')
    pl.add(cents, pir1d2, label='hdv symlens')
    pl.add(cents, Nlkk, ls="--", label='orphics')
    pl.add(cents, Nlkk2, ls="-.", label='symlens')
    pl.done("ncomp.png")
Example 15
    def get_maps(self, rot_angle1, rot_angle2, compts=None, use_sht=True, ret_alm=True, transfer=None,
                 load_processed=False, save_processed=False, flux_cut=None):
        if compts is None: compts = self.compts
        shape, wcs = self.geometry
        nshape = (len(compts),) + shape[-2:]
        ret = enmap.zeros(nshape, wcs)

        if load_processed and not ret_alm:
            for i, compt_idx in enumerate(compts):
                input_file = self.get_fits_path(self.processed_dir, rot_angle1, rot_angle2, compt_idx)
                print("loading", input_file)
                temp = enmap.read_map(input_file)
                ret[i, ...] = enmap.extract(temp, shape, wcs).copy()
                del temp
            return ret
        else:
            for i, compt_idx in enumerate(compts):
                input_file = self.get_fits_path(self.input_dir, rot_angle1, rot_angle2, compt_idx)
                print("loading", input_file)
                alm = np.complex128(hp.read_alm(input_file, hdu=(1)))
                ret[i, ...] = curvedsky.alm2map(alm, enmap.zeros(nshape[1:], wcs))
                del alm
                if compt_idx in self.highflux_cats:
                    print("adding high flux cats")

                    hiflux_cat = np.load(self.get_highflux_cat_path(compt_idx))
                    hiflux_cat[:, :2] = car2hp_coords(hiflux_cat[:, :2])

                    mat_rot, _, _ = hp.rotator.get_rotation_matrix(
                        (rot_angle1 * utils.degree * -1, rot_angle2 * utils.degree, 0))
                    uvec = hp.ang2vec(hiflux_cat[:, 0], hiflux_cat[:, 1])
                    rot_vec = np.inner(mat_rot, uvec).T
                    temppos = hp.vec2ang(rot_vec)
                    rot_pos = np.zeros(hiflux_cat[:, :2].shape)
                    rot_pos[:, 0] = temppos[0]
                    rot_pos[:, 1] = temppos[1]
                    rot_pos = hp2car_coords(rot_pos)
                    del temppos
                    rot_pix = np.round(enmap.sky2pix(nshape[-2:], wcs, rot_pos.T).T).astype(int)
                    loc = np.where((rot_pix[:, 0] >= 0) & (rot_pix[:, 0] < nshape[-2]) & (rot_pix[:, 1] >= 0.) & (
                            rot_pix[:, 1] < nshape[-1]))
                    hiflux_cat = hiflux_cat[loc[0], 2]
                    rot_pix = rot_pix[loc[0], :]

                    hiflux_map = enmap.zeros(nshape[-2:], wcs)
                    hiflux_map[rot_pix[:, 0], rot_pix[:, 1]] = hiflux_cat
                    if flux_cut is not None:
                        tmin = flux_cut * 1e-3 * jysr2thermo(148)
                        loc = np.where(hiflux_map > tmin)
                        hiflux_map[loc] = 0
                    hiflux_map = hiflux_map / enmap.pixsizemap(shape, wcs)
                    ret[i, ...] = ret[i, ...] + hiflux_map
                    del hiflux_map

        alms = None
        if transfer is not None:
            l, f = transfer
            interp_func = scipy.interpolate.interp1d(l, f, bounds_error=False, fill_value=0.)
            if use_sht:
                l_intp = np.arange(self.lmax + 1)
                f_int = interp_func(l_intp)
                alms = curvedsky.map2alm(ret, lmax=self.lmax, spin=0)
                for i in range(len(compts)):
                    alms[i] = hp.almxfl(alms[i], f_int)
                ret = curvedsky.alm2map(alms, ret, spin=0)
            else:
                ftmap = enmap.fft(ret)
                f_int = interp_func(enmap.modlmap(shape, wcs).ravel())
                ftmap = ftmap * np.reshape(f_int, (shape[-2:]))
                ret = enmap.ifft(ftmap).real
                del ftmap

        if save_processed:
            raise NotImplementedError()

        if ret_alm and alms is None:
            alms = curvedsky.map2alm(ret, lmax=self.lmax, spin=0)
        return ret if not ret_alm else (ret, alms)
Example 16
    def get_maps(self, rot_angle1, rot_angle2, compts=None, use_sht=True, ret_alm=True, transfer=None,
                 load_processed=False, save_processed=False, flux_cut=None):
        if compts is None: compts = self.compts
        shape, wcs = self.geometry
        nshape = (len(compts),) + shape[-2:]
        ret = enmap.zeros(nshape, wcs)

        if load_processed and not ret_alm:
            for i, compt_idx in enumerate(compts):
                input_file = self.get_fits_path(self.processed_dir, rot_angle1, rot_angle2, compt_idx)
                print("loading", input_file)
                temp = enmap.read_map(input_file)
                ret[i, ...] = enmap.extract(temp, shape, wcs).copy()
                del temp
            return ret
        else:
            for i, compt_idx in enumerate(compts):
                if "pts" not in compt_idx:
                    input_file = self.get_fits_path(self.input_dir, rot_angle1, rot_angle2, compt_idx)
                    print("loading", input_file)

                    alm = np.complex128(hp.read_alm(input_file, hdu=(1)))
                    ret[i, ...] = curvedsky.alm2map(alm, enmap.zeros(nshape[1:], wcs))
                else:
                    input_file = self.get_fits_path(self.input_dir, rot_angle1, rot_angle2, compt_idx,
                                                    fits_type="enmap")
                    print("loading", input_file)
                    temp = enmap.read_map(input_file)
                    ret[i, ...] = enmap.extract(temp, shape, wcs).copy()
                    del temp
        alms = None
        if transfer is not None:
            l, f = transfer
            interp_func = scipy.interpolate.interp1d(l, f, bounds_error=False, fill_value=0.)
            if use_sht:
                l_intp = np.arange(self.lmax + 1)
                f_int = interp_func(l_intp)
                alms = curvedsky.map2alm(ret, lmax=self.lmax, spin=0)
                for i in range(len(compts)):
                    alms[i] = hp.almxfl(alms[i], f_int)
                ret = curvedsky.alm2map(alms, ret, spin=0)
            else:
                ftmap = enmap.fft(ret)
                f_int = interp_func(enmap.modlmap(shape, wcs).ravel())
                ftmap = ftmap * np.reshape(f_int, (shape[-2:]))
                ret = enmap.ifft(ftmap).real
                del ftmap

        if save_processed:
            raise NotImplementedError()

        if flux_cut is not None:
            flux_map = flux_cut / enmap.pixsizemap(shape, wcs)
            flux_map *= 1e-3 * jysr2thermo(148)
            for i, compt_idx in enumerate(compts):
                if "pts" not in compt_idx: continue
                loc = np.where(ret[i] > flux_map)
                ret[i][loc] = 0.
            del flux_map

        if ret_alm and alms is None:
            alms = curvedsky.map2alm(ret, lmax=self.lmax, spin=0)
        return ret if not ret_alm else (ret, alms)
Example 17
def integrate(shape,
              wcs,
              feed_dict,
              expr,
              xmask=None,
              ymask=None,
              cache=True,
              validate=True,
              groups=None,
              pixel_units=False):
    """
    Integrate an arbitrary expression after factorizing it.

    Parameters
    ----------

    shape : tuple
        The shape of the array for the geometry of the footprint. Typically 
        (...,Ny,Nx) for Ny pixels in the y-direction and Nx in the x-direction.
    wcs : :obj:`astropy.wcs.wcs.WCS`
        The wcs object completing the specification of the geometry of the footprint.
    feed_dict: dict
        Mapping from names of custom symbols to numpy arrays.
    expr: :obj:`sympy.core.symbol.Symbol` 
        A sympy expression containing recognized symbols (see docs)
    xmask: (Ny,Nx) ndarray,optional
        Fourier space 2D mask for the l1 part of the integral. Defaults to ones.
    ymask:  (Ny,Nx) ndarray, optional
        Fourier space 2D mask for the l2 part of the integral. Defaults to ones.
    cache: boolean, optional
        Whether to store in memory and reuse repeated terms. Defaults to true.
    validate: boolean,optional
        Whether to check that the final expression and the original agree. Defaults to True.
    groups: list,optional 
        Group all terms such that they have common factors of the provided list of 
        expressions to reduce the number of FFTs.
    pixel_units: boolean,optional
        Whether the input is in pixel units or not.

    Returns
    -------

    result : (Ny,Nx) ndarray
        The numerical result of the integration of the expression after factorization.

    """
    # Geometry
    modlmap = enmap.modlmap(shape, wcs)
    lymap, lxmap = enmap.lmap(shape, wcs)
    pixarea = np.prod(enmap.pixshape(shape, wcs))
    feed_dict['L'] = modlmap
    feed_dict['Ly'] = lymap
    feed_dict['Lx'] = lxmap
    shape = shape[-2:]
    ones = np.ones(shape, dtype=np.float32)
    val = 0.
    if xmask is None: xmask = ones
    if ymask is None: ymask = ones

    # Expression
    syms = expr.free_symbols
    l1funcs = []
    l2funcs = []
    for sym in syms:
        strsym = str(sym)
        if strsym[-3:] == "_l1": l1funcs.append(sym)
        elif strsym[-3:] == "_l2": l2funcs.append(sym)

    integrands,ul1s,ul2s, \
        ogroups,ogroup_weights, \
        ogroup_symbols = factorize_2d_convolution_integral(expr,l1funcs=l1funcs,l2funcs=l2funcs,
                                                                         validate=validate,groups=groups)

    def _fft(x):
        return fft(x + 0j)

    def _ifft(x):
        return ifft(x + 0j)

    if cache:
        cached_u1s = []
        cached_u2s = []
        for u1 in ul1s:
            l12d = evaluate(u1, feed_dict) * ones
            cached_u1s.append(_ifft(l12d * xmask))
        for u2 in ul2s:
            l22d = evaluate(u2, feed_dict) * ones
            cached_u2s.append(_ifft(l22d * ymask))

    # For each term, the index of which group it belongs to

    def get_l1l2(term):
        if cache:
            ifft1 = cached_u1s[term['l1index']]
            ifft2 = cached_u2s[term['l2index']]
        else:
            l12d = evaluate(term['l1'], feed_dict) * ones
            ifft1 = _ifft(l12d * xmask)
            l22d = evaluate(term['l2'], feed_dict) * ones
            ifft2 = _ifft(l22d * ymask)
        return ifft1, ifft2

    if ogroups is None:
        for i, term in enumerate(integrands):
            ifft1, ifft2 = get_l1l2(term)
            ot2d = evaluate(term['other'], feed_dict) * ones
            ffft = _fft(ifft1 * ifft2)
            val += ot2d * ffft
    else:
        vals = np.zeros((len(ogroup_symbols), ) + shape, dtype=np.float32) + 0j
        for i, term in enumerate(integrands):
            ifft1, ifft2 = get_l1l2(term)
            gindex = ogroups[i]
            vals[gindex, ...] += ifft1 * ifft2 * ogroup_weights[i]
        for i, group in enumerate(ogroup_symbols):
            ot2d = evaluate(ogroup_symbols[i], feed_dict) * ones
            ffft = _fft(vals[i, ...])
            val += ot2d * ffft

    mul = 1 if pixel_units else 1. / pixarea
    return val * mul
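The factorization above relies on the convolution theorem: a term of the form an integral over l1 of f(l1) g(L - l1) is a convolution, so it can be evaluated as the FFT of the product of the inverse FFTs of f and g, which is what _fft(ifft1 * ifft2) does. A small, self-contained numpy check of that identity on a periodic grid (an illustration only, not symlens code; normalization conventions differ between numpy's fft and the fft/ifft wrappers used above, and only the structural identity is shown):

import numpy as np

ny, nx = 32, 32
rng = np.random.default_rng(0)
f = rng.standard_normal((ny, nx))
g = rng.standard_normal((ny, nx))

# FFT route: FFT of the product of inverse FFTs (times the number of pixels for numpy's norm)
conv_fft = np.fft.fft2(np.fft.ifft2(f) * np.fft.ifft2(g)) * (ny * nx)

# Direct circular convolution for one output wavenumber (Ly, Lx)
Ly, Lx = 5, 7
direct = sum(f[y, x] * g[(Ly - y) % ny, (Lx - x) % nx]
             for y in range(ny) for x in range(nx))

assert np.isclose(direct, conv_fft[Ly, Lx].real)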
Example 18
def calculate_yy(bin_edges,arrays,region,version,cov_versions,beam_version,
                 effective_freq,overwrite,maxval,unsanitized_beam=False,do_weights=False,
                 pa1_shift = None,
                 pa2_shift = None,
                 pa3_150_shift = None,
                 pa3_090_shift = None,
                 no_act_color_correction=False, ccor_exp = -1,
                 sim_splits=None,unblind=False,all_analytic=False,beta_samples=None):


    """
    
    We calculate the yy power spectrum as follows.
    We restrict the Fourier modes in our analysis to those within bin_edges.
    This way we don't carry irrelevant pixels and thus speed up the ability to MC.
    We accept two covariance versions in cov_versions, which correspond to 
    [act_covariance_from_split_0,act_covariance_from_split_1,other_covs].
    Thus the ACT auto covariances are pre-calculated

    """
    arrays = arrays.split(',')
    narrays = len(arrays)
    if sim_splits is not None: assert not(unblind)
    def warn(): print("WARNING: no bandpass file found. Assuming array ",dm.c['id']," has no response to CMB, tSZ and CIB.")
    aspecs = tutils.ASpecs().get_specs
    bandpasses = not(effective_freq)
    savedir = tutils.get_save_path(version,region)
    assert len(cov_versions)==3
    covdirs = [tutils.get_save_path(cov_versions[i],region) for i in range(3)]
    for covdir in covdirs: assert os.path.exists(covdir)
    if not(overwrite):
        assert not(os.path.exists(savedir)), \
       "This version already exists on disk. Please use a different version identifier."
    try: os.makedirs(savedir)
    except:
        if overwrite: pass
        else: raise


    mask = enmap.read_map(covdir+"tilec_mask.fits")


    from scipy.ndimage.filters import gaussian_filter as smooth
    pm = enmap.read_map("/scratch/r/rbond/msyriac/data/planck/data/pr2/COM_Mask_Lensing_2048_R2.00_car_deep56_interp_order0.fits")
    wcs = pm.wcs
    mask = enmap.enmap(smooth(pm,sigma=10),wcs) * mask


    shape,wcs = mask.shape,mask.wcs
    Ny,Nx = shape
    modlmap = enmap.modlmap(shape,wcs)
    omodlmap = modlmap.copy()
    ells = np.arange(0,modlmap.max())
    minell = maps.minimum_ell(shape,wcs)
    sel = np.where(np.logical_and(modlmap>=bin_edges[0]-minell,modlmap<=bin_edges[-1]+minell))
    modlmap = modlmap[sel]

    bps = []
    lbeams = []
    kbeams = []
    shifts = []
    cfreqs = []
    lmins = []
    lmaxs = []
    names = []
    for i,qid in enumerate(arrays):
        dm = sints.models[sints.arrays(qid,'data_model')](region=mask,calibrated=True)
        if dm.name=='act_mr3':
            season,array1,array2 = sints.arrays(qid,'season'),sints.arrays(qid,'array'),sints.arrays(qid,'freq')
            array = '_'.join([array1,array2])
        elif dm.name=='planck_hybrid':
            season,patch,array = None,None,sints.arrays(qid,'freq')
        else:
            raise ValueError
        lmin,lmax,hybrid,radial,friend,cfreq,fgroup,wrfit = aspecs(qid)
        lmins.append(lmin)
        lmaxs.append(lmax)
        names.append(qid)
        cfreqs.append(cfreq)
        if bandpasses:
            try: 
                fname = dm.get_bandpass_file_name(array) 
                bps.append("data/"+fname)
                if (pa1_shift is not None) and 'PA1' in fname:
                    shifts.append(pa1_shift)
                elif (pa2_shift is not None) and 'PA2' in fname:
                    shifts.append(pa2_shift)
                elif (pa3_150_shift is not None) and ('PA3' in fname) and ('150' in fname):
                    shifts.append(pa3_150_shift)
                elif (pa3_090_shift is not None) and ('PA3' in fname) and ('090' in fname):
                    shifts.append(pa3_090_shift)
                else:
                    shifts.append(0)

            except:
                warn()
                bps.append(None)
        else:
            try: bps.append(cfreq)
            except:
                warn()
                bps.append(None)

        kbeam = tutils.get_kbeam(qid,modlmap,sanitize=not(unsanitized_beam),version=beam_version,planck_pixwin=True)
        if dm.name=='act_mr3':
            lbeam = tutils.get_kbeam(qid,ells,sanitize=not(unsanitized_beam),version=beam_version,planck_pixwin=False) # note: no pixel window, but it doesn't matter since there is no color correction for Planck
        elif dm.name=='planck_hybrid':
            lbeam = None
        else:
            raise ValueError
        lbeams.append(lbeam)
        kbeams.append(kbeam.copy())
    # Make responses
    responses = {}

    def _get_response(comp,param_override=None):
        if bandpasses:
            if no_act_color_correction:
                r = tfg.get_mix_bandpassed(bps, comp, bandpass_shifts=shifts,
                                           param_dict_override=param_override)
            else:
                r = tfg.get_mix_bandpassed(bps, comp, bandpass_shifts=shifts,
                                           ccor_cen_nus=cfreqs, ccor_beams=lbeams, 
                                           ccor_exps = [ccor_exp] * narrays,
                                           param_dict_override=param_override)
        else:
            r = tfg.get_mix(bps, comp,param_dict_override=param_override)
        return r

    for comp in ['tSZ','CMB','CIB']:
        responses[comp] = _get_response(comp,None)


    
    from tilec.utils import is_planck
    ilcgens = []
    okcoadds = []
    for splitnum in range(2):
        covdir = covdirs[splitnum]
        kcoadds = []
        for i,qid in enumerate(arrays):
            lmin = lmins[i]
            lmax = lmaxs[i]

            if is_planck(qid):
                dm = sints.models[sints.arrays(qid,'data_model')](region=mask,calibrated=True)

                _,kcoadd,_ = kspace.process(dm,region,qid,mask,
                                            skip_splits=True,
                                            splits_fname=sim_splits[i] if sim_splits is not None else None,
                                            inpaint=False,fn_beam = None,
                                            plot_inpaint_path = None,
                                            split_set=splitnum)
            else:
                kcoadd_name = covdir + "kcoadd_%s.npy" % qid
                kcoadd = enmap.enmap(np.load(kcoadd_name),wcs)

            kmask = maps.mask_kspace(shape,wcs,lmin=lmin,lmax=lmax)
            dtype = kcoadd.dtype
            kcoadds.append((kcoadd.copy()*kmask)[sel])

        kcoadds = enmap.enmap(np.stack(kcoadds),wcs)
        okcoadds.append(kcoadds.copy())


        # Read Covmat
        ctheory = ilc.CTheory(modlmap)
        nells = kcoadds[0].size
        cov = np.zeros((narrays,narrays,nells))
        for aindex1 in range(narrays):
            for aindex2 in range(aindex1,narrays):
                qid1 = names[aindex1]
                qid2 = names[aindex2]
                if is_planck(names[aindex1]) or is_planck(names[aindex2]) or all_analytic:
                    lmin,lmax,hybrid,radial,friend,f1,fgroup,wrfit = aspecs(qid1)
                    lmin,lmax,hybrid,radial,friend,f2,fgroup,wrfit = aspecs(qid2)
                    # If both are Planck and same array, get white noise from last bin
                    icov = ctheory.get_theory_cls(f1,f2,a_cmb=1,a_gal=0.8)*kbeams[aindex1]*kbeams[aindex2]
                    if aindex1==aindex2:
                        pcov = enmap.enmap(np.load(covdirs[2]+"tilec_hybrid_covariance_%s_%s.npy" % (names[aindex1],names[aindex2])),wcs)
                        pbin_edges = np.append(np.arange(500,3000,200) ,[3000,4000,5000,5800])
                        pbinner = stats.bin2D(omodlmap,pbin_edges)
                        w = pbinner.bin(pcov)[1][-1]
                        icov = icov + w
                else:
                    icov = np.load(covdir+"tilec_hybrid_covariance_%s_%s.npy" % (names[aindex1],names[aindex2]))[sel]
                if aindex1==aindex2: 
                    icov[modlmap<lmins[aindex1]] = maxval
                    icov[modlmap>lmaxs[aindex1]] = maxval
                cov[aindex1,aindex2] = icov
                cov[aindex2,aindex1] = icov

        assert np.all(np.isfinite(cov))

        ilcgen = ilc.HILC(modlmap,np.stack(kbeams),cov=cov,responses=responses,invert=True)
        ilcgens.append(ilcgen)
      

    solutions = ['tSZ','tSZ-CMB','tSZ-CIB']
    ypowers = {}
    w2 = np.mean(mask**2.)
    binner = stats.bin2D(modlmap,bin_edges)
    np.random.seed(100)
    blinding = np.random.uniform(0.8,1.2) if not(unblind) else 1


    def _get_ypow(sname,dname,dresponse=None,dcmb=False):

        if dresponse is not None:
            assert dname is not None
            for splitnum in range(2):
                ilcgens[splitnum].add_response(dname,dresponse)

        ykmaps = []
        for splitnum in range(2):
            if dcmb:
                assert dname is not None
                ykmap = ilcgens[splitnum].multi_constrained_map(okcoadds[splitnum],sname,[dname,"CMB"])
            else:
                if dname is None:
                    ykmap = ilcgens[splitnum].standard_map(okcoadds[splitnum],sname)
                else:
                    ykmap = ilcgens[splitnum].constrained_map(okcoadds[splitnum],sname,dname)
            ykmaps.append(ykmap.copy())

        ypower = (ykmaps[0]*ykmaps[1].conj()).real / w2
        return binner.bin(ypower)[1] * blinding


    # The usual solutions
    for solution in solutions:

        sols = solution.split('-')
        if len(sols)==2:
            sname = sols[0]
            dname = sols[1]
        elif len(sols)==1:
            sname = sols[0]
            dname = None
        else:
            raise ValueError

        ypowers[solution] = _get_ypow(sname,dname,dresponse=None)


    # The CIB SED samples
    if beta_samples is not None:
        y_bsamples = []
        y_bsamples_cmb = []
        for beta in beta_samples:
            pdict = tfg.default_dict.copy()
            pdict['beta_CIB'] = beta
            response = _get_response("CIB",param_override=pdict)
            y_bsamples.append(  _get_ypow("tSZ","iCIB",dresponse=response,dcmb=False) )
            y_bsamples_cmb.append(  _get_ypow("tSZ","iCIB",dresponse=response,dcmb=True) )
    else:
        y_bsamples = None
        y_bsamples_cmb = None


    return binner.centers,ypowers,y_bsamples,y_bsamples_cmb
Example 19
def build_and_save_ilc(arrays,region,version,cov_version,beam_version,
                       solutions,beams,chunk_size,
                       effective_freq,overwrite,maxval,unsanitized_beam=False,do_weights=False,
                       pa1_shift = None,
                       pa2_shift = None,
                       pa3_150_shift = None,
                       pa3_090_shift = None,
                       no_act_color_correction=False, ccor_exp = -1, 
                       isotropize=False, isotropize_width=20):

    print("Chunk size is ", chunk_size*64./8./1024./1024./1024., " GB.")
    def warn(): print("WARNING: no bandpass file found. Assuming array ",dm.c['id']," has no response to CMB, tSZ and CIB.")
    aspecs = tutils.ASpecs().get_specs
    bandpasses = not(effective_freq)
    savedir = tutils.get_save_path(version,region)
    covdir = tutils.get_save_path(cov_version,region)
    assert os.path.exists(covdir)
    if not(overwrite):
        assert not(os.path.exists(savedir)), \
       "This version already exists on disk. Please use a different version identifier."
    try: os.makedirs(savedir)
    except:
        if overwrite: pass
        else: raise


    mask = enmap.read_map(covdir+"tilec_mask.fits")
    shape,wcs = mask.shape,mask.wcs
    Ny,Nx = shape
    modlmap = enmap.modlmap(shape,wcs)



    arrays = arrays.split(',')
    narrays = len(arrays)
    kcoadds = []
    kbeams = []
    bps = []
    names = []
    lmins = []
    lmaxs = []
    shifts = []
    cfreqs = []
    lbeams = []
    ells = np.arange(0,modlmap.max())
    for i,qid in enumerate(arrays):
        dm = sints.models[sints.arrays(qid,'data_model')](region=mask,calibrated=True)
        lmin,lmax,hybrid,radial,friend,cfreq,fgroup,wrfit = aspecs(qid)
        cfreqs.append(cfreq)
        lmins.append(lmin)
        lmaxs.append(lmax)
        names.append(qid)
        if dm.name=='act_mr3':
            season,array1,array2 = sints.arrays(qid,'season'),sints.arrays(qid,'array'),sints.arrays(qid,'freq')
            array = '_'.join([array1,array2])
        elif dm.name=='planck_hybrid':
            season,patch,array = None,None,sints.arrays(qid,'freq')
        else:
            raise ValueError
        kcoadd_name = covdir + "kcoadd_%s.npy" % qid
        kmask = maps.mask_kspace(shape,wcs,lmin=lmin,lmax=lmax)
        kcoadd = enmap.enmap(np.load(kcoadd_name),wcs)
        dtype = kcoadd.dtype
        kcoadds.append(kcoadd.copy()*kmask)
        kbeam = tutils.get_kbeam(qid,modlmap,sanitize=not(unsanitized_beam),version=beam_version,planck_pixwin=True)
        if dm.name=='act_mr3':
            lbeam = tutils.get_kbeam(qid,ells,sanitize=not(unsanitized_beam),version=beam_version,planck_pixwin=False) # note: no pixel window, but it doesn't matter since there is no color correction for Planck
        elif dm.name=='planck_hybrid':
            lbeam = None
        else:
            raise ValueError
        lbeams.append(lbeam)
        kbeams.append(kbeam.copy())
        if bandpasses:
            try: 
                fname = dm.get_bandpass_file_name(array) 
                bps.append("data/"+fname)
                if (pa1_shift is not None) and 'PA1' in fname:
                    shifts.append(pa1_shift)
                elif (pa2_shift is not None) and 'PA2' in fname:
                    shifts.append(pa2_shift)
                elif (pa3_150_shift is not None) and ('PA3' in fname) and ('150' in fname):
                    shifts.append(pa3_150_shift)
                elif (pa3_090_shift is not None) and ('PA3' in fname) and ('090' in fname):
                    shifts.append(pa3_090_shift)
                else:
                    shifts.append(0)

            except:
                warn()
                bps.append(None)
        else:
            try: bps.append(cfreq)
            except:
                warn()
                bps.append(None)

    kcoadds = enmap.enmap(np.stack(kcoadds),wcs)



    # Read Covmat
    cov = maps.SymMat(narrays,shape[-2:])
    for aindex1 in range(narrays):
        for aindex2 in range(aindex1,narrays):
            icov = enmap.enmap(np.load(covdir+"tilec_hybrid_covariance_%s_%s.npy" % (names[aindex1],names[aindex2])),wcs)
            if isotropize:
                bin_edges = np.append([0.],np.arange(min(lmins),modlmap.max(),isotropize_width))
                binner = stats.bin2D(modlmap,bin_edges)
                ls,c1d = binner.bin(icov)
                icov = maps.interp(ls,c1d)(modlmap)
                
            if aindex1==aindex2: 
                icov[modlmap<lmins[aindex1]] = maxval
                icov[modlmap>lmaxs[aindex1]] = maxval
            cov[aindex1,aindex2] = icov
    cov.data = enmap.enmap(cov.data,wcs,copy=False)
    covfunc = lambda sel: cov.to_array(sel,flatten=True)

    assert cov.data.shape[0]==((narrays*(narrays+1))/2) # FIXME: generalize
    assert np.all(np.isfinite(cov.data))

    # Make responses
    responses = {}
    for comp in ['tSZ','CMB','CIB']:
        if bandpasses:
            if no_act_color_correction:
                responses[comp] = tfg.get_mix_bandpassed(bps, comp, bandpass_shifts=shifts)
            else:
                responses[comp] = tfg.get_mix_bandpassed(bps, comp, bandpass_shifts=shifts,
                                                         ccor_cen_nus=cfreqs, ccor_beams=lbeams, 
                                                         ccor_exps = [ccor_exp] * narrays)
        else:
            responses[comp] = tfg.get_mix(bps, comp)

    ilcgen = ilc.chunked_ilc(modlmap,np.stack(kbeams),covfunc,chunk_size,responses=responses,invert=True)

    # Initialize containers
    solutions = solutions.split(',')
    data = {}
    kcoadds = kcoadds.reshape((narrays,Ny*Nx))
    for solution in solutions:
        data[solution] = {}
        comps = solution.split('-')
        data[solution]['comps'] = comps
        if len(comps)<=2: 
            data[solution]['noise'] = enmap.zeros((Ny*Nx),wcs)
        if len(comps)==2: 
            data[solution]['cnoise'] = enmap.zeros((Ny*Nx),wcs)
        data[solution]['kmap'] = enmap.zeros((Ny*Nx),wcs,dtype=dtype) # FIXME: reduce dtype?
        if do_weights and len(comps)<=2:
            for qid in arrays:
                data[solution]['weight_%s' % qid] = enmap.zeros((Ny*Nx),wcs)
            

    for chunknum,(hilc,selchunk) in enumerate(ilcgen):
        print("ILC on chunk ", chunknum+1, " / ",int(modlmap.size/chunk_size)+1," ...")
        for solution in solutions:
            comps = data[solution]['comps']
            if len(comps)==1: # GENERALIZE
                data[solution]['noise'][selchunk] = hilc.standard_noise(comps[0])
                if do_weights: weight = hilc.standard_weight(comps[0])
                data[solution]['kmap'][selchunk] = hilc.standard_map(kcoadds[...,selchunk],comps[0])
            elif len(comps)==2:
                data[solution]['noise'][selchunk] = hilc.constrained_noise(comps[0],comps[1])
                data[solution]['cnoise'][selchunk] = hilc.cross_noise(comps[0],comps[1])
                ret = hilc.constrained_map(kcoadds[...,selchunk],comps[0],comps[1],return_weight=do_weights)
                if do_weights:
                    data[solution]['kmap'][selchunk],weight = ret
                else:
                    data[solution]['kmap'][selchunk] = ret

            elif len(comps)>2:
                data[solution]['kmap'][selchunk] = np.nan_to_num(hilc.multi_constrained_map(kcoadds[...,selchunk],comps[0],*comps[1:]))

            if len(comps)<=2 and do_weights:
                for qind,qid in enumerate(arrays):
                    data[solution]['weight_%s' % qid][selchunk] = weight[qind]


    del ilcgen,cov

    # Reshape into maps
    name_map = {'CMB':'cmb','tSZ':'comptony','CIB':'cib'}
    beams = beams.split(',')
    for solution,beam in zip(solutions,beams):
        comps = "tilec_single_tile_"+region+"_"
        comps = comps + name_map[data[solution]['comps'][0]]+"_"
        if len(data[solution]['comps'])>1: comps = comps + "deprojects_"+ '_'.join([name_map[x] for x in data[solution]['comps'][1:]]) + "_"
        comps = comps + version

        if do_weights and len(data[solution]['comps'])<=2:
            for qind,qid in enumerate(arrays):
                enmap.write_map("%s/%s_%s_weight.fits" % (savedir,comps,qid), enmap.enmap(data[solution]['weight_%s' % qid].reshape((Ny,Nx)),wcs))
            


        try:
            noise = enmap.enmap(data[solution]['noise'].reshape((Ny,Nx)),wcs)
            enmap.write_map("%s/%s_noise.fits" % (savedir,comps),noise)
        except: pass
        try:
            cnoise = enmap.enmap(data[solution]['cnoise'].reshape((Ny,Nx)),wcs)
            enmap.write_map("%s/%s_cross_noise.fits" % (savedir,comps),cnoise)
        except: pass

        ells = np.arange(0,modlmap.max(),1)
        try:
            fbeam = float(beam)
            kbeam = maps.gauss_beam(modlmap,fbeam)
            lbeam = maps.gauss_beam(ells,fbeam)
        except:
            qid = beam
            bfunc = lambda x: tutils.get_kbeam(qid,x,version=beam_version,sanitize=not(unsanitized_beam),planck_pixwin=False)
            kbeam = bfunc(modlmap)
            lbeam = bfunc(ells)

        kmap = enmap.enmap(data[solution]['kmap'].reshape((Ny,Nx)),wcs)
        smap = enmap.ifft(kbeam*kmap,normalize='phys').real
        enmap.write_map("%s/%s.fits" % (savedir,comps),smap)
        io.save_cols("%s/%s_beam.txt" % (savedir,comps),(ells,lbeam),header="ell beam")


    enmap.write_map(savedir+"/tilec_mask.fits",mask)
Example 20
def noise_average(n2d,dfact=(16,16),lmin=300,lmax=8000,wnoise_annulus=500,bin_annulus=20,
                  lknee_guess=3000,alpha_guess=-4,nparams=None,modlmap=None,
                  verbose=False,method="fft",radial_fit=True,
                  oshape=None,upsample=True,fill_lmax=None,fill_lmax_width=100):
    """Find the empirical mean noise binned in blocks of dfact[0] x dfact[1] . Preserves noise anisotropy.
    Most arguments are for the radial fitting part.
    A radial fit is divided out before downsampling (by default by FFT) and then multplied back with the radial fit.
    Watch for ringing in the final output.
    n2d noise power
    """
    assert np.all(np.isfinite(n2d))
    shape,wcs = n2d.shape,n2d.wcs
    minell = maps.minimum_ell(shape,wcs)
    if modlmap is None: modlmap = enmap.modlmap(shape,wcs)
    Ny,Nx = shape[-2:]
    if radial_fit:
        if nparams is None:
            if verbose: print("Radial fitting...")
            nparams = fit_noise_1d(n2d,lmin=lmin,lmax=lmax,wnoise_annulus=wnoise_annulus,
                                bin_annulus=bin_annulus,lknee_guess=lknee_guess,alpha_guess=alpha_guess)
        wfit,lfit,afit = nparams
        nfitted = rednoise(modlmap,wfit,lfit,afit)
    else:
        nparams = None
        nfitted = 1.
    nflat = enmap.enmap(np.nan_to_num(n2d/nfitted),wcs) # flattened 2d noise power
    if fill_lmax is not None:
        fill_avg = nflat[np.logical_and(modlmap>(fill_lmax-fill_lmax_width),modlmap<=fill_lmax)].mean()
        nflat[modlmap>fill_lmax] = fill_avg
    if oshape is None: oshape = (Ny//dfact[0],Nx//dfact[1])
    if verbose: print("Resampling...")
    nint = enmap.resample(enmap.enmap(nflat,wcs), oshape, method=method)
    if not(upsample):
        if radial_fit:
            nshape,nwcs = nint.shape,nint.wcs
            modlmap = enmap.modlmap(nshape,nwcs)
            nfitted = rednoise(modlmap,wfit,lfit,afit)
        ndown = nint
    else:
        ndown = enmap.enmap(enmap.resample(nint,shape,method=method),wcs)
    outcov = ndown*nfitted
    outcov[modlmap<minell] = 0 #np.inf
    if fill_lmax is not None: outcov[modlmap>fill_lmax] = 0
    # res,_ = curve_fit(ntemplatefunc,cents,dn1d,p0=[lknee_guess,alpha_guess],bounds=([lknee_min,alpha_min],[lknee_max,alpha_max]))



    # bad_ells = modlmap[np.isnan(outcov)]
    # for ell in bad_ells:
    #     print(ell)
    # print(maps.minimum_ell(shape,wcs))
    # from orphics import io
    # # io.hplot(enmap.enmap(np.fft.fftshift(np.log(n2d)),wcs),"fitnoise_npower.png")
    # # io.hplot(enmap.enmap(np.fft.fftshift(np.log(outcov)),wcs),"fitnoise_ndown.png")
    # io.plot_img(enmap.enmap(np.fft.fftshift(np.log(n2d)),wcs),"fitnoise_npower_lowres.png",aspect='auto')#,lim=[-20,-16])
    # io.plot_img(enmap.enmap(np.fft.fftshift(np.log(ndown)),wcs),"fitnoise_ndown_lowres.png",aspect='auto')#,lim=[-20,-16])
    # io.plot_img(enmap.enmap(np.fft.fftshift(np.log(outcov)),wcs),"fitnoise_outcov_lowres.png",aspect='auto')#,lim=[-20,-16])

    # import time
    # t = time.time()
    # fbin_edges = np.arange(lmin,lmax,bin_annulus)
    # fbinner = stats.bin2D(modlmap,fbin_edges)
    # cents, n1d = fbinner.bin(n2d)
    # cents,dn1d = fbinner.bin(outcov)
    # pl = io.Plotter(xyscale='linlog',xlabel='l',ylabel='D',scalefn=lambda x: x**2./2./np.pi)
    # pl.add(cents,n1d)
    # pl.add(cents,dn1d,ls="--")
    # pl.done(os.environ['WORK']+"/fitnoise2_%s.png" % t)
    # sys.exit()

    assert not(np.any(np.isnan(outcov)))
    return outcov,nfitted,nparams
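A minimal usage sketch with a flat white-noise 2D power as a toy input (assumes pixell and that the helpers this function calls, such as maps.minimum_ell and enmap.resample, are importable as in the snippet; the radial fit is skipped for simplicity and all values are placeholders):

from pixell import enmap, utils
import numpy as np

shape, wcs = enmap.geometry(pos=(0, 0), shape=(512, 512), res=1.0 * utils.arcmin)
wnoise = (20.0 * np.pi / 180. / 60.)**2.   # 20 uK-arcmin white-noise power in uK^2 sr
n2d = enmap.enmap(np.full(shape, wnoise), wcs)
smoothed, nfitted, nparams = noise_average(n2d, dfact=(16, 16), radial_fit=False)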
Example 21
def test_pol():
    from orphics import lensing, io, cosmology, maps

    est = "hu_ok"
    pols = ['TT', 'EE', 'TE', 'EB', 'TB']
    # est = "hdv"
    # pols = ['TT','EE','TE','ET','EB','TB']

    deg = 5.
    px = 2.0
    tellmin = 30
    tellmax = 3000
    pellmin = 30
    pellmax = 5000
    kellmin = 10
    kellmax = 5000
    bin_width = 40

    beam_arcmin = 1.5
    noise_uk_arcmin = 10.0

    theory = cosmology.default_theory(lpad=30000)
    shape, wcs = s.rect_geometry(width_deg=deg, px_res_arcmin=px)
    modlmap = enmap.modlmap(shape, wcs)
    kbeam = s.gauss_beam(modlmap, beam_arcmin)
    n2d = (noise_uk_arcmin * np.pi / 180. / 60.)**2. / kbeam**2.
    tmask = s.mask_kspace(shape, wcs, lmin=tellmin, lmax=tellmax)
    pmask = s.mask_kspace(shape, wcs, lmin=pellmin, lmax=pellmax)
    kmask = s.mask_kspace(shape, wcs, lmin=kellmin, lmax=kellmax)
    bin_edges = np.arange(kellmin, kellmax, bin_width)
    binner = s.bin2D(modlmap, bin_edges)

    feed_dict = {}
    cltt = theory.lCl('TT', modlmap)
    clee = theory.lCl('EE', modlmap)
    clbb = theory.lCl('BB', modlmap)
    clte = theory.lCl('TE', modlmap)
    feed_dict['uC_T_T'] = cltt
    feed_dict['tC_T_T'] = (cltt + n2d)
    feed_dict['uC_E_E'] = clee
    feed_dict['tC_E_E'] = (clee + n2d * 2.)
    feed_dict['uC_B_B'] = clbb
    feed_dict['tC_B_B'] = (clbb + n2d * 2.)
    feed_dict['uC_T_E'] = clte
    feed_dict['tC_T_E'] = clte

    ells = np.arange(0, 10000, 1)
    pl = io.Plotter(xyscale='loglog')
    pl.add(ells, theory.gCl('kk', ells))
    imask = {'T': tmask, 'E': pmask, 'B': pmask}
    for pol in pols:
        print(pol)
        X, Y = pol
        cents, Nl = binner.bin(
            s.N_l(shape,
                  wcs,
                  feed_dict,
                  est,
                  pol,
                  xmask=imask[X],
                  ymask=imask[Y]))
        pl.add(cents, Nl, label=pol)
    pl._ax.set_xlim(10, kellmax)
    pl.done("nls.png")
Example 22
 def getmodlmap(self, shape, wcs):
     return enmap.modlmap(shape, wcs)
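For reference, enmap.modlmap returns, for the given geometry, the magnitude |l| of the Fourier wavenumber at every 2D Fourier pixel; nearly every example in this section uses it to evaluate isotropic spectra, beams and masks on the 2D grid. A quick check (assumes pixell; geometry values are placeholders):

from pixell import enmap, utils

shape, wcs = enmap.geometry(pos=(0, 0), shape=(128, 128), res=2.0 * utils.arcmin)
modlmap = enmap.modlmap(shape, wcs)
print(modlmap.shape, modlmap.min(), modlmap.max())  # (128, 128); zero at the DC mode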
Example 23
    def auto(self, Lmin, Lmax, delta_L):
        """
        Get cutout reconstructed kappa auto-power or cross-power with input cutout kappa
        """
        # for statistics
        st = stats.Stats()
        # Initialize upper-left pixel corner
        iy, ix = 0, 0

        for itile in range(self.ntiles):
            # Get bottom-right pixel corner
            ey = iy + self.npix
            ex = ix + self.npix

            # Slice both cmb maps
            cut_cmb1 = self.cmb1[iy:ey, ix:ex]
            cut_cmb2 = self.cmb2[iy:ey, ix:ex]

            # Get geometry of the cutouts; cut_cmb1 and cut_cmb2 are assumed to share the same geometry
            cut_shape = cut_cmb1.shape
            cut_wcs = cut_cmb1.wcs
            cut_modlmap = enmap.modlmap(cut_shape, cut_wcs)
            ells = np.arange(0, cut_modlmap.max() + 1, 1)
            ctt = self.theory.lCl('TT', ells)

            # Get taper for apodization
            taper, w2 = maps.get_taper_deg(cut_shape, cut_wcs)

            # Define feed_dict for symlens
            feed_dict = {}
            feed_dict['uC_T_T'] = utils.interp(ells, ctt)(cut_modlmap)
            feed_dict['tC_T_T'] = utils.interp(ells, ctt)(cut_modlmap) + (
                self.nlev_t * np.pi / 180. / 60.)**2. / utils.gauss_beam(
                    cut_modlmap, self.beam_arcmin)**2

            # Get cmb mask
            cmask = utils.mask_kspace(cut_shape,
                                      cut_wcs,
                                      lmin=self.ellmin,
                                      lmax=self.ellmax)
            # Get mask for reconstruction
            kmask = utils.mask_kspace(cut_shape, cut_wcs, lmin=Lmin, lmax=Lmax)
            # Stride across the map, horizontally first and
            # increment vertically when at the end of a row
            if (itile + 1) % self.num_x != 0:
                ix = ix + self.npix
            else:
                ix = 0
                iy = iy + self.npix

            # Apodize cutout CMB maps
            cut_cmb1 = taper * cut_cmb1
            cut_cmb2 = taper * cut_cmb2

            # Get the Fourier maps
            cut_cmb1_k = enmap.fft(cut_cmb1, normalize='phys')
            cut_cmb2_k = enmap.fft(cut_cmb2, normalize='phys')

            # Reconstruct kappa Fourier maps
            cut_reckap1, noise_2d = cutout_rec(cut_shape, cut_wcs, feed_dict,
                                               cmask, kmask, cut_cmb1_k,
                                               cut_cmb1_k)
            cut_reckap2, noise_2d = cutout_rec(cut_shape, cut_wcs, feed_dict,
                                               cmask, kmask, cut_cmb2_k,
                                               cut_cmb2_k)

            # Get auto power spectra
            center_L, cut_reckap1_x_reckap1 = powspec(cut_reckap1, cut_reckap1,
                                                      taper, 4, cut_modlmap,
                                                      Lmin, Lmax, delta_L)
            center_L, cut_reckap2_x_reckap2 = powspec(cut_reckap2, cut_reckap2,
                                                      taper, 4, cut_modlmap,
                                                      Lmin, Lmax, delta_L)

            # Get bias
            bias = (cut_reckap2_x_reckap2 -
                    cut_reckap1_x_reckap1) / cut_reckap1_x_reckap1

            # Add to stats
            st.add_to_stats('reckap1 x reckap1', cut_reckap1_x_reckap1)
            st.add_to_stats('reckap2 x reckap2', cut_reckap2_x_reckap2)
            st.add_to_stats('bias', bias)

        # Get spectra and bias statistics
        st.get_stats()

        return center_L, st
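The corner bookkeeping in the loop above is the part most easily misread, so here is a self-contained sketch of just the stride pattern, with hypothetical values for npix and the tile grid; it collects the upper-left corner of each tile and shows the sweep going across each row before dropping down.

npix, num_x, num_y = 128, 4, 3        # hypothetical tile size and grid, for illustration only
ntiles = num_x * num_y
iy, ix = 0, 0
corners = []
for itile in range(ntiles):
    corners.append((iy, ix))
    # Stride horizontally, wrapping to the start of the next row at the end of each row
    if (itile + 1) % num_x != 0:
        ix = ix + npix
    else:
        ix = 0
        iy = iy + npix
print(corners)                        # [(0, 0), (0, 128), (0, 256), (0, 384), (128, 0), ...]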
Example n. 24
0
if nsims > 0:
    bin_edges = np.arange(40, 8000, 40)
    p1ds = []
    for i in range(nsims):
        print("Sim %d of %d ..." % (i + 1, nsims))
        with bench.show("simgen"):
            sims = ngen.generate_sim(season=season,
                                     patch=patch,
                                     array=array,
                                     seed=i,
                                     mask_patch=mask_patch)
            print(sims.nbytes / 1024. / 1024. / 1024., " GB", sims.shape,
                  sims.dtype)
        if args.extract_mask is not None:
            ivars2 = enmap.extract(ivars, eshape, ewcs)
            modlmap = enmap.modlmap(eshape, ewcs)
        else:
            ivars2 = ivars

        if args.debug and i == 0: noise.plot(pout + "_sims", sims)
        if not (args.no_write):
            ngen.save_sims(i,
                           sims,
                           season,
                           patch,
                           array,
                           coadd=coadd,
                           mask_patch=mask_patch)
        n2d_sim = noise.get_n2d_data(sims,
                                     ivars2,
                                     emask,
Example n. 25
0
def test_hdv_huok_planck():
    from orphics import lensing, io, cosmology, maps

    shape, wcs = enmap.geometry(shape=(512, 512),
                                res=2.0 * putils.arcmin,
                                pos=(0, 0))
    modlmap = enmap.modlmap(shape, wcs)
    theory = cosmology.default_theory()
    ells = np.arange(0, 3000, 1)
    ctt = theory.lCl('TT', ells)
    # ps,_ = powspec.read_camb_scalar("tests/Aug6_highAcc_CDM_scalCls.dat")
    # ells = range(ps.shape[-1])

    ## Build HuOk TT estimator
    f = s.Ldl1 * s.e('uC_T_T_l1') + s.Ldl2 * s.e('uC_T_T_l2')
    F = f / 2 / s.e('tC_T_T_l1') / s.e('tC_T_T_l2')
    expr1 = f * F
    feed_dict = {}
    feed_dict['uC_T_T'] = s.interp(ells, ctt)(modlmap)
    feed_dict['tC_T_T'] = s.interp(ells, ctt)(modlmap) + (
        33. * np.pi / 180. / 60.)**2. / s.gauss_beam(modlmap, 7.0)**2.
    tellmin = 10
    tellmax = 3000
    xmask = s.mask_kspace(shape, wcs, lmin=tellmin, lmax=tellmax)
    integral = s.integrate(shape,
                           wcs,
                           feed_dict,
                           expr1,
                           xmask=xmask,
                           ymask=xmask).real
    Nl = modlmap**4. / integral / 4.
    bin_edges = np.arange(10, 3000, 40)
    binner = s.bin2D(modlmap, bin_edges)
    cents, nl1d = binner.bin(Nl)

    ## Build HDV TT estimator
    F = s.Ldl1 * s.e('uC_T_T_l1') / s.e('tC_T_T_l1') / s.e('tC_T_T_l2')
    expr1 = f * F
    integral = s.integrate(shape,
                           wcs,
                           feed_dict,
                           expr1,
                           xmask=xmask,
                           ymask=xmask).real
    Nl = modlmap**4. / integral / 4.

    cents, nl1d2 = binner.bin(Nl)

    cents, nl1d3 = binner.bin(
        s.N_l_cross(shape,
                    wcs,
                    feed_dict,
                    "hu_ok",
                    "TT",
                    "hu_ok",
                    "TT",
                    xmask=xmask,
                    ymask=xmask))
    cents, nl1d4 = binner.bin(
        s.N_l_cross(shape,
                    wcs,
                    feed_dict,
                    "hdv",
                    "TT",
                    "hdv",
                    "TT",
                    xmask=xmask,
                    ymask=xmask))
    cents, nl1d5 = binner.bin(
        s.N_l(shape, wcs, feed_dict, "hu_ok", "TT", xmask=xmask, ymask=xmask))
    cents, nl1d6 = binner.bin(
        s.N_l(shape, wcs, feed_dict, "hdv", "TT", xmask=xmask, ymask=xmask))

    clkk = theory.gCl('kk', ells)
    pl = io.Plotter(xyscale='linlog')
    pl.add(cents, nl1d)
    pl.add(cents, nl1d2)
    # pl.add(cents,nl1d3)
    pl.add(cents, nl1d4)
    pl.add(cents, nl1d5)
    pl.add(cents, nl1d6)
    pl.add(ells, clkk)
    pl.done("plcomp.png")
Example n. 26
0
                                        lmax=lmax_A)
                #Leg1, Leg2, for estimator B
                LoadB = u.LoadfftedMaps(mapsObj=mapsObjB,
                                        WR=WR,
                                        ConvertingObj=C,
                                        changemap=changemap,
                                        getfft=u.fft,
                                        lmax=lmax_B)
                if i == iMin:
                    #Get shape and wcs
                    shape = LoadA.read_shape()
                    lonCenter, latCenter = 0, 0
                    shape, wcs = enmap.geometry(shape=shape,
                                                res=1. * putils.arcmin,
                                                pos=(lonCenter, latCenter))
                    modlmap = enmap.modlmap(shape, wcs)
                    #Binner
                    Binner = u.Binner(shape,
                                      wcs,
                                      lmin=10,
                                      lmax=4000,
                                      deltal=deltal,
                                      log=logmode,
                                      nBins=nlogBins)

                    feed_dict = u.Loadfeed_dict(pathlib.Path(spectra_path),
                                                field_names_A, field_names_B,
                                                modlmap)

                    #NOTE: this block should sit outside the `if i == iMin` branch,
                    #but as long as iMax = iMin + 1 it is equivalent here and makes the code a bit faster
Example n. 27
0
def test_shear():
    from orphics import lensing, io, cosmology, maps

    deg = 20.
    px = 2.0
    tellmin = 30
    tellmax = 3500
    kellmin = 10
    kellmax = 3000
    bin_width = 20
    beam_arcmin = 1.4
    noise_uk_arcmin = 7.0

    theory = cosmology.default_theory(lpad=30000)
    shape, wcs = s.rect_geometry(width_deg=deg, px_res_arcmin=px)
    flsims = lensing.FlatLensingSims(shape, wcs, theory, beam_arcmin,
                                     noise_uk_arcmin)
    kbeam = flsims.kbeam
    modlmap = enmap.modlmap(shape, wcs)
    fc = maps.FourierCalc(shape, wcs)
    n2d = (noise_uk_arcmin * np.pi / 180. / 60.)**2. / flsims.kbeam**2.
    tmask = s.mask_kspace(shape, wcs, lmin=tellmin, lmax=tellmax)
    kmask = s.mask_kspace(shape, wcs, lmin=kellmin, lmax=kellmax)
    bin_edges = np.arange(kellmin, kellmax, bin_width)
    binner = s.bin2D(modlmap, bin_edges)
    i = 0
    unlensed, kappa, lensed, beamed, noise_map, observed = flsims.get_sim(
        seed_cmb=(i, 1),
        seed_kappa=(i, 2),
        seed_noise=(i, 3),
        lens_order=5,
        return_intermediate=True)
    _, kmap, _ = fc.power2d(observed)
    pii2d, kinput, _ = fc.power2d(kappa)

    feed_dict = {}
    cltt = theory.lCl('TT', modlmap)
    feed_dict['uC_T_T'] = theory.lCl('TT', modlmap)
    feed_dict['tC_T_T'] = (cltt + n2d)
    feed_dict['X'] = kmap / kbeam
    feed_dict['Y'] = kmap / kbeam

    ells = np.arange(0, 10000, 1)
    ucltt = theory.lCl('TT', ells)
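    # duC_T_T below is the logarithmic derivative d ln C_l^TT / d ln l of the TT spectrum,
    # painted onto the 2D |l| grid; this spectral slope is what the shear estimator weights use.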
    feed_dict['duC_T_T'] = s.interp(ells,
                                    np.gradient(np.log(ucltt),
                                                np.log(ells)))(modlmap)
    sAl = s.A_l(shape, wcs, feed_dict, "shear", "TT", xmask=tmask, ymask=tmask)
    sNl = s.N_l(shape,
                wcs,
                feed_dict,
                "shear",
                "TT",
                xmask=tmask,
                ymask=tmask,
                Al=sAl)
    sukappa = s.unnormalized_quadratic_estimator(shape,
                                                 wcs,
                                                 feed_dict,
                                                 "shear",
                                                 "TT",
                                                 xmask=tmask,
                                                 ymask=tmask)
    snkappa = sAl * sukappa

    pir2d3 = fc.f2power(snkappa, kinput)
    cents, pir1d3 = binner.bin(pir2d3)
    cents, pii1d = binner.bin(pii2d)
    cents, prr1d = binner.bin(fc.f2power(snkappa, snkappa))

    cents, Nlkk3 = binner.bin(sNl)

    pl = io.Plotter(xyscale='loglog')
    pl.add(ells, theory.gCl('kk', ells))
    pl.add(cents, pii1d, color='k', lw=3)
    pl.add(cents, pir1d3, label='shear')
    pl.add(cents, prr1d)
    pl.add(cents, Nlkk3, ls=":")
    pl._ax.set_xlim(10, 3500)
    pl.done("ncomp.png")