Example #1
    def __init__(self,ells,clgg,clkg=None,clkk=None,depth_map=None,lmax=None):
        """Initialize a catalog generator

        Args:
            ells: (nells,) array specifying multipoles at which clgg,clkg,clkk are defined
            clgg: (nells,) array containing the power spectrum of the field to Poisson sample from
            clkg: (nells,) array containing the cross-spectrum with the optional second field that is not Poisson sampled (optional)
            clkk: (nells,) array containing the auto-spectrum of the optional second field that is not Poisson sampled (optional)
            depth_map: ndmap specifying the survey depth. The maximum of this array will be divided out.
            lmax: maximum multipole (optional; not used in this snippet, since the multipole grid is built from ells.max())

        """
        ls = np.arange(0,ells.max(),1)
        self.lmax = ls.max()
        clgg = maps.interp(ells,clgg)(ls)
        if clkg is not None:
            assert clkk is not None
            ncomp = 2
            clkg = maps.interp(ells,clkg)(ls)
            clkk = maps.interp(ells,clkk)(ls)
        else:
            ncomp = 1
        self.shape = (ncomp,)+depth_map.shape[-2:]
        self.wcs = depth_map.wcs
        ps = np.zeros((ncomp,ncomp,ls.size))
        ps[0,0] = clgg
        if clkg is not None:
            ps[1,1] = clkk
            ps[0,1] = clkg
            ps[1,0] = clkg
        self.depth_map = depth_map/depth_map.max()
        assert np.all(self.depth_map>=0)
        self.ps = ps
        self.ncomp = ncomp
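# The pattern maps.interp(ells, cl)(ls) used above evaluates a 1D interpolation
# of cl, tabulated at ells, on the dense grid ls. A minimal standalone sketch of
# the same idea with scipy, assuming orphics.maps.interp behaves like a
# scipy-style 1D interpolator with a fill value outside the tabulated range
# (toy numbers, for illustration only):
import numpy as np
from scipy.interpolate import interp1d

ells = np.arange(2, 4000, 20)            # coarse multipoles where clgg is tabulated
clgg = 1e-7 * (ells / 100.0) ** -1.5     # toy galaxy auto-spectrum
ls = np.arange(0, ells.max(), 1)         # dense multipole grid, as in __init__ above

clgg_dense = interp1d(ells, clgg, bounds_error=False, fill_value=0.0)(ls)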
Example #2
def get_scaled_beams(ells,lbeam,cen_nu_ghz,nus_ghz,ccor_exp=-1):
    """
    Scale a beam specified at multipoles ells with beam transfer
    factors lbeam (normalized to 1 at ell=0) and central frequency
    cen_nu_ghz onto the target frequencies nus_ghz with an exponent
    ccor_exp.

    Parameters
    ----------

    ells : array_like
        A 1d (nells,) array specifying the multipoles at which the beam
        transfer factors in lbeam are defined

    lbeam : array_like
        A 1d (nells,) array specifying beam transfer factors normalized
        such that lbeam(ell=0) = 1

    cen_nu_ghz : float
        The "central frequency" in GHz to which lbeam corresponds

    nus_ghz : array_like
        A 1d (nfreqs,) array of frequencies in GHz onto which the
        beam lbeam should be scaled

    ccor_exp : float, optional
        The exponent of the beam scaling. Defaults to -1, corresponding
        to diffraction-limited optics.

    """
    from orphics import maps
    fbnus = maps.interp(ells,lbeam[None,:],fill_value=(lbeam[0],lbeam[-1]))
    bnus = fbnus(((cen_nu_ghz/nus_ghz)**(-ccor_exp))*ells[:,None])[0].swapaxes(0,1)
    bnus = bnus / bnus[:,:1]
    return bnus
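# Hypothetical usage sketch for get_scaled_beams; the beam, FWHM and frequency
# values below are illustrative assumptions, not values from any dataset:
import numpy as np

ells = np.arange(0, 8000)
fwhm_arcmin = 1.4
sigma = np.deg2rad(fwhm_arcmin / 60.0) / np.sqrt(8.0 * np.log(2.0))
lbeam = np.exp(-0.5 * ells * (ells + 1.0) * sigma**2)   # Gaussian beam, 1 at ell=0

nus_ghz = np.array([145.0, 150.0, 155.0])
bnus = get_scaled_beams(ells, lbeam, cen_nu_ghz=150.0, nus_ghz=nus_ghz)
# bnus has shape (nfreqs, nells); the row at 150 GHz reproduces lbeam, and lower
# frequencies are more suppressed at a given ell (wider real-space beam), as
# expected for diffraction-limited optics.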
Example #3
def signal_average(cov,bin_edges=None,bin_width=40,kind=3,lmin=None,dlspace=True,return_bins=False,**kwargs):
    """
    dcov = cov * ellfact
    bin dcov in annuli
    interpolate back onto ell
    cov = dcov / ellfact
    where ellfact = ell**2 if dlspace else 1
    """
    modlmap = cov.modlmap()
    assert np.all(np.isfinite(cov))

    dcov = cov*modlmap**2. if dlspace else cov.copy()
    if lmin is None:
        minell = maps.minimum_ell(dcov.shape,dcov.wcs)
    else:
        minell = modlmap[modlmap<=lmin].max()

    if bin_edges is None: bin_edges = np.append([2],np.arange(minell,modlmap.max(),bin_width))

    binner = stats.bin2D(modlmap,bin_edges)
    cents,c1d = binner.bin(dcov)

    outcov = enmap.enmap(maps.interp(cents,c1d,kind=kind,fill_value=c1d[-1],**kwargs)(modlmap),dcov.wcs)
    with np.errstate(invalid='ignore'): outcov = outcov / modlmap**2. if dlspace else outcov
    outcov[modlmap<2] = 0
    assert np.all(np.isfinite(outcov))

    if return_bins: return cents,c1d,outcov
    else: return outcov 
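# Illustrative call on a synthetic isotropic 2D power map, assuming pixell and
# the orphics maps/stats modules used by signal_average are importable
# (geometry, spectrum and bin edges are arbitrary choices):
import numpy as np
from pixell import enmap, utils

shape, wcs = enmap.geometry(pos=(0, 0), shape=(256, 256),
                            res=2.0 * utils.arcmin, proj='plain')
modlmap = enmap.modlmap(shape, wcs)
cov2d = enmap.enmap(1.0 / (modlmap + 100.0)**2, wcs)   # smooth toy spectrum
smoothed = signal_average(cov2d, bin_edges=np.arange(100, 5000, 40))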
Example #4
def get_kbeam(qid,
              modlmap,
              sanitize=False,
              version=None,
              planck_pixwin=False,
              **kwargs):
    dmodel = sints.arrays(qid, 'data_model')
    season = sints.arrays(qid, 'season')
    region = sints.arrays(qid, 'region')
    array = sints.arrays(qid, 'array')
    freq = sints.arrays(qid, 'freq')
    dm = sints.models[dmodel]()
    gfreq = array + "_" + freq if not (is_planck(qid)) else freq
    if planck_pixwin and (qid in [
            'p01', 'p02', 'p03', 'p04', 'p05', 'p06', 'p07', 'p08'
    ]):
        nside = get_nside(qid)
        pixwin = hp.pixwin(nside=nside, pol=False)
        ls = np.arange(len(pixwin))
        assert pixwin.ndim == 1
        assert ls.size in [6144, 3072]
        pwin = maps.interp(ls, pixwin)(modlmap)
    else:
        pwin = 1.
    return dm.get_beam(modlmap,
                       season=season,
                       patch=region,
                       array=gfreq,
                       kind='normalized',
                       sanitize=sanitize,
                       version=version,
                       **kwargs) * pwin
Example #5
def get_power(map_list, ivar_list, a, b, mask, N=20):
    """
	Calculate the average coadded flattened power spectrum P_{ab} used to generate simulation for the splits.
	Inputs:
	map_list: list of source free splits
	ivar_list: list of the inverse variance maps splits
	a: 0,1,2 for I,Q,U respectively
	b:0,1,2 for I,Q,U, respectively
	N: window to smooth the power spectrum by in the rolling average.
	mask: apodizing mask

	Output:
	1D power spectrum accounted for w2 from 0 to 10000
	"""
    pmap = enmap.pixsizemap(map_list[0].shape, map_list[0].wcs)

    cl_ab = []
    n = len(map_list)
    #calculate the coadd maps
    if a != b:
        coadd_a = coadd_mapnew(map_list, ivar_list, a)
        coadd_b = coadd_mapnew(map_list, ivar_list, b)
    else:
        coadd_a = coadd_mapnew(map_list, ivar_list, a)

    for i in range(n):
        print(i)
        if a != b:
            d_a = map_list[i][a] - coadd_a
            noise_a = d_a * np.sqrt(ivar_eff(i, ivar_list) / pmap) * mask
            alm_a = cs.map2alm(noise_a, lmax=10000)
            d_b = map_list[i][b] - coadd_b
            noise_b = d_b * np.sqrt(ivar_eff(i, ivar_list) / pmap) * mask
            alm_b = cs.map2alm(noise_b, lmax=10000)
            cls = hp.alm2cl(alm_a, alm_b)
            cl_ab.append(cls)
        else:
            d_a = map_list[i][a] - coadd_a
            noise_a = d_a * np.sqrt(ivar_eff(i, ivar_list) / pmap) * mask
            print("generating alms")
            alm_a = cs.map2alm(noise_a, lmax=10000)
            cls = hp.alm2cl(alm_a)
            cl_ab.append(cls)
    cl_ab = np.array(cl_ab)
    sqrt_ivar = np.sqrt(ivar_eff(0, ivar_list) / pmap)
    mask_ivar = sqrt_ivar * 0 + 1
    mask_ivar[sqrt_ivar <= 0] = 0
    mask = mask * mask_ivar
    mask[mask <= 0] = 0
    w2 = np.sum((mask**2) * pmap) / np.pi / 4.
    power = 1 / n / (n - 1) * np.sum(cl_ab, axis=0)
    ls = np.arange(len(power))
    power[~np.isfinite(power)] = 0
    power = rolling_average(power, N)
    bins = np.arange(len(power))
    power = maps.interp(bins, power)(ls)
    return power / w2
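# rolling_average is not defined in this snippet; one plausible stand-in is a
# simple boxcar smoother. 'valid' mode shortens the array, which is consistent
# with the interpolation back onto the full ell range in the caller above:
import numpy as np

def rolling_average(arr, N):
    # Boxcar (moving-average) smoothing with window N.
    return np.convolve(arr, np.ones(N) / N, mode='valid')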
Example #6
def get_theory_dicts(nells=None, lmax=9000, grad=True):
    thloc = os.path.dirname(
        os.path.abspath(__file__)) + "/../data/" + config['theory_root']
    ls = np.arange(lmax + 1)
    ucls = {}
    tcls = {}
    theory = cosmology.loadTheorySpectraFromCAMB(thloc,
                                                 get_dimensionless=False)
    ells, gt, ge, gb, gte = np.loadtxt(f"{thloc}_camb_1.0.12_grads.dat",
                                       unpack=True,
                                       usecols=[0, 1, 2, 3, 4])
    if nells is None: nells = {'TT': 0, 'EE': 0, 'BB': 0}
    ucls['TT'] = maps.interp(ells, gt)(ls) if grad else theory.lCl('TT', ls)
    ucls['TE'] = maps.interp(ells, gte)(ls) if grad else theory.lCl('TE', ls)
    ucls['EE'] = maps.interp(ells, ge)(ls) if grad else theory.lCl('EE', ls)
    ucls['BB'] = maps.interp(ells, gb)(ls) if grad else theory.lCl('BB', ls)
    ucls['kk'] = theory.gCl('kk', ls)
    tcls['TT'] = theory.lCl('TT', ls) + nells['TT']
    tcls['TE'] = theory.lCl('TE', ls)
    tcls['EE'] = theory.lCl('EE', ls) + nells['EE']
    tcls['BB'] = theory.lCl('BB', ls) + nells['BB']
    return ucls, tcls
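# Illustrative call, assuming the module's config['theory_root'] and the CAMB
# data files it points to are in place; the noise levels below are placeholders:
import numpy as np

lmax = 3000
ls = np.arange(lmax + 1)
nltt = (10.0 * np.pi / 180.0 / 60.0)**2 * np.ones(ls.size)   # ~10 uK-arcmin white noise
ucls, tcls = get_theory_dicts(nells={'TT': nltt, 'EE': 2 * nltt, 'BB': 2 * nltt},
                              lmax=lmax)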
Example #7
def filter_map(imap):
    modlmap = imap.modlmap()
    ells = np.arange(0, 8000, 1)
    fcurve = np.exp(-(ells - 4000)**2. / 2. / 200**2.)
    return maps.filter_map(imap, maps.interp(ells, fcurve)(modlmap))
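# Example of applying the band-pass filter above to a map, assuming pixell and
# orphics.maps are available (the geometry is arbitrary); the filter keeps modes
# in a Gaussian band of width ~200 centred on ell ~ 4000:
import numpy as np
from pixell import enmap, utils

shape, wcs = enmap.geometry(pos=(0, 0), shape=(512, 512),
                            res=1.0 * utils.arcmin, proj='plain')
imap = enmap.enmap(np.random.standard_normal(shape), wcs)
fmap = filter_map(imap)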
Example #8
def build_and_save_ilc(arrays,region,version,cov_version,beam_version,
                       solutions,beams,chunk_size,
                       effective_freq,overwrite,maxval,unsanitized_beam=False,do_weights=False,
                       pa1_shift = None,
                       pa2_shift = None,
                       pa3_150_shift = None,
                       pa3_090_shift = None,
                       no_act_color_correction=False, ccor_exp = -1, 
                       isotropize=False, isotropize_width=20):

    print("Chunk size is ", chunk_size*64./8./1024./1024./1024., " GB.")
    def warn(): print("WARNING: no bandpass file found. Assuming array ",dm.c['id']," has no response to CMB, tSZ and CIB.")
    aspecs = tutils.ASpecs().get_specs
    bandpasses = not(effective_freq)
    savedir = tutils.get_save_path(version,region)
    covdir = tutils.get_save_path(cov_version,region)
    assert os.path.exists(covdir)
    if not(overwrite):
        assert not(os.path.exists(savedir)), \
       "This version already exists on disk. Please use a different version identifier."
    try: os.makedirs(savedir)
    except:
        if overwrite: pass
        else: raise


    mask = enmap.read_map(covdir+"tilec_mask.fits")
    shape,wcs = mask.shape,mask.wcs
    Ny,Nx = shape
    modlmap = enmap.modlmap(shape,wcs)



    arrays = arrays.split(',')
    narrays = len(arrays)
    kcoadds = []
    kbeams = []
    bps = []
    names = []
    lmins = []
    lmaxs = []
    shifts = []
    cfreqs = []
    lbeams = []
    ells = np.arange(0,modlmap.max())
    for i,qid in enumerate(arrays):
        dm = sints.models[sints.arrays(qid,'data_model')](region=mask,calibrated=True)
        lmin,lmax,hybrid,radial,friend,cfreq,fgroup,wrfit = aspecs(qid)
        cfreqs.append(cfreq)
        lmins.append(lmin)
        lmaxs.append(lmax)
        names.append(qid)
        if dm.name=='act_mr3':
            season,array1,array2 = sints.arrays(qid,'season'),sints.arrays(qid,'array'),sints.arrays(qid,'freq')
            array = '_'.join([array1,array2])
        elif dm.name=='planck_hybrid':
            season,patch,array = None,None,sints.arrays(qid,'freq')
        else:
            raise ValueError
        kcoadd_name = covdir + "kcoadd_%s.npy" % qid
        kmask = maps.mask_kspace(shape,wcs,lmin=lmin,lmax=lmax)
        kcoadd = enmap.enmap(np.load(kcoadd_name),wcs)
        dtype = kcoadd.dtype
        kcoadds.append(kcoadd.copy()*kmask)
        kbeam = tutils.get_kbeam(qid,modlmap,sanitize=not(unsanitized_beam),version=beam_version,planck_pixwin=True)
        if dm.name=='act_mr3':
            lbeam = tutils.get_kbeam(qid,ells,sanitize=not(unsanitized_beam),version=beam_version,planck_pixwin=False) # note: no pixel window here, but that doesn't matter since no color correction is applied for Planck
        elif dm.name=='planck_hybrid':
            lbeam = None
        else:
            raise ValueError
        lbeams.append(lbeam)
        kbeams.append(kbeam.copy())
        if bandpasses:
            try: 
                fname = dm.get_bandpass_file_name(array) 
                bps.append("data/"+fname)
                if (pa1_shift is not None) and 'PA1' in fname:
                    shifts.append(pa1_shift)
                elif (pa2_shift is not None) and 'PA2' in fname:
                    shifts.append(pa2_shift)
                elif (pa3_150_shift is not None) and ('PA3' in fname) and ('150' in fname):
                    shifts.append(pa3_150_shift)
                elif (pa3_090_shift is not None) and ('PA3' in fname) and ('090' in fname):
                    shifts.append(pa3_090_shift)
                else:
                    shifts.append(0)

            except:
                warn()
                bps.append(None)
        else:
            try: bps.append(cfreq)
            except:
                warn()
                bps.append(None)

    kcoadds = enmap.enmap(np.stack(kcoadds),wcs)



    # Read Covmat
    cov = maps.SymMat(narrays,shape[-2:])
    for aindex1 in range(narrays):
        for aindex2 in range(aindex1,narrays):
            icov = enmap.enmap(np.load(covdir+"tilec_hybrid_covariance_%s_%s.npy" % (names[aindex1],names[aindex2])),wcs)
            if isotropize:
                bin_edges = np.append([0.],np.arange(min(lmins),modlmap.max(),isotropize_width))
                binner = stats.bin2D(modlmap,bin_edges)
                ls,c1d = binner.bin(icov)
                icov = maps.interp(ls,c1d)(modlmap)
                
            if aindex1==aindex2: 
                icov[modlmap<lmins[aindex1]] = maxval
                icov[modlmap>lmaxs[aindex1]] = maxval
            cov[aindex1,aindex2] = icov
    cov.data = enmap.enmap(cov.data,wcs,copy=False)
    covfunc = lambda sel: cov.to_array(sel,flatten=True)

    assert cov.data.shape[0]==((narrays*(narrays+1))/2) # FIXME: generalize
    assert np.all(np.isfinite(cov.data))

    # Make responses
    responses = {}
    for comp in ['tSZ','CMB','CIB']:
        if bandpasses:
            if no_act_color_correction:
                responses[comp] = tfg.get_mix_bandpassed(bps, comp, bandpass_shifts=shifts)
            else:
                responses[comp] = tfg.get_mix_bandpassed(bps, comp, bandpass_shifts=shifts,
                                                         ccor_cen_nus=cfreqs, ccor_beams=lbeams, 
                                                         ccor_exps = [ccor_exp] * narrays)
        else:
            responses[comp] = tfg.get_mix(bps, comp)

    ilcgen = ilc.chunked_ilc(modlmap,np.stack(kbeams),covfunc,chunk_size,responses=responses,invert=True)

    # Initialize containers
    solutions = solutions.split(',')
    data = {}
    kcoadds = kcoadds.reshape((narrays,Ny*Nx))
    for solution in solutions:
        data[solution] = {}
        comps = solution.split('-')
        data[solution]['comps'] = comps
        if len(comps)<=2: 
            data[solution]['noise'] = enmap.zeros((Ny*Nx),wcs)
        if len(comps)==2: 
            data[solution]['cnoise'] = enmap.zeros((Ny*Nx),wcs)
        data[solution]['kmap'] = enmap.zeros((Ny*Nx),wcs,dtype=dtype) # FIXME: reduce dtype?
        if do_weights and len(comps)<=2:
            for qid in arrays:
                data[solution]['weight_%s' % qid] = enmap.zeros((Ny*Nx),wcs)
            

    for chunknum,(hilc,selchunk) in enumerate(ilcgen):
        print("ILC on chunk ", chunknum+1, " / ",int(modlmap.size/chunk_size)+1," ...")
        for solution in solutions:
            comps = data[solution]['comps']
            if len(comps)==1: # GENERALIZE
                data[solution]['noise'][selchunk] = hilc.standard_noise(comps[0])
                if do_weights: weight = hilc.standard_weight(comps[0])
                data[solution]['kmap'][selchunk] = hilc.standard_map(kcoadds[...,selchunk],comps[0])
            elif len(comps)==2:
                data[solution]['noise'][selchunk] = hilc.constrained_noise(comps[0],comps[1])
                data[solution]['cnoise'][selchunk] = hilc.cross_noise(comps[0],comps[1])
                ret = hilc.constrained_map(kcoadds[...,selchunk],comps[0],comps[1],return_weight=do_weights)
                if do_weights:
                    data[solution]['kmap'][selchunk],weight = ret
                else:
                    data[solution]['kmap'][selchunk] = ret

            elif len(comps)>2:
                data[solution]['kmap'][selchunk] = np.nan_to_num(hilc.multi_constrained_map(kcoadds[...,selchunk],comps[0],*comps[1:]))

            if len(comps)<=2 and do_weights:
                for qind,qid in enumerate(arrays):
                    data[solution]['weight_%s' % qid][selchunk] = weight[qind]


    del ilcgen,cov

    # Reshape into maps
    name_map = {'CMB':'cmb','tSZ':'comptony','CIB':'cib'}
    beams = beams.split(',')
    for solution,beam in zip(solutions,beams):
        comps = "tilec_single_tile_"+region+"_"
        comps = comps + name_map[data[solution]['comps'][0]]+"_"
        if len(data[solution]['comps'])>1: comps = comps + "deprojects_"+ '_'.join([name_map[x] for x in data[solution]['comps'][1:]]) + "_"
        comps = comps + version

        if do_weights and len(data[solution]['comps'])<=2:
            for qind,qid in enumerate(arrays):
                enmap.write_map("%s/%s_%s_weight.fits" % (savedir,comps,qid), enmap.enmap(data[solution]['weight_%s' % qid].reshape((Ny,Nx)),wcs))
            


        try:
            noise = enmap.enmap(data[solution]['noise'].reshape((Ny,Nx)),wcs)
            enmap.write_map("%s/%s_noise.fits" % (savedir,comps),noise)
        except: pass
        try:
            cnoise = enmap.enmap(data[solution]['cnoise'].reshape((Ny,Nx)),wcs)
            enmap.write_map("%s/%s_cross_noise.fits" % (savedir,comps),cnoise)
        except: pass

        ells = np.arange(0,modlmap.max(),1)
        try:
            fbeam = float(beam)
            kbeam = maps.gauss_beam(modlmap,fbeam)
            lbeam = maps.gauss_beam(ells,fbeam)
        except:
            qid = beam
            bfunc = lambda x: tutils.get_kbeam(qid,x,version=beam_version,sanitize=not(unsanitized_beam),planck_pixwin=False)
            kbeam = bfunc(modlmap)
            lbeam = bfunc(ells)

        kmap = enmap.enmap(data[solution]['kmap'].reshape((Ny,Nx)),wcs)
        smap = enmap.ifft(kbeam*kmap,normalize='phys').real
        enmap.write_map("%s/%s.fits" % (savedir,comps),smap)
        io.save_cols("%s/%s_beam.txt" % (savedir,comps),(ells,lbeam),header="ell beam")


    enmap.write_map(savedir+"/tilec_mask.fits",mask)
Example #9
cbfile2 = tutils.get_generic_fname(tdir,
                                   region2,
                                   solution,
                                   'cib',
                                   cversion,
                                   beam=True)

cmap1 = enmap.read_map(cfile1)
cmap2 = enmap.read_map(cfile2)
dmap1 = enmap.read_map(dfile1)
dmap2 = enmap.read_map(dfile2)
modlmap1 = cmap1.modlmap()
modlmap2 = cmap2.modlmap()

lsy1, by1 = np.loadtxt(ybfile1, unpack=True)
by12d = maps.interp(lsy1, by1)(modlmap1)
lsc1, bc1 = np.loadtxt(cbfile1, unpack=True)
bc12d = maps.interp(lsc1, bc1)(modlmap1)
beam_ratio1 = bc12d / by12d
beam_ratio1[~np.isfinite(beam_ratio1)] = 0

lsy2, by2 = np.loadtxt(ybfile2, unpack=True)
by22d = maps.interp(lsy2, by2)(modlmap2)
# ls,bc2 = np.loadtxt(cbfile2,unpack=True)
bc22d = maps.interp(lsc1, bc1)(modlmap2)
beam_ratio2 = bc22d / by22d
beam_ratio2[~np.isfinite(beam_ratio2)] = 0

ymap1 = maps.filter_map(enmap.read_map(yfile1), beam_ratio1)
ymap2 = maps.filter_map(enmap.read_map(yfile2), beam_ratio2)
Example #10
def get_pol_noise(ells):
    cents, sn1d, cn1d, scn1d, enoise, bnoise = np.loadtxt("lnoises.txt",
                                                          unpack=True)
    return maps.interp(cents, enoise)(ells), maps.interp(cents, bnoise)(ells)
Example #11
        flmax = 3000
    elif (qid1=='p03' or qid2=='p03') and (qid1 not in ['p01','p02']) and (qid2 not in ['p01','p02']):
        flmax = 1000
    else:
        flmax = 300
    
    if flmax<=flmin:  #!!!!!!
        io.save_cols("%sfgcov_%s_%s.txt" % (opath,qid1,qid2),(fells,fells*0))
    else:


        print("Rank %d doing task %d for array %s x %s with lmin %d and lmax %d ..." % (rank,task,qids[0],qids[1],flmin,flmax))

        # ERROR CALC
        c11 = stheory.get_theory_cls(f1,f1,a_cmb=1,a_gal=0.8) 
        n11 = maps.interp(ncents,n1d1)(ccents)/fbeam1(ccents)/fbeam1(ccents)
        c22 = stheory.get_theory_cls(f2,f2,a_cmb=1,a_gal=0.8) 
        n22 = maps.interp(ncents,n1d2)(ccents)/fbeam2(ccents)/fbeam2(ccents)
        c12 = stheory.get_theory_cls(f1,f2,a_cmb=1,a_gal=0.8) 
        n12 = maps.interp(ncents,n1d)(ccents)/fbeam1(ccents)/fbeam2(ccents)
        c11[~np.isfinite(c11)] = 0
        c12[~np.isfinite(c12)] = 0
        c22[~np.isfinite(c22)] = 0
        #cbin_edges = np.arange(20,8000,20)
        cbin_edges = np.arange(20,8000,40)
        LF = cosmology.LensForecast()
        LF.loadGenericCls("11",ccents,c11,ellsNls=ccents,Nls=n11)        
        LF.loadGenericCls("22",ccents,c22,ellsNls=ccents,Nls=n22)        
        LF.loadGenericCls("12",ccents,c12,ellsNls=ccents,Nls=n12)         
        if region=='deep56':
            fsky = 500./41252.
Example #12
clkk = lc.getCl("cmb","cmb")
clkg = lc.getCl("cmb","g")
clgg = lc.getCl("g","g")

#theory = cosmology.default_theory()
#clkk = theory.gCl('kk',ells)

ledges = np.arange(80,1000,40)

for region in ['deep56','boss']:
    dmask = sints.get_act_mr3_crosslinked_mask(region=region)
    bmask = maps.binary_mask(dmask,0.95)
    fsky = dmask.area()/np.pi/4. * (bmask.sum()/bmask.size)

    pl = io.Plotter(xyscale='linlog',xlabel='l',ylabel='C')
    pl.add(ells,clkk)
    ls,nls = np.loadtxt("lensing_noise_act_planck_ilc_all_%s.txt" % region,unpack=True)
    pl.add(ls,nls)
    oells,onls = np.loadtxt("cl_K_ilc_noszK_ilc_noszs14&15_%s.txt" % region , unpack=True,usecols=[0,1])
    pl.add(oells,onls-maps.interp(ells,clkk)(oells),ls='--')
    pl.done("lensnoise_%s.png" % region)

    lf = cosmology.LensForecast()
    lf.loadKK(ells,clkk,ls,nls)
    lf.loadKG(ells,clkg)
    lf.loadGG(ells,clgg,ngal=0.026)
    #lf.loadGG(ells,clgg,ngal=0.1)
    snr = lf.sn(ledges,fsky,'kk')[0]
    snr_kg = lf.sn(ledges,fsky,'kg')[0]
    print(fsky*41252,snr,snr_kg)
Example #13
    "WORK"] + "/data/depot/tilec/v1.2.0_20200324/map_v1.2.0_%s_%s/tilec_single_tile_%s_cmb_map_v1.2.0_%s_beam.txt" % (
        cversion, region, region, cversion)
yfile2 = os.environ[
    "WORK"] + "/data/depot/tilec/v1.2.0_20200324/map_v1.2.0_%s_%s/tilec_single_tile_%s_cmb_map_v1.2.0_%s.fits" % (
        cversion, region, region, cversion)
# CMB deproj-tSZ maps
bfile3 = os.environ[
    "WORK"] + "/data/depot/tilec/v1.2.0_20200324/map_v1.2.0_%s_%s/tilec_single_tile_%s_cmb_deprojects_comptony_map_v1.2.0_%s_beam.txt" % (
        cversion, region, region, cversion)
yfile3 = os.environ[
    "WORK"] + "/data/depot/tilec/v1.2.0_20200324/map_v1.2.0_%s_%s/tilec_single_tile_%s_cmb_deprojects_comptony_map_v1.2.0_%s.fits" % (
        cversion, region, region, cversion)

# Reconvolve to the same beam and filter out modes < ellmin
ls, obells = np.loadtxt(bfile, unpack=True)
obeam = maps.interp(ls, obells)(modlmap)
beam_ratio = kbeam / obeam
beam_ratio[~np.isfinite(beam_ratio)] = 0
kmask = maps.mask_kspace(mask.shape, mask.wcs, lmin=lmin, lmax=30000)
# Get coadd map of reference array
kmap = enmap.fft(
    dm.get_coadd("s15", region, array, srcfree=True, ncomp=1)[0] * mask)
# Get pixel window correction
pwin = tutils.get_pixwin(mask.shape[-2:])
bpfile = "data/" + dm.get_bandpass_file_name(array)
# Apply a filter that converts array map to Compton Y units (Eq 8)
f = tfg.get_mix_bandpassed([bpfile],
                           'tSZ',
                           ccor_cen_nus=[freqs[array]],
                           ccor_beams=[lbeam])[0]
f2d = maps.interp(ells, f)(modlmap)
        if redo:
            mask = sints.get_act_mr3_crosslinked_mask(region)
            shape,wcs = mask.shape,mask.wcs

            bfile = os.environ["WORK"] + "/data/depot/tilec/v1.2.0_20200324/map_v1.2.0_%s_%s/tilec_single_tile_%s_cmb%s_map_v1.2.0_%s_beam.txt" % (cversion,region,region,method,cversion)
            yfile = os.environ["WORK"] + "/data/depot/tilec/v1.2.0_20200324/map_v1.2.0_%s_%s/tilec_single_tile_%s_cmb%s_map_v1.2.0_%s.fits" % (cversion,region,region,method,cversion)
            w2 = np.mean(mask**2.)

            als,bells = np.loadtxt(bfile,unpack=True)
            imap = enmap.read_map(yfile)
            modlmap = mask.modlmap()
            bin_edges = np.arange(20,6000,80)

            binner = stats.bin2D(modlmap,bin_edges)
            kmap = enmap.fft(imap,normalize='phys')/maps.interp(als,bells)(modlmap)*maps.gauss_beam(modlmap,dfwhm)
            kmap[~np.isfinite(kmap)] = 0
            p2d = (kmap*kmap.conj()).real / w2

            cents,p1d = binner.bin(p2d)
            io.save_cols(os.environ['WORK']+"/cpower_tilec_%s_%s.txt" % (region,method), (cents,p1d))
        else:
            cents,p1d = np.loadtxt("cpower_tilec_%s_%s.txt" % (region,method), unpack=True)

        pl.add(cents,p1d,color=col,label='This work (%s)' % region_map[region])

        if methodp=='sz':
            szlab = "SMICA (PR2)"
        elif methodp=='nosz':
            szlab = "SMICA-nosz (PR3)"
Example #15
wfilt[ls>500] = 0
wfilt[~np.isfinite(wfilt)] = 0







# Filtered alms
seed = (0,0,0)
t_alm,e_alm,b_alm = solint.get_kmap(ch,seed,lmin,lmax,filtered=True)

# Reconstruction
with bench.show("recon"):
    rkalm = qe.filter_alms(solint.get_mv_kappa(polcomb,t_alm,e_alm,b_alm),maps.interp(ls,Als[polcomb]))
hp.write_map(config['data_path']+"mbs_sim_v0.1.0_mv_lensing_map.fits",hp.alm2map(rkalm,nside),overwrite=True)
hp.write_map(config['data_path']+"mbs_sim_v0.1.0_mv_lensing_mask.fits",mask,overwrite=True)

# Filtered reconstruction
fkalm = hp.almxfl(rkalm,wfilt)
frmap = hp.alm2map(fkalm,nside=256)
rmap=hp.alm2map(rkalm,nside=256)
# Input kappa
# TODO: Does this really need to be masked?
ikalm = maps.change_alm_lmax(hp.map2alm(hp.alm2map(get_kappa_alm(0).astype(np.complex128),nside=solint.nside)*solint.mask),2*solint.nside)


# Verify input x cross
w4=np.mean(solint.mask**4)
w3 = np.mean(solint.mask**3) # Mask factors
Example #16
                                  data_comb=dcomb,
                                  version=None,
                                  sim_index=0)

dmap = enmap.read_map(dfile)
smap = enmap.read_map(sfile)
tmap = enmap.read_map(tfile)

cdmap = enmap.read_map(cdfile)
csmap = enmap.read_map(csfile)
ctmap = enmap.read_map(ctfile)

modlmap = dmap.modlmap()

ls, db = np.loadtxt(dbeam, unpack=True)
dbeam = maps.interp(ls, db)(modlmap)

ls, cdb = np.loadtxt(cdbeam, unpack=True)
cdbeam = maps.interp(ls, cdb)(modlmap)

ls, sb = np.loadtxt(sbeam, unpack=True)
sbeam = maps.interp(ls, sb)(modlmap)

ls, csb = np.loadtxt(csbeam, unpack=True)
csbeam = maps.interp(ls, csb)(modlmap)

#io.hplot(smap,"simmap")

bin_edges = np.arange(20, 6000, 20)
binner = stats.bin2D(modlmap, bin_edges)
p = lambda x: binner.bin((x * x.conj()).real)
Example #17
def reconvolve(x, ybname, fwhm):
    if not (do_reconvolve): return x
    ls, bells = np.loadtxt(ybname, unpack=True)
    beam_rat = maps.gauss_beam(modlmap, fwhm) / maps.interp(ls, bells)(modlmap)
    beam_rat[~np.isfinite(beam_rat)] = 0
    return maps.filter_map(x, beam_rat)
conc = 3.2
cc = counts.ClusterCosmology(params,skipCls=True,skipPower=True,skip_growth=True)
z = 0.7
mass = 2e14

thetas = np.geomspace(0.1,10,1000)
kappa = lensing.nfw_kappa(mass,thetas*utils.arcmin,cc,zL=z,concentration=conc,overdensity=180,critical=False,atClusterZ=False)
hthetas,hkappa = np.loadtxt("data/hdv_unfiltered.csv",unpack=True,delimiter=',')

pl = io.Plotter(xyscale='loglog', xlabel='$\\theta$ [arcmin]', ylabel='$\\kappa$')
pl.add(thetas,kappa)
pl.add(hthetas,hkappa,ls='--')
pl.done('test_uhdv.png')

pl = io.Plotter(xyscale='linlin', xlabel='$\\theta$ [arcmin]', ylabel='$\\kappa$')
pl.add(hthetas,hkappa/maps.interp(thetas,kappa)(hthetas),ls='--')
pl.hline(y=1)
pl.done('test_uhdv_ratio.png')


shape,wcs = enmap.geometry(pos=(0,0),shape=(512,512),res=0.2 * utils.arcmin,proj='plain')
kmask = maps.mask_kspace(shape,wcs,lmax=8095)

bin_edges_arcmin= np.arange(0,15,0.4)
cents,k1d = lensing.binned_nfw(mass,z,conc,cc,shape,wcs,bin_edges_arcmin,overdensity=180.,critical=False,at_cluster_z=False,kmask=kmask)

hcents,hk1d = np.loadtxt("data/hdv_filtered_kappa.csv",unpack=True,delimiter=',')

pl = io.Plotter(xyscale='linlin', xlabel='$\\theta$ [arcmin]', ylabel='$\\kappa$')
pl.add(cents*180.*60/np.pi,k1d)
pl.add(hcents,hk1d,ls='--')
Example #19
import numpy as np
import os, sys
import soapack.interfaces as sints
from enlib import bench
from actsims import noise

for season in ['s13', 's14', 's15']:
    for apatch in ['boss', 'deep1', 'deep5', 'deep6', 'deep8', 'deep56']:
        #for apatch in ['deep1','deep5','deep6','deep8','deep56','boss']:
        mask = sints.get_act_mr3_crosslinked_mask(apatch)
        ellcen = 5000
        ellsig = 1000
        modlmap = mask.modlmap()
        ells = np.arange(0, modlmap.max())
        mfilter = maps.interp(ells,
                              np.exp(-(ells - ellcen)**2. / 2. /
                                     ellsig**2.))(modlmap)
        for array in ['pa1_f150', 'pa2_f150', 'pa3_f150', 'pa3_f090']:
            dm = sints.ACTmr3(calibrated=False, region=mask)
            fname = "%s_%s_%s" % (season, apatch, array)
            try:
                splits = dm.get_splits(season=season,
                                       patch=apatch,
                                       arrays=[array],
                                       ncomp=3,
                                       srcfree=False)[0, :, :, ...]
                cmap = dm.get_coadd(season=season,
                                    patch=apatch,
                                    array=array,
                                    ncomp=3,
                                    srcfree=False)
Example #20
for task in my_tasks:

    # Load sims
    sindex = str(task).zfill(5)
    alm = maps.change_alm_lmax(
        hp.read_alm(sim_location +
                    "fullskyLensedUnabberatedCMB_alm_set00_%s.fits" % sindex,
                    hdu=(1, 2, 3)), mlmax).astype(dtype)

    # Add beam deconvolved noise
    alm = alm + np.nan_to_num(
        qe.almxfl(
            cs.rand_alm_healpy(
                ps_noise, lmax=mlmax, seed=(100, 200, task), dtype=dtype),
            1. / lbeam))
    ntt = maps.interp(ells, np.nan_to_num(ps_noise[0, 0] / lbeam**2.))
    npp = maps.interp(ells, np.nan_to_num(ps_noise[1, 1] / lbeam**2.))

    # Filter
    talm_y = qe.filter_alms(alm[0], lambda x: 1. /
                            (theory.lCl('TT', x) + ntt(x)), args.lmint,
                            args.lmaxt)
    ealm_y = qe.filter_alms(alm[1], lambda x: 1. /
                            (theory.lCl('EE', x) + npp(x)), args.lminp,
                            args.lmaxp)
    balm_y = qe.filter_alms(alm[2], lambda x: 1. /
                            (theory.lCl('BB', x) + npp(x)), args.lminp,
                            args.lmaxp)

    # Inputs
    ikalm = lensing.phi_to_kappa(
Example #21
with bench.show("flat sky AL"):
    ls,nlkks,theory,qest = lensing.lensing_noise(ells,ntt,nee,nbb, \
                                                 ellmin_t,ellmin_t,ellmin_t, \
                                                 ellmax_t,ellmax_t,ellmax_t, \
                                                 bin_edges, \
                                                 camb_theory_file_root=None, \
                                                 estimators = ['TT'], \
                                                 delens = False, \
                                                 theory=theory, \
                                                 dimensionless=False, \
                                                 unlensed_equals_lensed=True, \
                                                 grad_cut=None,width_deg=25.,px_res_arcmin=res)

binner = stats.bin2D(qest.N.modLMap, bin_edges)
cents, albinned = binner.bin(qest.AL['TT'])
Al = maps.interp(cents, albinned)(ells)
Nl = maps.interp(ls, nlkks['TT'])(ells)
lpls, lpal = np.loadtxt("data/nls_2000.txt", unpack=True)
pl = io.Plotter(yscale='log', xscale='log')
pl.add(ells, theory.gCl('kk', ells), lw=3, color='k')
pl.add(ells, Nl, ls="--")
pl.add(lpls, lpal * (lpls * (lpls + 1.))**2. / 4., ls="-.")
#pl._ax.set_ylim(1e-10,1e-6)
pl.done(io.dout_dir + "fullsky_qe_result_al.png")

dh_nls = np.nan_to_num(lpal * (lpls * (lpls + 1.))**2. / 4.)
dh_als = np.nan_to_num(dh_nls * 2. / lpls / (lpls + 1))
Al = dh_als

#### MAKE FULL-SKY LENSED SIMS
Example #22
def norm_der_cltt(solint, ch, lmin, lmax, uctt_, ucee_, ucte_, ucbb_):
    #these are the unperturbed ones
    uctt = uctt_
    ucee = ucee_
    ucte = ucte_
    ucbb = ucbb_
    #the total spectra enter through the filters F and are therefore left unperturbed
    tctt = uctt + maps.interp(ls, nells)(ells)
    tcee = ucee + maps.interp(ls, nells_P)(ells)
    tcte = ucte
    tcbb = ucbb + maps.interp(ls, nells_P)(ells)
    #onormfname = opath+"norm_lmin_%d_lmax_%d.txt" % (lmin,lmax)

    array1001 = perturbe_clist(uctt, lprime, 1.01)
    array999 = perturbe_clist(uctt, lprime, 0.99)
    N1001 = [[], [], [], [], []]  #list of lists containing tt,ee,eb,te,tb
    N0999 = [[], [], [], [], []]

    for i in range(len(array1001)):

        print(i)
        als, aAls, aal_mv_pol, aal_mv, aAl_te_hdv = qe.symlens_norm(
            array1001[i],
            tctt,
            ucee,
            tcee,
            ucte,
            tcte,
            ucbb,
            tcbb,
            lmin=lmin,
            lmax=lmax,
            plot=False)
        bls, bAls, bal_mv_pol, bal_mv, bAl_te_hdv = qe.symlens_norm(
            array999[i],
            tctt,
            ucee,
            tcee,
            ucte,
            tcte,
            ucbb,
            tcbb,
            lmin=lmin,
            lmax=lmax,
            plot=False)

        N1001[0].append(aAls['TT'])
        N0999[0].append(bAls['TT'])
        N1001[1].append(aAls['EE'])
        N0999[1].append(bAls['EE'])
        N1001[2].append(aAls['EB'])
        N0999[2].append(bAls['EB'])
        N1001[3].append(aAls['TE'])
        N0999[3].append(bAls['TE'])
        N1001[4].append(aAls['TB'])
        N0999[4].append(bAls['TB'])

    delta = diff_cl(uctt, lprime)
    keys = ['TT', 'EE', 'EB', 'TE', 'TB']

    for k in range(len(keys)):
        diff = [als]
        for i in range(len(N1001[1])):

            der = ((N1001[k][i] - N0999[k][i])) / delta[i]
            diff.append(der)
        np.savetxt(
            '/global/homes/j/jia_qu/so-lenspipe/data/n0{}_cltt.txt'.format(
                keys[k]), der)
        np.savetxt('../data/n0{}_cltt.txt'.format(keys[k]), diff)
    cmb_dir = p['data_dir']

    fells,ftt = np.loadtxt('../data/fg.dat',usecols=[0,1],unpack=True)
    ftt = ftt/fells/(fells+1.)*2.*np.pi


    fnames = [f'{cmb_dir}/fullskyLensedUnabberatedCMB_alm_cmbset00_phiset00_00000.fits',f'{cmb_dir}/../../alex/v0.4/fullskyLensedUnabberatedCMB_alm_set00_00000.fits']
    versions = ['v0.5','v0.4']

    pl = io.Plotter('rCell',ylabel='$(C_L-C^{\\rm theory}_L) / (C^{\\rm theory}_L + C^{\\rm fg}_L)$')
    pl2 = io.Plotter('Dell')

    theory = cosmology.default_theory()
    ls = np.arange(10000)
    ftt = maps.interp(fells,ftt)(ls)
    cltt = theory.lCl('TT',ls)

    for i,fname in enumerate(fnames):
        alm = hp.read_alm(fname).astype(np.complex128)
        cls = hp.alm2cl(alm)
        cls = maps.interp(np.arange(cls.size),cls)(ls)

        pl._ax.scatter(ls,(cls-cltt)/(cltt+ftt),s=1)
        pl.add([],[],label=versions[i])
        pl2.add(ls,cls,label=versions[i])

    pl._ax.set_ylim(-0.02,0.02)
    pl.hline(y=0)
    pl._ax.set_xlim(0,10000)
    pl.legend(loc='upper right')
cltt = theory.lCl('TT', ells)

mgen = maps.MapGen(shape, wcs, cltt[None, None])
noise = [10, 20]
ngen1 = maps.MapGen(shape, wcs,
                    (ells * 0 + (noise[0] * np.pi / 180. / 60.)**2.)[None,
                                                                     None])
ngen2 = maps.MapGen(shape, wcs,
                    (ells * 0 + (noise[1] * np.pi / 180. / 60.)**2.)[None,
                                                                     None])

cov = enmap.enmap(np.zeros((shape[0], shape[1], 2, 2)), wcs)
for i in range(2):
    for j in range(2):
        cov[..., i, j] = maps.interp(
            ells,
            cltt)(modlmap) + int(i == j) * (noise[i] * np.pi / 180. / 60.)**2.

cinv = np.linalg.inv(cov)
nsims = 30

np.random.seed(1)

bin_edges = np.arange(80, 3000, 40)
binner = stats.bin2D(modlmap, bin_edges)

s = stats.Stats()

gellmax = modlmap.max()
ells = np.arange(0, gellmax, 1)
ctheory = ilc.CTheory(ells)
Example #25
# load theory
camb_root = "data/cosmo2017_10K_acc3"
theory = cosmology.loadTheorySpectraFromCAMB(camb_root,get_dimensionless=False)

# ells corresponding to modes in the alms
nside = 2048
ells = np.arange(0,3*nside+1,1)
    
#lpls,lpal = np.loadtxt("data/nls_%d.txt" % lmax,unpack=True)
lpfile = "/gpfs01/astro/workarea/msyriac/data/sims/msyriac/lenspix/cosmo2017_lmax_fix_lens_lmax_%d_qest_lmax_%d_AL.txt" % (lmax+2000,lmax)
lpls,lpal = np.loadtxt(lpfile,unpack=True,usecols=[0,1])
lpal = lpal / (lpls) / (lpls+1.)

dh_nls = np.nan_to_num(lpal*(lpls*(lpls+1.))**2./4.)
dh_als = np.nan_to_num(dh_nls * 2. / lpls /(lpls+1))
Al = maps.interp(lpls,dh_als)(ells)

sim_location = "/gpfs01/astro/workarea/msyriac/data/sims/msyriac/lenspix/"
ksim_location = "/gpfs01/astro/workarea/msyriac/data/sims/msyriac/lenspix/"


#bin_edges = np.linspace(2,lmax,300)
bin_edges = np.logspace(np.log10(2),np.log10(lmax),100)
binner = stats.bin1D(bin_edges)

mstats = stats.Stats(comm)

lstr = ""
if lmax==2000: lstr = "_lmax2250"

Example #26
from pixell import enmap
import numpy as np
import os, sys

mask = enmap.read_map(
    "/scratch/r/rbond/msyriac/data/depot/tilec/map_v1.0.0_rc_joint_deep56/tilec_mask.fits"
)
modlmap = mask.modlmap()

smap = enmap.read_map(
    "/scratch/r/rbond/msyriac/data/depot/tilec/map_v1.0.0_rc_joint_deep56/tilec_single_tile_deep56_cmb_map_v1.0.0_rc_joint.fits"
)
l, b = np.loadtxt(
    "/scratch/r/rbond/msyriac/data/depot/tilec/map_v1.0.0_rc_joint_deep56/tilec_single_tile_deep56_cmb_map_v1.0.0_rc_joint_beam.txt",
    unpack=True)
sbeam = maps.interp(l, b)(modlmap)
cmap = enmap.read_map(
    "/scratch/r/rbond/msyriac/data/depot/tilec/map_v1.0.0_rc_joint_deep56/tilec_single_tile_deep56_cmb_deprojects_comptony_map_v1.0.0_rc_joint.fits"
)
l, b = np.loadtxt(
    "/scratch/r/rbond/msyriac/data/depot/tilec/map_v1.0.0_rc_joint_deep56/tilec_single_tile_deep56_cmb_deprojects_comptony_map_v1.0.0_rc_joint_beam.txt",
    unpack=True)
cbeam = maps.interp(l, b)(modlmap)
nsmap = enmap.read_map(
    "/scratch/r/rbond/msyriac/data/depot/tilec/map_v1.0.0_rc_joint_deep56/tilec_single_tile_deep56_cmb_map_v1.0.0_rc_joint_noise.fits"
)
ncmap = enmap.read_map(
    "/scratch/r/rbond/msyriac/data/depot/tilec/map_v1.0.0_rc_joint_deep56/tilec_single_tile_deep56_cmb_deprojects_comptony_map_v1.0.0_rc_joint_noise.fits"
)
crossnoise = enmap.read_map(
    "/scratch/r/rbond/msyriac/data/depot/tilec/map_v1.0.0_rc_joint_deep56/tilec_single_tile_deep56_cmb_deprojects_comptony_map_v1.0.0_rc_joint_cross_noise.fits"
Example #27
        except:
            totinputs[input_name] = res.copy()
        iis[input_name] = res

    for solution in args.solutions.split(','):
        comps = "tilec_single_tile_" + region + "_"
        comps = comps + name_map[components[solution][0]] + "_"
        if len(components[solution]) > 1:
            comps = comps + "deprojects_" + '_'.join(
                [name_map[x] for x in components[solution][1:]]) + "_"
        comps = comps + version
        fname = "%s/%s.fits" % (savedir, comps)
        imap = enmap.read_map(fname)
        ls, bells = np.loadtxt("%s/%s_beam.txt" % (savedir, comps),
                               unpack=True)
        kbeam = maps.interp(ls, bells)(modlmap)

        # rbeam = maps.gauss_beam(imap.modlmap(),10.)/kbeam
        # rbeam[~np.isfinite(rbeam)]=0
        # if task==0: io.hplot(maps.filter_map(imap,rbeam),"/scratch/r/rbond/msyriac/cmap")# !!!
        # sys.exit()

        with np.errstate(divide='ignore', invalid='ignore'):
            kmap = enmap.fft(imap, normalize='phys') / kbeam
        kmap[~np.isfinite(kmap)] = 0
        res = binner.bin(np.real(kmap * kmap.conj()))[1] / w2
        try:
            totautos[solution] = totautos[solution] + res
        except:
            totautos[solution] = res.copy()
        input_name = components[solution][0]
Example #28
def get_ilc_noise(ells):
    cents, sn1d, cn1d, scn1d, _, _ = np.loadtxt("lnoises.txt", unpack=True)
    return maps.interp(cents, sn1d)(ells), maps.interp(
        cents, cn1d)(ells), maps.interp(cents, scn1d)(ells)
Example #29
for task in my_tasks:

    # Choose a seed. This has to be varied when simulating.
    seed = (0,0,task+sindex)

    # If debugging, get unfiltered maps and plot Cls
    if task==0 and debug_cmb:
        t_alm,e_alm,b_alm = solint.get_kmap(channel,seed,lmin,lmax,filtered=False)
        tcl = hp.alm2cl(t_alm)
        ls = np.arange(tcl.size)
        pl = io.Plotter('Cell')
        pl.add(ls,tcl/w2)
        ls2,nells,nells_P = solint.get_noise_power(channel,beam_deconv=True)
        theory = cosmology.default_theory()
        pl.add(ls,theory.lCl('TT',ls) + maps.interp(ls2,nells)(ls),color='k')
        pl._ax.set_xlim(1,6000)
        pl._ax.set_ylim(1e-6,1e3)
        pl.done(f'{solenspipe.opath}/tcl.png')
        imap = enmap.downgrade(solint.alm2map(np.asarray([t_alm,e_alm,b_alm]),ncomp=3) * maps.binary_mask(mask),2)
        for i in range(3): io.hplot(imap[i],f'{solenspipe.opath}/imap_{i}',mask=0)


    with bench.show("sim"):
        # Get simulated, prepared filtered T, E, B maps, i.e. (1/(C+N) * teb_alm)
        t_alm,e_alm,b_alm = solint.get_kmap(channel,seed,lmin,lmax,filtered=True)
        # Get the reconstructed kappa map alms and filter it with the normalization
        recon_alms = qe.filter_alms(solint.get_mv_kappa(polcomb,t_alm,e_alm,b_alm),maps.interp(Als['L'],Als[polcomb]))
    
    # Subtract a meanfield if necessary
    recon_alms = recon_alms - mf_alm
Example #30
        pl.done("band_%s.png" % qid)
    

    

    if gauss_beam:
        if 'p' in qid:
            fwhm = dm.fwhms[array]
        else:
            fwhm = 1.4 * (150./cfreq)
        lbeam = maps.gauss_beam(ells,fwhm)
    else:
        lbeam = tutils.get_kbeam(qid,ells,sanitize=False,planck_pixwin=False) 

    with bench.show("beamint"):
        fbnus = maps.interp(ells,lbeam[None,:],fill_value=(lbeam[0],lbeam[-1]))
        bnus = fbnus((cfreq/nu_ghz)*ells[:,None])[0].swapaxes(0,1)
        bnus = bnus / bnus[:,:1]

        pl = io.Plotter(xlabel='l',ylabel='b')
        for i in range(bnus.shape[0]):

            if trans[i]>1e-1: pl.add(ells,bnus[i]/lbeam)
        pl.hline(y=1)
        pl._ax.set_ylim(0.8,1.2)
        pl.done("abeams_%s.png" % qid)