Example #1
    kappaStack = 0.
    inputKappaStack = 0.
    szStack = 0.

    N = numClusters

    # Accumulate the reconstructed kappa, input kappa and SZ maps over all clusters
    for i in range(N):
        print(i)

        kappa = enmap.read_map(saveName + "_kappa_" + str(i) + "_" +
                               str(snap) + ".hdf")
        inputKappaMap = enmap.read_map(saveName + "_inpkappa_" + str(i) + "_" +
                                       str(snap) + ".hdf")
        szMap = enmap.read_map(saveName + "_sz_" + str(i) + "_" + str(snap) +
                               ".hdf")

        kappaStack += kappa
        inputKappaStack += inputKappaMap
        szStack += szMap

    pl = Plotter()
    pl.plot2d(kappaStack / N)
    pl.done(outDir + "recon_" + str(snap) + ".png")

    pl = Plotter()
    pl.plot2d(inputKappaStack / N)
    pl.done(outDir + "truestack_" + str(snap) + ".png")

    pl = Plotter()
    pl.plot2d(szStack / N)
    pl.done(outDir + "szstack_" + str(snap) + ".png")
Example #2
                  noiseTX,
                  noisePX,
                  tellmin,
                  tellmax,
                  pellmin,
                  pellmax,
                  beamY=beamY,
                  noiseTY=noiseTY,
                  noisePY=noisePY)
ls, Nls = myNls.getNl(polComb=polComb, halo=halo)

# ls,Nls = np.loadtxt("../SZ_filter/data/LA_pol_Nl.txt",unpack=True,delimiter=',')

# Compare the fiducial lensing convergence power C_L^kk to the reconstruction noise N_L
ellkk = np.arange(2, 9000, 1)
Clkk = theory.gCl("kk", ellkk)
pl = Plotter(scaleY='log', scaleX='log')
pl.add(ellkk, 4. * Clkk / 2. / np.pi)
pl.add(ls, 4. * Nls / 2. / np.pi)
pl.legendOn(loc='lower left', labsize=10)
pl.done("output/nl.png")

overdensity = 200.
critical = False
atClusterZ = False

# overdensity=180.
# critical=False
# atClusterZ=False
kellmax = 8000

sn, k, std = NFWMatchedFilterSN(cc,
Example #3
#beamRange = np.arange(1.5,9.5,0.5)
#beamRange = np.arange(9.5,30.5,2.5)
#beamRange = np.arange(1.5,5.0,0.5)

# Fix the beam and scan over temperature white-noise levels
beamX = 10.0
noiseRange = np.arange(3.0, 30.0, 4.0)

swap = False

#for polComb in ['TT','EB']:
for polComb in ['EB']:

    for delens in [False, True]:
        if polComb == 'TT' and delens: continue
        pl = Plotter(scaleY='log', labelX="$L$", labelY="$C_L$")
        sns = []
        #for beamX in beamRange:
        for noiseTX in noiseRange:
            noisePX = np.sqrt(2.) * noiseTX

            myNls = NlGenerator(lmap, theory, gradCut=gradCut)

            kmax = getMax(polComb, tellmaxY, pellmaxY)

            bin_edges = np.arange(kmin, kmax, dell) + dell
            myNls.updateBins(bin_edges)

            if swap:
                tempB = beamY
                beamY = beamX
Example #4
# Read config
iniFile = "input/params.ini"
Config = SafeConfigParser()
Config.optionxform = str
Config.read(iniFile)

px = 180 * 60 / 60000.
gradCut = 60000.

expName = "DM-18arcsec"
lensName = "lensing"
ls, Nls, ellbb, dlbb, efficiency = lensNoise(Config,
                                             expName,
                                             lensName,
                                             px=px,
                                             gradCut=gradCut,
                                             bigell=gradCut,
                                             plot=True)

print(efficiency)
print(ls)
print(Nls)

from orphics.tools.io import Plotter

pl = Plotter(scaleY='log')
pl.add(ls, Nls)
pl._ax.set_ylim(1e-12, 1e-6)
#pl._ax.set_xlim(2,4000)
pl.done("nls.png")
Example #5
oml = 1. - cc.om - cc.ob - cc.omnuh2 / cc.h / cc.h
print(oml)
# cbias = np.zeros((mrange.size,zcents.size))

# for i,z in enumerate(zcents):
#     cbias[:,i] = hbias(10**mrange,zcents,cc.h,cc.om,oml)

# Evaluate the halo bias on a (mass, redshift) grid
Mg, zg = np.meshgrid(10**mrange, zcents)
cbias = hbias(Mg, zg, cc.h, cc.om, oml).T

print(cbias)
print(hb.shape)
print(cbias.shape)
#sys.exit()

pl = Plotter()
pl.plot2d(hb)
pl.done(outDir + "hb.png")

pl = Plotter()
pl.plot2d(cbias)
pl.done(outDir + "cbias.png")

ls = "-"
lab = ""
pl = Plotter(labelX="$z$", labelY="b", ftsize=14)
pl.add(zcents,
       hb[np.where(np.isclose(mrange, 14.0)), :].ravel(),
       ls=ls,
       label=lab + " 10^14 Msol/h")
pl.add(zcents,
Example #6
        SZProf = SZ_Cluster_Model(cc, clusterDict, rms_noises=noise, fwhms=beam,
                                  freqs=freq, lknee=lknee, alpha=alpha)
        Ndn = HMF.N_of_mqz_SZ(lndM * massMultiplier, qbin_edges, SZProf)

        # Difference of the up- and down-stepped counts, normalized by the step `val`
        dNdp = old_div((getNmzq(Nup, mexp_edges, z_edges, qbin_edges) -
                        getNmzq(Ndn, mexp_edges, z_edges, qbin_edges)), val)

        # Sum over the other axes to get the z, mass and q distributions
        Nz = dNdp.copy().sum(axis=-1).sum(axis=0)
        Nm = dNdp.copy().sum(axis=-1).sum(axis=-1)
        Nq = dNdp.copy().sum(axis=0).sum(axis=0)

        yNzs[key].append((val, Nz, Nm, Nq))

    pl = Plotter(labelX="$z$", labelY="$dN$")
    xstep = 0.01
    for i, val in enumerate(vals):
        assert yNzs[key][i][0] == val
        pl.add(getCents(z_edges) + ((i - old_div(len(vals), 2)) * xstep),
               yNzs[key][i][1],
               label=key + " " + str(val))
    pl.legendOn(labsize=10, loc='upper right')
    pl.done(outDir + key + "_Nz_step.png")
    pl = Plotter(labelX="$M$", labelY="$dN$")
    xstep = 0.01
    for i, val in enumerate(vals):
        assert yNzs[key][i][0] == val
        pl.add(getCents(mexp_edges) + ((i - old_div(len(vals), 2)) * xstep),
               yNzs[key][i][2],
               label=key + " " + str(val))
    pl.legendOn(labsize=10, loc='upper right')
    pl.done(outDir + key + "_Nm_step.png")
    pl = Plotter(labelX="$q$", labelY="$dN$", scaleX='log')
    xstep = 0.1
Example #7

kmin = 40
deg = 10.
px = 0.5
dell = 10
cambRoot = "data/ell28k_highacc"
theory = loadTheorySpectraFromCAMB(cambRoot, unlensedEqualsLensed=False,
                                   useTotal=False, lpad=9000)
lmap = lm.makeEmptyCEATemplate(raSizeDeg=deg, decSizeDeg=deg,
                               pixScaleXarcmin=px, pixScaleYarcmin=px)


frange = np.arange(2., 3100., 1.)
frangeC = np.arange(100., 5000., 20.)
Clkk = theory.gCl("kk", frange)

pl = Plotter(scaleY='log', labelX="$L$", labelY="$C_L$")

for polComb in ['EB']:

    #for delens,lines,alpha in zip([False,True],['-','--'],[1.0,0.5]):
    for delens, lines, alpha in zip([False], ['-'], [1.0]):
        sns = []
        for noiseFile in ['fiducial'] + glob.glob("data/colin*"):
        #for noiseFile in ['fiducial']+glob.glob("data/colin???.txt"):

            myNls = NlGenerator(lmap, theory, gradCut=gradCut)

            kmax = getMax(polComb, tellmaxY, pellmaxY)

            bin_edges = np.arange(kmin, kmax, dell) + dell
Example #8
kmin = 40

deg = 10.
px = 0.5
dell = 10

kellrange = np.arange(80., 2100., 20.)
kfrange = np.arange(80., 2100., 1.)

Clkk = theory.gCl("kk", kfrange)

cmb_bin_edges = np.arange(10, 9000, 10)

Nlmvinv = 0.
pl = Plotter(scaleY='log')
for polComb in ['TT', 'TE', 'EE', 'EB', 'TB']:
    kmax = getMax(polComb, tellmaxY, pellmaxY)
    bin_edges = np.arange(kmin, kmax, dell) + dell
    lmap = lm.makeEmptyCEATemplate(raSizeDeg=deg,
                                   decSizeDeg=deg,
                                   pixScaleXarcmin=px,
                                   pixScaleYarcmin=px)
    myNls = NlGenerator(lmap, theory, bin_edges, gradCut=gradCut)
    myNls.updateBins(bin_edges)

    nTX,nPX,nTY,nPY = myNls.updateNoise(beamX,noiseTX,noisePX,tellminX,tellmaxX, \
                      pellminX,pellmaxX,beamY=beamY,noiseTY=noiseTY, \
                      noisePY=noisePY,tellminY=tellminY,tellmaxY=tellmaxY, \
                      pellminY=pellminY,pellmaxY=pellmaxY,lkneesX=(lkneeTX,lkneePX),alphasX=(alphaTX,alphaPX), \
                                        lkneesY=(lkneeTY,lkneePY),alphasY=(alphaTY,alphaPY),lxcutTX=lxcutTX, \
Example #9
    fclbbTot = lambda x: 0.  #fclbb(x)*(1.+fgPer/100.)
    r0 = rSigma(fsky, ellBBRange, fnBBSmall, fdCls, fclbbTot, fflbb)
    printC("sigma(r) without delensing: " + str(r0), color="green", bold=True)
    rs.append(r0)
    fdlbb = cmb.noise_pad_infinity(
        interp1d(ellbb, dlbb * TCMB**2., fill_value=np.inf,
                 bounds_error=False), spellmin, spellmax)

    fclbbTot = lambda x: 0.  #fdlbb(x)+fclbb(x)*fgPer/100.

    r = rSigma(fsky, ellBBRange, fnBBSmall, fdCls, fclbbTot, fflbb)
    printC("sigma(r) with delensing: " + str(r), color="green", bold=True)
    rdelens.append(r)

outDir = os.environ['WWW'] + "plots/fixtime/" + saveName + "_"

pl = Plotter(labelX="$f_{\\mathrm{sky}}$", labelY="delensing %")
pl.add(fskyList, efficiencies)
pl.done(outDir + "efficiencies.png")

pl = Plotter(labelX="$f_{\\mathrm{sky}}$", labelY="Clkk S/N")
pl.add(fskyList, sns)
pl.done(outDir + "sns.png")

pl = Plotter(labelX="$f_{\\mathrm{sky}}$", labelY="sig(r)")
pl.add(fskyList, rs, ls="--", label="no delensing")
pl.add(fskyList, rdelens, label="with delensing")
pl.legendOn(loc='upper right', labsize=12)
pl.done(outDir + "rs.png")
Example #10
cc = ClusterCosmology(fparams, constDict, clTTFixFile=clttfile)

from matplotlib.patches import Rectangle

expList = [
    'S4-1.0-paper', 'S4-1.5-paper', 'S4-2.0-paper', 'S4-2.5-paper',
    'S4-3.0-paper'
]
labList = ['S4 1.0\'', 'S4 1.5\'', 'S4 2.0\'', 'S4 2.5\'', 'S4 3.0\'']
pad = 0.05

pl = Plotter(labelX="$z$",
             labelY="$N(z)$",
             ftsize=20,
             scaleY='log',
             figsize=(6, 4),
             thk=2,
             labsize=16)
#pl = Plotter(labelX="$z$",labelY="$N(z)$",ftsize=12)

colList = ['C0', 'C1', 'C2', 'C3', 'C4']
Ndict = {}
for expName, col, labres in zip(expList, colList, labList):

    mgrid, zgrid, siggrid = pickle.load(
        open(
            bigDataDir + "szgrid_" + expName + "_" + gridName + "_v" +
            version + ".pkl", 'rb'))

    z_edges = zgrid
Example #11
    def fitAuto(self, keyData, keyTheory,
                amplitudeRange=np.arange(0.1, 2.0, 0.01),
                constRange=np.arange(0.1, 2.0, 0.01),
                debug=False, store=False):
        # Evaluate the likelihood on a 2D (amplitude, constant) grid, fit the
        # marginalized 1D likelihoods with Gaussians, and optionally store the
        # fit as a new theory curve.

        width = amplitudeRange[1] - amplitudeRange[0]
        height = constRange[1] - constRange[0]
        Likelihood = lambda x, y: np.exp(
            -0.5 * self.chisqAuto(keyData, keyTheory, amp=x, const=y))
        #Likelihood = lambda x,y: -0.5*self.chisqAuto(keyData,keyTheory,amp=x,const=y)

        Likes = np.array([[Likelihood(x, y) for x in amplitudeRange]
                          for y in constRange])

        # Marginalize over the other parameter and normalize to unit area
        ampLike = np.sum(Likes, axis=0)
        constLike = np.sum(Likes, axis=1)

        ampLike = ampLike / (ampLike.sum() * width)
        constLike = constLike / (constLike.sum() * height)

        # Fit each marginalized likelihood with a Gaussian (norm.pdf) to get the
        # best-fit value and error
        ampBest, ampErr = cfit(norm.pdf,
                               amplitudeRange,
                               ampLike,
                               p0=[amplitudeRange.mean(),
                                   0.1 * amplitudeRange.mean()])[0]
        constBest, constErr = cfit(norm.pdf,
                                   constRange,
                                   constLike,
                                   p0=[constRange.mean(),
                                       0.1 * constRange.mean()])[0]

        if debug:
            pl = Plotter()
            pl.plot2d(Likes)
            pl.done("output/like2d.png")

            pl = Plotter()
            fitVals = np.array([norm.pdf(x, ampBest, ampErr) for x in amplitudeRange])
            pl.add(amplitudeRange, ampLike, label="amplikes")
            pl.add(amplitudeRange, fitVals, label="fit")
            pl.legendOn()
            pl.done("output/amplike1d.png")

            pl = Plotter()
            fitVals = np.array([norm.pdf(x, constBest, constErr) for x in constRange])
            pl.add(constRange, constLike, label="constlikes")
            pl.add(constRange, fitVals, label="fit")
            pl.legendOn()
            pl.done("output/constlike1d.png")

            #sys.exit()

        if not store:
            return constBest, constErr
        else:
            # Subtract the fitted constant from the data and store the
            # amplitude-scaled theory curve under a new key
            self.datas[keyData]['binned'] -= constBest
            self.datas[keyData]['unbinned'] -= constBest

            fitKey = keyData + "_fitTo_" + keyTheory
            self.datas[fitKey] = {}
            self.datas[fitKey]['covmat'] = None
            self.datas[fitKey]['binned'] = self.datas[keyTheory]['binned'] * ampBest
            self.datas[fitKey]['unbinned'] = self.datas[keyTheory]['unbinned'] * ampBest
            self.datas[fitKey]['label'] = (keyData + " fit to " + keyTheory +
                                           " with amp " + '{0:.2f}'.format(ampBest) +
                                           "+-" + '{0:.2f}'.format(ampErr))
            self.datas[fitKey]['amp'] = (ampBest, ampErr)
            self.datas[fitKey]['const'] = (constBest, constErr)
            self.datas[fitKey]['isFit'] = True

            return fitKey
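The grid-based fit in fitAuto can be exercised on its own with a toy chi-square; the sketch below (all numbers and names illustrative) evaluates the likelihood on a grid, marginalizes one axis, and fits the 1D curve with scipy's curve_fit, which cfit above presumably aliases:

import numpy as np
from scipy.stats import norm
from scipy.optimize import curve_fit as cfit

# Toy chi-square with its minimum at amp=1.2 (width 0.05) and const=0.7 (width 0.1)
chisq = lambda amp, const: ((amp - 1.2) / 0.05)**2 + ((const - 0.7) / 0.1)**2

amplitudeRange = np.arange(0.1, 2.0, 0.01)
constRange = np.arange(0.1, 2.0, 0.01)
width = amplitudeRange[1] - amplitudeRange[0]

# Likelihood on the 2D grid, marginalized over the constant and normalized to unit area
Likes = np.array([[np.exp(-0.5 * chisq(a, c)) for a in amplitudeRange]
                  for c in constRange])
ampLike = Likes.sum(axis=0)
ampLike = ampLike / (ampLike.sum() * width)

# Fit a Gaussian to the marginalized likelihood; recovers roughly (1.2, 0.05)
(ampBest, ampErr), _ = cfit(norm.pdf, amplitudeRange, ampLike,
                            p0=[amplitudeRange.mean(), 0.1])
print(ampBest, ampErr)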
Example #12
def stack_on_map(lite_map,
                 width_stamp_arcminute,
                 pix_scale,
                 ra_range,
                 dec_range,
                 catalog=None,
                 n_random_points=None):
    from skimage.transform import resize
    import orphics.tools.stats as stats

    width_stamp_degrees = width_stamp_arcminute / 60.
    Np = int(width_stamp_arcminute / pix_scale + 0.5)
    pad = int(Np / 2 + 0.5)
    print("Expected width in pixels = ", Np)

    lmap = lite_map
    stack = 0
    N = 0

    if catalog is not None:
        # `goodobjects` (a pre-selected index list), like `cutouts`, `randomstack`,
        # `ixs`, `iys`, `modRMaps`, `io` and `out_dir` used further down, is defined
        # in the enclosing script and is not part of this excerpt.
        looprange = goodobjects
        print(looprange)
        assert n_random_points is None
        random = False
    else:
        assert n_random_points is not None
        assert len(ra_range) == 2
        assert len(dec_range) == 2
        looprange = range(0, n_random_points)
        random = True

    for i in looprange:
        if random:
            ra = np.random.uniform(*ra_range)
            dec = np.random.uniform(*dec_range)
        else:
            ra = catalog[i][1]  #1 for ACT catalog 2 for SDSS and redmapper
            dec = catalog[i][2]  #2 for ACT catalog 3 for SDSS and redmapper
        ix, iy = lmap.skyToPix(ra, dec)
        if ix >= pad and ix < lmap.Nx - pad and iy >= pad and iy < lmap.Ny - pad:
            print(i)
            smap = lmap.selectSubMap(ra - width_stamp_degrees / 2.,
                                     ra + width_stamp_degrees / 2.,
                                     dec - width_stamp_degrees / 2.,
                                     dec + width_stamp_degrees / 2.)
            #cutout = zoom(smap.data.copy(),zoom=(float(Np)/smap.data.shape[0],float(Np)/smap.data.shape[1]))
            cutout = resize(smap.data.copy(), output_shape=(Np, Np))
            cutouts.append(cutout - randomstack)
            stack = stack + cutout
            xMap, yMap, modRMap, xx, yy = fmaps.getRealAttributes(smap)
            N = N + 1.
            ixs.append(ix)
            iys.append(iy)
            modRMaps.append(modRMap)
        else:
            print("skip")
    stack = stack / N - randomstack
    print(N)
    if catalog is not None:
        io.quickPlot2d(stack, out_dir + "stack.png")
    else:
        np.save('randomstamp', stack)

    dt = pix_scale
    arcmax = 20.
    thetaRange = np.arange(0., arcmax, dt)
    breal = stats.bin2D(modRMap * 180. * 60. / np.pi, thetaRange)
    cents, recons = breal.bin(stack)
    pl = Plotter(labelX='Distance from Center (arcminutes)',
                 labelY='Temperature Fluctuation ($\mu K$)',
                 ftsize=10)
    pl.add(cents, recons)
    pl._ax.axhline(y=0., ls="--", alpha=0.5)
    pl.done(out_dir + "profiles.png")
    return stack, cents, recons
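The profile step at the end (stats.bin2D followed by breal.bin) bins the stacked stamp by distance from its center, sketched here as an annular average. A rough, self-contained equivalent in plain numpy, with placeholder stamp size and pixel scale, purely for illustration:

import numpy as np

def radial_profile(stamp, pix_arcmin, arcmax=20., darc=0.5):
    # Average a square 2D stamp in annular bins of radius (in arcminutes)
    ny, nx = stamp.shape
    y, x = np.indices((ny, nx))
    r = np.hypot(x - (nx - 1) / 2., y - (ny - 1) / 2.) * pix_arcmin
    edges = np.arange(0., arcmax, darc)
    cents = 0.5 * (edges[1:] + edges[:-1])
    inds = np.digitize(r.ravel(), edges)
    prof = np.array([stamp.ravel()[inds == i].mean()
                     for i in range(1, len(edges))])
    return cents, prof

# Fake stamp with a central bump, 80x80 pixels at 0.5 arcmin/pixel
stamp = np.exp(-((np.indices((80, 80)) - 39.5)**2).sum(axis=0) / (2 * 5.**2))
cents, prof = radial_profile(stamp, pix_arcmin=0.5)
print(cents[:3], prof[:3])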
Example #13
noiseList = 5.2 * np.sqrt(fskyList / 0.4)

if TorP == "T":

    outDir = saveName + "_T_"
    lknee_list = [0, 1000, 2000, 3000, 4000, 5000]

elif TorP == "P":

    outDir = saveName + "_P_"
    lknee_list = [0, 200, 400, 600, 800, 1000]

else:
    raise ValueError("TorP must be 'T' or 'P'")

pl = Plotter(labelX="$f_{\\mathrm{sky}}$", labelY="Clkk S/N")

for lknee in lknee_list:
    efficiencies = []
    mnus = []
    sns = []
    rs = []
    rdelens = []

    for noiseNow, fskyNow in zip(noiseList, fskyList):
        # Get the lensing noise curve. To override a value from the config file
        # (e.g. to make plots varying it), change the corresponding argument from
        # None to the value you want.

        if TorP == "T":

            ls, Nls, ellbb, dlbb, efficiency, cc = lensNoise(
Example #14
mmaxes = []
zmins = []
zmaxes = []
dms = []
dzs = []

# CB_color_cycle = ['#1C110A','#E4D6A7','#E9B44C','#9B2915','#50A2A7']
# import matplotlib as mpl
# mpl.rcParams['axes.color_cycle'] = CB_color_cycle

collist = ["C0", "C1", "C3", "C4"]

mindicesList = [60, 80, 120, 160]

from orphics.tools.io import Plotter
pl = Plotter(labelX="$z$", labelY="S/N per cluster", ftsize=16, figsize=(6, 5))

for gridFile, ls, lab, outPlot in zip(gridList, ['-', '--'],
                                      ['CMB lensing', 'optical lensing'],
                                      ['cmb', 'owl']):

    filen = gridFile
    medges, zedges, errgrid = pickle.load(open(filen, 'rb'))
    M_edges = 10**medges
    M = old_div((M_edges[1:] + M_edges[:-1]), 2.)
    mexpgrid = np.log10(M)
    zgrid = old_div((zedges[1:] + zedges[:-1]), 2.)

    mmin = mexpgrid[0]
    mmax = mexpgrid[-1]
    zmin = zgrid[0]
Example #15
# Fiducial number counts
new_z_edges, N_fid = sfisher.rebinN(
    np.load(sfisher.fid_file(bigDataDir, saveId)), pzcutoff, z_edges)
N_fid = N_fid * fsky
print "Effective number of clusters: ", N_fid.sum()

from collections import OrderedDict
priorList = OrderedDict()
priorList['tau'] = 0.01
priorList['H0'] = 10.0
priorList['tau2'] = 0.0001

import os
if 'mnu' in fishName:
    pl = Plotter(labelY="$\sigma(" + paramLatexList[paramList.index("mnu")] +
                 ")$ (meV)",
                 labelX="Iteration",
                 ftsize=20)
elif 'w0' in fishName:
    pl = Plotter(labelY="$\\frac{\sigma(" +
                 paramLatexList[paramList.index("w0")] + ")}{" +
                 paramLatexList[paramList.index("w0")] + "}\%$",
                 labelX="Iteration",
                 ftsize=20)

#for doBAO in [False,True]:
for fishSection in ["fisher-" + fishName, "fisher-" + fishName + "-DESI"]:

    priorNameList = []
    priorValueList = []
    iterations = 0