Example #1
    def plotCls(self,saveFile,keys=None,xlimits=None,ylimits=None,transform=True,showBinnedTheory=False,scaleX='linear',scaleY='linear'):

        nsigma = 2.
        
        binCenters = self.binner.getBinCenters()

        if transform:
            # plot ell*C_ell: binned points are scaled by the bin centers, and
            # multTh=1 makes the theory prefactor (multTh*(ells-1)+1.) below equal to ells
            ylab = r"$\ell C_{\ell}$"
            mult = binCenters
            multTh = 1.
        else:
            # plot C_ell directly: multTh=0 makes the theory prefactor equal to 1
            ylab = r"$C_{\ell}$"
            mult = binCenters*0.+1.
            multTh = 0.
            
        pl = Plotter(labelX=r"$\ell$",labelY=ylab,scaleX=scaleX,scaleY=scaleY)


        
        if keys is None: keys = list(self.datas.keys())
        for key in keys:

            dat = self.datas[key]

            if dat['covmat'] is None:
                #This is a theory curve
                ells = np.array(list(range(len(dat['unbinned']))))
                if dat['isFit']:
                    ls="--"
                    lw=1
                else:
                    ls="-"
                    lw=2
                    
                base_line, = pl.add(ells,(multTh*(ells-1)+1.)*dat['unbinned'],label=dat['label'],lw=lw,ls=ls)
                if dat['isFit']:
                    pl._ax.fill_between(ells,(multTh*(ells-1)+1.)*dat['unbinned']*(1.-nsigma*dat['amp'][1]/dat['amp'][0]),(multTh*(ells-1)+1.)*dat['unbinned']*(1.+nsigma*dat['amp'][1]/dat['amp'][0]),alpha=0.3, facecolor=base_line.get_color())
                    
                if showBinnedTheory:
                    pl.add(binCenters[:len(dat['binned'])],mult[:len(dat['binned'])]*dat['binned'],
                           ls='none',marker='x',mew=2,markersize=10,label=dat['label']+' binned')
                  
            else:
                errs = np.sqrt(np.diagonal(dat['covmat']))
                print(dat['label'])
                pl.addErr(binCenters[:len(dat['binned'])],mult[:len(dat['binned'])]*dat['binned'],mult[:len(dat['binned'])]*errs,label=dat['label'],marker='o',elinewidth=2,markersize=10,mew=2,)


        for spine in pl._ax.spines.values():
            spine.set_linewidth(2.0)
        pl._ax.tick_params(which='major',width=2)
        pl._ax.tick_params(which='minor',width=2)
        pl._ax.axhline(y=0.,ls='--')
    
        if xlimits is not None:
            pl._ax.set_xlim(*xlimits)
        else:
            pl._ax.set_xlim(self.binner.bin_edges[0],self.binner.bin_edges[-1])
        if ylimits is not None: pl._ax.set_ylim(*ylimits)
        pl.legendOn(loc='lower left',labsize=10)
        pl.done(saveFile)
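
For reference, a self-contained sketch of the same kind of figure built with bare numpy/matplotlib instead of the Plotter wrapper: one fitted theory curve with a +/- 2 sigma amplitude band, plus binned points with error bars. All arrays below are synthetic placeholders, not data from this project.

import numpy as np
import matplotlib.pyplot as plt

ells = np.arange(2, 3000)
cl_theory = 1.e-5 / (ells * (ells + 1.))       # placeholder theory C_ell
amp, amp_err = 1.0, 0.05                       # e.g. from a fit like Example #2
nsigma = 2.

fig, ax = plt.subplots()
line, = ax.plot(ells, ells * cl_theory, ls="--", lw=1, label="fit theory")
ax.fill_between(ells,
                ells * cl_theory * (1. - nsigma * amp_err / amp),
                ells * cl_theory * (1. + nsigma * amp_err / amp),
                alpha=0.3, facecolor=line.get_color())

bin_centers = np.arange(100, 3000, 200)
binned = np.interp(bin_centers, ells, cl_theory)
errs = 0.1 * binned
ax.errorbar(bin_centers, bin_centers * binned, bin_centers * errs,
            ls="none", marker="o", label="data")

ax.axhline(y=0., ls="--")
ax.set_xlabel(r"$\ell$")
ax.set_ylabel(r"$\ell C_{\ell}$")
ax.legend(loc="lower left")
fig.savefig("cls_sketch.png")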
Example #2
    def fit(self,keyData,keyTheory,amplitudeRange=np.arange(0.1,2.0,0.01),debug=False,numbins=-1):
        # evaluate likelihood on a 1d grid and fit to a gaussian
        # store fit as new theory curve

        width = amplitudeRange[1]-amplitudeRange[0]
        Likelihood = lambda x: np.exp(-0.5*self.chisq(keyData,keyTheory,amp=x,numbins=numbins))
        Likes = np.array([Likelihood(x) for x in amplitudeRange])
        Likes = Likes / (Likes.sum()*width) #normalize

        ampBest,ampErr = cfit(norm.pdf,amplitudeRange,Likes,p0=[1.0,0.5])[0]

        
        
        if debug:
            fitVals = np.array([norm.pdf(x,ampBest,ampErr) for x in amplitudeRange])
            pl = Plotter()
            pl.add(amplitudeRange,Likes,label="likes")
            pl.add(amplitudeRange,fitVals,label="fit")
            pl.legendOn()
            pl.done("output/debug_coreFit.png")

        fitKey = keyData+"_fitTo_"+keyTheory
        self.datas[fitKey] = {}
        self.datas[fitKey]['covmat'] = None
        self.datas[fitKey]['binned'] = self.datas[keyTheory]['binned']*ampBest
        #self.datas[fitKey]['unbinned'] = self.datas[keyTheory]['unbinned']*ampBest
        self.datas[fitKey]['label'] = keyData+" fit to "+keyTheory+" with amp "+'{0:.2f}'.format(ampBest)+"+-"+'{0:.2f}'.format(ampErr)
        self.datas[fitKey]['amp']=(ampBest,ampErr)
        self.datas[fitKey]['isFit'] = True

        return fitKey
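
The grid-then-Gaussian-fit idea above can be reproduced standalone with numpy/scipy, assuming cfit is a curve_fit-style fitter (its call signature suggests scipy.optimize.curve_fit, but that import is not shown here). The chi-square below is a toy stand-in for self.chisq.

import numpy as np
from scipy.stats import norm
from scipy.optimize import curve_fit

def chisq(amp, amp_true=1.1, sigma=0.07):
    # toy chi-square with a minimum at amp_true
    return ((amp - amp_true) / sigma)**2

amplitudeRange = np.arange(0.1, 2.0, 0.01)
width = amplitudeRange[1] - amplitudeRange[0]
Likes = np.exp(-0.5 * chisq(amplitudeRange))
Likes = Likes / (Likes.sum() * width)          # normalize to unit area

ampBest, ampErr = curve_fit(norm.pdf, amplitudeRange, Likes, p0=[1.0, 0.5])[0]
print("amp = {0:.2f} +/- {1:.2f}".format(ampBest, abs(ampErr)))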
Example #3
        statsRecon[polComb] = getStats(listAllReconPower[polComb])
        fp = interp1d(centers,
                      statsRecon[polComb]['mean'],
                      fill_value='extrapolate')
        pl.add(ellkk, fp(ellkk) - Clkk, color=col, lw=2)  # residual: mean reconstructed power minus theory Clkk

        Nlkk2d = qest.N.Nlkk[polComb]
        ncents, npow = stats.binInAnnuli(Nlkk2d, p2d.modLMap, bin_edges)
        pl.add(ncents, npow, color=col, lw=2, ls="--")

    avgInputPower = totAllInputPower / N
    pl.add(centers, avgInputPower, color='cyan',
           lw=3)  # ,label = "input x input"

    pl.legendOn(labsize=10, loc='lower left')
    pl._ax.set_xlim(kellmin, kellmax)
    pl.done("tests/output/power.png")

    # cross compare to power of input (percent)
    pl = Plotter()

    for polComb, col in zip(polCombList, colorList):
        cross = statsCross[polComb]['mean']

        pl.add(centers, (cross - avgInputPower) * 100. / avgInputPower,
               label=polComb,
               color=col,
               lw=2)

    pl.legendOn(labsize=10, loc='upper right')
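
A small standalone note on the interpolation above: scipy's interp1d with fill_value='extrapolate' returns a callable that can also be evaluated outside the range of the bin centers, which is what lets fp(ellkk) cover the full ellkk range. The numbers below are placeholders.

import numpy as np
from scipy.interpolate import interp1d

centers = np.array([100., 300., 500.])
mean_power = np.array([1.0, 0.6, 0.4])
fp = interp1d(centers, mean_power, fill_value='extrapolate')
print(fp(50.), fp(400.), fp(700.))   # extrapolated, interpolated, extrapolated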
Example #4
print(lmap.data.shape)
myNls = NlGenerator(lmap,theory,bin_edges,gradCut=gradCut)


#polCombList = ['TT','EE','ET','TE','EB','TB']
colorList = ['red','blue','green','cyan','orange','purple']
polCombList = ['EB']
#colorList = ['red']
ellkk = np.arange(2,9000,1)
Clkk = theory.gCl("kk",ellkk)    


pl = Plotter(scaleY='log',scaleX='log')
pl.add(ellkk,4.*Clkk/2./np.pi)

for beamX in np.arange(1.5,10.,1.0):
    myNls.updateNoise(beamX,noiseT,noiseP,tellmin,tellmax,pellmin,pellmax,beamY=beamY)
    for polComb,col in zip(polCombList,colorList):
        ls,Nls = myNls.getNl(polComb=polComb,halo=halo)

        pl.add(ls,4.*Nls/2./np.pi,label=str(beamX))  # one N_L curve per beamX value

pl.legendOn(loc='lower left',labsize=10)
pl.done("output/hucomp.png")

sys.exit()
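
A note on the 4.*Clkk/2./np.pi factor used here and in later examples (my reading of the intended convention, stated as an assumption): with kappa_L = L(L+1) phi_L / 2, the convergence spectrum obeys C_L^kk = [L(L+1)]^2 C_L^phiphi / 4, so 4*Clkk/(2*pi) is the usual [L(L+1)]^2 C_L^phiphi / (2*pi) quantity shown in Hu/Okamoto-style lensing-noise comparisons. A quick numerical check:

import numpy as np

L = np.arange(2., 9000.)
clpp = 1.0 / L**4                        # placeholder lensing-potential spectrum
clkk = (L * (L + 1.))**2 * clpp / 4.     # convergence spectrum for kappa_L = L(L+1) phi_L / 2
assert np.allclose(4. * clkk / (2. * np.pi),
                   (L * (L + 1.))**2 * clpp / (2. * np.pi))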
Example #5
File: FigS8.py Project: mntw/szar
        s8now = np.mean(s81zs[np.logical_and(zrange >= zleft,
                                             zrange < zright)])
        print(lab, zleft, zright, yerr, s8now, yerr * 100. / s8now, "%")
        #s8now = np.mean(s81zs[np.logical_and(zrange>=zleft,zrange<zright)])/s81
        #yerrsq = (1./sum([1/x**2. for x in errselect]))
        #yerr = (s8now/s80mean)*np.sqrt(yerrsq/s8now**2. + yerrsq0/s80mean**2.)
        errcents.append(yerr)
        ms8.append(s8now)
        currentAxis.add_patch(
            Rectangle((zcent - xerr + pad, 1. - old_div(yerr, s8now)),
                      2 * xerr - old_div(pad, 2.),
                      2. * yerr / s8now,
                      facecolor=col,
                      alpha=1.0))
    print("=====================")
    # zero-area fill_between: its only visible effect is to add a legend entry in this color
    pl._ax.fill_between(zrange, 1., 1., label=lab, alpha=0.75, color=col)

#pl.add(zrange,s82zs/s81zs,label="$w=-0.97$",color='red',alpha=0.5)
pl.add(zrange, old_div(s81zs, s81zs), color='white', alpha=0.5,
       ls="--")  #,label="$w=-1$")

# pl.add(zrange,s82zs/s81zs/s82*s81,label="$w=-0.97$",color='red',alpha=0.5)
# pl.add(zrange,s81zs*0.+1.,label="$w=-1$",color='black',alpha=0.5,ls="--")

pl.legendOn(labsize=12, loc="lower left")
pl._ax.set_ylim(0.88, 1.12)  # res
#pl._ax.set_ylim(0.95,1.05) # fsky
#pl._ax.text(0.8,.82,"Madhavacheril et. al. in prep")
pl.done(outDir + "FigS8.pdf")
#pl.done(outDir+"s8SO.png")
Example #6
             figsize=(8, 6),
             scaleY='log')
pl._ax.set_ylim(1, 10000)
pl._ax.set_xlim(100., 5000.)
pl.add(el_ilc, cls_ilc * ellfac, color='black')
pl.add(eln, nl * ellfac2, label=r"$N_\ell$ CCATP")
pl.add(eln2, nl2 * ellfac2, label=r"$N_\ell$ SO")
pl.addErr(el_ilc, cls_ilc * ellfac, err_ilc * ellfac, label="CCATP")
pl.addErr(el_ilc2 + 10, cls_ilc2 * ellfac, err_ilc2 * ellfac, label="SO")
#pl.legend(loc='upper right',labsize=10)
pl.done(outDir + experimentName + "_cmb_cls" + constraint_tag[cf] + ".png")
ls = np.arange(2, 8000, 10)

pl = Plotter(labelX=r"$\ell$",
             labelY=r"$C_\ell \, (1 + \ell) \ell / 2\pi \, [\mu \mathrm{K}]$",
             ftsize=12,
             figsize=(8, 6),
             scaleY='log')
pl._ax.set_ylim(0.1, 1000)
pl._ax.set_xlim(100., 8000.)
pl.add(el_il, cls_il * ellfac, color='black')
pl.add(elnc, nlc * ellfac2, label=r"$N_\ell$ CCATP")
pl.add(elnc2, nlc2 * ellfac2, label=r"$N_\ell$ SO")
pl.addErr(el_il, cls_il * ellfac, err_il * ellfac, label="CCATP")
pl.addErr(el_il2 + 10, cls_il2 * ellfac, err_il2 * ellfac, label="SO")
pl.legendOn(loc='upper right', labsize=10)
pl.done(outDir + experimentName + "_y_cls" + constraint_tag[cf] + ".png")
ls = np.arange(2, 8000, 10)

#ksz = fgs.ksz_temp(ls)/ls/(ls+1.)*2.*np.pi/ cc.c['TCMBmuK']**2.
Example #7
pl = Plotter(scaleY='log',
             scaleX='log',
             labelX="$\\theta$ (arcmin)",
             labelY="$\\kappa$")
# x-axis: convert theta from radians to arcminutes
pl.add(thetas * 180. * 60. / np.pi, kappas)
pl.add(thetas * 180. * 60. / np.pi, kappas2)
pl.add(thetas * 180. * 60. / np.pi, kappas3)
pl.done("output/kappa.png")

pl = Plotter()
pl.add(thetas * 180. * 60. / np.pi, (kappas - kappas2) * 100. / kappas,
       label="numerical integration")
pl.add(thetas * 180. * 60. / np.pi, (kappas - kappas3) * 100. / kappas,
       label="analytical")
pl.legendOn()
pl.done("output/kappadiff.png")

lc = LimberCosmology(cosmoDict,
                     constDict,
                     lmax=3000,
                     pickling=True,
                     numz=100,
                     kmax=42.47,
                     nonlinear=True,
                     skipPower=True)

lc.addDeltaNz("z1", 1.)

kappasCMB = halos.kappa_nfw(thetas, z, comL, M500, c500, R500,
                            lc.kernels["cmb"]["window_z"](z))
Example #8
            #     lcents,Nlbinned = binner.bin(nPX)
            #     pl.add(lcents,Nlbinned,label=labname,ls="--")

            pl.add(ls,Nls,label=labname,ls=lines,alpha=alpha)

            LF = LensForecast()
            LF.loadKK(frange,Clkk,ls,Nls)
            sn,errs = LF.sn(snrange,fsky,"kk")
            #print(errs)
            sns.append(sn)
            print(noiseFile, " S/N ", sn)

# pl.add(frangeC,theory.lCl('EE',frangeC))
# pl.legendOn(loc='lower right',labsize = 8)
# #pl._ax.set_xlim(0,3000)
# #pl._ax.set_ylim(1.e-9,1.e-6)
# pl.done("beamVary_"+polComb+".pdf")

            
pl.add(frange,Clkk,color="black")
pl.legendOn(loc='lower right',labsize = 8)
pl._ax.set_xlim(0,3000)
pl._ax.set_ylim(1.e-9,1.e-6)
pl.done("beamVary_"+polComb+".pdf")
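
LensForecast.sn is not shown in these snippets; a common way to estimate the auto-spectrum S/N over a multipole range is the Knox-type sum sketched below, and I am assuming (not asserting) that LF.sn does something equivalent. The inputs are toy placeholders.

import numpy as np

def auto_sn(ells, clkk, nlkk, fsky):
    # (S/N)^2 = sum_L fsky * (2L+1)/2 * [C_L / (C_L + N_L)]^2
    ratio = clkk / (clkk + nlkk)
    return np.sqrt(np.sum(fsky * (2. * ells + 1.) / 2. * ratio**2))

ells = np.arange(40, 3000)
clkk = 1.e-7 / (1. + (ells / 100.)**2)           # placeholder convergence spectrum
nlkk = 5.e-8 * np.ones_like(ells, dtype=float)   # placeholder reconstruction noise
print("S/N ~", auto_sn(ells, clkk, nlkk, fsky=0.4))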
Example #9
File: FigBeam.py Project: mntw/szar
                                                overdensity=overdensity,
                                                critical=critical,
                                                atClusterZ=atClusterZ,
                                                rayleighSigmaArcmin=ray)

                print(sn * np.sqrt(1000.))
                sns.append(old_div(1., (sn * np.sqrt(1000.))))

            fgpart = ""
            mispart = ""
            if miscenter:
                mispart = ", miscentered"
                col = "C0"
            else:
                col = "C1"
            if lensName == "CMB_all":
                lenspart = "T+P"
            else:
                lenspart = "P only"
            if not doFg:
                fgpart = ", no foregrounds"
                col = "black"
                al = 0.5
            else:
                al = 1.0

            lab = lenspart + fgpart + mispart
            pl.add(beamList, sns, label=lab, ls=linestyle, alpha=al, color=col)
pl.legendOn(loc="upper left", labsize=12)
pl.done(out_dir + "FigBeam.pdf")
Example #10
        print(zleft, zright, N)
        if k == 0:
            lab = labres
        else:
            lab = None

        currentAxis.add_patch(
            Rectangle((zcent - xerr + pad, 0),
                      2 * xerr - old_div(pad, 2.),
                      N,
                      facecolor=col,
                      label=lab))  #,alpha=0.5))
        Ndict[labres].append(N)
        k += 1

pl.legendOn(labsize=12, loc='upper right')
pl._ax.set_ylim(1, 5.e4)
pl._ax.set_xlim(0., 3.)
pl.done(out_dir + "FigCountsA.pdf")

pl = Plotter(labelX="$z$",
             labelY="Ratio",
             ftsize=20,
             figsize=(6, 2),
             scaleY='log',
             thk=2,
             labsize=16)

colList = ['C0', 'C1', 'C2', 'C3', 'C4']
Nref = Ndict['S4 3.0\'']
#Nref = Ndict['S4 2.0\'']
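
For readers unfamiliar with the Rectangle-patch pattern used in this example and in FigS8.py, here is a minimal self-contained matplotlib sketch of the same idea: each redshift bin becomes a patch anchored at (zleft, 0) with width zright - zleft and height N. The bin edges and counts are placeholders.

import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle

z_edges = [0.0, 0.5, 1.0, 1.5, 2.0]
counts = [1200, 3400, 2100, 600]

fig, ax = plt.subplots()
for zleft, zright, N in zip(z_edges[:-1], z_edges[1:], counts):
    ax.add_patch(Rectangle((zleft, 0), zright - zleft, N, facecolor="C0"))
ax.set_xlim(0., 3.)
ax.set_yscale("log")
ax.set_ylim(1, 5.e4)
fig.savefig("counts_sketch.png")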
Example #11
                                   lpad=9000)
pl = Plotter(scaleY='log')

for i, filen in enumerate(fileList):

    ells, d1, Nls, d2 = np.loadtxt(filen, unpack=True)
    Nls = Nls / TCMB**2.

    if i == 0:
        pl.add(ells, theory.lCl('EE', ells), lw=2)

    lab = ""
    if "5m" in filen:
        lab = "5m"
        ls = "-"
    else:
        lab = "6m"
        ls = "--"

    if "synchGal" in filen:
        lab += "_dust_synch"
    elif "dustGal" in filen:
        lab += "_dust"
    pl.add(ells, Nls, label=lab, ls=ls)

    np.savetxt("data/colin_" + lab + ".txt",
               np.vstack((ells, Nls)).transpose())

pl.legendOn(labsize=10, loc='lower left')
pl.done("output/colin.png")
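
A minor aside on the save step above: for two 1D arrays, np.vstack((ells, Nls)).transpose() and np.column_stack((ells, Nls)) produce the same two-column array, so either spelling works.

import numpy as np

ells = np.arange(2, 6)
Nls = np.ones(4)
assert np.array_equal(np.vstack((ells, Nls)).transpose(),
                      np.column_stack((ells, Nls)))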
Example #12
    def fitAuto(self,keyData,keyTheory,amplitudeRange=np.arange(0.1,2.0,0.01),constRange=np.arange(0.1,2.0,0.01),debug=False,store=False):
        # evaluate likelihood on a 2d grid and fit to a gaussian
        # store fit as new theory curve

        width = amplitudeRange[1]-amplitudeRange[0]
        height = constRange[1]-constRange[0]
        Likelihood = lambda x,y: np.exp(-0.5*self.chisqAuto(keyData,keyTheory,amp=x,const=y))
        #Likelihood = lambda x,y: -0.5*self.chisqAuto(keyData,keyTheory,amp=x,const=y)

        Likes = np.array([[Likelihood(x,y) for x in amplitudeRange] for y in constRange])

        ampLike = np.sum(Likes,axis=0)    
        constLike = np.sum(Likes,axis=1)

        ampLike = ampLike / (ampLike.sum()*width) #normalize
        constLike = constLike / (constLike.sum()*height) #normalize
                

        ampBest,ampErr = cfit(norm.pdf,amplitudeRange,ampLike,p0=[amplitudeRange.mean(),0.1*amplitudeRange.mean()])[0]
        constBest,constErr = cfit(norm.pdf,constRange,constLike,p0=[constRange.mean(),0.1*constRange.mean()])[0]


        if debug:
            pl = Plotter()
            pl.plot2d(Likes)
            pl.done("output/like2d.png")
                        
            pl = Plotter()
            fitVals = np.array([norm.pdf(x,ampBest,ampErr) for x in amplitudeRange])
            pl.add(amplitudeRange,ampLike,label="amplikes")
            pl.add(amplitudeRange,fitVals,label="fit")
            pl.legendOn()
            pl.done("output/amplike1d.png")

            pl = Plotter()
            fitVals = np.array([norm.pdf(x,constBest,constErr) for x in constRange])
            pl.add(constRange,constLike,label="constlikes")
            pl.add(constRange,fitVals,label="fit")
            pl.legendOn()
            pl.done("output/constlike1d.png")

            #sys.exit()
            
        if not store:
            return constBest,constErr
        else:
            
            self.datas[keyData]['binned'] -= constBest
            self.datas[keyData]['unbinned'] -= constBest
            
            fitKey = keyData+"_fitTo_"+keyTheory
            self.datas[fitKey] = {}
            self.datas[fitKey]['covmat'] = None
            self.datas[fitKey]['binned'] = self.datas[keyTheory]['binned']*ampBest
            self.datas[fitKey]['unbinned'] = self.datas[keyTheory]['unbinned']*ampBest
            self.datas[fitKey]['label'] = keyData+" fit to "+keyTheory+" with amp "+'{0:.2f}'.format(ampBest)+"+-"+'{0:.2f}'.format(ampErr)
            self.datas[fitKey]['amp']=(ampBest,ampErr)
            self.datas[fitKey]['const']=(constBest,constErr)
            self.datas[fitKey]['isFit'] = True
    
            return fitKey
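
A compact numpy-only sketch of the marginalization step used in fitAuto: summing the 2D likelihood grid along one axis and dividing by (sum * grid spacing) yields a normalized 1D marginal, because the grid spacing is uniform along each axis. The separable toy likelihood below stands in for chisqAuto.

import numpy as np

amps = np.arange(0.1, 2.0, 0.01)
consts = np.arange(0.1, 2.0, 0.01)
# toy separable likelihood peaked at amp=1.2, const=0.6
Likes = np.exp(-0.5 * (((amps[None, :] - 1.2) / 0.08)**2
                       + ((consts[:, None] - 0.6) / 0.05)**2))

ampLike = Likes.sum(axis=0)                        # marginalize over const
ampLike /= ampLike.sum() * (amps[1] - amps[0])     # normalize to unit area
print(amps[np.argmax(ampLike)])                    # ~1.2, the input peak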
Example #13
####

pl = Plotter(scaleY='log')  #,scaleX='log')
pl.add(ellkk, 4. * Clkk / 2. / np.pi)

# CHECK THAT NORM MATCHES HU/OK
for polComb, col in zip(polCombList, colorList):
    if polComb == 'EB':
        lsold, Nlsold, eff = myNls.iterativeDelens(polComb, 1.0, True)
    else:
        lsold, Nlsold = myNls.getNl(polComb=polComb, halo=halo)

    # Hu & Okamoto reference curves may be stored under either ordering of the
    # polarization pair; fall back to the reversed name if the first file is missing.
    try:
        huFile = 'data/hu_' + polComb.lower() + '.csv'
        huell, hunl = np.loadtxt(huFile, unpack=True, delimiter=',')
    except OSError:
        huFile = 'data/hu_' + polComb[::-1].lower() + '.csv'
        huell, hunl = np.loadtxt(huFile, unpack=True, delimiter=',')

    pl.add(Ls,
           4. * crosses[polComb + polComb] / 2. / np.pi,
           color=col,
           label=polComb)
    #pl.add(Ls,4.*Nls[polComb]/2./np.pi,color=col,alpha=0.2)
    pl.add(lsold, 4. * Nlsold / 2. / np.pi, color=col, alpha=1.0, ls="-.")
    #pl.add(huell,hunl,ls='--',color=col)
pl.add(Ls, 4. * Nmv / 2. / np.pi, color="black", alpha=1.0)

pl.legendOn(labsize=10)
pl.done(outDir + "testbin.png")
Example #14
               bold=True)

        # File root name for Fisher derivatives

        # CLKK S/N ============================================

        # Calculate Clkk S/N
        #Clkk = fidCls[:,4]
        kellmin, kellmax = listFromConfig(Config, 'lensing', 'Lrange')
        fsky = fskyNow  #Config.getfloat(expName,'fsky')

        frange = np.arange(0, kellmax)  #np.array(range(len(Clkk)))
        Clkk = cc.theory.gCl("kk", frange)
        snrange = np.arange(kellmin, kellmax)
        LF = LensForecast()
        LF.loadKK(frange, Clkk, ls, Nls)
        sn, errs = LF.sn(snrange, fsky, "kk")
        printC("Lensing autopower S/N: " + str(sn), color="green", bold=True)

        # pl = Plotter(scaleY='log',scaleX='log')
        # pl.add(frange,Clkk)
        # pl.add(ls,Nls)
        # pl._ax.set_ylim(-max(Clkk),max(Clkk))
        # pl.done("clkk.png")

        sns.append(sn)

    pl.add(fskyList, sns, label="lknee_" + TorP + "=" + str(lknee))
pl.legendOn(loc="lower right", labsize=10)
pl.done(outDir + "sns.png")