Example #1
def stat_analysis(cutouts, binsize, arcmax, cents, modRMaps):
    # Radially bin each cutout (radius map converted to arcminutes) and
    # collect the resulting 1d profiles.
    profiles = []
    thetaRange = np.arange(0., arcmax, binsize)
    for cutout, modRMap in zip(cutouts, modRMaps):
        breali = bin2D(modRMap * 180. * 60. / np.pi, thetaRange)
        profiles.append(breali.bin(cutout)[1])
    statistics = stats.getStats(profiles)
    mean = statistics['mean']
    error = statistics['errmean']
    covmat = statistics['cov']
    corrcoef = stats.cov2corr(covmat)
    io.quickPlot2d(corrcoef, 'corrcoef.png')
    pl = Plotter(labelX='Distance from Center (arcminutes)',
                 labelY=r'Temperature Fluctuation ($\mu K$)',
                 ftsize=10)
    pl.add(cents, mean)
    pl.addErr(cents, mean, yerr=error)
    pl._ax.axhline(y=0., ls="--", alpha=0.5)
    pl.done(out_dir + "error.png")
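These examples treat orphics' stats.getStats as a black box that turns a list of equal-length 1d profiles into summary statistics. As a rough guide to the quantities used above ('mean', 'errmean', 'cov', and the correlation matrix from cov2corr), here is a minimal NumPy stand-in; getStats_sketch and cov2corr_sketch are hypothetical names, not the library's API.

import numpy as np

def getStats_sketch(profiles):
    # Hypothetical stand-in for stats.getStats: mean profile, error on the
    # mean per bin, and the bin-bin covariance from Nsamples realizations.
    arr = np.asarray(profiles, dtype=np.float64)   # shape (Nsamples, Nbins)
    nsamples = arr.shape[0]
    mean = arr.mean(axis=0)
    cov = np.cov(arr, rowvar=False)
    errmean = np.sqrt(np.diag(cov) / nsamples)
    return {'mean': mean, 'errmean': errmean, 'cov': cov}

def cov2corr_sketch(cov):
    # Normalize a covariance matrix into a correlation matrix.
    std = np.sqrt(np.diag(cov))
    return cov / np.outer(std, std)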
Example #2
    def get_stats(self,verbose=True):
        """
        Collect from all MPI cores and calculate statistics for
        1d measurements.
        """
        import orphics.tools.stats as stats
        import numpy as np

        if self.rank in self.loopover:
            for k,label in enumerate(self.vectors.keys()):
                self.comm.send(np.array(self.vectors[label]).shape[0], dest=self.root, tag=self.tag_start*2000+k)

            for k,label in enumerate(self.vectors.keys()):
                send_dat = np.array(self.vectors[label]).astype(np.float64)
                self.comm.Send(send_dat, dest=self.root, tag=self.tag_start+k)

        else:
            self.stats = {}
            self.numobj = {}
            for k,label in enumerate(self.vectors.keys()):
                self.numobj[label] = []
                self.numobj[label].append(np.array(self.vectors[label]).shape[0])
                for core in self.loopover: #range(1,self.numcores):
                    if verbose: print("Waiting for core ", core , " / ", self.numcores)
                    data = self.comm.recv(source=core, tag=self.tag_start*2000+k)
                    self.numobj[label].append(data)

            
            for k,label in enumerate(self.vectors.keys()):
                self.vectors[label] = np.array(self.vectors[label])
            for core in self.loopover: #range(1,self.numcores):
                if verbose: print("Waiting for core ", core , " / ", self.numcores)
                for k,label in enumerate(self.vectors.keys()):
                    expected_shape = (self.numobj[label][core],self.vectors[label].shape[1])
                    data_vessel = np.empty(expected_shape, dtype=np.float64)
                    self.comm.Recv(data_vessel, source=core, tag=self.tag_start+k)
                    self.vectors[label] = np.append(self.vectors[label],data_vessel,axis=0)

            for k,label in enumerate(self.vectors.keys()):
                self.stats[label] = stats.getStats(self.vectors[label])
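The method above uses the two flavours of mpi4py communication: lowercase comm.send/recv pickle small Python objects (each core's row count), while uppercase comm.Send/Recv move raw float64 buffers into arrays the root pre-allocates once it knows the shapes. A minimal, self-contained sketch of that gather pattern (illustrative names only, not this class) is:

from mpi4py import MPI
import numpy as np

comm = MPI.COMM_WORLD
rank, size = comm.Get_rank(), comm.Get_size()
root = 0
nbins = 4
local = np.random.randn(rank + 1, nbins)   # each core holds a (nrows, nbins) block

if rank != root:
    comm.send(local.shape[0], dest=root, tag=1)              # pickled row count
    comm.Send(local.astype(np.float64), dest=root, tag=2)    # raw float64 buffer
else:
    gathered = [local]
    for core in range(1, size):
        nrows = comm.recv(source=core, tag=1)                # learn the shape first...
        buf = np.empty((nrows, nbins), dtype=np.float64)
        comm.Recv(buf, source=core, tag=2)                   # ...then receive into it
        gathered.append(buf)
    allrows = np.concatenate(gathered, axis=0)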
Example #3
def plot_stats(cents, cont, ells, ells_pp, theory):
    if len(cont['tt']) < 3: return

    print("Calculating stats...")
    st = {}
    for spec in ['tt', 'ee', 'te', 'bb', 'pp']:
        st[spec] = stats.getStats(cont[spec])

    print("Plotting...")
    pl = io.Plotter(scaleY='log', scaleX='log')
    for spec in ['tt', 'ee', 'bb']:
        pl.add(ells, theory[spec] * ells**2., lw=2)
        pl.addErr(cents,
                  st[spec]['mean'] * cents**2.,
                  yerr=st[spec]['errmean'] * cents**2.,
                  ls="none",
                  marker="o")

    pl.done(os.environ['WORK'] + "/web/plots/clsauto.png")
    pl = io.Plotter(scaleX='log')
    for spec in ['te']:
        pl.add(ells, theory[spec] * ells**2., lw=2)
        pl.addErr(cents,
                  st[spec]['mean'] * cents**2.,
                  yerr=st[spec]['errmean'] * cents**2.,
                  ls="none",
                  marker="o")

    pl.done(os.environ['WORK'] + "/web/plots/clste.png")
    pl = io.Plotter(scaleY='log')
    for spec in ['pp']:
        pl.add(ells_pp, theory[spec], lw=2)
        pl.addErr(cents,
                  st[spec]['mean'],
                  yerr=st[spec]['errmean'],
                  ls="none",
                  marker="o")
    pl._ax.set_xlim(2, 3000)
    pl.done(os.environ['WORK'] + "/web/plots/clspp.png")
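In the plots above, the binned TT/EE/BB/TE means are scaled by cents**2 and the theory curves by ells**2, so both follow the same approximate ell^2 C_ell normalization. To compare them bin by bin one could put the theory on the bin centres with a plain interpolation, as in this sketch (assuming ells, cents, theory and st are the arrays used above):

theory_tt_at_cents = np.interp(cents, ells, theory['tt'])
residual = st['tt']['mean'] - theory_tt_at_cents
chi_per_bin = residual / st['tt']['errmean']   # rough per-bin significance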
Example #4
            comm.Recv(rcvInputPowerMat, source=job, tag=i)
            listAllCrossPower[polComb] = np.vstack(
                (listAllCrossPower[polComb], rcvInputPowerMat))
            print "Waiting for ", job, " ", polComb, " auto"
            comm.Recv(rcvInputPowerMat, source=job, tag=i + 80)
            listAllReconPower[polComb] = np.vstack(
                (listAllReconPower[polComb], rcvInputPowerMat))

    statsCross = {}
    statsRecon = {}

    pl = Plotter(scaleY='log')
    pl.add(ellkk, Clkk, color='black', lw=2)

    for polComb, col in zip(polCombList, colorList):
        statsCross[polComb] = getStats(listAllCrossPower[polComb])
        pl.addErr(centers,
                  statsCross[polComb]['mean'],
                  yerr=statsCross[polComb]['errmean'],
                  ls="none",
                  marker="o",
                  markersize=8,
                  label="recon x input " + polComb,
                  color=col,
                  mew=2,
                  elinewidth=2)

        statsRecon[polComb] = getStats(listAllReconPower[polComb])
        fp = interp1d(centers,
                      statsRecon[polComb]['mean'],
                      fill_value='extrapolate')
Example #5
            cpower = fmaps.get_simple_power_enmap(enmap1=kappa_recon,
                                                  enmap2=downk)
            cents_pwr, cclkk = dbinner_dat.bin(cpower)
            apowers[polcomb].append(aclkk)
            cpowers[polcomb].append(cclkk)

        kappa_stack[polcomb] += kappa_recon

if cluster:
    profstats = {}

    for polcomb in pol_list:
        kappa_stack[polcomb] /= Nsims
        k = kappa_stack[polcomb]
        io.quickPlot2d(k, out_dir + "kappa_recon_" + polcomb + ".png")
        profstats[polcomb] = stats.getStats(profiles[polcomb])

    pl = io.Plotter(scaleX='log')

    cents, inp_profile = binner_sim.bin(kappa_map)
    pl.add(cents, inp_profile, ls="--")
    inp_kappa = fmaps.filter_map(kappa_map,
                                 kappa_map.copy() * 0. + 1.,
                                 modlmap_sim,
                                 lowPass=kellmax,
                                 highPass=kellmin)
    inp_kappa -= inp_kappa.mean()
    cents, inp_profile = binner_sim.bin(inp_kappa)
    pl.add(cents, inp_profile, ls="-")

    down_input = enmap.downgrade(kappa_map,
Example #6
def stack_on_map(lite_map,
                 width_stamp_arcminute,
                 pix_scale,
                 ra_range,
                 dec_range,
                 catalog=None,
                 n_random_points=None):
    width_stamp_degrees = width_stamp_arcminute / 60.
    Np = int(width_stamp_arcminute / pix_scale + 0.5)  # stamp width in pixels
    pad = int(Np / 2 + 0.5)
    print("Expected width in pixels = ", Np)
    lmap = lite_map
    stack = 0
    N = 0
    profiles = []  # one radial profile per accepted stamp

    if catalog is not None:
        looprange = range(0, len(catalog))
        assert n_random_points is None
        random = False
    else:
        assert n_random_points is not None
        assert len(ra_range) == 2
        assert len(dec_range) == 2
        looprange = range(0, n_random_points)
        random = True
    print(looprange)
    for i in looprange:
        banana = True
        if random:
            ra = np.random.uniform(*ra_range)
            dec = np.random.uniform(*dec_range)
            mass = None  # no mass cut for random positions
        else:
            ra = catalog[i][1]  # 1 for ACT catalog, 2 for SDSS
            dec = catalog[i][2]  # 2 for ACT catalog, 3 for SDSS
            mass = catalog[i][10]
        for j in range(0, 2130):  # hard-coded number of point-source positions
            distance = np.sqrt((ra - RAps[j])**2 + (dec - DECps[j])**2)
            crit = 0.25
            if distance < crit:
                banana = False
                print('too close')
                break
        ix, iy = lmap.skyToPix(ra, dec)
        if pad <= ix < lmap.Nx - pad and pad <= iy < lmap.Ny - pad and banana and (mass is None or mass > 8):
            print(i)
            #print(ra,dec)
            smap = lmap.selectSubMap(ra - width_stamp_degrees / 2.,
                                     ra + width_stamp_degrees / 2.,
                                     dec - width_stamp_degrees / 2.,
                                     dec + width_stamp_degrees / 2.)
            #print (smap.data.shape)
            #cutout = zoom(smap.data.copy(),zoom=(float(Np)/smap.data.shape[0],float(Np)/smap.data.shape[1])
            cutout = resize(smap.data.copy(),
                            output_shape=(Np, Np)) - randomstack
            xMap, yMap, modRMap, xx, yy = fmaps.getRealAttributes(smap)
            dt = pix_scale
            arcmax = 20.
            thetaRange = np.arange(0., arcmax, dt)
            breali = bin2D(modRMap * 180. * 60. / np.pi, thetaRange)
            a = breali.bin(cutout)[1]
            profiles.append(a)
            io.quickPlot2d(cutout, str(i) + "cutout.png")
            #print (cutout.shape)
            stack = stack + cutout
            N = N + 1
        else:
            print("skip")
    stack = stack / N  #-randomstack
    #print(stack.shape())
    #print(smap.data.shape)
    # print(stack)
    print(N)
    statistics = stats.getStats(profiles)
    mean = statistics['mean']
    error = statistics['errmean']
    corrcoef = statistics['corr']
    covmat = statistics['covmat']
    print(mean / error)
    np.save('statistics', statistics)
    #np.save('newrandomstamp',stack)
    # io.quickPlot2d(stack,out_dir+"newACTstack.png")
    dt = pix_scale
    arcmax = 20.
    thetaRange = np.arange(0., arcmax, dt)
    breal = bin2D(modRMap * 180. * 60. / np.pi, thetaRange)
    cents, recons = breal.bin(stack)
    pl = Plotter(labelX='Distance from Center (arcminutes)',
                 labelY=r'Temperature Fluctuation ($\mu K$)',
                 ftsize=10)
    pl.add(cents, mean)
    pl.addErr(cents, mean, yerr=error)
    pl._ax.axhline(y=0., ls="--", alpha=0.5)
    pl.done(out_dir + "error.png")
    print(covmat)
    io.quickPlot2d(covmat, 'covmat.png')
    return (stack, cents, recons)
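The stacking relies on bin2D, constructed from the radius map converted to arcminutes (the 180*60/pi factor) and a set of theta bin edges; its .bin method returns bin centres and the mean of the stamp in each annulus. A hypothetical NumPy stand-in for that step (radial_bin_sketch is not the library function) might read:

import numpy as np

def radial_bin_sketch(modrmap_arcmin, theta_edges, stamp):
    # Average the stamp in annuli of the radius map (already in arcminutes)
    # and return the bin centres and the per-annulus mean.
    cents = 0.5 * (theta_edges[1:] + theta_edges[:-1])
    which = np.digitize(modrmap_arcmin.ravel(), theta_edges)
    vals = stamp.ravel()
    means = np.array([vals[which == k].mean() if np.any(which == k) else np.nan
                      for k in range(1, len(theta_edges))])
    return cents, means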
Example #7
        pl.add(cents_pwr, en0subbed, alpha=0.4, ls="--")
    except Exception:
        print("skipping")
        continue

    clkkn0pers.append((sn0subbed - en0subbed) * 100. / en0subbed)
    clkkpers.append((saclkk - eaclkk) * 100. / eaclkk)
    sdppers.append((ssdp - esdp) * 100. / esdp)
    clttpers.append((sdcltt - edcltt) * 100. / edcltt)

pl._ax.set_xlim(0, 3000)
pl._ax.set_ylim(1.e-9, 1.e-6)
pl.done("clkk.png")

clkkn0stats = stats.getStats(clkkn0pers)
clkkstats = stats.getStats(clkkpers)
sdpstats = stats.getStats(sdppers)
clttstats = stats.getStats(clttpers)

pl = io.Plotter(labelX="$L$", labelY="% diff")
pl.addErr(cents_pwr,
          clkkn0stats['mean'],
          yerr=clkkn0stats['errmean'],
          label="sdn0subbed clkk",
          ls="-")
pl.addErr(cents_pwr,
          clkkstats['mean'],
          yerr=clkkstats['errmean'],
          label="raw clkk",
          alpha=0.2,
Example #8
def getDLnMCMB(ells,Nls,clusterCosmology,log10Moverh,z,concentration,
               arcStamp,pxStamp,arc_upto,bin_width,expectedSN,
               Nclusters=1000,numSims=30,saveId=None,numPoints=1000,
               nsigma=8.,overdensity=500.,critical=True,atClusterZ=True):

    import flipper.liteMap as lm
    if saveId is not None: from orphics.tools.output import Plotter

    M = 10.**log10Moverh

    cc = clusterCosmology

    stepfilter_ellmax = max(ells)
    

    lmap = lm.makeEmptyCEATemplate(raSizeDeg=arcStamp/60., decSizeDeg=arcStamp/60.,pixScaleXarcmin=pxStamp,pixScaleYarcmin=pxStamp)

    xMap,yMap,modRMap,xx,xy = fmaps.getRealAttributes(lmap)
    lxMap,lyMap,modLMap,thetaMap,lx,ly = fmaps.getFTAttributesFromLiteMap(lmap)

    kappaMap,retR500 = NFWkappa(cc,M,concentration,z,modRMap*180.*60./np.pi,winAtLens,overdensity,critical,atClusterZ)
    finetheta = np.arange(0.01,arc_upto,0.01)
    finekappa,retR500 = NFWkappa(cc,M,concentration,z,finetheta,winAtLens,overdensity,critical,atClusterZ)
    kappaMap = fmaps.stepFunctionFilterLiteMap(kappaMap,modLMap,stepfilter_ellmax)

    generator = fmaps.GRFGen(lmap,ells,Nls)
    
    bin_edges = np.arange(0.,arc_upto,bin_width)
    binner = bin2D(modRMap*180.*60./np.pi, bin_edges)
    centers, thprof = binner.bin(kappaMap)


    if saveId is not None:
        pl = Plotter()
        pl.plot2d(kappaMap)
        pl.done("output/"+saveId+"kappa.png")

    
    expectedSNGauss = expectedSN*np.sqrt(numSims)
    sigma = 1./expectedSNGauss
    amplitudeRange = np.linspace(1.-nsigma*sigma,1.+nsigma*sigma,numPoints)

    lnLikes = 0.
    bigStamp = 0.
    for i in range(numSims):
        profiles,totstamp = getProfiles(generator,stepfilter_ellmax,kappaMap,binner,Nclusters)
        bigStamp += totstamp
        stats = getStats(profiles)
        if i==0 and (saveId is not None):
            pl = Plotter()
            pl.add(centers,thprof,lw=2,color='black')
            pl.add(finetheta,finekappa,lw=2,color='black',ls="--")
            pl.addErr(centers,stats['mean'],yerr=stats['errmean'],lw=2)
            pl._ax.set_ylim(-0.01,0.3)
            pl.done("output/"+saveId+"profile.png")

            pl = Plotter()
            pl.plot2d(totstamp)
            pl.done("output/"+saveId+"totstamp.png")


        Likes = getAmplitudeLikelihood(stats['mean'],stats['covmean'],amplitudeRange,thprof)
        lnLikes += np.log(Likes)


    width = amplitudeRange[1]-amplitudeRange[0]

    Likes = np.exp(lnLikes)
    Likes = Likes / (Likes.sum()*width) #normalize
    ampBest,ampErr = cfit(norm.pdf,amplitudeRange,Likes,p0=[1.0,0.5])[0]

    sn = ampBest/ampErr/np.sqrt(numSims)
    snAll = ampBest/ampErr
    if snAll < 5.:
        print("WARNING: ", saveId, " run with mass ", M, " and redshift ", z,
              " has overall S/N<5. Consider re-running with a greater numSims, "
              "otherwise the estimate of per-Ncluster S/N will be noisy.")

    if saveId is not None:
        Fit = np.array([np.exp(-0.5*(x-ampBest)**2./ampErr**2.) for x in amplitudeRange])
        Fit = Fit / (Fit.sum()*width) #normalize
        pl = Plotter()
        pl.add(amplitudeRange,Likes,label="like")
        pl.add(amplitudeRange,Fit,label="fit")
        pl.legendOn(loc = 'lower left')
        pl.done("output/"+saveId+"like.png")
        pl = Plotter()
        pl.plot2d(bigStamp/numSims)
        pl.done("output/"+saveId+"bigstamp.png")

        np.savetxt("data/"+saveId+"_m"+str(log10Moverh)+"_z"+str(z)+".txt",np.array([log10Moverh,z,1./sn]))
    
    return 1./sn
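The amplitude fit at the end sums the per-simulation log-likelihoods over amplitudeRange, normalizes the combined likelihood to unit area, and fits a Gaussian via cfit (apparently scipy.optimize.curve_fit applied to norm.pdf, so the two fitted parameters are the peak location and width); the per-cluster S/N then follows as ampBest/ampErr/sqrt(numSims). A compact sketch of that last step, assuming those scipy imports:

import numpy as np
from scipy.optimize import curve_fit
from scipy.stats import norm

def fit_amplitude_sketch(amplitudeRange, lnLikes, numSims):
    width = amplitudeRange[1] - amplitudeRange[0]
    likes = np.exp(lnLikes - lnLikes.max())   # max-subtraction added here for numerical safety
    likes /= likes.sum() * width              # normalize to unit area
    ampBest, ampErr = curve_fit(norm.pdf, amplitudeRange, likes, p0=[1.0, 0.5])[0]
    return ampBest / ampErr / np.sqrt(numSims)   # per-cluster S/N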
Example #9
File: mpiLens.py Project: msyriac/alhazen
            super_dumbs.append(sdp)

            n0sub = kpower - sd
            cents_pwr, n0subbed = dbinner_dat.bin(n0sub)
            n0subs.append(n0subbed)

clkk = parray_dat.clkk

astats = {}
cstats = {}

for polcomb in pol_list:
    kappa_stack[polcomb] /= Nsims
    k = kappa_stack[polcomb]
    io.quickPlot2d(k, out_dir + "kappa_recon_" + polcomb + ".png")
    astats[polcomb] = stats.getStats(apowers[polcomb])
    cstats[polcomb] = stats.getStats(cpowers[polcomb])

n0stats = stats.getStats(n0subs)

pl = io.Plotter(scaleY='log')
pl.add(fine_ells, clkk, alpha=0.2)
vals = []
for j, polcomb in enumerate(pol_list):
    vals.append((cstats[polcomb]['mean'] + cstats[polcomb]['errmean']).ravel())
    vals.append((cstats[polcomb]['mean'] - cstats[polcomb]['errmean']).ravel())
    pl.addErr(cents_pwr,
              cstats[polcomb]['mean'],
              yerr=cstats[polcomb]['errmean'],
              label=polcomb,
              ls="none",