def plotCls(self,saveFile,keys=None,xlimits=None,ylimits=None,transform=True,showBinnedTheory=False,scaleX='linear',scaleY='linear'):

    nsigma = 2.

    binCenters = self.binner.getBinCenters()

    if transform:
        ylab = "$\ell C_{\ell}$"
        mult = binCenters
        multTh = 1. #binCenters*0.+1.
    else:
        ylab = "$C_{\ell}$"
        mult = binCenters*0.+1.
        multTh = 0. #binCenters*0.

    pl = Plotter(labelX="$\ell$",labelY=ylab,scaleX=scaleX,scaleY=scaleY)

    if keys is None: keys = self.datas.keys()
    for key in keys:

        dat = self.datas[key]

        if dat['covmat'] is None:
            # This is a theory curve
            ells = np.array(range(len(dat['unbinned'])))
            if dat['isFit']:
                ls = "--"
                lw = 1
            else:
                ls = "-"
                lw = 2

            base_line, = pl.add(ells,(multTh*(ells-1)+1.)*dat['unbinned'],label=dat['label'],lw=lw,ls=ls)
            if dat['isFit']:
                pl._ax.fill_between(ells,(multTh*(ells-1)+1.)*dat['unbinned']*(1.-nsigma*dat['amp'][1]/dat['amp'][0]),
                                    (multTh*(ells-1)+1.)*dat['unbinned']*(1.+nsigma*dat['amp'][1]/dat['amp'][0]),
                                    alpha=0.3,facecolor=base_line.get_color())

            if showBinnedTheory:
                pl.add(binCenters[:len(dat['binned'])],mult[:len(dat['binned'])]*dat['binned'],
                       ls='none',marker='x',mew=2,markersize=10,label=dat['label']+' binned')

        else:
            errs = np.sqrt(np.diagonal(dat['covmat']))
            print dat['label']
            pl.addErr(binCenters[:len(dat['binned'])],mult[:len(dat['binned'])]*dat['binned'],mult[:len(dat['binned'])]*errs,
                      label=dat['label'],marker='o',elinewidth=2,markersize=10,mew=2)

    [i.set_linewidth(2.0) for i in pl._ax.spines.itervalues()]
    pl._ax.tick_params(which='major',width=2)
    pl._ax.tick_params(which='minor',width=2)
    pl._ax.axhline(y=0.,ls='--')

    if not(xlimits is None):
        pl._ax.set_xlim(*xlimits)
    else:
        pl._ax.set_xlim(self.binner.bin_edges[0],self.binner.bin_edges[-1])
    if not(ylimits is None):
        pl._ax.set_ylim(*ylimits)

    pl.legendOn(loc='lower left',labsize=10)
    pl.done(saveFile)
def fit(self,keyData,keyTheory,amplitudeRange=np.arange(0.1,2.0,0.01),debug=False,numbins=-1):
    # evaluate likelihood on a 1d grid and fit to a gaussian
    # store fit as new theory curve
    width = amplitudeRange[1]-amplitudeRange[0]
    Likelihood = lambda x: np.exp(-0.5*self.chisq(keyData,keyTheory,amp=x,numbins=numbins))
    Likes = np.array([Likelihood(x) for x in amplitudeRange])
    Likes = Likes / (Likes.sum()*width) #normalize

    ampBest,ampErr = cfit(norm.pdf,amplitudeRange,Likes,p0=[1.0,0.5])[0]

    if debug:
        fitVals = np.array([norm.pdf(x,ampBest,ampErr) for x in amplitudeRange])
        pl = Plotter()
        pl.add(amplitudeRange,Likes,label="likes")
        pl.add(amplitudeRange,fitVals,label="fit")
        pl.legendOn()
        pl.done("output/debug_coreFit.png")

    fitKey = keyData+"_fitTo_"+keyTheory
    self.datas[fitKey] = {}
    self.datas[fitKey]['covmat'] = None
    self.datas[fitKey]['binned'] = self.datas[keyTheory]['binned']*ampBest
    self.datas[fitKey]['unbinned'] = self.datas[keyTheory]['unbinned']*ampBest
    self.datas[fitKey]['label'] = keyData+" fit to "+keyTheory+" with amp "+'{0:.2f}'.format(ampBest)+"+-"+'{0:.2f}'.format(ampErr)
    self.datas[fitKey]['amp'] = (ampBest,ampErr)
    self.datas[fitKey]['isFit'] = True

    return fitKey
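# For reference, a self-contained sketch of the pattern fit() implements: evaluate
# exp(-chi^2/2) on a 1D amplitude grid, normalize it to unit area, and fit a
# Gaussian with scipy's curve_fit to read off the best-fit amplitude and its error.
# The toy chi-square below is a stand-in for self.chisq and is purely illustrative;
# in actual use, the fitKey returned by fit() can then be passed to plotCls() via
# its keys argument.
import numpy as np
from scipy.stats import norm
from scipy.optimize import curve_fit

def gaussian_fit_to_grid_likelihood(chisq_of_amp, amplitudeRange=np.arange(0.1, 2.0, 0.01)):
    width = amplitudeRange[1] - amplitudeRange[0]
    likes = np.array([np.exp(-0.5 * chisq_of_amp(a)) for a in amplitudeRange])
    likes /= likes.sum() * width  # normalize to unit area
    (ampBest, ampErr), _ = curve_fit(norm.pdf, amplitudeRange, likes, p0=[1.0, 0.5])
    return ampBest, abs(ampErr)

# toy quadratic chi-square centered on amp = 0.8 with width 0.05
print(gaussian_fit_to_grid_likelihood(lambda a: ((a - 0.8) / 0.05) ** 2))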
myCamb = CAMB("/astro/u/msyriac/software/cambRed/params_testbed.ini",seed=rank)
myCamb.setParam("accuracy_boost",1)
myCamb.setParam("redshift_file(2)","../../repos/cmb-lensing-projections/data/dndz/cmass_dndz.csv")

Hand = dndzHand(A,a,b,c)
norm = quad(Hand.dndz,0.,3.5)[0]
#print norm
distro = Hand.dndz(zlist)/norm

savemat = np.vstack((zlist,distro)).transpose()
redFile = "temp_dndz_"+str(rank)+"_.csv"
np.savetxt(redFile,savemat,delimiter=' ')

pl.add(zlist,distro)

outRoot = "dndzTest"+str(rank)
myCamb.setParam("output_root",outRoot)
myCamb.setParam("redshift_file(1)",redFile)
myCamb.call(suppress=False)
remove(redFile)

theoryFile = outRoot+"_scalCovCls.dat"
colnum = sn.getColNum(0,1,2)
norm = "lsq"
transpower = [2.,0.5]
readCls = smartCls(theoryFile)
Cls = np.array(readCls.getCol(colnum=colnum,norm=norm,transpower=transpower))
ells = readCls.ells
fileList = glob.glob("output/"+derivRoot+"*.csv")

farr = np.loadtxt("output/"+derivRoot[:-5]+"fCls.csv",delimiter=',')

arrs = {}
for fileN in fileList:
    lab = fileN[len("output/"+derivRoot):-4]
    arrs[lab] = np.loadtxt(fileN,delimiter=',')

specList = ['TT','EE','BB','TE','KK','KT']

for spec in specList:
    if spec=='BB': continue
    pls = Plotter(scaleY='log',scaleX='log')
    ind = specList.index(spec)
    for lab in arrs:
        arr = arrs[lab]
        y = arr[:,ind]**2./farr[:,ind]**2.
        if lab=='tau':
            ls = '--'
        else:
            ls = "-"
        pls.add(range(arr.shape[0]),y,label=lab,ls=ls)

    pls.legendOn(loc='upper right',labsize=8)
    pls._ax.set_xlim(20.,4000.)
    pls.done("output/d"+spec+".png")
def main(argv):

    iniFile = 'input/noise.ini'
    config = ConfigParser()
    config.read(iniFile)

    mass = 1.E14
    conc = 3.0
    z = 1.0

    #noiseT = 1.0
    #beam = 5.0
    #ellbeam = 7000.
    Nclus = 1000.
    arcupto = 10.

    mss = []

    #beams = np.arange(1.0,7.0,0.1)
    beams = [float(argv[0])]
    noiseT = float(argv[1])
    xy = argv[2]
    print beams
    #sys.exit()
    for beam in beams:

        ellbeam = 7000
        #ellbeam = int(np.sqrt(8.*np.log(2.)) / beam *60. * 180./np.pi)
        print "Ellbeam, ", ellbeam

        Nsupp = 10000.
        px = 0.1
        arc = 20.
        #xy = 'TT'
        bin_width = 10 #int(beam/px)
        Nreals = 5000
        Nbins = int(arcupto/bin_width)
        scale = px

        thetas = np.arange(bin_width*scale/2.,arcupto,scale*bin_width)

        lensedClFile = "../cmb-lensing-projections/data/TheorySpectra/ell28k_highacc_lensedCls.dat"
        unlensedClFile = "../cmb-lensing-projections/data/TheorySpectra/ell28k_highacc_scalCls.dat"

        cmbells,cltt,clee,clbb,clte = Lens.loadCls(lensedClFile)
        ucmbells,ucltt,uclee,uclte,dummy = Lens.loadCls(unlensedClFile)
        uclbb = clbb.copy()[:len(ucmbells)]
        uClsNow = [ucltt,uclee,uclbb,uclte]
        uClsFid = [ucltt,uclee,uclbb,uclte]
        lClsFid = [cltt,clee,clbb,clte]

        template = "../DerivGen/data/order5_lensedCMB_T_beam_cutout_3.fits"
        templateMap = liteMap.liteMapFromFits(template)
        Lens.makeBinfile("../cmb-lensing-projections/data/bintemp.dat",first=2,last=9000,width=20)

        lmin = 2.
        lmax = 8000.

        if xy=='mv':
            NormGen = Lens.AL(templateMap,'TT',ucmbells,uClsNow,ucmbells,uClsFid,cmbells,lClsFid,lmax,lmax,gradCut=2000.)
            NormGen.addWhiteNoise(noiseT,np.sqrt(2.)*noiseT,beam,(0,0,lmin,lmax),(lmin,lmax,lmin,lmax))
            L1,Nl1,ph = NormGen.binnedNLkk("../cmb-lensing-projections/data/bintemp.dat",inverted=False,halo=True)

            NormGen = Lens.AL(templateMap,'EB',ucmbells,uClsNow,ucmbells,uClsFid,cmbells,lClsFid,lmax,lmax,gradCut=2000.)
            NormGen.addWhiteNoise(noiseT,np.sqrt(2.)*noiseT,beam,(0,0,lmin,lmax),(lmin,lmax,lmin,lmax))
            L2,Nl2,ph = NormGen.binnedNLkk("../cmb-lensing-projections/data/bintemp.dat",inverted=False,halo=True)

            assert np.all(L1==L2)
            L = L1.copy()
            Nl = 1./(1./Nl1+1./Nl2)
        else:
            NormGen = Lens.AL(templateMap,xy,ucmbells,uClsNow,ucmbells,uClsFid,cmbells,lClsFid,lmax,lmax,gradCut=2000.)
            NormGen.addWhiteNoise(noiseT,np.sqrt(2.)*noiseT,beam,(0,0,lmin,lmax),(lmin,lmax,lmin,lmax))
            L,Nl,ph = NormGen.binnedNLkk("../cmb-lensing-projections/data/bintemp.dat",inverted=False,halo=True)

        pl = Plotter(scaleX='log',scaleY='log')
        pl.add(L,Nl)
        pl.done('stampNl.png')
        #sys.exit()

        rads = []
        print "Making stamps to determine profile covmat..."
        for i in range(Nreals):
            #print i
            lm = liteMap.makeEmptyCEATemplate(raSizeDeg=arc/60.,decSizeDeg=arc/60.,pixScaleXarcmin=px,pixScaleYarcmin=px)
            if i==0:
                #print lm.data.shape
                Npix = lm.data.shape[0]
                lfilt = stepFunctionFilterFromLiteMap(lm,ellbeam)
                kapmaker = kappaMaker(Cosmology(defaultLCDM),mass,conc,z,storeKap=False)
                kapstamp,kaprad = kapmaker.getKappaAndProfile(Npix,scale=px,beam=None,bin_width=bin_width)
                # pl = Plotter()
                # pl.plot2d(kapstamp)
                # pl.done('kappa.png')
                # sys.exit()

            lm.fillWithGaussianRandomField(L,Nl/Nsupp,bufferFactor=1)
            stamp = lm.data.copy()
            stamp = stamp+kapstamp.copy()
            stamp = np.nan_to_num(filterDataFromTemplate(stamp,lfilt))
            prof = radial_data(stamp,annulus_width=bin_width).mean
            #print prof
            rads.append(prof)

            if i%1000==0: print i

        radmeans, covMean, cov, errMean, err, corrcoef = getStats(rads,Nreals)

        thstamp = np.nan_to_num(filterDataFromTemplate(kapstamp,lfilt))
        thprof = radial_data(thstamp,annulus_width=bin_width).mean

        siginv = np.linalg.pinv(cov[:len(thetas),:len(thetas)])
        #print siginv
        #print radmeans[:len(thetas)]
        b = np.dot(siginv,radmeans[:len(thetas)])
        chisq = np.dot(radmeans[:len(thetas)],b)
        print np.sqrt(chisq*Nclus/Nsupp)
        #print radmeans
        #print err

        pl = Plotter()
        pl.addErr(thetas,radmeans[:len(thetas)],yerr=err[:len(thetas)])
        pl.add(thetas,kapmaker.kappa(thetas))
        pl._ax.set_ylim(-0.01,0.25)
        pl._ax.set_xlim(0.0,arcupto)
        pl.done('profile.png')

        pl = Plotter()
        pl.plot2d(corrcoef)
        pl.done('corrcoef.png')

        pl = Plotter()
        pl.plot2d(stamp)
        pl.done('stamp.png')

        amplitudeRange = np.arange(-1.,2.,0.01)
        width = amplitudeRange[1]-amplitudeRange[0]
        amplist = []
        print "Fitting amplitudes..."
        for i in range(Nreals):
            prof = rads[i][:len(thetas)]
            Likelihood = lambda x: np.exp(-0.5*fchisq(prof,siginv,thprof[:len(thetas)],amp=x))
            Likes = np.array([Likelihood(x) for x in amplitudeRange])
            Likes = Likes / (Likes.sum()*width) #normalize
            ampBest,ampErr = cfit(norm.pdf,amplitudeRange,Likes,p0=[1.0,0.5])[0]
            #print ampBest,ampErr
            amplist.append(ampBest)

        ms = plotstat(-1.,2.,0.01,(np.array(amplist)),"amps",fit=True,scale=np.sqrt(Nclus/Nsupp))
        mss.append(ms)

    pl = Plotter()
    pl.add(beams,mss)
    pl.done('beam.png')

    np.savetxt("output/m1beamsms"+xy+argv[0]+"_noise"+str(noiseT)+".txt",np.vstack((beams,mss)).transpose())
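# fchisq() is called above but not defined in this section. A plausible minimal
# form, assumed here for illustration only (not necessarily the author's
# implementation), is the standard Gaussian chi-square between the measured
# profile and an amplitude-scaled template profile:
#
#   chi^2(A) = (d - A*t)^T C^{-1} (d - A*t)
#
import numpy as np

def fchisq(prof, siginv, thprof, amp=1.0):
    resid = np.asarray(prof) - amp * np.asarray(thprof)
    return float(np.dot(resid, np.dot(siginv, resid)))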
def fitAuto(self,keyData,keyTheory,amplitudeRange=np.arange(0.1,2.0,0.01),constRange=np.arange(0.1,2.0,0.01),debug=False,store=False):
    # evaluate likelihood on a 2d grid and fit to a gaussian
    # store fit as new theory curve
    width = amplitudeRange[1]-amplitudeRange[0]
    height = constRange[1]-constRange[0]
    Likelihood = lambda x,y: np.exp(-0.5*self.chisqAuto(keyData,keyTheory,amp=x,const=y))
    #Likelihood = lambda x,y: -0.5*self.chisqAuto(keyData,keyTheory,amp=x,const=y)

    Likes = np.array([[Likelihood(x,y) for x in amplitudeRange] for y in constRange])

    ampLike = np.sum(Likes,axis=0)
    constLike = np.sum(Likes,axis=1)

    ampLike = ampLike / (ampLike.sum()*width) #normalize
    constLike = constLike / (constLike.sum()*height) #normalize

    ampBest,ampErr = cfit(norm.pdf,amplitudeRange,ampLike,p0=[amplitudeRange.mean(),0.1*amplitudeRange.mean()])[0]
    constBest,constErr = cfit(norm.pdf,constRange,constLike,p0=[constRange.mean(),0.1*constRange.mean()])[0]

    if debug:
        pl = Plotter()
        pl.plot2d(Likes)
        pl.done("output/like2d.png")

        pl = Plotter()
        fitVals = np.array([norm.pdf(x,ampBest,ampErr) for x in amplitudeRange])
        pl.add(amplitudeRange,ampLike,label="amplikes")
        pl.add(amplitudeRange,fitVals,label="fit")
        pl.legendOn()
        pl.done("output/amplike1d.png")

        pl = Plotter()
        fitVals = np.array([norm.pdf(x,constBest,constErr) for x in constRange])
        pl.add(constRange,constLike,label="constlikes")
        pl.add(constRange,fitVals,label="fit")
        pl.legendOn()
        pl.done("output/constlike1d.png")

        #sys.exit()

    if not(store):
        return constBest,constErr
    else:
        self.datas[keyData]['binned'] -= constBest
        self.datas[keyData]['unbinned'] -= constBest

        fitKey = keyData+"_fitTo_"+keyTheory
        self.datas[fitKey] = {}
        self.datas[fitKey]['covmat'] = None
        self.datas[fitKey]['binned'] = self.datas[keyTheory]['binned']*ampBest
        self.datas[fitKey]['unbinned'] = self.datas[keyTheory]['unbinned']*ampBest
        self.datas[fitKey]['label'] = keyData+" fit to "+keyTheory+" with amp "+'{0:.2f}'.format(ampBest)+"+-"+'{0:.2f}'.format(ampErr)
        self.datas[fitKey]['amp'] = (ampBest,ampErr)
        self.datas[fitKey]['const'] = (constBest,constErr)
        self.datas[fitKey]['isFit'] = True

        return fitKey
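# Minimal sketch of the marginalize-and-fit step used in fitAuto(): evaluate the
# likelihood on a 2D (amplitude, constant) grid, sum over each axis to get the two
# marginal likelihoods, then fit each marginal with a Gaussian. The toy chi-square
# below is a stand-in for self.chisqAuto, purely for illustration.
import numpy as np
from scipy.stats import norm
from scipy.optimize import curve_fit

amplitudeRange = np.arange(0.1, 2.0, 0.01)
constRange = np.arange(0.1, 2.0, 0.01)
toy_chisq = lambda amp, const: ((amp - 1.1) / 0.1) ** 2 + ((const - 0.6) / 0.2) ** 2

width = amplitudeRange[1] - amplitudeRange[0]
height = constRange[1] - constRange[0]
likes = np.array([[np.exp(-0.5 * toy_chisq(a, c)) for a in amplitudeRange] for c in constRange])

ampLike = likes.sum(axis=0) / (likes.sum(axis=0).sum() * width)     # marginal over const
constLike = likes.sum(axis=1) / (likes.sum(axis=1).sum() * height)  # marginal over amp

(ampBest, ampErr), _ = curve_fit(norm.pdf, amplitudeRange, ampLike, p0=[1.0, 0.1])
(constBest, constErr), _ = curve_fit(norm.pdf, constRange, constLike, p0=[1.0, 0.1])
print(ampBest, abs(ampErr), constBest, abs(constErr))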
zsource = 1.0
myInt.addDeltaNz('delta', zsource)
for i, zwidth in enumerate(np.arange(0.01, 0.1, 0.01)):
    myInt.addStepNz('step' + str(i), zsource - zwidth / 2., zsource + zwidth / 2.)

print("getting cls..")

pl = Plotter(scaleY='log', scaleX='log')

ellrange = list(range(2, ellmax, 1))
myInt.generateCls(ellrange)
for i, tag in enumerate(sorted(myInt.kernels.keys())):
    if tag == "cmb":
        continue
    retcl = myInt.getCl("cmb", tag)
    if tag == "delta":
        compcl = retcl.copy()
        lw = 2
        ls = "--"
    else:
        lw = 1
        ls = "-"
    pl.add(ellrange, retcl, label=tag, ls=ls, lw=lw)
    rat = (retcl / compcl)
    # compare on a NumPy array so the ell mask is elementwise (a plain list
    # cannot be compared to a float)
    ellarr = np.array(ellrange)
    ratio = (np.abs(rat[np.logical_and(ellarr > 100., ellarr < 2000.)] - 1.)).max() * 100.
    print((tag, ratio, " %"))

pl.legendOn(loc='upper right', labsize=10)
pl.done("output/estcls.png")
print("getting cls..") ellrange = list(range(2, ellmax, 1)) myInt.generateCls(ellrange) truthCl = myInt.getCl("cmb", "cmb") estCl1 = myInt.getCl("cmbDelta", "cmbDelta") estCl2 = myInt.getCl("cmbStep1", "cmbStep1") estCl3 = myInt.getCl("cmbStep2", "cmbStep2") elapsedTime = time.time() - startTime print(("Estimation took ", elapsedTime, " seconds.")) pl = Plotter(scaleY='log', scaleX='log') cells = LF.theory.gCl("kk", ells) pl.add(ellrange, truthCl, label="true", ls='-') pl.add(ellrange, estCl1, label="delta", ls='-') pl.add(ellrange, estCl2, label="step1", ls='-') pl.add(ellrange, estCl3, label="step2", ls='-') pl.add(ells, cells, label="CAMBkk", color='red', ls='--') pl.legendOn(loc='upper right', labsize=10) pl.done("output/estcls.png") pl = Plotter() for clNow, lab in zip([truthCl, estCl1, estCl2, estCl3], ["truth", "delta", "step 40", "step 1"]): intmm = interp1d(ellrange, clNow, bounds_error=False, fill_value=0.)(ells) pl.add(ells, intmm / LF.theory.gCl("kk", ells), label=lab) pl.legendOn(loc='upper right', labsize=10) pl._ax.set_ylim(0.9, 1.1)
    print xy,beam
    if files==[]:
        print "skipping"
        continue
    print files[0]

    noises = []
    mss = []
    for filen in files:
        beam,ms = np.loadtxt(filen,unpack=True)
        noise = float(filen[-7:-4])
        noises.append(noise)
        mss.append(ms)

    print "last noise ", noises[-1]
    #print mss[-1]
    #pl.add(noises,mss,lw=3,label=xy+" "+str(beam) + " arcmin",ls='--',color=col)
    pl.add(noises,np.array(mss)*0.01,lw=3,label=str(beam)+" arcmin",ls='-',color=col1)

pl._ax.set_xlim(1.,5.)
#pl._ax.set_ylim(0.,10.)
pl._ax.set_ylim(0.,4.*0.01)
pl.legendOn()
pl.done("beam.png")
myInt.generateCls(ellrange)
retclkks = myInt.getCl("cmb", "cmb")
retclkss = myInt.getCl("cmb", "s")
retclsss = myInt.getCl("s", "s")
retclsks = myInt.getCl("s", "cmb")
retclkgs = myInt.getCl("cmb", "g")
retclggs = myInt.getCl("g", "g")
retclgks = myInt.getCl("g", "cmb")

elapsedTime = time.time() - startTime
print(("Estimation took ", elapsedTime, " seconds."))

pl = Plotter(scaleY='log', scaleX='log')

cells = LF.theory.gCl("kk", ells)
pl.add(ellrange, retclkks, label="MMkk", color='red', ls='-')
pl.add(ells, cells, label="CAMBkk", color='red', ls='--')

cells = LF.theory.gCl("ks", ells)
pl.add(ellrange, retclkss, label="MMks", color='blue', ls='-')
pl.add(ellrange, retclsks, label="MMsk", color='blue', ls='-.', lw=2)
pl.add(ells, cells, label="CAMBks", color='blue', ls='--')

cells = LF.theory.gCl("ss", ells)
pl.add(ellrange, retclsss, label="MMss", color='green', ls='-')
pl.add(ells, cells, label="CAMBss", color='green', ls='--')

cells = LF.theory.gCl("kg", ells)
pl.add(ellrange, retclkgs, label="MMkg", color='purple', ls='-')
pl.add(ellrange, retclgks, label="MMgk", color='purple', ls='-.', lw=2)
pl.add(ells, cells, label="CAMBkg", color='purple', ls='--')
dummy = makeBinfile(tempBinfile,2.,4000.,100.,redundant=True)

clkkFile = "../actpLens/data/fidkk.dat"
clkk = np.loadtxt(clkkFile)
lkk = np.arange(2,len(clkk)+2)

N = 20
estcls = []
for i in range(N):
    kappaMap,taperMap = getTaperedMap(lkk,clkk)
    print(kappaMap.data.shape)
    print(kappaMap.info())
    # debug leftover: exiting here aborts the loop on the first realization and
    # makes the averaging and plotting below unreachable
    #sys.exit()
    lower, upper, center, bin_means = getBinnedPower(kappaMap,tempBinfile,taperMap)
    estcls.append(bin_means)
    print(i)

clmeans, covMean, cov, errMean, err, corrcoef = getStats(estcls,N)

pl = Plotter()
pl.add(lkk,lkk*clkk)
#pl.add(center,center*bin_means,ls="none",marker="x",color='red',markersize=8,mew=3)
pl.addErr(center,center*clmeans,yerr=center*errMean,ls="none",marker="o",color='red',markersize=8,mew=3)
pl._ax.set_xlim(0.,3500.)
pl.done("clpower.png")
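# getStats() is used above (and in the stamp-stacking script earlier) but not
# defined in this section. A minimal sketch of what such a helper plausibly
# computes from N realizations of a binned quantity; this is an assumption for
# illustration, not necessarily the author's implementation.
import numpy as np

def getStats(realizations, N):
    arr = np.array(realizations)     # shape (N, nbins)
    means = arr.mean(axis=0)         # mean over realizations
    cov = np.cov(arr.T)              # covariance of a single realization
    covMean = cov / N                # covariance of the mean
    err = np.sqrt(np.diag(cov))      # per-bin scatter
    errMean = err / np.sqrt(N)       # error on the mean
    corrcoef = np.corrcoef(arr.T)    # correlation matrix
    return means, covMean, cov, errMean, err, corrcoef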