def fit(self, keyData, keyTheory, amplitudeRange=np.arange(0.1, 2.0, 0.01), debug=False, numbins=-1):
    # Evaluate the likelihood on a 1D grid of amplitudes, fit a Gaussian to it,
    # and store the best-fit scaled theory curve as a new entry.
    # (Assumes module-level: import numpy as np; from scipy.stats import norm;
    #  from scipy.optimize import curve_fit as cfit; and a Plotter class.)
    width = amplitudeRange[1] - amplitudeRange[0]
    Likelihood = lambda x: np.exp(-0.5 * self.chisq(keyData, keyTheory, amp=x, numbins=numbins))
    Likes = np.array([Likelihood(x) for x in amplitudeRange])
    Likes = Likes / (Likes.sum() * width)  # normalize to unit area

    # With unit area, the gridded likelihood is directly comparable to norm.pdf
    ampBest, ampErr = cfit(norm.pdf, amplitudeRange, Likes, p0=[1.0, 0.5])[0]

    if debug:
        fitVals = np.array([norm.pdf(x, ampBest, ampErr) for x in amplitudeRange])
        pl = Plotter()
        pl.add(amplitudeRange, Likes, label="likes")
        pl.add(amplitudeRange, fitVals, label="fit")
        pl.legendOn()
        pl.done("output/debug_coreFit.png")

    fitKey = keyData + "_fitTo_" + keyTheory
    self.datas[fitKey] = {}
    self.datas[fitKey]['covmat'] = None
    self.datas[fitKey]['binned'] = self.datas[keyTheory]['binned'] * ampBest
    self.datas[fitKey]['unbinned'] = self.datas[keyTheory]['unbinned'] * ampBest
    self.datas[fitKey]['label'] = (keyData + " fit to " + keyTheory + " with amp "
                                   + '{0:.2f}'.format(ampBest) + "+-" + '{0:.2f}'.format(ampErr))
    self.datas[fitKey]['amp'] = (ampBest, ampErr)
    self.datas[fitKey]['isFit'] = True
    return fitKey
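# Hedged example (not from the original source): the grid-likelihood ->
# Gaussian-fit pattern used by fit() above, shown standalone with a toy
# quadratic chi-square standing in for self.chisq. Only numpy/scipy assumed.
import numpy as np
from scipy.stats import norm
from scipy.optimize import curve_fit as cfit

toy_chisq = lambda amp: ((amp - 1.3) / 0.2) ** 2  # minimum at 1.3, width 0.2

amplitudeRange = np.arange(0.1, 2.0, 0.01)
width = amplitudeRange[1] - amplitudeRange[0]
likes = np.exp(-0.5 * np.array([toy_chisq(a) for a in amplitudeRange]))
likes /= likes.sum() * width  # unit area, so the grid behaves like a pdf

ampBest, ampErr = cfit(norm.pdf, amplitudeRange, likes, p0=[1.0, 0.5])[0]
print(ampBest, ampErr)  # ~1.3, ~0.2: the Gaussian fit recovers peak and width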
def cic(self):
    # Fit exponential curves to the unsupported Pb-210 activity profile and
    # derive age and accumulation rate assuming constant initial concentration
    # (CIC). See, e.g., Donnelly and Bertness (2001), PNAS 98: 14218-14223.
    # The background/supported level of Pb-210 is sometimes unknown, so this
    # routine can also fit an intercept that predicts the background rate;
    # when background samples are present, the fitted intercepts should be
    # near zero.

    # Initialize outputs, one entry per Monte Carlo iteration
    self.data['initial'] = np.ones(self.max_iter) * np.nan
    self.data['rate'] = np.ones(self.max_iter) * np.nan
    self.data['intercept'] = np.ones(self.max_iter) * np.nan
    self.data['rsquared'] = np.ones(self.max_iter) * np.nan

    # Step through the Monte Carlo iterations
    for ii in np.arange(self.max_iter):
        # Initial guess at the surface activity: mean of the shallowest samples
        self.temp_unsupport = np.mean(
            self.data['unsupported'][self.data['depth'] == np.min(self.data['depth']), ii],
            axis=0)

        # Fit an exponential model to the data, with or without predicting
        # a baseline background
        if self.bkgrd < 0:
            temp_fit = cfit(self.cic_forward,
                            self.data['mass'], self.data['unsupported'][:, ii],
                            p0=np.array([self.temp_unsupport, self.initial_depo_rate,
                                         self.initial_depo_intercept]))[0]
            self.data['intercept'][ii] = temp_fit[2]
        else:
            temp_fit = cfit(self.cic_forward_nointercept,
                            self.data['mass'], self.data['unsupported'][:, ii],
                            p0=np.array([self.temp_unsupport, self.initial_depo_rate]))[0]
            self.data['intercept'][ii] = 0

        # Extract model parameters
        self.data['initial'][ii] = temp_fit[0]
        self.data['rate'][ii] = temp_fit[1]
        # R^2 between the observed activities and the model evaluated at the
        # same masses (the fit's x-data)
        self.data['rsquared'][ii] = np.corrcoef(
            self.data['unsupported'][:, ii],
            self.cic_forward(self.data['mass'],
                             self.data['initial'][ii], self.data['rate'][ii],
                             self.data['intercept'][ii]))[1, 0] ** 2
        self.data['age'][:, ii] = self.data['mass'] / self.data['rate'][ii]

    # Forward-predict the activity values
    self.data['predicted'] = self.data['initial'] * np.exp(-self.decay * self.data['age'])
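# Hedged sketch (assumption, not from the original source): cic_forward and
# cic_forward_nointercept are referenced above but not shown. A form consistent
# with how cic() uses them (age = mass / rate, radioactive decay of Pb-210,
# optional constant background) would be:
import numpy as np

PB210_DECAY = np.log(2.) / 22.3  # 1/yr, from the ~22.3-yr Pb-210 half-life

def cic_forward(mass, initial, rate, intercept):
    # Unsupported activity under CIC: material at cumulative mass `mass` was
    # deposited mass/rate years ago and has decayed since; `intercept` is a
    # constant background level.
    return initial * np.exp(-PB210_DECAY * mass / rate) + intercept

def cic_forward_nointercept(mass, initial, rate):
    # Same model with the background pinned to zero.
    return cic_forward(mass, initial, rate, 0.0)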
def done(self):
    jdelim = args.delimiter if args.delimiter is not None else ' '
    for curve, func in zip(args.curve, args.curvef):
        if len(self.tup) > 0:
            args.outfile.write(jdelim.join(self.tup) + jdelim)
        popt, pcov = cfit(func, self.xdata, self.ydata, p0=args.params)
        try:
            pvar = np.diag(pcov)
        except Exception:
            pvar = [None]
        args.outfile.write(jdelim.join(map(str, popt)) + jdelim + curve + jdelim +
                           jdelim.join(map(str, pvar)) + '\n')
def fit_noise_power(ells, nls, ell_fit=5000., lknee_guess=2000., alpha_guess=-4.0):
    '''Fit a beam-convolved noise power spectrum (uK^2 units; this function
    does not know about the beam) to an atmosphere + white-noise model
    parameterized by (rms_noise, lknee, alpha).

    ell_fit is the ell above which the nls are averaged to estimate the rms
    white-noise level.
    '''
    from scipy.optimize import curve_fit as cfit
    # White-noise level from the high-ell average, converted to uK-arcmin
    noise_guess = np.sqrt(np.nanmean(nls[ells > ell_fit])) * (180. * 60. / np.pi)
    # Hold the white-noise level fixed and fit only (lknee, alpha)
    nlfitfunc = lambda ell, l, a: noise_func(ell, 0., noise_guess, l, a, dimensionless=False)
    popt, pcov = cfit(nlfitfunc, ells, nls, p0=[lknee_guess, alpha_guess])
    lknee_fit, alpha_fit = popt
    return noise_guess, lknee_fit, alpha_fit
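# Hedged sketch (assumption, not from the original source): noise_func is
# defined elsewhere in the module. A minimal stand-in for the atmosphere +
# white-noise model it presumably implements (rms in uK-arcmin, a power-law
# ramp below lknee, beam ignored here since fwhm=0 is passed above) is:
import numpy as np

def noise_func(ell, fwhm, rms_noise, lknee, alpha, dimensionless=False):
    white = (rms_noise * np.pi / (180. * 60.)) ** 2  # white level in uK^2-steradian
    atm = (lknee / ell) ** (-alpha) if lknee > 0 else 0.  # grows toward low ell for alpha < 0
    return white * (1. + atm)

# Round-trip check on synthetic data: the fit approximately recovers the
# (10 uK-arcmin, lknee=2500, alpha=-4) inputs.
ells = np.arange(100, 8000).astype(float)
nls = noise_func(ells, 0., 10., 2500., -4.0)
print(fit_noise_power(ells, nls))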
nts = [16, 24, 32, 64, 128, 256]
ufs = []     # fitted amplitudes
deltas = []  # fitted phase shifts
for nt in nts:
    with open("data_{0}.txt".format(nt)) as f:
        data = f.read().split('\n')[:-1]
    # Rows are comma-separated with a trailing comma; successive rows hold
    # rho, u, p in turn
    to_num = lambda row: np.asarray(list(map(float, row.split(',')[:-1])))
    rho = list(map(to_num, data[0::3]))
    u = list(map(to_num, data[1::3]))
    p = list(map(to_num, data[2::3]))
    x = np.linspace(0., 1., 17)
    popt, _ = cfit(func, x, u[-1])
    ufs.append(popt[0])
    deltas.append(popt[1])

plt.plot(16 / np.asarray(nts), np.asarray(deltas) / np.pi, '.-', label='delta')
plt.xlim(max(16 / np.asarray(nts)), min(16 / np.asarray(nts)))
plt.xticks(16 / np.asarray(nts))
plt.xlabel("c Dt/Dx")
plt.ylabel("delta/pi")
plt.show()

plt.plot(16 / np.asarray(nts), np.asarray(ufs) / 0.1, '.-')
plt.xlim(max(16 / np.asarray(nts)), min(16 / np.asarray(nts)))
plt.xticks(16 / np.asarray(nts))
plt.xlabel("c Dt/Dx")
plt.ylabel("uf/u0")
plt.show()
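# Hedged sketch (assumption, not from the original source): `func` is defined
# elsewhere. Given that the script above reports a fitted amplitude uf
# (normalized by u0 = 0.1) and a phase delta (normalized by pi), a plausible
# single-mode model for this advection test is:
import numpy as np

def func(x, uf, delta):
    # One sine wavelength across the unit domain, with a fitted amplitude
    # (numerical dissipation) and phase lag (numerical dispersion).
    return uf * np.sin(2. * np.pi * x - delta)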
def main(argv=None):
    if argv is None:
        npoints = 10000
    elif hasattr(argv, "__len__"):
        if len(argv) > 1:
            npoints = int(argv[1])
        else:
            print("Usage:")
            print("fit [npoints]")
            return
    else:
        # expected a number
        npoints = argv

    def gauss(t0, *param0):
        # Constant + slope + Gaussian; param[4] is a FWHM, and
        # 2.3548... = 2*sqrt(2*ln 2) converts it to a sigma
        param = numpy.array(param0)
        t = numpy.array(t0)
        dummy = 2.3548200450309493 * (t - param[3]) / param[4]
        return param[0] + param[1] * t + param[2] * myexp(-0.5 * dummy * dummy)

    def myexp(x):
        # put a (bad) filter to avoid over/underflows with no python looping
        return numpy.exp(x * numpy.less(abs(x), 250)) - \
            1.0 * numpy.greater_equal(abs(x), 250)

    xx = numpy.arange(npoints, dtype=numpy.float64)
    yy = gauss(xx, *[10.5, 2, 1000.0, 20., 15])
    sy = numpy.sqrt(abs(yy))
    parameters = [0.0, 1.0, 900.0, 25., 10]

    stime = time.time()
    fittedpar, cov, ddict = leastsq(gauss, xx, yy, parameters, sigma=sy,
                                    left_derivative=False, full_output=True,
                                    check_finite=True)
    etime = time.time()
    sigmapars = numpy.sqrt(numpy.diag(cov))
    print("Took ", etime - stime, "seconds")
    print("Function calls = ", ddict["nfev"])
    print("chi square = ", ddict["chisq"])
    print("Fitted pars = ", fittedpar)
    print("Sigma pars = ", sigmapars)

    try:
        from scipy.optimize import curve_fit as cfit
        SCIPY = True
    except ImportError:
        SCIPY = False
    if SCIPY:
        counter = 0
        stime = time.time()
        scipy_fittedpar, scipy_cov = cfit(gauss, xx, yy, parameters, sigma=sy)
        etime = time.time()
        print("Scipy Took ", etime - stime, "seconds")
        print("Counter = ", counter)
        print("scipy = ", scipy_fittedpar)
        print("Sigma = ", numpy.sqrt(numpy.diag(scipy_cov)))
def main(argv):
    iniFile = 'input/noise.ini'
    config = ConfigParser()
    config.read(iniFile)

    mass = 1.E14
    conc = 3.0
    z = 1.0

    Nclus = 1000.
    arcupto = 10.

    mss = []
    # beams = np.arange(1.0, 7.0, 0.1)
    beams = [float(argv[0])]
    noiseT = float(argv[1])
    xy = argv[2]
    print(beams)

    for beam in beams:
        ellbeam = 7000
        # ellbeam = int(np.sqrt(8.*np.log(2.)) / beam * 60. * 180./np.pi)
        print("Ellbeam, ", ellbeam)

        Nsupp = 10000.
        px = 0.1
        arc = 20.
        bin_width = 10  # int(beam/px)
        Nreals = 5000
        Nbins = int(arcupto / bin_width)
        scale = px
        thetas = np.arange(bin_width * scale / 2., arcupto, scale * bin_width)

        lensedClFile = "../cmb-lensing-projections/data/TheorySpectra/ell28k_highacc_lensedCls.dat"
        unlensedClFile = "../cmb-lensing-projections/data/TheorySpectra/ell28k_highacc_scalCls.dat"

        cmbells, cltt, clee, clbb, clte = Lens.loadCls(lensedClFile)
        ucmbells, ucltt, uclee, uclte, dummy = Lens.loadCls(unlensedClFile)
        uclbb = clbb.copy()[:len(ucmbells)]
        uClsNow = [ucltt, uclee, uclbb, uclte]
        uClsFid = [ucltt, uclee, uclbb, uclte]
        lClsFid = [cltt, clee, clbb, clte]

        template = "../DerivGen/data/order5_lensedCMB_T_beam_cutout_3.fits"
        templateMap = liteMap.liteMapFromFits(template)
        Lens.makeBinfile("../cmb-lensing-projections/data/bintemp.dat", first=2, last=9000, width=20)

        lmin = 2.
        lmax = 8000.

        if xy == 'mv':
            # Minimum-variance combination of the TT and EB noise curves
            NormGen = Lens.AL(templateMap, 'TT', ucmbells, uClsNow, ucmbells, uClsFid,
                              cmbells, lClsFid, lmax, lmax, gradCut=2000.)
            NormGen.addWhiteNoise(noiseT, np.sqrt(2.) * noiseT, beam,
                                  (0, 0, lmin, lmax), (lmin, lmax, lmin, lmax))
            L1, Nl1, ph = NormGen.binnedNLkk("../cmb-lensing-projections/data/bintemp.dat",
                                             inverted=False, halo=True)

            NormGen = Lens.AL(templateMap, 'EB', ucmbells, uClsNow, ucmbells, uClsFid,
                              cmbells, lClsFid, lmax, lmax, gradCut=2000.)
            NormGen.addWhiteNoise(noiseT, np.sqrt(2.) * noiseT, beam,
                                  (0, 0, lmin, lmax), (lmin, lmax, lmin, lmax))
            L2, Nl2, ph = NormGen.binnedNLkk("../cmb-lensing-projections/data/bintemp.dat",
                                             inverted=False, halo=True)

            assert np.all(L1 == L2)
            L = L1.copy()
            Nl = 1. / (1. / Nl1 + 1. / Nl2)
        else:
            NormGen = Lens.AL(templateMap, xy, ucmbells, uClsNow, ucmbells, uClsFid,
                              cmbells, lClsFid, lmax, lmax, gradCut=2000.)
            NormGen.addWhiteNoise(noiseT, np.sqrt(2.) * noiseT, beam,
                                  (0, 0, lmin, lmax), (lmin, lmax, lmin, lmax))
            L, Nl, ph = NormGen.binnedNLkk("../cmb-lensing-projections/data/bintemp.dat",
                                           inverted=False, halo=True)

        pl = Plotter(scaleX='log', scaleY='log')
        pl.add(L, Nl)
        pl.done('stampNl.png')

        rads = []
        print("Making stamps to determine profile covmat...")
        for i in range(Nreals):
            lm = liteMap.makeEmptyCEATemplate(raSizeDeg=arc / 60., decSizeDeg=arc / 60.,
                                              pixScaleXarcmin=px, pixScaleYarcmin=px)
            if i == 0:
                Npix = lm.data.shape[0]
                lfilt = stepFunctionFilterFromLiteMap(lm, ellbeam)
                kapmaker = kappaMaker(Cosmology(defaultLCDM), mass, conc, z, storeKap=False)
                kapstamp, kaprad = kapmaker.getKappaAndProfile(Npix, scale=px, beam=None,
                                                               bin_width=bin_width)

            lm.fillWithGaussianRandomField(L, Nl / Nsupp, bufferFactor=1)
            stamp = lm.data.copy() + kapstamp.copy()
            stamp = np.nan_to_num(filterDataFromTemplate(stamp, lfilt))
            prof = radial_data(stamp, annulus_width=bin_width).mean
            rads.append(prof)
            if i % 1000 == 0:
                print(i)

        radmeans, covMean, cov, errMean, err, corrcoef = getStats(rads, Nreals)

        thstamp = np.nan_to_num(filterDataFromTemplate(kapstamp, lfilt))
        thprof = radial_data(thstamp, annulus_width=bin_width).mean

        siginv = np.linalg.pinv(cov[:len(thetas), :len(thetas)])
        b = np.dot(siginv, radmeans[:len(thetas)])
        chisq = np.dot(radmeans[:len(thetas)], b)
        print(np.sqrt(chisq * Nclus / Nsupp))

        pl = Plotter()
        pl.addErr(thetas, radmeans[:len(thetas)], yerr=err[:len(thetas)])
        pl.add(thetas, kapmaker.kappa(thetas))
        pl._ax.set_ylim(-0.01, 0.25)
        pl._ax.set_xlim(0.0, arcupto)
        pl.done('profile.png')

        pl = Plotter()
        pl.plot2d(corrcoef)
        pl.done('corrcoef.png')

        pl = Plotter()
        pl.plot2d(stamp)
        pl.done('stamp.png')

        amplitudeRange = np.arange(-1., 2., 0.01)
        width = amplitudeRange[1] - amplitudeRange[0]
        amplist = []
        print("Fitting amplitudes...")
        for i in range(Nreals):
            prof = rads[i][:len(thetas)]
            Likelihood = lambda x: np.exp(-0.5 * fchisq(prof, siginv,
                                                        thprof[:len(thetas)], amp=x))
            Likes = np.array([Likelihood(x) for x in amplitudeRange])
            Likes = Likes / (Likes.sum() * width)  # normalize
            ampBest, ampErr = cfit(norm.pdf, amplitudeRange, Likes, p0=[1.0, 0.5])[0]
            amplist.append(ampBest)

        ms = plotstat(-1., 2., 0.01, np.array(amplist), "amps", fit=True,
                      scale=np.sqrt(Nclus / Nsupp))
        mss.append(ms)

    pl = Plotter()
    pl.add(beams, mss)
    pl.done('beam.png')
    np.savetxt("output/m1beamsms" + xy + argv[0] + "_noise" + str(noiseT) + ".txt",
               np.vstack((beams, mss)).transpose())
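# Hedged sketch (assumption, not from the original source): fchisq is used
# above but not shown. Its call signature suggests the standard Gaussian
# chi-square of a profile against an amplitude-scaled template:
import numpy as np

def fchisq(data, siginv, theory, amp=1.):
    # (d - A t)^T C^-1 (d - A t) with a precomputed inverse covariance
    resid = np.asarray(data) - amp * np.asarray(theory)
    return np.dot(resid, np.dot(siginv, resid))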
def fitAuto(self, keyData, keyTheory, amplitudeRange=np.arange(0.1, 2.0, 0.01),
            constRange=np.arange(0.1, 2.0, 0.01), debug=False, store=False):
    # evaluate likelihood on a 2d grid and fit each 1d marginal to a gaussian
    # store fit as new theory curve
    width = amplitudeRange[1] - amplitudeRange[0]
    height = constRange[1] - constRange[0]
    Likelihood = lambda x, y: np.exp(-0.5 * self.chisqAuto(keyData, keyTheory, amp=x, const=y))

    Likes = np.array([[Likelihood(x, y) for x in amplitudeRange] for y in constRange])

    ampLike = np.sum(Likes, axis=0)
    constLike = np.sum(Likes, axis=1)

    ampLike = ampLike / (ampLike.sum() * width)        # normalize
    constLike = constLike / (constLike.sum() * height)  # normalize

    ampBest, ampErr = cfit(norm.pdf, amplitudeRange, ampLike,
                           p0=[amplitudeRange.mean(), 0.1 * amplitudeRange.mean()])[0]
    constBest, constErr = cfit(norm.pdf, constRange, constLike,
                               p0=[constRange.mean(), 0.1 * constRange.mean()])[0]

    if debug:
        pl = Plotter()
        pl.plot2d(Likes)
        pl.done("output/like2d.png")

        pl = Plotter()
        fitVals = np.array([norm.pdf(x, ampBest, ampErr) for x in amplitudeRange])
        pl.add(amplitudeRange, ampLike, label="amplikes")
        pl.add(amplitudeRange, fitVals, label="fit")
        pl.legendOn()
        pl.done("output/amplike1d.png")

        pl = Plotter()
        fitVals = np.array([norm.pdf(x, constBest, constErr) for x in constRange])
        pl.add(constRange, constLike, label="constlikes")
        pl.add(constRange, fitVals, label="fit")
        pl.legendOn()
        pl.done("output/constlike1d.png")

    if not store:
        return constBest, constErr
    else:
        self.datas[keyData]['binned'] -= constBest
        self.datas[keyData]['unbinned'] -= constBest

        fitKey = keyData + "_fitTo_" + keyTheory
        self.datas[fitKey] = {}
        self.datas[fitKey]['covmat'] = None
        self.datas[fitKey]['binned'] = self.datas[keyTheory]['binned'] * ampBest
        self.datas[fitKey]['unbinned'] = self.datas[keyTheory]['unbinned'] * ampBest
        self.datas[fitKey]['label'] = (keyData + " fit to " + keyTheory + " with amp "
                                       + '{0:.2f}'.format(ampBest) + "+-" + '{0:.2f}'.format(ampErr))
        self.datas[fitKey]['amp'] = (ampBest, ampErr)
        self.datas[fitKey]['const'] = (constBest, constErr)
        self.datas[fitKey]['isFit'] = True
        return fitKey
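# Hedged example (not from the original source): a toy sanity check that the
# row/column sums used in fitAuto() recover each 1D marginal, assuming an
# independent 2D Gaussian chi-square in place of self.chisqAuto.
import numpy as np
from scipy.stats import norm
from scipy.optimize import curve_fit as cfit

amps = np.arange(0.1, 2.0, 0.01)
consts = np.arange(0.1, 2.0, 0.01)
chisq2d = lambda a, c: ((a - 1.2) / 0.1) ** 2 + ((c - 0.7) / 0.15) ** 2

likes = np.array([[np.exp(-0.5 * chisq2d(a, c)) for a in amps] for c in consts])
ampLike = likes.sum(axis=0)  # marginalize over the constant
ampLike /= ampLike.sum() * (amps[1] - amps[0])
print(cfit(norm.pdf, amps, ampLike, p0=[amps.mean(), 0.1 * amps.mean()])[0])  # ~ (1.2, 0.1)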
def B(l, T, n):
    # Planck blackbody curve with an overall amplitude n. (Reconstructed
    # function head: the parameter order (T, n) is inferred from the cfit
    # call below; assumes `import scipy.constants as s`.)
    t = (n * 2 * s.h * s.c**2) / ((l**5) * (np.exp((s.h * s.c) / (l * s.k * T)) - 1))
    return t

plt.plot(wvl, dat)
plt.show()

inp = input("Where to cut:\n")
if inp == 'max':
    cut = np.argmax(dat)
else:
    cut = wvl.tolist().index(int(inp))
cut = int(cut)
spec = wvl.tolist().index(3500)

# Fit the blackbody beyond the chosen cut; `l` is presumably the wavelength
# grid in SI units and `wvl` the same grid in Angstroms
val, var = cfit(B, l[cut:], dat[cut:], p0=[1000, 1])

# Find features where the data dip below the fitted curve
# (fp is presumably scipy.signal.find_peaks)
peaks, _ = fp(B(l, val[0], val[1]) - dat, height=0, prominence=0.05)
wvlpk = peaks * 5 + 1150  # index -> wavelength: 5 A bins starting at 1150 A
specpk = []
for i in range(len(peaks)):
    # Keep only peaks inside three selected wavelength windows
    if 6550 < wvlpk[i] < 6650:
        specpk.append(wvlpk[i])
    elif 4750 < wvlpk[i] < 4850:
        specpk.append(wvlpk[i])
    elif 4300 < wvlpk[i] < 4400:
        specpk.append(wvlpk[i])
specpk = (np.array(specpk) - 1150) / 5  # wavelength -> index
wvlpk = np.array(specpk).astype(int)

plt.plot(peaks * 5 + 1150, dat[peaks], "x")
plt.plot(wvlpk * 5 + 1150, dat[wvlpk], "o")
plt.plot(wvl, B(l, val[0], val[1]) - dat)
def getDLnMCMB(ells, Nls, clusterCosmology, log10Moverh, z, concentration,
               arcStamp, pxStamp, arc_upto, bin_width, expectedSN,
               Nclusters=1000, numSims=30, saveId=None, numPoints=1000,
               nsigma=8., overdensity=500., critical=True, atClusterZ=True):
    import flipper.liteMap as lm
    if saveId is not None:
        from orphics.tools.output import Plotter

    M = 10.**log10Moverh
    cc = clusterCosmology

    stepfilter_ellmax = max(ells)

    lmap = lm.makeEmptyCEATemplate(raSizeDeg=arcStamp / 60., decSizeDeg=arcStamp / 60.,
                                   pixScaleXarcmin=pxStamp, pixScaleYarcmin=pxStamp)

    xMap, yMap, modRMap, xx, xy = fmaps.getRealAttributes(lmap)
    lxMap, lyMap, modLMap, thetaMap, lx, ly = fmaps.getFTAttributesFromLiteMap(lmap)

    # winAtLens is assumed to be defined at module scope
    kappaMap, retR500 = NFWkappa(cc, M, concentration, z, modRMap * 180. * 60. / np.pi,
                                 winAtLens, overdensity, critical, atClusterZ)
    finetheta = np.arange(0.01, arc_upto, 0.01)
    finekappa, retR500 = NFWkappa(cc, M, concentration, z, finetheta, winAtLens,
                                  overdensity, critical, atClusterZ)
    kappaMap = fmaps.stepFunctionFilterLiteMap(kappaMap, modLMap, stepfilter_ellmax)

    generator = fmaps.GRFGen(lmap, ells, Nls)

    bin_edges = np.arange(0., arc_upto, bin_width)
    binner = bin2D(modRMap * 180. * 60. / np.pi, bin_edges)
    centers, thprof = binner.bin(kappaMap)

    if saveId is not None:
        pl = Plotter()
        pl.plot2d(kappaMap)
        pl.done("output/" + saveId + "kappa.png")

    expectedSNGauss = expectedSN * np.sqrt(numSims)
    sigma = 1. / expectedSNGauss
    amplitudeRange = np.linspace(1. - nsigma * sigma, 1. + nsigma * sigma, numPoints)

    lnLikes = 0.
    bigStamp = 0.
    for i in range(numSims):
        profiles, totstamp = getProfiles(generator, stepfilter_ellmax, kappaMap, binner, Nclusters)
        bigStamp += totstamp
        stats = getStats(profiles)
        if i == 0 and (saveId is not None):
            pl = Plotter()
            pl.add(centers, thprof, lw=2, color='black')
            pl.add(finetheta, finekappa, lw=2, color='black', ls="--")
            pl.addErr(centers, stats['mean'], yerr=stats['errmean'], lw=2)
            pl._ax.set_ylim(-0.01, 0.3)
            pl.done("output/" + saveId + "profile.png")

            pl = Plotter()
            pl.plot2d(totstamp)
            pl.done("output/" + saveId + "totstamp.png")

        # Independent sims: accumulate log-likelihoods over the amplitude grid
        Likes = getAmplitudeLikelihood(stats['mean'], stats['covmean'], amplitudeRange, thprof)
        lnLikes += np.log(Likes)

    width = amplitudeRange[1] - amplitudeRange[0]

    Likes = np.exp(lnLikes)
    Likes = Likes / (Likes.sum() * width)  # normalize

    ampBest, ampErr = cfit(norm.pdf, amplitudeRange, Likes, p0=[1.0, 0.5])[0]

    sn = ampBest / ampErr / np.sqrt(numSims)
    snAll = ampBest / ampErr
    if snAll < 5.:
        print("WARNING: ", saveId, " run with mass ", M, " and redshift ", z,
              " has overall S/N<5. Consider re-running with a greater numSims, "
              "otherwise the estimate of per-Ncluster S/N will be noisy.")

    if saveId is not None:
        Fit = np.array([np.exp(-0.5 * (x - ampBest)**2. / ampErr**2.) for x in amplitudeRange])
        Fit = Fit / (Fit.sum() * width)  # normalize
        pl = Plotter()
        pl.add(amplitudeRange, Likes, label="like")
        pl.add(amplitudeRange, Fit, label="fit")
        pl.legendOn(loc='lower left')
        pl.done("output/" + saveId + "like.png")

        pl = Plotter()
        pl.plot2d(bigStamp / numSims)
        pl.done("output/" + saveId + "bigstamp.png")

        np.savetxt("data/" + saveId + "_m" + str(log10Moverh) + "_z" + str(z) + ".txt",
                   np.array([log10Moverh, z, 1. / sn]))

    return 1. / sn
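# Hedged sketch (assumption, not from the original source): getAmplitudeLikelihood
# is used above but not shown. Under the Gaussian-data assumption the rest of the
# function makes, a plausible form is the likelihood of each trial amplitude A
# scaling the theory profile, given the mean profile and its covariance:
import numpy as np

def getAmplitudeLikelihood(mean_prof, cov, amplitudeRange, theory_prof):
    # exp(-0.5 * (d - A t)^T C^-1 (d - A t)) evaluated on the amplitude grid
    cinv = np.linalg.pinv(cov)
    likes = []
    for A in amplitudeRange:
        resid = mean_prof - A * theory_prof
        likes.append(np.exp(-0.5 * np.dot(resid, np.dot(cinv, resid))))
    return np.array(likes)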