def shiftgrid(lon0,datain,lonsin,start=True):
    """
 shift global lat/lon grid east or west.
 assumes wraparound (or cyclic point) is included.

 lon0:  starting longitude for shifted grid
        (ending longitude if start=False). lon0 must be on
        input grid (within the range of lonsin).
 datain:  original data.
 lonsin:  original longitudes.
 start[True]: if True, lon0 represents the starting longitude
 of the new grid. if False, lon0 is the ending longitude.

 returns dataout,lonsout (data and longitudes on shifted grid).
    """
    if pylab.fabs(lonsin[-1]-lonsin[0]-360.) > 1.e-4:
        raise ValueError, 'cyclic point not included'
    if lon0 < lonsin[0] or lon0 > lonsin[-1]:
        raise ValueError, 'lon0 outside of range of lonsin'
    i0 = pylab.argsort(pylab.fabs(lonsin-lon0))[0]
    dataout = pylab.zeros(datain.shape,datain.typecode())
    lonsout = pylab.zeros(lonsin.shape,lonsin.typecode())
    if start:
        lonsout[0:len(lonsin)-i0] = lonsin[i0:]
    else:
        lonsout[0:len(lonsin)-i0] = lonsin[i0:]-360.
    dataout[:,0:len(lonsin)-i0] = datain[:,i0:]
    if start:
        lonsout[len(lonsin)-i0:] = lonsin[1:i0+1]+360.
    else:
        lonsout[len(lonsin)-i0:] = lonsin[1:i0+1]
    dataout[:,len(lonsin)-i0:] = datain[:,1:i0+1]
    return dataout,lonsout
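# Usage sketch (not from the original source): shift a 0-360 degree grid so it
# runs from -180 to 180.  shiftgrid expects the cyclic point (here 360) to be
# present and, as written above, Numeric-era arrays (it calls .typecode(), which
# modern numpy arrays spell .dtype), so treat this as illustrative only.
lons = pylab.arange(0., 361., 60.)             # 0, 60, ..., 360 with cyclic point
data = pylab.resize(lons, (3, len(lons)))      # toy (nlat, nlon) field
datashift, lonshift = shiftgrid(180., data, lons, start=False)
# lonshift now runs -180, -120, ..., 180 and datashift is rolled to match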
def openRomb(integrand, a, b, eps=1e-6, jmax=14, k=5):
    """
    Returns the integral on the _open_interval_ (a,b).
    Integration is performed by Romberg's method of order 2k,
    where, e.g., k=2 is Simpson's rule.
    """
    jmaxp = jmax+1
    s = 0.*M.zeros(jmaxp)
    h = 0.*M.zeros(jmaxp+1)
    ss = 0.
    dss = 0.
    h[0] = 1.0
    for j in range(0,jmax):
        s[j] = tripleInt(integrand,a,b,s[j],j)
        if j >= k:
            ss,dss = interpPoly(h[j-k:j],s[j-k:j],k,0.0)
            if M.fabs(dss) <= eps*M.fabs(ss):
                return ss
        s[j+1] = s[j]
        h[j+1] = h[j]/9.

    print 'Non-convergence in openRomb'
    return ss
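# Usage sketch (not from the original source): openRomb never evaluates the
# integrand at a or b, so integrable endpoint singularities are fine.  It relies
# on tripleInt (the open-midpoint refinement step used above) and interpPoly,
# both assumed to be defined elsewhere in this module, and on M being the
# pylab/numpy-style module imported at the top of the file.
# The exact value of this integral is 2.
print(openRomb(lambda x: 1./M.sqrt(x), 0., 1., eps=1e-6))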
def transform_vector(self,uin,vin,lons,lats,nx,ny,returnxy=False,preserve_magnitude=True):
    """
 transform a vector field (uin,vin) from a lat/lon grid with longitudes lons
 and latitudes lats to a (ny,nx) native map projection grid.

 The input vector field is defined in spherical coordinates (it
 has eastward and northward components) while the output
 vector field is defined in map projection coordinates (relative
 to x and y).

 if returnxy=True, the x and y values of the native map projection grid
 are also returned (default False).

 if preserve_magnitude=True (default), the vector magnitude is preserved
 (so that length of vectors represents magnitude of vector relative to
 spherical coordinate system, not map projection coordinates).

 vectors on a lat/lon grid must be transformed to map projection
 coordinates before they can be plotted on the map (with the quiver
 class method).
    """
    lonsout, latsout, x, y = self.makegrid(nx,ny,returnxy=True)
    # interpolate to map projection coordinates.
    uin = interp(uin,lons,lats,lonsout,latsout)
    vin = interp(vin,lons,lats,lonsout,latsout)
    if preserve_magnitude:
        # compute original magnitude.
        mag = pylab.sqrt(uin**2+vin**2)
    rad2dg = 180./math.pi
    tiny = 1.e-5
    delta = 0.1
    coslats = pylab.cos(latsout/rad2dg)
    # use dx/dlongitude, dx/dlatitude, dy/dlongitude and dy/dlatitude
    # to transform vector to map projection coordinates.
    # dlongitude is delta degrees at equator, dlatitude is delta degrees.
    xn,yn = self(lonsout,pylab.where(latsout+delta<90.,latsout+delta,latsout))
    # at poles, derivs w/respect to longitude will be zero.
    lonse = pylab.where(coslats>tiny,lonsout+(delta/coslats),lonsout)
    xe,ye = self(lonse,latsout)
    uout = uin*(xe-x)*(coslats/delta) + vin*(xn-x)/delta
    vout = uin*(ye-y)*(coslats/delta) + vin*(yn-y)/delta
    # make sure uout, vout not too small (quiver will raise
    # an exception when trying to rescale vectors).
    uout = pylab.where(pylab.fabs(uout)<tiny,tiny,uout)
    vout = pylab.where(pylab.fabs(vout)<tiny,tiny,vout)
    # fix units.
    if self.projection != 'cyl':
        uout = uout*rad2dg/self.rsphere
        vout = vout*rad2dg/self.rsphere
    # rescale magnitude.
    if preserve_magnitude:
        magout = pylab.sqrt(uout**2+vout**2)
        uout = uout*mag/magout
        vout = vout*mag/magout
    if returnxy:
        return uout,vout,x,y
    else:
        return uout,vout
def phiPrime(self,t):
    """
    derivative of the double exponential transform
    """
    if N.isscalar(t):
        t = M.array([t])
    return N.piecewise(t,[M.fabs(t)>5.0],[self.phiPrime1,self.phiPrime2])
def maximize_Krzanowski_Lai_index(self):
    # Krzanowski-Lai index
    self.W = pl.zeros(len(self.Kvals))
    p = self._nfeat
    for j, K in enumerate(self.Kvals):
        if self.verbose:
            print(f"Running with K={K} clusters")
        self.clusters = AgglomerativeClustering(
            n_clusters=K,
            affinity="precomputed",
            linkage="average",
            connectivity=self.connectivity,
        )
        self.clusters.fit_predict(self._Affinity)
        # estimate WCSS for the samples
        self.W[j] = self.get_WCSS(K, self.clusters.labels_, self._distance_matr)

    # see eq. 3.1 of Krzanowski and Lai 1988 Biometrics
    DIFF = pl.array([
        self.Kvals[1:],
        (self.W[:-1] * self.Kvals[:-1]**(2 / p)) - (self.W[1:] * self.Kvals[1:]**(2 / p)),
    ])
    # for k=1, the KL index is undefined
    self.KL = pl.array(
        [self.Kvals[1:-1], pl.fabs(DIFF[1, :-1] / DIFF[1, 1:])])  # see eq. 3.2
    maxindex = self.KL[1, :].argmax()
    return pl.int_(self.KL[0, maxindex])
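# Standalone illustration (not from the original source) of the quantities the
# method above computes: DIFF(K) = (K-1)^(2/p) * W(K-1) - K^(2/p) * W(K) and
# KL(K) = |DIFF(K) / DIFF(K+1)| (Krzanowski & Lai 1988, eqs. 3.1-3.2), run on a
# toy within-cluster-sum-of-squares curve instead of the clustering fit above.
import pylab as pl

Kvals = pl.arange(1, 7)
W = pl.array([100., 40., 20., 15., 12., 10.])   # toy WCSS for K = 1..6
p = 3                                           # toy number of features
DIFF = (W[:-1] * Kvals[:-1]**(2 / p)) - (W[1:] * Kvals[1:]**(2 / p))
KL = pl.fabs(DIFF[:-1] / DIFF[1:])              # defined for K = 2..5
print(Kvals[1:-1][KL.argmax()])                 # K at the sharpest elbow (3 for this toy curve)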
def phi(self,t):
    """
    double exponential transformation
    """
    if N.isscalar(t):
        t = M.array([t])
    return N.piecewise(t,[M.fabs(t)>5.0],[self.phi1,self.phi2])
def statistical_compatibility(x, y):
    mu1 = x[0]
    sigma1 = x[1]
    mu2 = y[0]
    sigma2 = y[1]
    significance = pl.fabs(mu1 - mu2) / pl.sqrt(sigma1**2 + sigma2**2)
    return significance
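# Usage sketch (not from the original source): each argument is a
# (value, uncertainty) pair; the result is the difference between the two
# values in units of the combined standard deviation.  Assumes pl is the
# pylab module used above.
print(statistical_compatibility((10.0, 0.5), (11.0, 0.5)))   # ~1.41 sigma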
def interpPoly(xin,yin,n,x):
    """
    Polynomial interpolation of degree n.  yin = f(xin) is the interpolated
    function; the output is y = f(x) and an estimate of the error, dy.
    """
    nmax = 20
    c = 0.*M.zeros(nmax)
    d = 0.*M.zeros(nmax)
    ns=1
    dif=M.fabs(x-xin[1])
    for i in range(0,n):
        dift=M.fabs(x-xin[i])
        if dift < dif:
            ns=i
            dif=dift
        c[i]=yin[i]
        d[i]=yin[i]

    y=yin[ns]
    ns=ns-1
    dy=0. # error estimate; also what gets returned if the routine aborts below
    for m in range(1,n):
        for i in range(0,n-m):
            ho=xin[i]-x
            hp=xin[i+m]-x
            w=c[i+1]-d[i]
            den=ho-hp
            if den == 0.:
                print 'interpPoly failed!'
                return y,dy
            den=w/den
            d[i]=hp*den
            c[i]=ho*den
        if (2*ns < n-m):
            dy=c[ns+1]
        else:
            dy=d[ns]
            ns=ns-1
        y=y+dy
    return y,dy
def mStar(m,nu):
    """
    Returns M* based on an array of nu(M)'s.  M* is defined to be the mass
    at which nu(M) = 1.  Used for concentration distribution.
    """
    closest = N.where(nu < 1.)[0][-1] #nu increases with M
    logmstar = M.log(m[closest]) + M.log(m[closest+1]/m[closest])/M.log(nu[closest+1]/nu[closest])*\
               M.fabs(M.log(nu[closest]))
    return M.exp(logmstar)
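# Usage sketch (not from the original source): a toy mass array and a nu(M)
# curve crossing 1 between the 2nd and 3rd entries, assuming M and N are the
# pylab/numpy-style modules imported at the top of this file.
m_toy = N.array([1e12, 1e13, 1e14, 1e15])
nu_toy = N.array([0.5, 0.8, 1.3, 2.1])
print(mStar(m_toy, nu_toy))   # log-interpolated M*, roughly 3e13 here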
def smooth_up(self, input_signal, thresh_window, end_smoothing):
    """Generate the fast Fourier transform of a signal and smooth it.

    Params :
        input_signal : audio signal
        thresh_window : relative to the size of the windows
        end_smoothing : relative to the length of the output signal

    Returns :
        fast Fourier transform of the audio signal applied to a specific
        domain of frequencies
    """
    windowed_fft = build_fft(input_signal, self._filter_coefficients, thresh_window)
    return fft_smoothing(fabs(windowed_fft), end_smoothing)
def testjs(self):
    sf = SpecialFunctions()
    self.failIf(sf.j0(0.1) != 0.99833416646828155 or
                sf.j0(0.00001) != 0.99999999998333333)
    self.failIf(sf.j1(0.1) != 0.033300011902557269 or
                sf.j1(0.00001) != 3.3333333333333337e-06)
    self.failIf(sf.j2(0.1) != 0.00066619060838490896 or
                sf.j2(0.00001) != 6.6666666666666679e-12)
    self.failIf(M.fabs(sf.j0(M.array([0.1,0.00001])) -
                       M.array([0.99833417, 1.])).max() > 10e-8)
    self.failIf(M.fabs(sf.j1(M.array([0.1,0.00001])) -
                       M.array([3.33000119e-02, 3.33333333e-06])).max() > 10e-8)
    self.failIf(M.fabs(sf.j2(M.array([0.1,0.00001])) -
                       M.array([6.66190608e-04, 6.66666667e-12])).max() > 10e-8)
def cr_integrate(wavelength, filter_wavelength, filter_pass, z, matrix):
    total = 0.0
    for ip in range(len(wavelength) - 1):
        mw = (wavelength[ip + 1] + wavelength[ip]) * 0.5 * (1.0 + z)
        il = locate(filter_wavelength, mw)
        if il < (len(filter_wavelength) - 1) and il >= 0:
            ilp1 = il + 1
            sl = (mw - filter_wavelength[il]) / (filter_wavelength[ilp1] - filter_wavelength[il])
            filt = filter_pass[il] + sl * (filter_pass[ilp1] - filter_pass[il])
            dl = pylab.fabs(wavelength[ip + 1] - wavelength[ip])
            total += mw * filt * dl * matrix[ip]
    return total
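# Usage sketch (not from the original source): a flat spectrum pushed through a
# triangular passband at z=0.  Assumes locate is this module's usual bisection
# lookup (returning il with filter_wavelength[il] <= mw < filter_wavelength[il+1],
# or an out-of-range index when mw falls outside the filter).
wavelength = pylab.linspace(4000.0, 6000.0, 201)            # Angstroms, 10 A bins
matrix = pylab.ones(len(wavelength))                        # flat flux per bin
filter_wavelength = pylab.array([4900.0, 5000.0, 5100.0])
filter_pass = pylab.array([0.0, 1.0, 0.0])                  # triangular passband
print(cr_integrate(wavelength, filter_wavelength, filter_pass, 0.0, matrix))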
def adaptIntPlot(f,a,b):
    """
    Adaptive (doubling partition) integration.
    Minimizes function evaluations at the expense of some tedious
    array manipulations.
    """
    maxiter = 20
    miniter = 5
    tolerance = 0.1
    maxnx = 2**maxiter
    minnx = 2**miniter
    x = 0.*M.zeros(maxnx)
    dx = (b-a)/2.#**minsteps
    nx = 2
    x[0] = a
    x[1] = a+dx
    integral = M.sum(f(x[1:2]))*dx # 1 so we don't include the first endpt
    dx /= 2.
    newintegral = integral/2. + M.sum(f(x[:nx]+dx))*dx
    for i in range(nx-1,-1,-1):
        x[2*i] = x[i]
        x[2*i+1] = x[i] + dx
    nx *= 2
    keepgoing = 1
    while keepgoing == 1:
        integral = newintegral
        dx /= 2.
        eff = f(x[:nx]+dx)
        M.plot(x[:nx]+dx,f(x[:nx]+dx))
        newintegral = integral/2. + M.sum(eff)*dx #M.sum(f(x[:nx]+dx))*dx
        print newintegral*nx/(nx-1)
        for i in range(nx-1,-1,-1):
            x[2*i] = x[i]
            x[2*i+1] = x[i] + dx
        nx *= 2
        keepgoing = 0
        if integral*newintegral > 0.:
            if ((M.fabs(M.log(integral*(nx/2)/(nx/2-1)/(newintegral*nx/(nx-1)))) >\
                 tolerance) and (nx < maxnx/2)) or (nx < minnx):
                keepgoing = 1
        elif integral*newintegral == 0.:
            print "Hmmm, we have a zero integral here. Assuming convergence."
        else:
            keepgoing = 1

    M.show()
    print nx,
    if nx == maxnx/2:
        print 'No convergence in utils.adaptInt!'
    return newintegral*nx/(nx-1)
def _get_close_pixels(center, radius, nside, additional_mask=None):
    npix = healpy.nside2npix(nside)
    indices = pylab.arange(npix)
    thetas, lons = healpy.pix2ang(nside, indices)
    lats = pylab.pi/2.0 - thetas
    distances = coordinates.angular_distance(lons, lats, center[0], center[1])
    mask = distances > radius
    if additional_mask is not None:
        mask |= additional_mask
    close_indices = indices[~mask]
    close_dxs = pylab.fabs(lons[close_indices] - center[0]) * pylab.cos(lats[close_indices])
    close_dxs = (close_dxs + pylab.pi) % (2.0*pylab.pi) - pylab.pi
    close_dys = lats[close_indices] - center[1]
    return close_indices, close_dxs, close_dys
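# Usage sketch (not from the original source): pixels within 5 degrees of
# (lon, lat) = (0, 0) radians on an nside=64 map.  Assumes healpy is available
# and that coordinates.angular_distance is this package's own great-circle
# distance helper taking longitudes/latitudes in radians.
indices, dxs, dys = _get_close_pixels((0.0, 0.0), pylab.radians(5.0), 64)
print(len(indices))   # number of pixels inside the 5-degree cap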
def fillcontinents(self,color=0.8):
    """
 Fill continents.

 color - color to fill continents (default gray).
    """
    # get current axes instance.
    ax = pylab.gca()
    # define corners of map domain.
    p1 = (self.llcrnrx,self.llcrnry); p2 = (self.urcrnrx,self.urcrnry)
    p3 = (self.llcrnrx,self.urcrnry); p4 = (self.urcrnrx,self.llcrnry)
    for x,y in self.coastpolygons:
        xa = pylab.array(x,'f')
        ya = pylab.array(y,'f')
        # clip to map domain.
        xa = pylab.clip(xa, self.xmin, self.xmax)
        ya = pylab.clip(ya, self.ymin, self.ymax)
        # check to see if all four corners of domain in polygon (if so,
        # don't draw since it will just fill in the whole map).
        delx = 10; dely = 10
        if self.projection in ['cyl']:
            delx = 0.1
            dely = 0.1
        test1 = pylab.fabs(xa-self.xmax) < delx
        test2 = pylab.fabs(xa-self.xmin) < delx
        test3 = pylab.fabs(ya-self.ymax) < dely
        test4 = pylab.fabs(ya-self.ymin) < dely
        hasp1 = sum(test1*test3)
        hasp2 = sum(test2*test3)
        hasp4 = sum(test2*test4)
        hasp3 = sum(test1*test4)
        if not hasp1 or not hasp2 or not hasp3 or not hasp4:
            xy = zip(xa.tolist(),ya.tolist())
            poly = Polygon(xy,facecolor=color,edgecolor=color,linewidth=0)
            ax.add_patch(poly)
    # set axes limits to fit map region.
    self.set_axes_limits()
def checkFlightDirection(sample):
    path = getFlightPath(sample)
    if path[0][0] >= 0:
        return False  # May not start behind marker
    if path[-1][0] >= 0:
        return False  # May not end behind marker
    direction = path[-1] - path[0]
    if direction[0] > 0:
        projectedYDeviation = path[0][1] - direction[1] * path[0][0] / direction[0]
        return pl.fabs(projectedYDeviation) < 0.5  # Only if aiming at +-50 cm area around marker
    return False
def expdecayfit(self,xmin=None,xmax=None):
    spect=self.copyxy()
    spect.pick(xmin,xmax)
    x=spect['x']
    y=spect['y']
    I0=y[0]-y[-1]
    tau=spect.nthmoment(2)**0.5
    baseline=mean(y)
    paras0=pylab.array([I0,tau,baseline])
    paras=scipy.optimize.fmin(self.expdecayfit_chi2,paras0,args=(x,y))
    I0=paras[0]
    tau=paras[1]
    baseline=paras[2]
    yfit=I0*exp(-x/tau)+baseline
    check=numpy.core.maximum(numpy.core.fabs(y),numpy.core.fabs(yfit))
    spect.plot()
    pylab.hold(True)
    pylab.plot(x,yfit,'r')
    pylab.hold(False)
    #pylab.show()
    #chi2=self.expdecayfit_chi2(paras,x,y)
    #dchi2dtau=(2*(y-yfit)/check*I0*exp(-x/tau)*x/tau**2).sum()
    #dchi2dtau=(2*(y-yfit)*I0*exp(-x/tau)*x/tau**2).sum()
    #tauerror=pylab.fabs(chi2/dchi2dtau)
    #check=1
    chi2I0I0=(2*exp(-2*x/tau)/check).sum()
    chi2I0tau=(2*(yfit-y)*exp(-x/tau)*x/tau**2/check
               +2*I0*exp(-2*x/tau)*x/tau**2/check).sum()
    chi2tautau=(2*I0*exp(-2*x/tau)*x**2/tau**4/check
                +2*(yfit-y)*I0*exp(-x/tau)*x**2/tau**4/check
                -4*(yfit-y)*I0*exp(-x/tau)*x/tau**2/check).sum()
    errormatrix=pylab.array([[chi2I0I0,chi2I0tau],[chi2I0tau,chi2tautau]])
    #print 'errormatrix',errormatrix
    inverr=scipy.linalg.inv(errormatrix)
    #print 'inverr',inverr
    tauerror=(pylab.fabs(inverr[1][1]))**0.5
    print 'tauerror',tauerror
    return (paras,tauerror)
#ax2 = axs[0,1]
#ax3 = axs[1,0]
#ax4 = axs[1,1]
ax1 = plt.subplot(2, 2, 1)
ax2 = plt.subplot(2, 2, 2)
ax3 = plt.subplot(2, 2, 3)
ax4 = plt.subplot(2, 2, 4)

for ant in range(0, N_ant):
    tile = PX_lsq[ant][0]
    if ant == 0:
        print(ant, tile)
    val = max(fabs(PX_lsq[ant][chan_sel]))
    if fabs(val - 1.0) < thresh:
        ax1.plot(freq[freq_idx], PX_lsq[ant][chan_sel], '-b.')
    else:
        ax1.plot(freq[freq_idx], PX_lsq[ant][chan_sel], '-c.',
                 label='ID=%3d, max=%f (flag %3d?)' % (tile, val, tile - 1))
        print('Possible PP flag: ID=%3d, max=%f (flag %3d?)' % (tile, val, tile - 1))
    val = max(fabs(PY_lsq[ant][chan_sel]))
    if val < thresh or args.phases:
        ax2.plot(freq[freq_idx], PY_lsq[ant][chan_sel], '-b.')
    else:
def hypleg(x,y=None):
    if y is None and isinstance(x,p.ndarray):
        x,y = x
    return p.sqrt(p.fabs(p.norm(x)**2-p.norm(y)**2))
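# Usage sketch (not from the original source): the leg of a right triangle with
# hypotenuse 5 and other leg 3.  Assumes p is the pylab namespace used above and
# that p.norm resolves to a Euclidean norm (so scalars pass through unchanged).
print(hypleg(5.0, 3.0))               # -> 4.0
print(hypleg(p.array([5.0, 3.0])))    # same call with the pair packed in an array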
def cisiarr(xarr):
    """
    Cosine and sine integrals Ci(xarr) and Si(xarr) for arrays.
    Uses a Numerical Recipes algorithm.
    """
    EPS=6e-8
    EULER=.57721566
    MAXIT=101
    PIBY2=1.5707963
    FPMIN=1.e-30
    TMIN=2.

    ciarr = xarr*0.
    siarr = xarr*0.
    for el in range(len(xarr)):
        x = xarr[el]
        t=M.fabs(x)
        if t == 0.:
            si=0.
        else:
            if t > TMIN:
                b=1.+t*1j
                c=1./FPMIN
                d=1./b
                h=d
                for i in range(2,MAXIT):
                    a=-(i-1)**2
                    b += 2.
                    d=1./(a*d+b)
                    c=b+a/c
                    dell=c*d
                    h *= dell
                    if (M.fabs(dell.real - 1.) + M.fabs(dell.imag)) < EPS:
                        i = 0
                        break
                if i > 0:
                    print 'Continued fraction method failed in cisiarr'
                h *= (M.cos(t)-M.sin(t)*1j)
                ci=-h.real
                si=PIBY2+h.imag
            else:
                if t < M.sqrt(FPMIN):
                    sumc=0.
                    sums=t
                else:
                    summy=0.
                    sums=0.
                    sumc=0.
                    sign=1.
                    fact=1.
                    odd=1
                    for k in range(1,MAXIT):
                        fact *= t/k
                        term=fact/k
                        summy += sign*term
                        err=term/M.fabs(summy)
                        if odd:
                            sign=-sign
                            sums=summy
                            summy=sumc
                        else:
                            sumc=summy
                            summy=sums
                        if err < EPS:
                            k = 0
                            break
                        odd=1-odd
                    if k > 0:
                        print "Too many iterations in cisiarr!"
                si=sums
                ci=sumc+M.log(t)+EULER
        if x < 0.:
            si=-si
        ciarr[el] = ci
        siarr[el] = si
    return ciarr,siarr
def run(self,c): # c is a camb object
    self.z = c.cp.transfer_redshift[0]
    self.dlog10m = 1./self.p.massdivsperdex
    self.dlogm = self.dlog10m * M.log(10.)
    if self.p.lomass == 0.:
        self.p.lomass = (4.*M.pi/3.)*c.cp.omega_cdm*(2.*M.pi/c.k[-1])**3

    self.m_pad = self.p.lomass*\
        M.exp(M.arange(-self.dlogm,M.log(self.p.himass/self.p.lomass)+self.dlogm*2,
                       self.dlogm))
    self.m = self.m_pad[1:-2]
    if self.p.integrateto == 0.:
        self.integratetoindex = len(self.m)
    else:
        self.integratetoindex = N.where(self.m < self.p.integrateto)[0][-1]

    ps = pt.PowerSpectrum(c.cp)
    if self.p.delta == 0.:
        self.delta = ps.deltac(self.z)
    else:
        self.delta = self.p.delta
    print 'delta = ',self.delta
    self.deltaz0 = ps.deltac(0.) #the ~1.67 factor, without redshift correction
    sigma_pad = M.sqrt(pt.sigma2fromPk(c,massfn.chimpRadius(self.m_pad))) * \
        ps.d1(0.)/ps.d1(self.z) # calculate sigma with ps normalized to z=0
    self.nu_pad = self.delta/sigma_pad
    self.sigma = sigma_pad[1:-2]
    self.nu = self.nu_pad[1:-2]
    self.b = getHaloBiasCoef(self)

    massfn.getMassFunction(self,c) # sets h.nmz

    bint0 = generalIntOverMassFn(0,1,1.,self,whichp='mm')
    bint1 = generalIntOverMassFn(1,1,1.,self,whichp='mm')
    bint2 = generalIntOverMassFn(2,1,1.,self,whichp='mm')
    #test 1st,2nd-order bias. bint0,1 should be ~1, bint2 should be ~0
    if (M.fabs(M.log(bint0)) > 0.01) + (M.fabs(M.log(bint1)) > 0.01) + (M.fabs(bint2) > 0.01):
        print 'Warning! (bint0,1,2) = ',bint0,bint1,bint2
        print 'Should be about (1,1,0). If worried, increase range of mass integration,'
        print 'mass bins per dex, or extrapolate c.pk (which affects sigma(m))'

    self.mstar = mStar(self.m,self.nu)
    self.logcbar = getLogcbar(self)

    #self.k = c.k[self.p.startki::self.p.strideki]
    self.k = self.p.k*1.

    if self.p.use_sea_2h: #Use nonlinear power spectrum for 2h term
        # not recently tested
        cp_cnl = copy.copy(c.cp)
        cp_cnl['do_nonlinear'] = 1
        cnl = pt.Camb(cambParam = cp_cnl)
        cnl.run()
        self.pk = M.exp(utils.splineIntLinExt(M.log(cnl.pk),M.log(cnl.k),M.log(self.k)))
    else: #Use linear power spectrum for 2h term
        self.pk = c.pkInterp(self.k)

    if self.p.dofthp == 1:
        self.fthp = deltah(self)

    # Convert Kravtsov HOD quantities to internal units (chimps, etc.)
    self.k_mmin = massfn.msun2chimp(self.p.k_mmin_msun,c)
    self.k_m1 = self.k_mmin * self.p.k_m1overmmin

    # for the non-Kravtsov HOD, for some reason, we use units of msun/h
    self.mcut = massfn.msunh2chimp(self.p.mcut_msunh,c)
    self.m11 = massfn.msunh2chimp(self.p.m11_msunh,c)
    self.m13 = massfn.msunh2chimp(self.p.m13_msunh,c)
    self.logsqrtfact = M.log(M.sqrt(self.m13/self.m11))

    self.ngalbar = generalIntOverMassFn(0,1, 1.,self,whichp='gg')
def getHaloTrispec(c,h, startki = 0, endki = 0, strideki = 1, adder = 0.,
                   highprecisionthresh = 0.03):
    """
    Calculates the halo model trispectrum T(k1,-k1,k2,-k2),
    as in Cooray & Hu (2001).

    adder: something that will be added later, e.g. gaussian part of the
           covariance.  In there for precision checks.

    Should work for galaxies, too, but shot noise isn't implemented.
    Also, for some HOD parameters, may expose PT trispectrum on scales
    where it may not work.
    """
    #p = c.cp
    tribi = pt.TriBiSpectrumFromCamb(c)
    g = utils.HGQ(5)

    # Could change a bunch of these to scalars
    i11 = 0.*h.k
    i21 = 0.*h.k
    i02 = 0.*h.k
    pk1plusk3_perp = 0.*h.k
    t3hB_perp = 0.*h.k
    t4hT_perp = 0.*h.k
    dsq4hnoT = 0.*h.k
    dsq4hshouldbe = 0.*h.k
    dsq1h = 0.*h.k
    dsq2h = 0.*h.k
    dsq2h31 = 0.*h.k
    dsq3h = 0.*h.k
    dsq4h = 0.*h.k
    qsq = 0.*h.k

    i04 = M.outer(0.*h.k,0.*h.k)
    i12 = M.outer(0.*h.k,0.*h.k)
    i13_112 = M.outer(0.*h.k,0.*h.k)
    i13_122 = M.outer(0.*h.k,0.*h.k)
    i22 = M.outer(0.*h.k,0.*h.k)
    i114 = M.outer(0.*h.k,0.*h.k)
    i1114 = M.outer(0.*h.k,0.*h.k)
    pk1plusk3 = M.outer(0.*h.k,0.*h.k)
    t2h31 = M.outer(0.*h.k,0.*h.k)
    t3hnoB = M.outer(0.*h.k,0.*h.k)
    t3hB = M.outer(0.*h.k,0.*h.k)
    t4hnoT = M.outer(0.*h.k,0.*h.k)
    t4hT = M.outer(0.*h.k,0.*h.k)
    b10 = M.outer(0.*h.k,0.*h.k)
    t10 = M.outer(0.*h.k,0.*h.k)

    for k1 in range(len(h.k)):
        i11[k1] = intOverMassFn(1,1, [k1],h)
        i21[k1] = intOverMassFn(2,1, [k1],h)
        i02[k1] = intOverMassFn(0,2, [k1,k1],h)

    if endki == 0:
        endki = len(h.k)

    for k1 in range(startki, endki, strideki):
        for k3 in range(k1,len(h.k)):
        #for k3 in range(k1,endki,strideki):
            i04[k1,k3] = intOverMassFn(0,4, [k1,k1,k3,k3], h)
            i13_112[k1,k3] = intOverMassFn(1,3, [k1,k1,k3], h)
            i13_122[k1,k3] = i13_112[k3,k1]
            i12[k1,k3] = intOverMassFn(1,2, [k1,k3], h)
            i22[k1,k3] = intOverMassFn(2,2, [k1,k3], h)

            t2h31[k1,k3] = 2.*(h.pk[k1]*i13_122[k1,k3]*i11[k1] + \
                               h.pk[k3]*i13_112[k1,k3]*i11[k3])
            t3hnoB[k1,k3] = (i11[k1]*h.pk[k1])**2 * i22[k3,k3] + \
                            (i11[k3]*h.pk[k3])**2 * i22[k1,k1] + \
                            4.*(i11[k1]*h.pk[k1])*(i11[k3]*h.pk[k3])*i22[k1,k3]
            t4hnoT[k1,k3] = 2.*i11[k1]*i11[k3]*h.pk[k1]*h.pk[k3] *\
                            (i21[k1]*i11[k3]*h.pk[k3] + \
                             i21[k3]*i11[k1]*h.pk[k1])

            # First Romberg-integrate explicitly-angular-averaged things to low precision
            pk1plusk3[k1,k3] = utils.openRomb(\
                lambda cth:c.pkInterp(M.sqrt(h.k[k1]**2 + h.k[k3]**2 + \
                                             2.*h.k[k1]*h.k[k3]*cth)), -1.,1.,eps=0.3,k=3)/2.
            b10[k1,k3] = utils.openRomb(lambda cth:tribi.b\
                                        (h.k[k1],h.k[k3], cth, c),-1.,1.,eps=0.3,k=3)/2.
            t3hB[k1,k3] = 4. * b10[k1,k3] * i12[k1,k3]*i11[k1]*i11[k3]

            #if k1 == k3:
            #t10[k1,k3] = 32.*h.pk[k1]**2*utils.openRomb(lambda cth: (3.+10*cth)**2*c.pkInterp(h.pk[k1]*M.sqrt(2.*(1-cth))),-1.,1.,eps = 0.3,k=2)/2. - 11./378.*h.pk[k1]**3
            # could change to this if we wanted to; quicker, but less uniform
            t10[k1,k3] = utils.openRomb(lambda cth:tribi.tk1mk1k2mk2_array\
                                        (h.k[k1],h.k[k3], cth, c,0),-1.,1.,eps=0.3,k=3)/2.
            t4hT[k1,k3] = t10[k1,k3] * i11[k1]**2 * i11[k3]**2

            tentativetotal = M.fabs(i04[k1,k3]+2*pk1plusk3[k1,k3]*i12[k1,k3]+t2h31[k1,k3]+\
                                    t3hnoB[k1,k3]+t3hB[k1,k3]+t4hnoT[k1,k3]+t4hT[k1,k3] +\
                                    adder[k1,k3])
            if (adder[k1,k3] != 0.):
                print 'adder = ',adder[k1,k3]

            #calculate Romberg-integrated things to high precision, if they are >1/2 of total
            if M.fabs(2*pk1plusk3[k1,k3]*i12[k1,k3]) > highprecisionthresh*tentativetotal:
                print 't2h22: ',pk1plusk3[k1,k3],
                pk1plusk3[k1,k3] = utils.openRomb(
                    lambda cth:c.pkInterp(M.sqrt(h.k[k1]**2 + h.k[k3]**2 + \
                                                 2.*h.k[k1]*h.k[k3]*cth)), -1.,1.,eps=0.03,k=7,jmax=18)/2.
                print pk1plusk3[k1,k3]
            if M.fabs(t3hB[k1,k3]) > highprecisionthresh*tentativetotal:
                print 't3hB: ',b10[k1,k3],
                b10[k1,k3] = utils.openRomb(lambda cth:tribi.b\
                                            (h.k[k1],h.k[k3], cth, c),-1.,1.,eps=0.01,k=5,jmax=30)/2.
                print b10[k1,k3]
                t3hB[k1,k3] = 4. * b10[k1,k3] * i12[k1,k3]*i11[k1]*i11[k3]
            if M.fabs(t4hT[k1,k3]) > highprecisionthresh*tentativetotal:
                print 't4hT:', t10[k1,k3],
                t10[k1,k3] = utils.openRomb(lambda cth:tribi.tk1mk1k2mk2_array\
                                            (h.k[k1],h.k[k3], cth, c,0),-1.,1.,eps=0.01,k=5)/2.
                print t10[k1,k3]
                t4hT[k1,k3] = t10[k1,k3] * i11[k1]**2 * i11[k3]**2

            nrm = 2.*h.pk[k1]*h.pk[k3]
            #output some stuff at each entry in the CovMat
            print k1,k3,i04[k1,k3]/nrm, (2.*pk1plusk3[k1,k3]*i12[k1,k3]+t2h31[k1,k3])/nrm, \
                  (t3hnoB[k1,k3]+t3hB[k1,k3])/nrm, t4hT[k1,k3]/nrm, t4hnoT[k1,k3]/nrm, \
                  (t4hT[k1,k3]+t4hnoT[k1,k3])/nrm, \
                  (i04[k1,k3]+ 2.*pk1plusk3[k1,k3]*i12[k1,k3]+t2h31[k1,k3]+ \
                   t3hnoB[k1,k3]+t3hB[k1,k3]+t4hT[k1,k3]+t4hnoT[k1,k3])/nrm

        pk1plusk3_perp[k1] = c.pkInterp(M.sqrt(2.)*h.k[k1])
        t3hB_perp[k1] = 4.*tribi.b(h.k[k1],h.k[k1],0.,c) *\
                        i12[k1,k1]*i11[k1]**2
        squaretri = tribi.tk1mk1k2mk2(h.k[k1],h.k[k1], 0., c,0)
        t4hT_perp[k1] = i11[k1]**4 * squaretri
        qsq[k1] = squaretri/(4.*h.pk[k1]**2 * (2.*pk1plusk3_perp[k1] +\
                                               h.pk[k1]))
        s = pk1plusk3_perp[k1]/h.pk[k1]
        dsq4hshouldbe[k1] = 0.085*(4.*h.pk[k1]**2 * \
                                   (2.*pk1plusk3_perp[k1] + h.pk[k1]))

        dsq1h[k1] = i04[k1,k1]
        dsq2h[k1] = 2.*pk1plusk3_perp[k1]*i12[k1,k1]**2. + t2h31[k1,k1]
        dsq2h31[k1] = t2h31[k1,k1]
        dsq3h[k1] = t3hnoB[k1,k1] + t3hB_perp[k1]
        dsq4hnoT[k1] = 4.*(i11[k1]*h.pk[k1])**3*i21[k1]
        dsq4h[k1] = t4hnoT[k1,k1] + t4hT_perp[k1]

    dsq = dsq1h + dsq2h + dsq3h + dsq4h
    df = h.k**3/(2.*M.pi**2)
    ot = 1./3.

    # These are debugging files; they output the square-configuration reduced trispectrum.
    #M.save(h.prefix+'dsq1h.dat',M.transpose([h.k,dsq1h]))
    #M.save(h.prefix+'dsq2h.dat',M.transpose([h.k,dsq2h]))
    #M.save(h.prefix+'dsq2h31.dat',M.transpose([h.k,dsq2h31]))
    #M.save(h.prefix+'dsq3h.dat',M.transpose([h.k,dsq3h]))
    #M.save(h.prefix+'dsq4h.dat',M.transpose([h.k,dsq4h]))

    rat = M.fabs(dsq4hnoT/t4hT_perp)

    t1h = i04
    t2h22 = 2.*pk1plusk3*i12**2
    for k1 in range(len(h.k)):
        for k3 in range(k1+1,len(h.k)):
            t10[k3,k1] = t10[k1,k3]
            t1h[k3,k1] = t1h[k1,k3]
            t2h22[k3,k1] = t2h22[k1,k3]
            t2h31[k3,k1] = t2h31[k1,k3]
            t3hnoB[k3,k1] = t3hnoB[k1,k3]
            t3hB[k3,k1] = t3hB[k1,k3]
            t4hnoT[k3,k1] = t4hnoT[k1,k3]
            t4hT[k3,k1] = t4hT[k1,k3]

    t2h = t2h22 + t2h31
    t3h = t3hnoB + t3hB
    t4h = t4hnoT + t4hT
    ans = t1h+t2h+t3h+t4h

    if h.p.outputalltterms == 0:
        return ans
    elif h.p.outputalltterms == 1:
        return ans,t10,t1h,t2h,t3h,t4h
    elif h.p.outputalltterms == 2:
        return ans,t10,t1h,t2h22,t2h31,t3hB,t3hnoB,t4hT,t4hnoT
    else:
        return
def drawmeridians(self,meridians,color='k',linewidth=1., \
                  linestyle='--',dashes=[1,1],labels=[0,0,0,0],\
                  font='rm',fontsize=12):
    """
 draw meridians (longitude lines).

 meridians - list containing longitude values to draw (in degrees).
 color - color to draw meridians (default black).
 linewidth - line width for meridians (default 1.)
 linestyle - line style for meridians (default '--', i.e. dashed).
 dashes - dash pattern for meridians (default [1,1], i.e. 1 pixel on,
  1 pixel off).
 labels - list of 4 values (default [0,0,0,0]) that control whether
  meridians are labelled where they intersect the left, right, top or
  bottom of the plot. For example labels=[1,0,0,1] will cause meridians
  to be labelled where they intersect the left and bottom of the plot,
  but not the right and top. Labels are drawn using mathtext.
 font - mathtext font used for labels ('rm','tt','it' or 'cal', default 'rm').
 fontsize - font size in points for labels (default 12).
    """
    # get current axes instance.
    ax = pylab.gca()
    # don't draw meridians past latmax, always draw parallel at latmax.
    latmax = 80. # not used for cyl, merc projections.
    # offset for labels.
    yoffset = (self.urcrnry-self.llcrnry)/100./self.aspect
    xoffset = (self.urcrnrx-self.llcrnrx)/100.

    if self.projection not in ['merc','cyl']:
        lats = pylab.arange(-latmax,latmax+1).astype('f')
    else:
        lats = pylab.arange(-90,91).astype('f')
    xdelta = 0.1*(self.xmax-self.xmin)
    ydelta = 0.1*(self.ymax-self.ymin)
    for merid in meridians:
        lons = merid*pylab.ones(len(lats),'f')
        x,y = self(lons,lats)
        # remove points outside domain.
        testx = pylab.logical_and(x>=self.xmin-xdelta,x<=self.xmax+xdelta)
        x = pylab.compress(testx, x)
        y = pylab.compress(testx, y)
        testy = pylab.logical_and(y>=self.ymin-ydelta,y<=self.ymax+ydelta)
        x = pylab.compress(testy, x)
        y = pylab.compress(testy, y)
        if len(x) > 1 and len(y) > 1:
            # split into separate line segments if necessary.
            # (not necessary for mercator or cylindrical).
            xd = (x[1:]-x[0:-1])**2
            yd = (y[1:]-y[0:-1])**2
            dist = pylab.sqrt(xd+yd)
            split = dist > 500000.
            if pylab.asum(split) and self.projection not in ['merc','cyl']:
                ind = (pylab.compress(split,pylab.squeeze(split*pylab.indices(xd.shape)))+1).tolist()
                xl = []
                yl = []
                iprev = 0
                ind.append(len(xd))
                for i in ind:
                    xl.append(x[iprev:i])
                    yl.append(y[iprev:i])
                    iprev = i
            else:
                xl = [x]
                yl = [y]
            # draw each line segment.
            for x,y in zip(xl,yl):
                # skip if only a point.
                if len(x) > 1 and len(y) > 1:
                    l = Line2D(x,y,linewidth=linewidth,linestyle=linestyle)
                    l.set_color(color)
                    l.set_dashes(dashes)
                    ax.add_line(l)
    # draw labels for meridians.
    # search along edges of map to see if parallels intersect.
    # if so, find x,y location of intersection and draw a label there.
    if self.projection == 'cyl':
        dx = 0.01; dy = 0.01
    else:
        dx = 1000; dy = 1000
    for dolab,side in zip(labels,['l','r','t','b']):
        if not dolab:
            continue
        # for cyl or merc, don't draw meridians on left or right.
        if self.projection in ['cyl','merc'] and side in ['l','r']:
            continue
        if side in ['l','r']:
            nmax = int((self.ymax-self.ymin)/dy+1)
            if self.urcrnry < self.llcrnry:
                yy = self.llcrnry-dy*pylab.arange(nmax)
            else:
                yy = self.llcrnry+dy*pylab.arange(nmax)
            if side == 'l':
                lons,lats = self(self.llcrnrx*pylab.ones(yy.shape,'f'),yy,inverse=True)
            else:
                lons,lats = self(self.urcrnrx*pylab.ones(yy.shape,'f'),yy,inverse=True)
            lons = pylab.where(lons < 0, lons+360, lons)
            lons = [int(lon*10) for lon in lons.tolist()]
            lats = [int(lat*10) for lat in lats.tolist()]
        else:
            nmax = int((self.xmax-self.xmin)/dx+1)
            if self.urcrnrx < self.llcrnrx:
                xx = self.llcrnrx-dx*pylab.arange(nmax)
            else:
                xx = self.llcrnrx+dx*pylab.arange(nmax)
            if side == 'b':
                lons,lats = self(xx,self.llcrnry*pylab.ones(xx.shape,'f'),inverse=True)
            else:
                lons,lats = self(xx,self.urcrnry*pylab.ones(xx.shape,'f'),inverse=True)
            lons = pylab.where(lons < 0, lons+360, lons)
            lons = [int(lon*10) for lon in lons.tolist()]
            lats = [int(lat*10) for lat in lats.tolist()]
        for lon in meridians:
            if lon<0: lon=lon+360.
            # find index of meridian (there may be two, so
            # search from left and right).
            try:
                nl = lons.index(int(lon*10))
            except:
                nl = -1
            try:
                nr = len(lons)-lons[::-1].index(int(lon*10))-1
            except:
                nr = -1
            if lon>180:
                lonlab = r'$\%s{%g\/^{\circ}\/W}$'%(font,pylab.fabs(lon-360))
            elif lon<180 and lon != 0:
                lonlab = r'$\%s{%g\/^{\circ}\/E}$'%(font,lon)
            else:
                lonlab = r'$\%s{%g\/^{\circ}}$'%(font,lon)
            # meridians can intersect each map edge twice.
            for i,n in enumerate([nl,nr]):
                lat = lats[n]/10.
                # no meridians > latmax for projections other than merc,cyl.
                if self.projection not in ['merc','cyl'] and lat > latmax:
                    continue
                # don't bother if close to the first label.
                if i and abs(nr-nl) < 100:
                    continue
                if n >= 0:
                    if side == 'l':
                        pylab.text(self.llcrnrx-xoffset,yy[n],lonlab,horizontalalignment='right',verticalalignment='center',fontsize=fontsize)
                    elif side == 'r':
                        pylab.text(self.urcrnrx+xoffset,yy[n],lonlab,horizontalalignment='left',verticalalignment='center',fontsize=fontsize)
                    elif side == 'b':
                        pylab.text(xx[n],self.llcrnry-yoffset,lonlab,horizontalalignment='center',verticalalignment='top',fontsize=fontsize)
                    else:
                        pylab.text(xx[n],self.urcrnry+yoffset,lonlab,horizontalalignment='center',verticalalignment='bottom',fontsize=fontsize)
    # make sure axis ticks are turned off
    ax.set_xticks([])
    ax.set_yticks([])
    # set axes limits to fit map region.
    self.set_axes_limits()
a.add_collection(col, autolim=True)
trans = transforms.scale_transform(fig.dpi / transforms.Value(72.),
                                   fig.dpi / transforms.Value(72.))
col.set_transform(trans)  # the points to pixels transform
col.set_color(colors)
a.autoscale_view()
a.set_title('PolyCollection using offsets')

# 7-sided regular polygons
a = fig.add_subplot(2, 2, 3)
col = collections.RegularPolyCollection(fig.dpi, 7, sizes=P.fabs(xx) * 10,
                                        offsets=xyo, transOffset=a.transData)
a.add_collection(col, autolim=True)
trans = transforms.scale_transform(fig.dpi / transforms.Value(72.),
                                   fig.dpi / transforms.Value(72.))
col.set_transform(trans)  # the points to pixels transform
col.set_color(colors)
a.autoscale_view()
a.set_title('RegularPolyCollection using offsets')

# Simulate a series of ocean current profiles, successively
# offset by 0.1 m/s so that they form what is sometimes called
# a "waterfall" plot or a "stagger" plot.
a = fig.add_subplot(2, 2, 4)
def phi1(self,t):
    return M.fabs(t)