Example #1
	# Table quoted values from Henriques
	M200 = halos['M200CRIT']
	R200 = halos['R200CRIT']
	RA = halos['HALO_RA']
	DEC = halos['HALO_DEC']
	Z = halos['HALO_Z']
	HVD = halos['VELDISP']

	# Richnesses
	new = fits.open(data_root+'/richness_cr200_bcg/new.fits')[0].data
	newb = fits.open(data_root+'/richness_cr200_bcg/newb.fits')[0].data

	# Calculate N200 and Nspec
	from causticpy import *
	C = Caustic()

	### Run with Halo values from either Henriques or C4 algorithm
	henriq = True
	if henriq == True:
		# Halo data from Henriques

		# Good Halos
		good = np.where((halos['halo_z']>0.03))[0]

		RA = halos['halo_ra']
		DEC = halos['halo_dec']
		Z = halos['halo_z']
		RVIR = halos['r200crit']

	else:
Example #2
	def __init__(self,varib):
		self.__dict__.update(varib)
		self.C = Caustic()
Example #3
bad_cluster_names = clusters['CName'][red_num_cut]
clusters['NotMasked'][red_num_cut] = False
for name in bad_cluster_names:
    bad_inds = np.where(name == galaxies['CName'])[0]
    galaxies['NotMasked'][bad_inds] = False

clusters = clusters[clusters['NotMasked']]
galaxies = galaxies[galaxies['NotMasked']]
caustic_fitter = Caustic(h=1.,
                         Om0=0.3,
                         rlimit=4.0,
                         vlimit=3500,
                         kernal_stretch=10.0,
                         rgridmax=6.0,
                         vgridmax=5000.0,
                         cut_sample=True,
                         edge_int_remove=False,
                         gapper=gapper,
                         mirror=True,
                         inflection=False,
                         edge_perc=0.1,
                         fbr=0.65)

fbeta_masses = []
vdisps = []
for cluster in clusters:
    clustername = cluster['CName']
    clustermembers = galaxies[galaxies['CName'] == clustername]
    if len(clustermembers) < 30:
        continue
    ras = clustermembers['RAdeg']
Example #4
	def __init__(self,varib):
		# Update Dictionary with Variables and Caustic Technique
		self.__dict__.update(varib)
		self.C = Caustic()
		self.U = Universal(varib)
Example #5
class Universal(object):
	"""
	Other functions that can be used in the building and set-up of data
	"""
	
	def __init__(self,varib):
		self.__dict__.update(varib)
		self.C = Caustic()


	def build(self,r,v,en_gal_id,en_clus_id,ln_gal_id,gmags,rmags,imags,r200):
		"""
		This function builds the ensemble and individual cluster phase spaces, depending on fed parameters
		method 0 : top Ngal brightest
		method 1 : random Ngal within top Ngal*~10 brightest
		Interloper treatment done with ShiftGapper
		r : radius
		v : velocity
		en_gal_id : unique id for each ensemble galaxy
		en_clus_id : id for each galaxy relating back to its original parent halo
		ln_gal_id : unique id for each individual cluster galaxy
		rmags : SDSS r magnitude
		imags : SDSS i magnitude
		gmags : SDSS g magnitude
		halodata : 2 dimensional array, with info on halo properties
		- r200
		"""
		# Sort galaxies by r Magnitude
		bright = np.argsort(rmags)
		r,v,en_gal_id,en_clus_id,ln_gal_id,gmags,rmags,imags = r[bright],v[bright],en_gal_id[bright],en_clus_id[bright],ln_gal_id[bright],gmags[bright],rmags[bright],imags[bright]

		if self.method_num == 0:
			en_r,en_v,en_gal_id,en_clus_id,en_gmags,en_rmags,en_imags,ln_r,ln_v,ln_gal_id,ln_gmags,ln_rmags,ln_imags = self.build_method_0(r,v,en_gal_id,en_clus_id,ln_gal_id,gmags,rmags,imags,r200)

		elif self.method_num == 1:
			en_r,en_v,en_gal_id,en_clus_id,en_gmags,en_rmags,en_imags,ln_r,ln_v,ln_gal_id,ln_gmags,ln_rmags,ln_imags,samp_size = self.build_method_1(r,v,en_gal_id,en_clus_id,ln_gal_id,gmags,rmags,imags,r200)

		else:
			print 'No Build...?'

		return en_r,en_v,en_gal_id,en_clus_id,en_gmags,en_rmags,en_imags,ln_r,ln_v,ln_gal_id,ln_gmags,ln_rmags,ln_imags


	def build_method_0(self,r,v,en_gal_id,en_clus_id,ln_gal_id,gmags,rmags,imags,r200):
		'''Picking top brightest galaxies, such that there are gal_num galaxies within r200'''

		gal_num = self.gal_num

		## ------------------------------- ##
		## Build Ensemble (en => ensemble) ##
		## ------------------------------- ##
	
		# define indices of galaxies within r200
		within = np.where(r<r200)[0]
		# pick out the gal_num'th index in the list (include extra galaxies to counter the shiftgapper's possible diminishment of richness)
		if gal_num < 10:
			excess = gal_num * 3.0 / 5.0				# galaxy excess to counter shiftgapper
		else:
			excess = gal_num / 5.0
		end = within[:int(gal_num + excess + 1)][-1]		# slicing instead of indexing b/c there is a chance not enough gals exist
		# Choose Ngals within r200
		if self.init_shiftgap == True:
			excess *= 2.0				# make excess a bit larger than previously defined
			end = within[:int(gal_num + excess + 1)][-1]
			r2,v2,en_gal_id,en_clus_id,gmags2,rmags2,imags2 = self.C.shiftgapper(np.vstack([r[:end],v[:end],en_gal_id[:end],en_clus_id[:end],gmags[:end],rmags[:end],imags[:end]]).T).T # shiftgapper takes and returns the data transposed
			within = np.where(r2<r200)[0]	# re-calculate within array with new sample
			excess = gal_num / 5.0
			end = within[:int(gal_num + excess + 1)][-1]
			# Append to ensemble array
			en_r,en_v,en_gal_id,en_clus_id,en_gmags,en_rmags,en_imags = r2[:end],v2[:end],en_gal_id[:end],en_clus_id[:end],gmags2[:end],rmags2[:end],imags2[:end]
		else:
			en_r,en_v,en_gal_id,en_clus_id,en_gmags,en_rmags,en_imags = r[0:end],v[0:end],en_gal_id[:end],en_clus_id[:end],gmags[0:end],rmags[0:end],imags[0:end]


		## ----------------------------------------- ##
		## Build Line of Sight (ln => line of sight) ##
		## ----------------------------------------- ##

		# define indices of galaxies within r200
		within = np.where(r<r200)[0]
		# pick out the gal_num'th index in the list (include extra galaxies to counter the shiftgapper's possible diminishment of richness)
		if gal_num < 10:
			excess = gal_num * 3.0 / 5.0				# galaxy excess to counter shiftgapper
		else:
			excess = gal_num / 5.0
		end = within[:int(gal_num + excess + 1)][-1]		# slicing instead of indexing b/c there is a chance not enough gals exist
		# shiftgapper on line of sight
		try:
			r2,v2,ln_gal_id,gmags2,rmags2,imags2 = self.C.shiftgapper(np.vstack([r[:end],v[:end],ln_gal_id[:end],gmags[:end],rmags[:end],imags[:end]]).T).T
		except UnboundLocalError:	#When only a couple or no galaxies exist within RVIR
			print '-'*40
			print 'UnboundLocalError raised for shiftgapper on individual cluster....'
			print '-'*40
			r2,v2,ln_gal_id,gmags2,rmags2,imags2 = r[:end],v[:end],ln_gal_id[:end],gmags[:end],rmags[:end],imags[:end]

		# Sort by rmags
		sort = np.argsort(rmags2)
		r2,v2,ln_gal_id,gmags2,rmags2,imags2 = r2[sort],v2[sort],ln_gal_id[sort],gmags2[sort],rmags2[sort],imags2[sort]

		ln_r,ln_v,ln_gal_id,ln_gmags,ln_rmags,ln_imags = r2,v2,ln_gal_id,gmags2,rmags2,imags2

		# Done! Now we have en_r and ln_r arrays, which will either be stacked (former) or put straight into Caustic technique (latter)
		return en_r,en_v,en_gal_id,en_clus_id,en_gmags,en_rmags,en_imags,ln_r,ln_v,ln_gal_id,ln_gmags,ln_rmags,ln_imags


	def build_method_1(self,r,v,en_gal_id,en_clus_id,ln_gal_id,gmags,rmags,imags,r_crit200):
		'''Randomly choosing bright galaxies until gal_num galaxies are within r200'''

		gal_num = self.gal_num
		
		# reduce size of sample to something reasonable within magnitude limits
		sample = gal_num * 25				# arbitrary coefficient, see sites page post Apr 24th, 2013 for more info
		r,v,en_gal_id,en_clus_id,ln_gal_id,gmags,rmags,imags = r[:sample],v[:sample],en_gal_id[:sample],en_clus_id[:sample],ln_gal_id[:sample],gmags[:sample],rmags[:sample],imags[:sample]
		samp_size = len(r)				# actual size of sample (might be less than gal_num*25)
		self.samp_size = samp_size

		# create random numbered array for galaxy selection
		if gal_num < 10:				# when gal_num < 10, has trouble hitting gal_num richness inside r200
			excess = gal_num * 4.0 / 5.0
		else:
			excess = gal_num * 2.0 / 5.0

		samp_num = gal_num + excess			# sample size of randomly generated numbers, start too low on purpose, then raise in loop
		loop = True					# break condition
		while loop == True:				# see method 0 comments on variables such as 'excess', 'within' and 'end'
			for it in range(3):			# before upping sample size, try to get a good sample a few times
				rando = npr.randint(0,samp_size,samp_num)
				within = np.where(r[rando]<=r_crit200)[0]
				if len(within) >= gal_num + excess:
					loop = False
					break			# keep this draw; don't overwrite it on the next try
			if len(within) < gal_num + excess:
				samp_num += 2

		### Build Ensemble ###
		if self.init_shiftgap == True:

			r2,v2,en_gal_id,en_clus_id,gmags2,rmags2,imags2 = self.C.shiftgapper(np.vstack([r[rando],v[rando],en_gal_id[rando],en_clus_id[rando],gmags[rando],rmags[rando],imags[rando]]).T).T
			within = np.where(r2<r_crit200)[0]
			excess = gal_num / 5.0
			end = within[:int(gal_num + excess + 1)][-1]
			# Append to ensemble array
			en_r,en_v,en_gal_id,en_clus_id,en_gmags,en_rmags,en_imags = r2[:end],v2[:end],en_gal_id[:end],en_clus_id[:end],gmags2[:end],rmags2[:end],imags2[:end]
		else:
			excess = gal_num / 5.0
			end = within[:int(gal_num + excess + 1)][-1]
			en_r,en_v,en_gal_id,en_clus_id,en_gmags,en_rmags,en_imags = r[rando][:end],v[rando][:end],en_gal_id[rando][:end],en_clus_id[rando][:end],gmags[rando][:end],rmags[rando][:end],imags[rando][:end]

		### Build LOS ###
		# Set Sample
		if gal_num < 10:
			excess = gal_num * 4.0 / 5.0
		else:
			excess = gal_num * 2.0 / 5.0
		try:
			end = within[:int(gal_num + excess + 1)][-1]
			# shiftgapper
			r2,v2,ln_gal_id2,gmags2,rmags2,imags2 = self.C.shiftgapper(np.vstack([r[rando][:end],v[rando][:end],ln_gal_id[rando][:end],gmags[rando][:end],rmags[rando][:end],imags[rando][:end]]).T).T
			# sort by rmags
			sort = np.argsort(rmags2)
			r2,v2,ln_gal_id2,gmags2,rmags2,imags2 = r2[sort],v2[sort],ln_gal_id2[sort],gmags2[sort],rmags2[sort],imags2[sort]
			# feed back gal_num gals within r200
			within = np.where(r2<r_crit200)[0]
			end = within[:gal_num + 1][-1]
			richness = len(within)
		except IndexError:
			print '****RAISED INDEX ERROR on LOS Building****'
			richness = 0		

		# Make sure gal_num richness inside r200
		start_time = time.time()
		j = 0
		while richness < gal_num:
			## Ensure this loop doesn't get trapped forever
			duration = time.time() - start_time
			if duration > 30:
				print "****Duration exceeded 30 seconds in LOS Building, manually broke loop****"
				break
			##
			j += 1
			loop = True
			while loop == True:
				for it in range(3):			# use 'it' so the outer counter j isn't clobbered
					rando = npr.randint(0,samp_size,samp_num)
					within = np.where(r[rando]<=r_crit200)[0]
					if len(within) >= gal_num + excess:
						loop = False
						break
				if len(within) < gal_num + excess:
					samp_num += 2
			try:
				end = within[:int(gal_num + excess + 1)][-1]
				r2,v2,ln_gal_id2,gmags2,rmags2,imags2 = self.C.shiftgapper(np.vstack([r[rando][:end],v[rando][:end],ln_gal_id[rando][:end],gmags[rando][:end],rmags[rando][:end],imags[rando][:end]]).T).T
				within = np.where(r2<r_crit200)[0]
				end = within[:gal_num + 1][-1]
				richness = len(within)
			except IndexError:
				print '**** Raised Index Error on LOS Building****'
				richness = 0

			if j >= 100:
				print 'j went over 100'
				break

		ln_r,ln_v,ln_gal_id,ln_gmags,ln_rmags,ln_imags = r2[:end],v2[:end],ln_gal_id2[:end],gmags2[:end],rmags2[:end],imags2[:end]
		# Done! Now we have en_r and ln_r arrays (ensemble and line of sight arrays)
		
		return en_r,en_v,en_gal_id,en_clus_id,en_gmags,en_rmags,en_imags,ln_r,ln_v,ln_gal_id,ln_gmags,ln_rmags,ln_imags,samp_size


	def rand_pos(self,distance):
		'''Picks a random position for the observer a given distance away from the center'''
		theta = npr.normal(np.pi/2,np.pi/4)
		phi = npr.uniform(0,2*np.pi)
		x = np.sin(theta)*np.cos(phi)
		y = np.sin(theta)*np.sin(phi)
		z = np.cos(theta)

		unit = np.array([x,y,z])/(x**2+y**2+z**2)**(.5)
		# move the position a random 'distance' Mpc away
		return distance*unit
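
		# Note: theta ~ N(pi/2, pi/4) biases observer positions toward the x-y
		# plane rather than sampling the sphere uniformly, and `unit` is already
		# normalized, so the returned point lies exactly `distance` Mpc from the
		# origin.  Hypothetical check:
		#   np.linalg.norm(self.rand_pos(30.0))   # -> 30.0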


	def limit_gals(self,r,v,en_gal_id,en_clus_id,ln_gal_id,gmags,rmags,imags,r200):
		''' Sort data by magnitude, and eliminate values outside the phase space limits '''
		# Sort by ascending r magnitude (bright to dim)
		sorts = np.argsort(rmags)
		r = r[sorts]
		v = v[sorts]
		en_gal_id = en_gal_id[sorts]
		en_clus_id = en_clus_id[sorts]
		ln_gal_id = ln_gal_id[sorts]
		gmags = gmags[sorts]
		rmags = rmags[sorts]
		imags = imags[sorts]

		# Limit Phase Space
		sample = np.where( (r < r200*self.r_limit) & (v > -self.v_limit) & (v < self.v_limit) )[0] 
		r,v,en_gal_id,en_clus_id,ln_gal_id,gmags,rmags,imags = r[sample],v[sample],en_gal_id[sample],en_clus_id[sample],ln_gal_id[sample],gmags[sample],rmags[sample],imags[sample]
		samp_size = len(sample)

		# Eliminate galaxies w/ mag = 99.
		cut = np.where((gmags!=99)&(rmags!=99)&(imags!=99))[0]
		r,v,en_gal_id,en_clus_id,ln_gal_id,gmags,rmags,imags = r[cut],v[cut],en_gal_id[cut],en_clus_id[cut],ln_gal_id[cut],gmags[cut],rmags[cut],imags[cut]
		samp_size = len(cut)

		return r,v,en_gal_id,en_clus_id,ln_gal_id,gmags,rmags,imags,samp_size
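
		# For example, with self.r_limit = 4 and self.v_limit = 3500 (the values
		# used in the Caustic() call of Example #3), a galaxy is kept only if
		# r < 4*r200 and |v| < 3500 km/s, and any galaxy carrying a sentinel
		# magnitude of 99. is dropped afterwards.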


	def Bin_Calc(self,M200,R200,HVD):
		'''
		This function does pre-technique binning analysis
		'''
		# Choose Averaging Method
		if self.avg_meth == 'median':
			avg_method = np.median
		elif self.avg_meth == 'mean':
			avg_method = np.mean
		else:
			print 'Average Method for Bin is Mean()'
			avg_method = np.mean

		# Calculate Bin R200 and Bin HVD, use median
		BIN_M200,BIN_R200,BIN_HVD = [],[],[]
		for i in range(len(M200)/self.line_num):
			BIN_M200.append( avg_method( M200[i*self.line_num:(i+1)*self.line_num] ) )
			BIN_R200.append( avg_method( R200[i*self.line_num:(i+1)*self.line_num] ) )
			BIN_HVD.append( avg_method( HVD[i*self.line_num:(i+1)*self.line_num] ) )

		BIN_M200,BIN_R200,BIN_HVD = np.array(BIN_M200),np.array(BIN_R200),np.array(BIN_HVD)

		return BIN_M200,BIN_R200,BIN_HVD
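
		# Hypothetical example: with self.line_num = 3 and
		#   M200 = np.array([1e14,2e14,3e14,4e14,5e14,6e14]),
		# Bin_Calc under avg_meth='mean' returns BIN_M200 = [2e14, 5e14]:
		# each bin averages line_num consecutive clusters.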


	def get_3d(self,Gal_P,Gal_V,ens_gal_id,los_gal_id,stack_range,ens_num,self_stack,j):
		'''
		This function recovers the 3D positions and velocities of the galaxies in the ensemble and los phase space.
		'''

		if self_stack == True:
			# Create fully concatenated arrays to draw ensemble data from
			GPX3D,GPY3D,GPZ3D = Gal_P[j][0],Gal_P[j][1],Gal_P[j][2]
			GVX3D,GVY3D,GVZ3D = Gal_V[j][0],Gal_V[j][1],Gal_V[j][2]
		
			# Recover ensemble 3D data
			ens_gpx3d,ens_gpy3d,ens_gpz3d = GPX3D[ens_gal_id],GPY3D[ens_gal_id],GPZ3D[ens_gal_id]
			ens_gvx3d,ens_gvy3d,ens_gvz3d = GVX3D[ens_gal_id],GVY3D[ens_gal_id],GVZ3D[ens_gal_id]

			# Recover line_of_sight 3D data	
			los_gpx3d,los_gpy3d,los_gpz3d = np.array(map(lambda x: GPX3D[x],los_gal_id)),np.array(map(lambda x: GPY3D[x],los_gal_id)),np.array(map(lambda x: GPZ3D[x],los_gal_id))
			los_gvx3d,los_gvy3d,los_gvz3d = np.array(map(lambda x: GVX3D[x],los_gal_id)),np.array(map(lambda x: GVY3D[x],los_gal_id)),np.array(map(lambda x: GVZ3D[x],los_gal_id))

		else:	
			# Create fully concatenated arrays to draw ensemble data from
			[BIN_GPX3D,BIN_GPY3D,BIN_GPZ3D] = map(np.concatenate,Gal_P[j*self.line_num:(j+1)*self.line_num].T)
			[BIN_GVX3D,BIN_GVY3D,BIN_GVZ3D] = map(np.concatenate,Gal_V[j*self.line_num:(j+1)*self.line_num].T)

			# Recover ensemble 3D data	
			ens_gpx3d,ens_gpy3d,ens_gpz3d = BIN_GPX3D[ens_gal_id],BIN_GPY3D[ens_gal_id],BIN_GPZ3D[ens_gal_id]
			ens_gvx3d,ens_gvy3d,ens_gvz3d = BIN_GVX3D[ens_gal_id],BIN_GVY3D[ens_gal_id],BIN_GVZ3D[ens_gal_id]

			# Recover line_of_sight 3D data
			los_gpx3d,los_gpy3d,los_gpz3d = [],[],[]
			los_gvx3d,los_gvy3d,los_gvz3d = [],[],[]
			for i,k in zip( np.arange(j*self.line_num,(j+1)*self.line_num), np.arange(self.line_num) ):
				los_gpx3d.append(Gal_P[i][0][los_gal_id[k]])
				los_gpy3d.append(Gal_P[i][1][los_gal_id[k]])
				los_gpz3d.append(Gal_P[i][2][los_gal_id[k]])
				los_gvx3d.append(Gal_V[i][0][los_gal_id[k]])
				los_gvy3d.append(Gal_V[i][1][los_gal_id[k]])
				los_gvz3d.append(Gal_V[i][2][los_gal_id[k]])

			los_gpx3d,los_gpy3d,los_gpz3d = np.array(los_gpx3d),np.array(los_gpy3d),np.array(los_gpz3d)
			los_gvx3d,los_gvy3d,los_gvz3d = np.array(los_gvx3d),np.array(los_gvy3d),np.array(los_gvz3d)

		return np.array([ens_gpx3d,ens_gpy3d,ens_gpz3d]),np.array([ens_gvx3d,ens_gvy3d,ens_gvz3d]),np.array([los_gpx3d,los_gpy3d,los_gpz3d]),np.array([los_gvx3d,los_gvy3d,los_gvz3d])


	def line_of_sight(self,gal_p,gal_v,halo_p,halo_v):
		'''Line of Sight Calculations to mock projected data, if given 3D data'''
		# Pick Position
		new_pos = self.rand_pos(30)
		new_pos += halo_p 

		# New Halo Information
		halo_dist = ((halo_p[0]-new_pos[0])**2 + (halo_p[1]-new_pos[1])**2 + (halo_p[2]-new_pos[2])**2)**0.5
		halo_pos_unit = np.array([halo_p[0]-new_pos[0],halo_p[1]-new_pos[1],halo_p[2]-new_pos[2]]) / halo_dist
		halo_vlos = np.dot(halo_pos_unit, halo_v)

		# New Galaxy Information
		gal_p = np.array(gal_p)
		gal_v = np.array(gal_v)
		gal_dist = ((gal_p[0]-new_pos[0])**2 + (gal_p[1]-new_pos[1])**2 + (gal_p[2]-new_pos[2])**2)**0.5
		gal_vlos = np.zeros(gal_dist.size)
		gal_pos_unit = np.zeros((3,gal_dist.size))	#vector from new_p to gal	
		n = gal_dist.size

		# Line of sight
		code = """
		int u,w;
		for (u=0;u<n;++u){
		for(w=0;w<3;++w){
		gal_pos_unit(w,u) = (gal_p(w,u)-new_pos(w))/gal_dist(u);
		}
		gal_vlos(u) = gal_pos_unit(0,u)*gal_v(0,u)+gal_pos_unit(1,u)*gal_v(1,u)+gal_pos_unit(2,u)*gal_v(2,u);
		}
		"""
		fast = weave.inline(code,['gal_pos_unit','n','gal_dist','gal_vlos','gal_v','new_pos','gal_p'],type_converters=converters.blitz,compiler='gcc')
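		# Note: the weave kernel requires the long-deprecated scipy.weave
		# (Python 2 only).  A pure-NumPy sketch of the same computation:
		#   sep = gal_p - new_pos[:,None]                 # (3,n) separation vectors
		#   gal_dist = np.sqrt((sep**2).sum(axis=0))
		#   gal_pos_unit = sep / gal_dist                 # unit vectors, new_pos -> gal
		#   gal_vlos = (gal_pos_unit * gal_v).sum(axis=0) # LOS velocity components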
		angles = np.arccos(np.dot(halo_pos_unit,gal_pos_unit))
		r = angles*halo_dist
		#v_pec = gal_vlos-halo_vlos*np.dot(halo_pos_unit,gal_pos_unit)
		z_clus_cos = self.H0*halo_dist/self.c
		z_clus_pec = halo_vlos/self.c
		z_clus_obs = (1+z_clus_pec)*(1+z_clus_cos)-1
		z_gal_cos = self.H0*gal_dist/self.c
		z_gal_pec = gal_vlos/self.c
		z_gal_obs = (1+z_gal_pec)*(1+z_gal_cos)-1
		v = self.c*(z_gal_obs-z_clus_obs)/(1+z_clus_obs)
		#gal_vdisp3d[i] = np.sqrt(astStats.biweightScale(gal_v[0][np.where(gal_radius<=HaloR200[i])]-Halo_V[0],9.0)**2+astStats.biweightScale(gal_v[1][np.where(gal_radius<=HaloR200[i])]-Halo_V[1],9.0)**2+astStats.biweightScale(gal_v[2][np.where(gal_radius<=HaloR200[i])]-Halo_V[2],9.0)**2)/np.sqrt(3)
		#print 'MY VELOCITY OF GALAXIES', gal_vdisp3d[i]
#		particle_vdisp3d[i] = HVD*np.sqrt(3)
#		gal_rmag_new = gal_abs_rmag# + 5*np.log10(gal_dist*1e6/10.0)

		return r, v, np.array(new_pos)


	def app2abs(self,m_app,color,z,photo_band,color_band):
		'''
		takes apparent magnitude of a galaxy in some photometric band (SDSS g or r or i)
		and converts it to an absolute magnitude via distance modulus and k correction
		M_abs = m_app - Dist_Mod - K_corr
		takes:
			m_app		: float, apparent magnitude of galaxy
			color		: float, color of galaxy between two SDSS bands
			z		: float, total redshift of galaxy
			photo_band	: str, SDSS band for magnitude. Ex: 'g' or 'r' or 'i'
			color_band	: str, color between two SDSS bands. Ex: 'g - r' or 'g - i' or 'r - i', etc..
		'''
		dm = self.distance_mod(z)
		K_corr = calc_kcor(photo_band,z,color_band,color)
		return m_app - dm - K_corr


	def distance_mod(self,z):
		ang_d,lum_d = self.C.zdistance(z,self.H0)
		dm = 5.0 * np.log10(lum_d*1e6 / 10.0 )
		return dm
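
		# Sanity check (a sketch, assuming zdistance returns distances in Mpc):
		# a luminosity distance of 100 Mpc gives dm = 5*log10(100e6/10) = 35.0 mag.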


	def print_varibs(self,names,dictionary):
		print '## Variables Defined in the Run'
		print '-'*50
		for i in names:
			try:
				if i=='':
					print ''
					continue
				print i+'\r\t\t\t= '+str(dictionary[i])
			except KeyError:
				pass
		print '-'*50

	def print_separation(self,text,type=1):
		if type==1:
			print ''
			print '-'*60
			print str(text)
			print '-'*60
		elif type==2:
			print ''
			print str(text)
			print '-'*30
		return
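
A minimal wiring sketch for the `varib` pattern used throughout these examples. The keys below are parameters the Universal methods read off self; the values are hypothetical:

varib = {'method_num': 0, 'gal_num': 25, 'line_num': 100,
         'init_shiftgap': True, 'scale_data': False, 'run_los': False,
         'r_limit': 4.0, 'v_limit': 3500.0, 'avg_meth': 'median',
         'H0': 70.0, 'c': 2.99792e5, 'mirror': True}
U = Universal(varib)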
Example #6
class Stack(object):
	"""
	The main class that runs the caustic technique over a stacking routine
	"""
	def __init__(self,varib):
		# Update Dictionary with Variables and Caustic Technique
		self.__dict__.update(varib)
		self.C = Caustic()
		self.U = Universal(varib)

	def run_caustic(self,rvalues,vvalues,R200,HVD,clus_z=0,edge_int_remove=False,shiftgap=False,mirror=True,ensemble=True):
		"""
		Calls causticpy's run_caustic function
		"""
		# Feed caustic dummy vertical array
		length = len(rvalues)
		dummy = np.zeros(length).reshape(length,1)

		# Run Caustic
		self.C.run_caustic(dummy,gal_r=rvalues,gal_v=vvalues,r200=R200,clus_z=clus_z,clus_vdisp=HVD,gapper=shiftgap,edge_int_remove=edge_int_remove,mirror=mirror,edge_perc=self.edge_perc,q=self.q,rlimit=self.r_limit*R200,vlimit=self.v_limit,H0=self.H0)
		#self.C.run_caustic(dummy,gal_r=rvalues,gal_v=vvalues,r200=R200,clus_z=clus_z,clus_vdisp=HVD,gapper=shiftgap,mirror=mirror,edge_perc=self.edge_perc,q=self.q,rlimit=self.r_limit*R200,vlimit=self.v_limit,H0=self.H0)


	def caustic_stack(self,Rdata,Vdata,HaloID,HaloData,stack_num,
				ens_shiftgap=True,edge_int_remove=True,gal_reduce=True,stack_raw=False,est_v_center=False,
				feed_mags=True,G_Mags=None,R_Mags=None,I_Mags=None,clus_z=0):
		"""
		-- Takes an array of individual phase spaces and stacks them, then runs 
		   a caustic technique over the ensemble and/or individual phase spaces.
		-- Returns a dictionary

		-- Relies on a few parameters to be defined outside of the function:
			self.gal_num - Equivalent to Ngal: number of galaxies to take per cluster to then be stacked
			self.line_num - Equivalent to Nclus: number of clusters to stack into one ensemble cluster
			self.scale_data - Scale r data by R200 while stacking, then un-scale by BinR200
			self.run_los - run caustic technique over individual cluster (aka line-of-sight)
			self.avg_meth - method by which averaging of Bin Properties should be, 'mean' or 'median' or 'biweight' etc..
			self.mirror - mirror phase space before solving for caustic?
			Including others... see RunningTheCode.pdf for a list
			* These parameters should be fed to initialization of Stack() class as a dictionary, for ex:
				variables = {'run_los':False, ...... }
				S = Stack(variables)

		"rdata" - should be a 2 dimensional array with individual phase spaces as rows
		"vdata" - should be a 2 dimensional array with individual phase spaces as rows
				ex. rdata[0] => 0th phase space data
		'HaloID' : 1 dimensional array containing Halo Identification Numbers, len(HaloID) == len(rdata)
		'HaloData' : 2 dimensional array containing M200, R200, HVD, Z of Halos, with unique halos as columns
		'stack_num' : number of individual clusters to stack into the one ensemble

		'ens_shiftgap' - do a shiftgapper over final ensemble phase space?
		'gal_reduce' - before caustic run, reduce ensemble phase space to Ngal gals within R200?
		'stack_raw' - don't worry about limiting or building the phase space, just stack the Rdata and Vdata together as is
		'est_v_center' - take median of Vdata for new velocity center

		'feed_gal_mags' - feed magnitudes for individual galaxies, must feed all three mags if True

		-- 'ens' stands for ensemble cluster
		-- 'ind' stands for individual cluster

		-- Uppercase arrays contain data for multiple clusters,
			lowercase arrays contain data for 1 cluster
		"""

		# Define a container for holding stacked data, D
		D = Data()

		# Assign some parameters to Class scope
		self.__dict__.update(ez.create(['stack_raw','feed_mags','gal_reduce','ens_shiftgap','edge_int_remove'],locals()))

		# New Velocity Center
		if est_v_center == True:
			v_offset = astats.biweight_location(Vdata[np.where(Rdata < 1.0)])
			Vdata -= v_offset

		# Unpack HaloData
		if HaloData is None:
			self.fed_halo_data = False
			# Estimate R200
			R200 = []
			HVD = []
			Z = []
			for i in range(stack_num):
				R200.append(np.exp(-1.86)*len(np.where((R_Mags[i] < -19.55) & (Rdata[i] < 1.0) & (np.abs(Vdata[i]) < 3500))[0])**0.51)
				HVD.append(astats.biweight_midvariance(Vdata[i][np.where((Rdata[i] < 1.0)&(np.abs(Vdata[i])<4000))]))
			R200 = np.array(R200)
			HVD = np.array(HVD)
			Z = np.array([0.05]*len(R200))
			if self.avg_meth == 'mean': BinR200 = np.mean(R200); BinHVD = np.mean(HVD); BinZ = np.mean(Z)
			elif self.avg_meth == 'median': BinR200 = np.median(R200); BinHVD = np.median(HVD); BinZ = np.median(Z)
			D.add({'BinR200':BinR200,'BinHVD':BinHVD,'BinZ':BinZ,'R200':R200,'HVD':HVD,'Z':Z})

		else:
			self.fed_halo_data = True
			M200,R200,HVD,Z = HaloData
			if self.avg_meth == 'mean':
				BinM200 = np.mean(M200)
				BinR200 = np.mean(R200)
				BinHVD = np.mean(HVD)
				BinZ = np.mean(Z)
			elif self.avg_meth == 'median':
				BinM200 = np.median(M200)
				BinR200 = np.median(R200)
				BinHVD = np.median(HVD)
				BinZ = np.median(Z)
			# Append to Data
			D.add({'BinM200':BinM200,'BinR200':BinR200,'BinHVD':BinHVD,'BinZ':BinZ})	

		# Create Dummy Variables for Magnitudes if necessary
		if self.feed_mags == False:
			G_Mags,R_Mags,I_Mags = [],[],[]
			for i in range(stack_num):
				G_Mags.append([None]*len(Rdata[i]))
				R_Mags.append([None]*len(Rdata[i]))
				I_Mags.append([None]*len(Rdata[i]))
			G_Mags,R_Mags,I_Mags = np.array(G_Mags),np.array(R_Mags),np.array(I_Mags)

		# Create galaxy identification arrays
		ENS_gal_id,ENS_clus_id,IND_gal_id = [],[],[]
		gal_count = 0
		for i in range(stack_num):
			ENS_gal_id.append(np.arange(gal_count,gal_count+len(Rdata[i])))
			ENS_clus_id.append(np.array([HaloID[i]]*len(Rdata[i]),int))
			IND_gal_id.append(np.arange(len(Rdata[i])))
		ENS_gal_id,ENS_clus_id,IND_gal_id = np.array(ENS_gal_id),np.array(ENS_clus_id),np.array(IND_gal_id)

		# Iterate through phase spaces
		for self.l in range(stack_num):

			# Limit Phase Space
			if self.stack_raw == False:
				r,v,ens_gal_id,ens_clus_id,ind_gal_id,gmags,rmags,imags,samp_size = self.U.limit_gals(Rdata[self.l],Vdata[self.l],ENS_gal_id[self.l],ENS_clus_id[self.l],IND_gal_id[self.l],G_Mags[self.l],R_Mags[self.l],I_Mags[self.l],R200[self.l])

			# Build Ensemble and LOS Phase Spaces
			if self.stack_raw == False:
				ens_r,ens_v,ens_gal_id,ens_clus_id,ens_gmags,ens_rmags,ens_imags,ind_r,ind_v,ind_gal_id,ind_gmags,ind_rmags,ind_imags = self.U.build(r,v,ens_gal_id,ens_clus_id,ind_gal_id,gmags,rmags,imags,R200[self.l])

			# If Scale data before stack is desired
			if self.scale_data == True:
				ens_r /= R200[self.l]

			# Stack Ensemble Data by extending to Data() container
			names = ['ens_r','ens_v','ens_gmags','ens_rmags','ens_imags','ens_gal_id','ens_clus_id']
			D.extend(ez.create(names,locals()))

			if self.run_los == True:
		
				# Create Data Block
				ind_data = np.vstack([ind_r,ind_v,ind_gal_id,ind_gmags,ind_rmags,ind_imags])

				# Sort by Rmag 
				bright = np.argsort(ind_rmags)
				ind_data = ind_data.T[bright].T
				ind_r,ind_v,ind_gal_id,ind_gmags,ind_rmags,ind_imags = ind_data
	
				# Reduce phase space
				if self.stack_raw == False and self.gal_reduce == True:
					within = np.where(ind_r <= R200[self.l])[0]
					end = within[:self.gal_num + 1][-1]
					ind_data = ind_data.T[:end].T
					ind_r,ind_v,ind_gal_id,ind_gmags,ind_rmags,ind_imags = ind_data
	
				# Calculate individual HVD
				# Pick out gals within r200
				within = np.where(ind_r <= R200[self.l])[0]
				gal_count = len(within)
				if gal_count <= 3:
					'''biweightScale can't take less than 4 elements'''
					# Calculate hvd with numpy std of galaxies within r200 (b/c this is quoted richness)
					ind_hvd = np.std(np.copy(ind_v)[within])
				else:
					# Calculate hvd with astStats biweightScale (see Beers 1990)
					try:
						ind_hvd = astats.biweight_midvariance(np.copy(ind_v)[within])
					# Sometimes divide by zero error in biweight function for low gal_num
					except ZeroDivisionError:
						print 'ZeroDivisionError in biweightfunction'
						print 'ind_v[within]=',ind_v[within]
						ind_hvd = np.std(np.copy(ind_v)[within])

				# If run_los == True, run Caustic Technique on individual cluster
				self.U.print_separation('# Running Caustic for LOS '+str(self.l),type=2)
				self.run_caustic(ind_r,ind_v,R200[self.l],ind_hvd,clus_z=Z[self.l],mirror=self.mirror)
				ind_caumass = np.array([self.C.M200_fbeta])
				ind_caumass_est = np.array([self.C.Mass2.M200_est])
				ind_edgemass = np.array([self.C.M200_edge])
				ind_edgemass_est = np.array([self.C.MassE.M200_est])
				ind_causurf = np.array(self.C.caustic_profile)
				ind_nfwsurf = np.array(self.C.caustic_fit)
				ind_edgesurf = np.array(self.C.caustic_edge)

			# Append Individual Cluster Data
			names = ['ind_r','ind_v','ind_gal_id','ind_gmags','ind_rmags','ind_imags',
				'ind_hvd','ind_caumass','ind_caumass_est','ind_edgemass','ind_edgemass_est',
				'ind_causurf','ind_nfwsurf']
			D.append(ez.create(names,locals()))


		# Turn Ensemble into Arrays
		names = ['ens_r','ens_v','ens_gal_id','ens_clus_id','ens_gmags','ens_rmags','ens_imags','ens_caumass','ens_caumass_est','ens_edgemass','ens_edgemass_est','ens_causurf','ens_nfwsurf','ens_edgesurf']
		D.to_array(names,ravel=True)

		# Re-scale data if scale_data == True:
		if self.scale_data == True:
			D.ens_r *= BinR200

		# Create Ensemble Data Block
		D.ens_data = np.vstack([D.ens_r,D.ens_v,D.ens_gal_id,D.ens_clus_id,D.ens_gmags,D.ens_rmags,D.ens_imags])

		# Shiftgapper for Interloper Treatment
		if self.stack_raw == False and self.ens_shiftgap == True:
			self.D = D
			try:
				D.ens_data = self.C.shiftgapper(D.ens_data.T).T
				D.ens_r,D.ens_v,D.ens_gal_id,D.ens_clus_id,D.ens_gmags,D.ens_rmags,D.ens_imags = D.ens_data
			except UnboundLocalError:	# A couple or no galaxies within RVIR
				print '-'*40
				print 'UnboundLocalError raised on Ensemble Shiftgapper'
				print '-'*40

		D.ens_r,D.ens_v,D.ens_gal_id,D.ens_clus_id,D.ens_gmags,D.ens_rmags,D.ens_imags = D.ens_data

		# Sort by R_Mag
		bright = np.argsort(D.ens_rmags)
		D.ens_data = D.ens_data.T[bright].T
		D.ens_r,D.ens_v,D.ens_gal_id,D.ens_clus_id,D.ens_gmags,D.ens_rmags,D.ens_imags = D.ens_data

		# Reduce System Down to gal_num richness within BinR200
		if self.stack_raw == False and self.gal_reduce == True:
			within = np.where(D.ens_r <= BinR200)[0]
			print 'len(within)',len(within)
			print 'gal_num,line_num',self.gal_num,self.line_num
			end = within[:self.gal_num*self.line_num + 1][-1]
			D.ens_data = D.ens_data.T[:end].T
			D.ens_r,D.ens_v,D.ens_gal_id,D.ens_clus_id,D.ens_gmags,D.ens_rmags,D.ens_imags = D.ens_data

		# Calculate Ensemble Velocity Dispersion for galaxies within R200
		ens_hvd = astats.biweight_midvariance(np.copy(D.ens_v)[np.where(D.ens_r<=BinR200)])

		# Run Caustic Technique!
		try: self.U.print_separation('# Running Caustic on Ensemble '+str(self.j),type=2)
		except: pass
		try:
			self.run_caustic(D.ens_r,D.ens_v,BinR200,ens_hvd,clus_z=BinZ,mirror=self.mirror,shiftgap=self.ens_shiftgap,edge_int_remove=self.edge_int_remove)
			ens_caumass = np.array([self.C.M200_fbeta])
			ens_caumass_est = np.array([self.C.Mass2.M200_est])
			ens_edgemass = np.array([self.C.M200_edge])
			ens_edgemass_est = np.array([self.C.MassE.M200_est])
			ens_causurf = np.array(self.C.caustic_profile)
			ens_nfwsurf = np.array(self.C.caustic_fit)
			ens_edgesurf = np.array(self.C.caustic_edge)
			ens_caumass500_est = np.array(self.C.Mass2.M500_est)
			ens_edgemass500_est = np.array(self.C.MassE.M500_est)
			ens_r200_est = np.array(self.C.r200_est_fbeta)
			ens_r500_est = np.array(self.C.r500_est_fbeta)
			ens_r200_est_edge = np.array(self.C.r200_est_edge)

		except:
			print ''
			print '-'*45
			print 'CAUSTIC TECHNIQUE FAILED ON THIS CLUSTER'
			print '-'*45
			print ''
			ens_caumass = np.array([0])
			ens_caumass_est = np.array([0])
			ens_edgemass = np.array([0])
			ens_edgemass_est = np.array([0])
			ens_causurf = np.array([0]*len(self.C.x_range))
			ens_nfwsurf = np.array([0]*len(self.C.x_range))
			ens_edgesurf = np.array([0]*len(self.C.x_range))
			ens_caumass500_est = np.array([0])
			ens_edgemass500_est = np.array([0])
			ens_r200_est = np.array([0])
			ens_r500_est = np.array([0])
			ens_r200_est_edge = np.array([0])

		# Other Arrays
		x_range = self.C.x_range

		# Append Data
		names = ['ens_caumass','ens_hvd','ens_caumass_est','ens_edgemass','ens_edgemass_est','ens_causurf','ens_nfwsurf','ens_edgesurf','x_range','ens_caumass500_est','ens_edgemass500_est','ens_r200_est','ens_r500_est','ens_r200_est_edge']
		D.add(ez.create(names,locals()))

		# Turn Individual Data into Arrays
		if self.run_los == True:
			names = ['ind_caumass','ind_caumass_est','ind_edgemass','ind_edgemass_est','ind_hvd']
			D.to_array(names,ravel=True)
			names = ['ind_r','ind_v','ind_gal_id','ind_gmags','ind_rmags','ind_imags','ind_causurf','ind_nfwsurf','ind_edgesurf']
			D.to_array(names)	

		# Output Data
		return D.__dict__
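
A hedged usage sketch of the stacking entry point. Array shapes follow the docstring, but the phase-space values are fabricated and the `varib` dict is the hypothetical one sketched at the end of Example #5, so treat this as illustrative only:

S = Stack(varib)
Rdata = np.abs(npr.randn(10, 200))             # projected radii [Mpc], one cluster per row
Vdata = npr.randn(10, 200) * 1000.             # LOS velocities [km/s]
HaloID = np.arange(10)
HaloData = np.array([[1e14]*10, [1.5]*10, [700.]*10, [0.05]*10])   # M200, R200, HVD, Z
results = S.caustic_stack(Rdata, Vdata, HaloID, HaloData, stack_num=10,
                          feed_mags=False, ens_shiftgap=True, gal_reduce=True)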
Example #7
File: c4_clusters.py  Project: nkern/C4
# Load Data
# Cluster File
c4 = fits.open('C4_Cluster_Single.fits')[1].data
cut = np.where(c4['z_c4']!=0)
c4 = c4[cut]
sort = np.argsort(c4['z_c4'])
c4 = c4[sort]

# Galaxy File
photo = fits.open('DR12_Photo_AbsMag_Data.fits')[1].data
dr12 = fits.open('DR12_SpectroPhoto_BestObjID.fits')[1].data
dr12 = dr12[np.where((dr12['z'] < 0.27)&(dr12['zWarning']==0))]

# Set Constants
C = Caustic()
H0 = 72.0
c = 2.99792e5
Cosmo = cosmo.LambdaCDM(H0,0.3,0.7)
data_set = 'DataSet2/'
keys = ['C','H0','c','Cosmo','data_set']
varib = ez.create(keys,locals())
root = '/nfs/christoq_ls/nkern'

## Useful Functions
def plot3d():
	fig = mp.figure()
	ax = fig.add_subplot(111,projection='3d')
	globals().update({'ax':ax})
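
Hypothetical use of the helper above (it stashes the 3D axes in the module globals as `ax`; the c4 RA/DEC column names here are assumed):

plot3d()
ax.scatter(c4['ra_c4'], c4['dec_c4'], c4['z_c4'])
mp.show()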

Example #8
def cluster_rich(data_loc,
                 halo_file,
                 chris_data_root,
                 chris_data_file,
                 newfilename,
                 write_data=True,
                 clobber=True):
    """ Calculate a Cluster's richness via Miller N200 and Kern N200
		data_loc : e.g. MassRich/TRUTH_CAUSTIC
		halo_file : e.g. halos.fits
		chris_data_root : e.g. /nfs/christoq_ls/C4/sdssdr12
		chris_data_file : e.g. DR12_GalaxyPhotoData_wabs_wedges.fits or m19.1_allgals_wsdss_specerrs_abs.fits
 """

    # Run through Kern richness estimator, mainly to get cluster pairs

    root = '/nfs/christoq_ls/nkern'

    C4 = CFOUR({'H0': 70, 'chris_data_root': chris_data_root})
    C = Caustic()
    H0 = 70.0
    c = 2.99792e5
    Cosmo = cosmo.LambdaCDM(H0, 0.3, 0.7)
    keys = ['C', 'H0', 'c', 'Cosmo', 'C4']
    varib = ez.create(keys, locals())
    R = RICHNESS(varib)

    # Load Halos
    halos = fits.open(data_loc + '/' + halo_file)[1].data
    HaloID = halos['orig_order']
    RA = halos['ra_avg']
    DEC = halos['dec_avg']
    Z = halos['z_avg']
    RVIR = halos['RVIR']

    # Load Galaxy Data
    gals = fits.open(chris_data_root + '/' + chris_data_file)[1].data
    # Change gals keys according to SDSSDR12 galaxy file
    if data_loc[-4:] != 'DR12':
        gals = dict(map(lambda x: (x, gals[x]), gals.names))
        gals['objid'] = gals.pop('GAL_HALOID')
        gals['ra'] = gals.pop('GAL_RA')
        gals['dec'] = gals.pop('GAL_DEC')
        gals['z'] = gals.pop('GAL_Z_APP')
        gals['u_mag'] = gals.pop('GAL_SDSS_U')
        gals['g_mag'] = gals.pop('GAL_SDSS_G')
        gals['r_mag'] = gals.pop('GAL_SDSS_R')
        gals['i_mag'] = gals.pop('GAL_SDSS_I')
        gals['z_mag'] = gals.pop('GAL_SDSS_Z')
        gals['r_absmag'] = gals.pop('R_ABSMAG')

    # Derive Gal Cut Out Parameters
    arcs = np.array(
        Cosmo.arcsec_per_kpc_proper(Z)) * 15000. / 3600.  # 15 Mpc in degrees

    # Kern richness arrays
    kern_N200 = []
    HVD = []
    pair_avg = []
    Nspec = []
    kern_obs_tot = []
    kern_obs_back = []  # obs_back is the number of non-member galaxies in the central aperture around the cluster (already scaled to the inner aperture)

    # Miller Richness Arrays
    new = fits.open(chris_data_root + '/richness_cr200_bcg/new.fits')[0].data
    newb = fits.open(chris_data_root + '/richness_cr200_bcg/newb.fits')[0].data
    newb *= 0.2
    miller_N200 = []
    miller_obs_tot = []
    miller_obs_back = []
    # fixed indices selecting one richness measurement out of the new/newb data cubes
    colfac = 9
    bakfac = 1
    mag3 = 4
    v = 2
    radfac = 1

    # Loop over clusters
    print ''
    print '-' * 40
    print '...calculating cluster richnesses'
    for i in range(len(HaloID)):

        if i % 100 == 0:
            print '...working on cluster ' + str(i) + ' out of ' + str(
                len(HaloID))
        # Define Cluster parameters
        clus_ra = RA[i]
        clus_dec = DEC[i]
        clus_z = Z[i]
        clus_rvir = RVIR[i]
        haloid = HaloID[i]

        if np.isnan(clus_ra) or np.isnan(clus_dec) or np.isnan(clus_z):
            # pad every per-cluster array so they stay aligned with HaloID
            kern_N200.append(0)
            HVD.append(0)
            pair_avg.append(False)
            Nspec.append(0)
            kern_obs_tot.append(0)
            kern_obs_back.append(0)
            miller_N200.append(0)
            miller_obs_tot.append(0)
            miller_obs_back.append(0)
            continue

        # 15 Mpc in degrees of declination and degrees of RA
        d_dec = arcs[i]
        d_ra = d_dec / np.cos(clus_dec * np.pi / 180)
        d_z = 0.04

        # Cut Out Galaxy Data Around Cluster
        cut = np.where((np.abs(gals['ra'] - clus_ra) < d_ra)
                       & (np.abs(gals['dec'] - clus_dec) < d_dec)
                       & (np.abs(gals['z'] - clus_z) < d_z))[0]
        gal_ra = gals['ra'][cut]
        gal_dec = gals['dec'][cut]
        gal_z = gals['z'][cut]
        gal_gmags = gals['g_mag'][cut]
        gal_rmags = gals['r_mag'][cut]
        gal_imags = gals['i_mag'][cut]
        gal_absr = gals['r_absmag'][cut]

        # Run Kern Richness Estimator
        rich = R.richness_est(gal_ra,
                              gal_dec,
                              gal_z,
                              np.zeros(len(gal_z)),
                              gal_gmags,
                              gal_rmags,
                              gal_imags,
                              gal_absr,
                              haloid,
                              clus_ra,
                              clus_dec,
                              clus_z,
                              clus_rvir=clus_rvir,
                              spec_list=None,
                              use_specs=False,
                              use_bcg=False,
                              fit_rs=False,
                              fixed_vdisp=False,
                              fixed_aperture=False,
                              plot_sky=False,
                              plot_gr=False,
                              plot_phase=False,
                              find_pairs=True)

        kern_N200.append(rich)
        HVD.append(R.vel_disp)
        pair_avg.append(R.pair)
        Nspec.append(R.Nspec)
        kern_obs_tot.append(R.obs_tot)
        kern_obs_back.append(R.obs_back_scaled)

        # Append Miller Richness Values
        k = halos['orig_order'][i]
        miller_N200.append(new[k, colfac, mag3, v, radfac] -
                           newb[k, bakfac, mag3, v, radfac])
        miller_obs_tot.append(new[k, colfac, mag3, v, radfac])
        miller_obs_back.append(newb[k, bakfac, mag3, v, radfac])

    kern_N200 = np.array(kern_N200)
    HVD = np.array(HVD)
    pair_avg = np.array(pair_avg)
    Nspec = np.array(Nspec)
    kern_obs_tot = np.array(kern_obs_tot)
    kern_obs_back = np.array(kern_obs_back)

    miller_N200 = np.array(miller_N200)
    miller_obs_tot = np.array(miller_obs_tot)
    miller_obs_back = np.array(miller_obs_back)

    print '...finished calculating richnesses'
    ## Write Data Out
    if write_data == True:
        print '...writing out halos.fits file'

        # Dictionary of new columns
        new_keys = [
            'kern_N200', 'HVD', 'pair_avg', 'Nspec', 'kern_obs_tot',
            'kern_obs_back', 'miller_N200', 'miller_obs_tot', 'miller_obs_back'
        ]
        new_dic = ez.create(new_keys, locals())

        # Original fits record file
        orig_table = halos

        # Write out own fits file
        keys = ['HaloID', 'RVIR'] + new_keys
        dic = ez.create(keys, locals())
        fits_table(dic, keys, data_loc + '/richnesses.fits', clobber=True)

        # Append new columns
        fits_append(orig_table,
                    new_dic,
                    new_keys,
                    filename=data_loc + '/' + newfilename,
                    clobber=clobber)
        print '-' * 40
        print ''
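
A call sketch assembled from the docstring's own example values (the output file name is hypothetical):

cluster_rich('MassRich/TRUTH_CAUSTIC', 'halos.fits',
             '/nfs/christoq_ls/C4/sdssdr12',
             'DR12_GalaxyPhotoData_wabs_wedges.fits',
             'halos_with_richness.fits',
             write_data=True, clobber=True)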
Example #9
def clus_avg(data_loc,
             halo_file,
             chris_data_root,
             newfilename,
             write_data=True,
             clobber=True):

    C4 = CFOUR({'H0': 70, 'chris_data_root': chris_data_root})
    C = Caustic()

    # Load Halos
    halos = fits.open(data_loc + '/' + halo_file)[1].data
    HaloID = halos['orig_order']
    RA = halos['ra_bcg']
    DEC = halos['dec_bcg']
    Z = halos['z_biwt']
    RVIR = halos['RVIR']
    SINGLE = halos['single']
    SUB = halos['sub']
    NC4 = halos['nc4']

    RA_AVG, DEC_AVG, Z_AVG = [], [], []

    # Loop Over Halos
    print ''
    print '-' * 40
    print '...running average cluster center code'
    for i in range(len(halos)):
        if i % 100 == 0:
            print '...working on cluster ' + str(i) + ' out of ' + str(
                len(halos))
        try:
            # Assign Halo Properties
            clus_ra = RA[i]
            clus_dec = DEC[i]
            clus_z = Z[i]

            # Load Galaxies
            galdata = C4.load_chris_gals(HaloID[i])
            gal_ra, gal_dec, gal_z, gal_gmags, gal_rmags, gal_imags = galdata

            # Take Iterative Average, four times
            # vlim = 1500, rlim = 1.5
            clus_ra, clus_dec, clus_z = proj_avg(clus_ra, clus_dec, clus_z,
                                                 gal_ra, gal_dec, gal_z, 1500,
                                                 1.5, C)
            # vlim = 1000, rlim = 1.5
            clus_ra, clus_dec, clus_z = proj_avg(clus_ra, clus_dec, clus_z,
                                                 gal_ra, gal_dec, gal_z, 1000,
                                                 1.5, C)
            # vlim = 1000, rlim = 1.5
            clus_ra, clus_dec, clus_z = proj_avg(clus_ra, clus_dec, clus_z,
                                                 gal_ra, gal_dec, gal_z, 1000,
                                                 1.5, C)
            # vlim = 2000, rlim = 1.5
            clus_ra, clus_dec, clus_z = proj_avg(clus_ra, clus_dec, clus_z,
                                                 gal_ra, gal_dec, gal_z, 2000,
                                                 1.5, C)

        except:
            print '...average center failed on cluster ' + str(i)
            clus_ra, clus_dec, clus_z = 0, 0, 0

        RA_AVG.append(clus_ra)
        DEC_AVG.append(clus_dec)
        Z_AVG.append(clus_z)

    RA_AVG, DEC_AVG, Z_AVG = np.array(RA_AVG), np.array(DEC_AVG), np.array(
        Z_AVG)

    print '...finished average cluster-center calculations'

    ## Write Data Out
    if write_data == True:
        print '...writing out cluster catalogue with average centers included'
        # Dictionary of new columns
        new_keys = ['RA_AVG', 'DEC_AVG', 'Z_AVG']
        new_dic = ez.create(new_keys, locals())

        # Original fits record file
        orig_table = halos

        # Write own fits file
        keys = [
            'HaloID', 'RA', 'DEC', 'Z', 'RVIR', 'RA_AVG', 'DEC_AVG', 'Z_AVG'
        ]
        dic = ez.create(keys, locals())
        fits_table(dic, keys, data_loc + '/avg_centers.fits', clobber=True)

        # Append new columns
        fits_append(orig_table,
                    new_dic,
                    new_keys,
                    filename=data_loc + '/' + newfilename,
                    clobber=clobber)
        print '-' * 40
        print ''
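
An analogous call sketch for the averaging step (file names are hypothetical, following the richness example above):

clus_avg('MassRich/TRUTH_CAUSTIC', 'halos.fits',
         '/nfs/christoq_ls/C4/sdssdr12',
         'halos_avg_centers.fits',
         write_data=True, clobber=True)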
Example #10
File: c4_richness.py  Project: nkern/C4
# Import Modules
import numpy as np
import pylab as mp
import astropy.io.fits as fits
import astropy.cosmology as cosmo
from causticpy import Caustic
import astrostats as astats
from mpl_toolkits.mplot3d.axes3d import Axes3D
import DictEZ as ez
import cPickle as pkl
from sklearn import linear_model
from c4_pairfind import pair_find

# Set Constants
C = Caustic()
H0 = 72.0
c = 2.99792e5
Cosmo = cosmo.LambdaCDM(H0, 0.3, 0.7)
keys = ['C', 'H0', 'c', 'Cosmo']
varib = ez.create(keys, locals())
root = '/nfs/christoq_ls/nkern'


# Calculate Richnesses
class RICHNESS(object):
    def __init__(self, varib):
        self.__dict__.update(varib)

    def richness_est(self,
                     ra,
                     dec,