Example #1
def write_m200(params,newfilename,write_data=True,clobber=True):
	''' load individual caustic mass estimates from individual/ and add to halos.fits '''
	# Add M200 estimates to halos.fits
	print '...loading in files from '+str(params['data_loc'])+'/'+str(params['write_loc'])
	CAUM200 = []
	CAUHVD = []
	CAUM200_EST = []
	CAUR200_EST = []
	CAUM500_EST = []
	CAUR500_EST = []
	for i in range(params['halo_num']):
		sys.stdout.write("Progress... "+str(i)+" out of "+str(params['halo_num'])+"\r")
		sys.stdout.flush()
		try:
			f = open(params['data_loc']+'/'+params['write_loc']+'/Ensemble_'+str(i)+'_Data.pkl','rb')
			input = pkl.Unpickler(f)
			data = input.load()
			CAUM200.append(float(data['ens_caumass']))
			CAUHVD.append(float(data['ens_hvd']))
			CAUM200_EST.append(float(data['ens_caumass_est']))
			CAUR200_EST.append(float(data['ens_r200_est']))
			CAUM500_EST.append(float(data['ens_caumass500_est']))
			CAUR500_EST.append(float(data['ens_r500_est']))
		except:
			CAUM200.append(0)
			CAUHVD.append(0)
			CAUM200_EST.append(0)
			CAUR200_EST.append(0)
			CAUM500_EST.append(0)
			CAUR500_EST.append(0)

	CAUM200 = np.array(CAUM200)
	CAUHVD = np.array(CAUHVD)
	CAUM200_EST = np.array(CAUM200_EST)
	CAUR200_EST = np.array(CAUR200_EST)
	CAUM500_EST = np.array(CAUM500_EST)
	CAUR500_EST = np.array(CAUR500_EST)


	# Define Catastrophic Failures as M200 < 1e12
	CATA_FAIL = np.array([False]*params['halo_num'])
	CATA_FAIL[np.where((CAUM200<1e12)|(np.isnan(CAUM200)==True))] = True

	print '...finished load'
	## Write Data Out
	if write_data == True:
		print '...writing out halos.fits file'

		# Dictionary of new columns
		new_keys = ['CAUM200','CAUHVD','CAUM200_EST','CAUR200_EST','CAUM500_EST','CAUR500_EST','CATA_FAIL']
		new_dic = ez.create(new_keys,locals())

		# Original fits record file (`halos` is assumed to be defined at module level)
		orig_table = halos

		# Append new columns
		fits_append(orig_table,new_dic,new_keys,filename=params['data_loc']+'/'+newfilename,clobber=clobber)
		print '-'*40
		print ''
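
# A minimal usage sketch for write_m200 (hypothetical paths and halo count; a
# real params dictionary is built elsewhere in this module):
# params = {'data_loc': 'MassRich/TRUTH_CAUSTIC', 'write_loc': 'individual', 'halo_num': 100}
# write_m200(params, 'halos_new.fits', write_data=True, clobber=True)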
	def bootstrap_load_write(self,rep_nums,cell_nums,data_stem='binstack/bootstrap1/rep',where_to_write='binstack/bootstrap1/',write_data=True):
		''' 
		Performs a R.recover() on bootstrap run tables over all repetitions, then writes out data
		'''
		# create dictionary for all data
		data = {}
	
		# iterate through cell_nums and reps
		for i in cell_nums:
			MFRAC,VFRAC,CAUMASS,HVD,BINM200,BINHVD,MBIAS,MSCAT,VBIAS,VSCAT = [],[],[],[],[],[],[],[],[],[]
			BINM200_STD,BINHVD_STD = [],[]
			for j in rep_nums:
				d = R.recover("bo_m0_run"+str(i),ss=False,mm=False,go_global=False,data_loc=data_stem+str(j))
				MFRAC.append(d['ENS_MFRAC'].ravel())
				VFRAC.append(d['ENS_VFRAC'].ravel())
				CAUMASS.append(d['ENS_CAUMASS'].ravel())
				HVD.append(d['ENS_HVD'].ravel())
				BINM200.append(d['BINM200'].ravel())	
				BINHVD.append(d['BINHVD'].ravel())
				MBIAS.append(d['ens_mbias'])
				MSCAT.append(d['ens_mscat'])
				VBIAS.append(d['ens_vbias'])
				VSCAT.append(d['ens_vscat'])

				# Find std of BINM200 and BINHVD
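				# zip(*[iter(seq)]*n) chunks seq into consecutive groups of n,
				# i.e. one group of line_num halos per ensemble bin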
				BINM200_STD.append(map(np.std,zip(*[iter(d['M_crit200'][:d['halo_num']])]*d['line_num'])))
				BINHVD_STD.append(map(np.std,zip(*[iter(d['HVD'][:d['halo_num']])]*d['line_num'])))

			MFRAC = np.array(MFRAC)
			VFRAC = np.array(VFRAC)
			CAUMASS = np.array(CAUMASS)
			HVD = np.array(HVD)
			BINM200 = np.array(BINM200)
			BINHVD = np.array(BINHVD)
			MBIAS = np.array(MBIAS)
			MSCAT = np.array(MSCAT)
			VBIAS = np.array(VBIAS)
			VSCAT = np.array(VSCAT)
			BINM200_STD = np.array(BINM200_STD)
			BINHVD_STD = np.array(BINHVD_STD)		
	
			keys = ['MFRAC','VFRAC','CAUMASS','HVD','BINM200','BINHVD','MBIAS','MSCAT','VBIAS','VSCAT','BINM200_STD','BINHVD_STD']
			cell_data = ez.create(keys,locals())

			for name in cell_data.keys():
				new_name = name+'_cell'+str(i)
				cell_data[new_name] = cell_data.pop(name)

			data.update(cell_data)		

		if write_data == True:
			file = open(where_to_write+'bootstrap_errors.pkl','wb')
			output = pkl.Pickler(file)
			output.dump(data)
			file.close()

		return data
Example #3
gmax			= None							# Limit to global params

N_samples		= 6000							# Number of samples to draw from multi-Gaussian or to use in training
eval_samples	= np.arange(0,6000)				# Indices of all samples in dataset, train + cv (excluding fiducial)
N_train			= 6000							# Samples to train on
N_cv			= 0								# Samples to cross validate on

## Organize Parameters
params          = ['sigma8','hlittle','OMbh2','OMch2','ns',
					'Tvir','zeta','Rmfp',
					'fX','aX','numin']         					     		# Cosmological and Astrophysical parameters
params_fid      = [sigma8,hlittle,OMbh2,OMch2,ns,
					Tvir,zeta,Rmfp,fX,aX,numin]								# Fiducial values
params_prefix   = map(lambda x: x +"_",params)          					# Strings to create directories etc.
p_latex         = ['$\sigma_{8}$','$h$','$\Omega_{b}h^{2}$',
					'$\Omega_{c}h^{2}$','$n_{s}$','$T^{min}_{vir}\ (\mathrm{K})$','$\zeta$',
					'$R_{mfp}\ (\mathrm{Mpc})$','$f_{X}$','$\\alpha_{X}$','$\\nu_{min}\ (\mathrm{eV})$']		# params list but in LaTeX
p_fid_latex		= ['$\sigma_{8}^{fid}$','$h^{fid}$','$\Omega_{b}h^{2}{}^{fid}$',
                    '$\Omega_{c}h^{2}{}^{fid}$','$n_{s}^{fid}$','$T_{vir}^{fid}$','$\zeta^{fid}$',
                    '$R_{mfp}^{fid}$','$f_{X}^{fid}$','$\\alpha_{X}^{fid}$','$\\nu_{min}^{fid}$']     # fiducial-value labels in LaTeX

variables       = ['z_start','z_end','z_step','zlow','zprime','randomseed','boxlen',
					'dim','HIIdim','computeRmfp','numcores','ram','use_Ts']         # Other variables to include in parameter files
variables		= DictEZ.create(variables,globals())
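
# A minimal sketch of what DictEZ.create is assumed to do here: pick named
# variables out of a namespace and return them as a dictionary (hypothetical
# reimplementation, not the library code):
# def create(keys, namespace):
#     return dict((k, namespace[k]) for k in keys if k in namespace)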

base_direc      = 'param_space/lhsfs_hera331/'                        # directory that opens up to 21cmFAST realizations

sim_root        = '/Users/nkern/Software/21cmFAST_v1'     # Where Home 21cmFAST directory lives
direc_root		= '/Users/nkern/EoR/cosmo_eor_heat/mcmc'	# Where this directory lives
command			= 'make;./drive_logZscroll_Ts'
Example #4
## Calculate Cluster Richnesses
cluster_rich = False
if cluster_rich == True:

	root = '/nfs/christoq_ls/nkern'
	data_loc = 'MassRich/TRUTH_CAUSTIC'
	write_loc = 'individual'

	C4 = CFOUR({'H0':70,'chris_data_root':'/nfs/christoq_ls/MILLENNIUM/Henriques/TRUTH_CAUSTIC'})
	C = Caustic()
	H0 = 72.0
	c = 2.99792e5
	Cosmo = cosmo.LambdaCDM(H0,0.3,0.7)
	keys = ['C','H0','c','Cosmo','C4']
	varib = ez.create(keys,locals())
	R = RICHNESS(varib)

	# Load Halos
	halos = fits.open(root+'/C4/'+data_loc+'/halos.fits')[1].data
	HaloID = halos['orig_order']
	RA = halos['ra_avg']
	DEC = halos['dec_avg']
	Z = halos['z_avg']
	RVIR = halos['RVIR']

	# Load Galaxy Data
	gals = fits.open('/nfs/christoq_ls/MILLENNIUM/Henriques/TRUTH_CAUSTIC/m19.1_allgals_wsdss_specerrs_abs.fits')[1].data

	# Derive Gal Cut Out Parameters
Example #5
	def caustic_stack(self,Rdata,Vdata,HaloID,HaloData,stack_num,
				ens_shiftgap=True,edge_int_remove=True,gal_reduce=True,stack_raw=False,est_v_center=False,
				feed_mags=True,G_Mags=None,R_Mags=None,I_Mags=None,clus_z=0):
		"""
		-- Takes an array of individual phase spaces and stacks them, then runs 
		   a caustic technique over the ensemble and/or individual phase spaces.
		-- Returns a dictionary

		-- Relies on a few parameters to be defined outside of the function:
			self.gal_num - Equivalent to Ngal: number of galaxies to take per cluster to then be stacked
			self.line_num - Equivalent to Nclus: number of clusters to stack into one ensemble cluster
			self.scale_data - Scale r data by R200 while stacking, then un-scale by BinR200
			self.run_los - run caustic technique over individual cluster (aka line-of-sight)
			self.avg_meth - method used to average Bin Properties, 'mean' or 'median' (only these two are handled below)
			self.mirror - mirror phase space before solving for caustic?
			Including others... see RunningTheCode.pdf for a list
			* These parameters should be fed to initialization of Stack() class as a dictionary, for ex:
				variables = {'run_los':False, ...... }
				S = Stack(variables)

		"rdata" - should be a 2 dimensional array with individual phase spaces as rows
		"vdata" - should be a 2 dimensional array with individual phase spaces as rows
				ex. rdata[0] => 0th phase space data
		'HaloID' : 1 dimensional array containing Halo Identification Numbers, len(HaloID) == len(rdata)
		'HaloData' : 2 dimensional array containing M200, R200, HVD, Z of Halos, with unique halos as columns
		'stack_num' : number of individual clusters to stack into the one ensemble

		'ens_shiftgap' - do a shiftgapper over final ensemble phase space?
		'gal_reduce' - before caustic run, reduce ensemble phase space to Ngal gals within R200?
		'stack_raw' - don't worry about limiting or building the phase space, just stack the Rdata and Vdata together as is
		'est_v_center' - take median of Vdata for new velocity center

		'feed_gal_mags' - feed magnitudes for individual galaxies, must feed all three mags if True

		-- 'ens' stands for ensemble cluster
		-- 'ind' stands for individual cluster

		-- Uppercase arrays contain data for multiple clusters,
			lowercase arrays contain data for 1 cluster
		"""

		# Define a container for holding stacked data, D
		D = Data()

		# Assign some parameters to Class scope
		self.__dict__.update(ez.create(['stack_raw','feed_mags','gal_reduce','ens_shiftgap','edge_int_remove'],locals()))

		# New Velocity Center
		if est_v_center == True:
			v_offset = astats.biweight_location(Vdata[np.where(Rdata < 1.0)])
			Vdata -= v_offset

		# Unpack HaloData
		if HaloData is None:
			self.fed_halo_data = False
			# Estimate R200
			R200 = []
			HVD = []
			Z = []
			for i in range(stack_num):
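				# Empirical richness-based scaling (assumed calibration):
				# R200 ~ exp(-1.86) * N**0.51, with N the number of bright members
				# (R mag < -19.55) projected within 1 Mpc and |v| < 3500 km/s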
				R200.append(np.exp(-1.86)*len(np.where((R_Mags[i] < -19.55) & (Rdata[i] < 1.0) & (np.abs(Vdata[i]) < 3500))[0])**0.51)
				HVD.append(astats.biweight_midvariance(Vdata[i][np.where((Rdata[i] < 1.0)&(np.abs(Vdata[i])<4000))]))
			R200 = np.array(R200)
			HVD = np.array(HVD)
			Z = np.array([0.05]*len(R200))
			if self.avg_meth == 'mean': BinR200 = np.mean(R200); BinHVD = np.mean(HVD); BinZ = np.mean(Z)
			elif self.avg_meth == 'median': BinR200 = np.median(R200); BinHVD = np.median(HVD); BinZ = np.median(Z)
			D.add({'BinR200':BinR200,'BinHVD':BinHVD,'BinZ':BinZ,'R200':R200,'HVD':HVD,'Z':Z})

		else:
			self.fed_halo_data = True
			M200,R200,HVD,Z = HaloData
			if self.avg_meth == 'mean':
				BinM200 = np.mean(M200)
				BinR200 = np.mean(R200)
				BinHVD = np.mean(HVD)
				BinZ = np.mean(Z)
			elif self.avg_meth == 'median':
				BinM200 = np.median(M200)
				BinR200 = np.median(R200)
				BinHVD = np.median(HVD)
				BinZ = np.median(Z)
			# Append to Data
			D.add({'BinM200':BinM200,'BinR200':BinR200,'BinHVD':BinHVD,'BinZ':BinZ})	

		# Create Dummy Variables for Magnitudes if necessary
		if self.feed_mags == False:
			G_Mags,R_Mags,I_Mags = [],[],[]
			for i in range(stack_num):
				G_Mags.append([None]*len(Rdata[i]))
				R_Mags.append([None]*len(Rdata[i]))
				I_Mags.append([None]*len(Rdata[i]))
			G_Mags,R_Mags,I_Mags = np.array(G_Mags),np.array(R_Mags),np.array(I_Mags)

		# Create galaxy identification arrays
		ENS_gal_id,ENS_clus_id,IND_gal_id = [],[],[]
		gal_count = 0
		for i in range(stack_num):
			ENS_gal_id.append(np.arange(gal_count,gal_count+len(Rdata[i])))
			ENS_clus_id.append(np.array([HaloID[i]]*len(Rdata[i]),int))
			IND_gal_id.append(np.arange(len(Rdata[i])))
		ENS_gal_id,ENS_clus_id,IND_gal_id = np.array(ENS_gal_id),np.array(ENS_clus_id),np.array(IND_gal_id)

		# Iterate through phase spaces
		for self.l in range(stack_num):

			# Limit Phase Space
			if self.stack_raw == False:
				r,v,ens_gal_id,ens_clus_id,ind_gal_id,gmags,rmags,imags,samp_size = self.U.limit_gals(Rdata[self.l],Vdata[self.l],ENS_gal_id[self.l],ENS_clus_id[self.l],IND_gal_id[self.l],G_Mags[self.l],R_Mags[self.l],I_Mags[self.l],R200[self.l])

			# Build Ensemble and LOS Phase Spaces
			if self.stack_raw == False:
				ens_r,ens_v,ens_gal_id,ens_clus_id,ens_gmags,ens_rmags,ens_imags,ind_r,ind_v,ind_gal_id,ind_gmags,ind_rmags,ind_imags = self.U.build(r,v,ens_gal_id,ens_clus_id,ind_gal_id,gmags,rmags,imags,R200[self.l])

			# If Scale data before stack is desired
			if self.scale_data == True:
				ens_r /= R200[self.l]

			# Stack Ensemble Data by extending to Data() container
			names = ['ens_r','ens_v','ens_gmags','ens_rmags','ens_imags','ens_gal_id','ens_clus_id']
			D.extend(ez.create(names,locals()))

			if self.run_los == True:
		
				# Create Data Block
				ind_data = np.vstack([ind_r,ind_v,ind_gal_id,ind_gmags,ind_rmags,ind_imags])

				# Sort by Rmag 
				bright = np.argsort(ind_rmags)
				ind_data = ind_data.T[bright].T
				ind_r,ind_v,ind_gal_id,ind_gmags,ind_rmags,ind_imags = ind_data
	
				# Reduce phase space
				if self.stack_raw == False and self.gal_reduce == True:
					within = np.where(ind_r <= R200[self.l])[0]
					end = within[:self.gal_num + 1][-1]
					ind_data = ind_data.T[:end].T
					ind_r,ind_v,ind_gal_id,ind_gmags,ind_rmags,ind_imags = ind_data
	
				# Calculate individual HVD
				# Pick out gals within r200
				within = np.where(ind_r <= R200[self.l])[0]
				gal_count = len(within)
				if gal_count <= 3:
					# biweightScale can't take fewer than 4 elements
					# Calculate hvd with numpy std of galaxies within r200 (b/c this is quoted richness)
					ind_hvd = np.std(np.copy(ind_v)[within])
				else:
					# Calculate hvd with astStats biweightScale (see Beers 1990)
					try:
						ind_hvd = astats.biweight_midvariance(np.copy(ind_v)[within])
					# Sometimes divide by zero error in biweight function for low gal_num
					except ZeroDivisionError:
						print 'ZeroDivisionError in biweight function'
						print 'ind_v[within]=',ind_v[within]
						ind_hvd = np.std(np.copy(ind_v)[within])

				# If run_los == True, run Caustic Technique on individual cluster
				self.U.print_separation('# Running Caustic for LOS '+str(self.l),type=2)
				self.run_caustic(ind_r,ind_v,R200[self.l],ind_hvd,clus_z=Z[self.l],mirror=self.mirror)
				ind_caumass = np.array([self.C.M200_fbeta])
				ind_caumass_est = np.array([self.C.Mass2.M200_est])
				ind_edgemass = np.array([self.C.M200_edge])
				ind_edgemass_est = np.array([self.C.MassE.M200_est])
				ind_causurf = np.array(self.C.caustic_profile)
				ind_nfwsurf = np.array(self.C.caustic_fit)
				ind_edgesurf = np.array(self.C.caustic_edge)

			# Append Individual Cluster Data
			names = ['ind_r','ind_v','ind_gal_id','ind_gmags','ind_rmags','ind_imags',
				'ind_hvd','ind_caumass','ind_caumass_est','ind_edgemass','ind_edgemass_est',
				'ind_causurf','ind_nfwsurf']
			D.append(ez.create(names,locals()))


		# Turn Ensemble into Arrays
		names = ['ens_r','ens_v','ens_gal_id','ens_clus_id','ens_gmags','ens_rmags','ens_imags','ens_caumass','ens_caumass_est','ens_edgemass','ens_edgemass_est','ens_causurf','ens_nfwsurf','ens_edgesurf']
		D.to_array(names,ravel=True)

		# Re-scale data if scale_data == True:
		if self.scale_data == True:
			D.ens_r *= BinR200

		# Create Ensemble Data Block
		D.ens_data = np.vstack([D.ens_r,D.ens_v,D.ens_gal_id,D.ens_clus_id,D.ens_gmags,D.ens_rmags,D.ens_imags])

		# Shiftgapper for Interloper Treatment
		if self.stack_raw == False and self.ens_shiftgap == True:
			self.D = D
			try:
				D.ens_data = self.C.shiftgapper(D.ens_data.T).T
				D.ens_r,D.ens_v,D.ens_gal_id,D.ens_clus_id,D.ens_gmags,D.ens_rmags,D.ens_imags = D.ens_data
			except UnboundLocalError:	# A couple or no galaxies within RVIR
				print '-'*40
				print 'UnboundLocalError raised on Ensemble Shiftgapper'
				print '-'*40

		D.ens_r,D.ens_v,D.ens_gal_id,D.ens_clus_id,D.ens_gmags,D.ens_rmags,D.ens_imags = D.ens_data

		# Sort by R_Mag
		bright = np.argsort(D.ens_rmags)
		D.ens_data = D.ens_data.T[bright].T
		D.ens_r,D.ens_v,D.ens_gal_id,D.ens_clus_id,D.ens_gmags,D.ens_rmags,D.ens_imags = D.ens_data

		# Reduce System Down to gal_num richness within BinR200
		if self.stack_raw == False and self.gal_reduce == True:
			within = np.where(D.ens_r <= BinR200)[0]
			print 'len(within)',len(within)
			print 'gal_num,line_num',self.gal_num,self.line_num
			end = within[:self.gal_num*self.line_num + 1][-1]
			D.ens_data = D.ens_data.T[:end].T
			D.ens_r,D.ens_v,D.ens_gal_id,D.ens_clus_id,D.ens_gmags,D.ens_rmags,D.ens_imags = D.ens_data

		# Calculate Ensemble Velocity Dispersion for galaxies within R200
		ens_hvd = astats.biweight_midvariance(np.copy(D.ens_v)[np.where(D.ens_r<=BinR200)])

		# Run Caustic Technique!
		try: self.U.print_separation('# Running Caustic on Ensemble '+str(self.j),type=2)
		except: pass
		try:
			self.run_caustic(D.ens_r,D.ens_v,BinR200,ens_hvd,clus_z=BinZ,mirror=self.mirror,shiftgap=self.ens_shiftgap,edge_int_remove=self.edge_int_remove)
			ens_caumass = np.array([self.C.M200_fbeta])
			ens_caumass_est = np.array([self.C.Mass2.M200_est])
			ens_edgemass = np.array([self.C.M200_edge])
			ens_edgemass_est = np.array([self.C.MassE.M200_est])
			ens_causurf = np.array(self.C.caustic_profile)
			ens_nfwsurf = np.array(self.C.caustic_fit)
			ens_edgesurf = np.array(self.C.caustic_edge)
			ens_caumass500_est = np.array(self.C.Mass2.M500_est)
			ens_edgemass500_est = np.array(self.C.MassE.M500_est)
			ens_r200_est = np.array(self.C.r200_est_fbeta)
			ens_r500_est = np.array(self.C.r500_est_fbeta)
			ens_r200_est_edge = np.array(self.C.r200_est_edge)

		except:
			print ''
			print '-'*45
			print 'CAUSTIC TECHNIQUE FAILED ON THIS CLUSTER'
			print '-'*45
			print ''
			ens_caumass = np.array([0])
			ens_caumass_est = np.array([0])
			ens_edgemass = np.array([0])
			ens_edgemass_est = np.array([0])
			ens_causurf = np.array([0]*len(self.C.x_range))
			ens_nfwsurf = np.array([0]*len(self.C.x_range))
			ens_edgesurf = np.array([0]*len(self.C.x_range))
			ens_caumass500_est = np.array([0])
			ens_edgemass500_est = np.array([0])
			ens_r200_est = np.array([0])
			ens_r500_est = np.array([0])
			ens_r200_est_edge = np.array([0])

		# Other Arrays
		x_range = self.C.x_range

		# Append Data
		names = ['ens_caumass','ens_hvd','ens_caumass_est','ens_edgemass','ens_edgemass_est','ens_causurf','ens_nfwsurf','ens_edgesurf','x_range','ens_caumass500_est','ens_edgemass500_est','ens_r200_est','ens_r500_est','ens_r200_est_edge']
		D.add(ez.create(names,locals()))

		# Turn Individual Data into Arrays
		if self.run_los == True:
			names = ['ind_caumass','ind_caumass_est','ind_edgemass','ind_edgemass_est','ind_hvd']
			D.to_array(names,ravel=True)
			names = ['ind_r','ind_v','ind_gal_id','ind_gmags','ind_rmags','ind_imags','ind_causurf','ind_nfwsurf','ind_edgesurf']
			D.to_array(names)	

		# Output Data
		return D.__dict__
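
	# A minimal usage sketch (hypothetical inputs; the required parameters are
	# described in the docstring above and fed to Stack() as a dictionary):
	# variables = {'gal_num':15, 'line_num':25, 'scale_data':True, 'run_los':False,
	#              'avg_meth':'median', 'mirror':True}
	# S = Stack(variables)
	# results = S.caustic_stack(Rdata, Vdata, HaloID, HaloData, stack_num=25)
	# results['ens_caumass']   # ensemble caustic mass estimate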
	def recover(self,data_loc=None,write_loc=None,raw_data=False,ss=True,mm=False,go_global=True,ens_only=True,cent_offset=None):

		"""
		This function uploads the pickle files from directory stack_data and configures them into multi dimensional arrays.
		It is meant to work with the self-stacked ensembles.
		go_global = True makes variables uploaded to global dictionary, False makes it returned to a dictionary
		write_loc = place where data lives
		raw_data : if True, output just mass estimates and caustic surfaces, no statistical calculations
		"""

		## Load Data ##
		# Attach certain variables to class
		self.ss = ss
		self.mm = mm

		# Create Open Container for Data
		D = Data()

		# Create list of variables you want to load in and stack into large array
		# Not all names in list need to be in stack_data, it will take those that exist
		load_names = [
		'ens_r','ens_v','ens_gmags','ens_rmags','ens_imags','ens_caumass','ens_caumass_est','ens_edgemass','ens_edgemass_est','ens_hvd',
		'ens_causurf','ens_edgesurf','ens_nfwsurf','ens_gal_id','ens_clus_id',
		'los_r','los_v','los_gmags','los_rmags','los_imags','los_caumass','los_caumass_est','los_edgemass','los_edgemass_est','los_hvd',
		'los_causurf','los_edgesurf','los_nfwsurf','los_gal_id',
		'pro_pos','bootstrap_select','ens_r200_est',
		'BinM200','BinR200','BinHVD'
		]

		# Open First Data file and load in varib dictionary
		pkl_file = open(root+'/OSDCStacking/'+data_loc+'/'+write_loc+'/Ensemble_'+str(0)+'_Data.pkl','rb')
		input = pkl.Unpickler(pkl_file)
		stack_data 	= input.load()
		varib		= input.load()

		# Add varib and first Ensemble and other single arrays to Data
		D.__dict__.update(varib)
		D.append(stack_data,keys=load_names)
		D.add({'x_range':stack_data['x_range']})

		# Add varib to Classes
		self.__dict__.update(varib)
		self.U = Universal(varib)
		self.M = Millennium(varib)

		# Define halo_range: number of ensembles to load
		if self.ss:	self.halo_range = range(varib['halo_num'])
		else:		self.halo_range = range(varib['halo_num']/varib['line_num'])

		## Get Halo Data
		# Try and get halos from data_loc dictionary
		try:
			file = open(root+'/OSDCStacking/'+data_loc+'/halo_arrays.pkl','rb')
			input = pkl.Unpickler(file)
			data = input.load()
			D.add(data,keys=['HaloData', 'DEC', 'RA', 'HaloID','richness'])
			D.M_crit200,D.R_crit200,D.Z,D.HVD,D.HPX,D.HPY,D.HPZ,D.HVX,D.HVY,D.HVZ = D.HaloData
		except IOError:
			# Load and Sort Halos by Mass
			D.HaloID,D.HaloData = self.M.load_halos()
			D.HaloID,D.HaloData = self.M.sort_halos(D.HaloID,D.HaloData)
			D.HaloID,D.M_crit200,D.R_crit200,D.Z,D.HVD,D.HPX,D.HPY,D.HPZ,D.HVX,D.HVY,D.HVZ = np.vstack((D.HaloID,D.HaloData))
			D.HaloID = np.array(D.HaloID,int)

		# Build Halo_P, Halo_V
		Halo_P = np.vstack([D.HPX,D.HPY,D.HPZ])
		Halo_V = np.vstack([D.HVX,D.HVY,D.HVZ])

		# Loop over remaining ensembles (the 0th was loaded above)
		j = 2
		for i in self.halo_range[1:]:
			# Progress Bar
			sys.stdout.write("Progress... "+str(j)+" out of "+str(len(self.halo_range))+"\r")
			sys.stdout.flush()
			j += 1
			pkl_file = open(root+'/OSDCStacking/'+data_loc+'/'+write_loc+'/Ensemble_'+str(i)+'_Data.pkl','rb')
			input = pkl.Unpickler(pkl_file)
			stack_data = input.load()
		
			# Append variables to data container D	
			D.append(stack_data,keys=load_names)

		print ''

		# Convert to arrays
		D.to_array(load_names)
		D.upper(names=load_names)

		# Return Data
		if raw_data == True:
			# Return either a dictionary or dump to globals()
			if go_global == True:
				globals().update(D.__dict__)
				return
			elif go_global == False:
				return  D.__dict__

		################################
		### Statistical Calculations ###
		################################
		if self.ss:	# If self stacking is True
			D.M200 = D.M_crit200[0:self.halo_num]
			D.HVD200 = D.HVD[0:self.halo_num]	
			ENS_MFRAC,ens_mbias,ens_mscat,ENS_VFRAC,ens_vbias,ens_vscat = self.stat_calc(D.ENS_CAUMASS.ravel(),D.M200,D.ENS_HVD.ravel(),D.HVD200)
			if ens_only == True:
				LOS_MFRAC,los_mbias,los_mscat,LOS_VFRAC,los_vbias,los_vscat = None,None,None,None,None,None
			else:
				LOS_MFRAC,los_mbias,los_mscat,LOS_VFRAC,los_vbias,los_vscat = self.stat_calc(D.LOS_CAUMASS.ravel(),D.M_crit200[0:self.halo_num],D.LOS_HVD.ravel(),D.HVD[0:self.halo_num],ens=False)
		else:		# If self stacking is False
			if mm == True:
				ENS_MFRAC,ens_mbias,ens_mscat,ENS_VFRAC,ens_vbias,ens_vscat = self.stat_calc(D.ENS_EDGEMASS,D.BINM200,D.ENS_HVD,D.BINHVD,data_set=None)#'cut_low_mass')
			else:	
				ENS_MFRAC,ens_mbias,ens_mscat,ENS_VFRAC,ens_vbias,ens_vscat = self.stat_calc(D.ENS_EDGEMASS,D.BINM200,D.ENS_HVD,D.BINHVD)	
			if ens_only == True:
				LOS_MFRAC,los_mbias,los_mscat,LOS_VFRAC,los_vbias,los_vscat = None,None,None,None,None,None
			else:
				LOS_MFRAC,los_mbias,los_mscat,LOS_VFRAC,los_vbias,los_vscat = self.stat_calc(D.LOS_CAUMASS.ravel(),D.M_crit200[0:self.halo_num],D.LOS_HVD.ravel(),D.HVD[0:self.halo_num],ens=False)
	
		if cent_offset is not None:
			OFF_MFRAC,off_ens_mbias,off_ens_mscat,OFF_ENS_VFRAC,off_ens_vbias,off_ens_vscat = self.stat_calc(D.OFF_ENS_CAUMASS,D.BIN_M200,D.OFF_ENS_HVD,D.BIN_HVD,data_set=None)


		# Create a dictionary
		names = ['ens_mbias','ens_mscat','los_mbias','los_mscat','ens_vbias','ens_vscat','los_vbias','los_vscat','ENS_MFRAC','ENS_VFRAC','LOS_MFRAC','LOS_VFRAC']
		mydict = ez.create(names,locals())
		D.add(mydict)

		# Return either a dictionary or dump to globals()
		if go_global == True:
			globals().update(D.__dict__)
			return
		elif go_global == False:
			return  D.__dict__
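
	# A minimal usage sketch for recover (hypothetical locations; R stands for
	# an instance of the class defining recover()):
	# d = R.recover(data_loc='binstack/bootstrap1/rep1', write_loc='bo_m0_run1',
	#               ss=False, mm=False, go_global=False)
	# d['ens_mbias'], d['ens_mscat']   # ensemble mass bias and scatter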
	iter_array	= np.arange(1,50)
	tab_shape	= (7,7)
	write_stem	= 'bs_m0_run'
	write_loc	= 'bs_m0_run5'
	data_loc	= 'binstack/bs_run_table'+str(table_num)
	ss		= False
	mm		= False
	cent_offset	= None

	kwargs = {'write_loc':write_loc,'raw_data':False,'ss':ss,'mm':mm,'go_global':False,'ens_only':True,'data_loc':data_loc,'cent_offset':cent_offset}

	data = W.load_all(kwargs=kwargs,write_stem=write_stem,iter_array=iter_array,tab_shape=tab_shape)

	names = ['ENS_MBIAS','ENS_MSCAT','ENS_VBIAS','ENS_VSCAT','LOS_MBIAS','LOS_MSCAT','LOS_VBIAS','LOS_VSCAT','RUN_NUM','GAL_NUM','LINE_NUM','RICH_NUM','OFF_ENS_MBIAS','OFF_ENS_MSCAT','OFF_ENS_VBIAS','OFF_ENS_VSCAT']

	dictionary = ez.create(names,data)

	file = open(root+'/OSDCStacking/'+data_loc+'/table_analysis.pkl','wb')
	output = pkl.Pickler(file)
	output.dump(dictionary)
	file.close()
Example #9
def cluster_rich(data_loc,halo_file,chris_data_root,chris_data_file,newfilename,write_data=True,clobber=True):
	""" Calculate a Cluster's richness via Miller N200 and Kern N200
		data_loc : e.g. MassRich/TRUTH_CAUSTIC
		halo_file : e.g. halos.fits
		chris_data_root : e.g. /nfs/christoq_ls/C4/sdssdr12
		chris_data_file : e.g. DR12_GalaxyPhotoData_wabs_wedges.fits or m19.1_allgals_wsdss_specerrs_abs.fits
 """

	# Run through Kern richness estimator, mainly to get cluster pairs

	root = '/nfs/christoq_ls/nkern'

	C4 = CFOUR({'H0':70,'chris_data_root':chris_data_root})
	C = Caustic()
	H0 = 70.0
	c = 2.99792e5
	Cosmo = cosmo.LambdaCDM(H0,0.3,0.7)
	keys = ['C','H0','c','Cosmo','C4']
	varib = ez.create(keys,locals())
	R = RICHNESS(varib)

	# Load Halos
	halos = fits.open(data_loc+'/'+halo_file)[1].data
	HaloID = halos['orig_order']
	RA = halos['ra_avg']
	DEC = halos['dec_avg']
	Z = halos['z_avg']
	RVIR = halos['RVIR']

	# Load Galaxy Data
	gals = fits.open(chris_data_root+'/'+chris_data_file)[1].data
	# Rename gals columns to the SDSS-DR12 convention when not using the DR12 galaxy file
	if data_loc[-4:] != 'DR12':
		gals = dict(map(lambda x: (x,gals[x]),gals.names))
		gals['objid'] = gals.pop('GAL_HALOID')
		gals['ra'] = gals.pop('GAL_RA')
		gals['dec'] = gals.pop('GAL_DEC')
		gals['z'] = gals.pop('GAL_Z_APP')
		gals['u_mag'] = gals.pop('GAL_SDSS_U')
		gals['g_mag'] = gals.pop('GAL_SDSS_G')
		gals['r_mag'] = gals.pop('GAL_SDSS_R')
		gals['i_mag'] = gals.pop('GAL_SDSS_I')
		gals['z_mag'] = gals.pop('GAL_SDSS_Z')
		gals['r_absmag'] = gals.pop('R_ABSMAG')

	# Derive Gal Cut Out Parameters
	arcs = np.array(Cosmo.arcsec_per_kpc_proper(Z))*15000. / 3600.    # 15 Mpc in degrees
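	# Cosmo.arcsec_per_kpc_proper(Z) gives arcseconds per proper kpc at each
	# redshift; x 15000 kpc gives the angle subtended by 15 Mpc, / 3600 converts
	# arcseconds to degrees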

	# Kern richness arrays
	kern_N200 = []
	HVD = []
	pair_avg = []
	Nspec = []
	kern_obs_tot = []
	kern_obs_back = []		# obs_back is the number of non-member galaxies in the central aperture around the cluster (i.e. already scaled to the inner aperture)

	# Miller Richness Arrays
	new = fits.open(chris_data_root+'/richness_cr200_bcg/new.fits')[0].data
	newb = fits.open(chris_data_root+'/richness_cr200_bcg/newb.fits')[0].data
	newb *= 0.2
	miller_N200 = []
	miller_obs_tot = []
	miller_obs_back = []
	colfac = 9
	bakfac = 1
	mag3 = 4
	v = 2
	radfac = 1
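	# The constants above are assumed to index fixed color, background, magnitude,
	# velocity, and radius choices within Miller's precomputed richness grids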

	# Loop over clusters
	print ''
	print '-'*40
	print '...calculating cluster richnesses'
	for i in range(len(HaloID)):

		if i % 100 == 0: print '...working on cluster '+str(i)+' out of '+str(len(HaloID))
		# Define Cluster parameters
		clus_ra = RA[i]
		clus_dec = DEC[i]
		clus_z = Z[i]
		clus_rvir = RVIR[i]
		haloid = HaloID[i]

		if np.isnan(clus_ra) == True or np.isnan(clus_dec) == True or np.isnan(clus_z) == True:
			# Keep all output arrays aligned with HaloID when skipping a cluster
			kern_N200.append(0)
			HVD.append(0)
			pair_avg.append(False)
			Nspec.append(0)
			kern_obs_tot.append(0)
			kern_obs_back.append(0)
			miller_N200.append(0)
			miller_obs_tot.append(0)
			miller_obs_back.append(0)
			continue

		# 15 Mpc in degrees of declination and degrees of RA
		d_dec = arcs[i]
		d_ra = d_dec/np.cos(clus_dec*np.pi/180)
		d_z = 0.04

		# Cut Out Galaxy Data Around Cluster
		cut = np.where( (np.abs(gals['ra']-clus_ra)<d_ra) & (np.abs(gals['dec']-clus_dec)<d_dec) & (np.abs(gals['z']-clus_z) < d_z))[0]
		gal_ra = gals['ra'][cut]
		gal_dec = gals['dec'][cut]
		gal_z = gals['z'][cut]
		gal_gmags = gals['g_mag'][cut]
		gal_rmags = gals['r_mag'][cut]
		gal_imags = gals['i_mag'][cut]
		gal_absr = gals['r_absmag'][cut]

		# Run Kern Richness Estimator
		rich = R.richness_est(gal_ra,gal_dec,gal_z,np.zeros(len(gal_z)),gal_gmags,gal_rmags,gal_imags,gal_absr,haloid,clus_ra,clus_dec,clus_z,clus_rvir=clus_rvir,spec_list=None,use_specs=False,use_bcg=False,fit_rs=False,fixed_vdisp=False,fixed_aperture=False,plot_sky=False,plot_gr=False,plot_phase=False,find_pairs=True)

		kern_N200.append(rich)
		HVD.append(R.vel_disp)
		pair_avg.append(R.pair)
		Nspec.append(R.Nspec)
		kern_obs_tot.append(R.obs_tot)
		kern_obs_back.append(R.obs_back_scaled)

		# Append Miller Richness Values
		k = halos['orig_order'][i]
		miller_N200.append(new[k,colfac,mag3,v,radfac] - newb[k,bakfac,mag3,v,radfac])
		miller_obs_tot.append(new[k,colfac,mag3,v,radfac])
		miller_obs_back.append(newb[k,bakfac,mag3,v,radfac])


	kern_N200 = np.array(kern_N200)
	HVD = np.array(HVD)
	pair_avg = np.array(pair_avg)
	Nspec = np.array(Nspec)
	kern_obs_tot = np.array(kern_obs_tot)
	kern_obs_back = np.array(kern_obs_back)

	miller_N200 = np.array(miller_N200)
	miller_obs_tot = np.array(miller_obs_tot)
	miller_obs_back = np.array(miller_obs_back)

	print '...finished calculating richnesses'
	## Write Data Out
	if write_data == True:
		print '...writing out halos.fits file'

		# Dictionary of new columns
		new_keys = ['kern_N200','HVD','pair_avg','Nspec','kern_obs_tot','kern_obs_back','miller_N200','miller_obs_tot','miller_obs_back']
		new_dic = ez.create(new_keys,locals())

		# Original fits record file
		orig_table = halos

		# Write out own fits file
		keys = ['HaloID','RVIR'] + new_keys
		dic = ez.create(keys,locals())
		fits_table(dic,keys,data_loc+'/richnesses.fits',clobber=True)

		# Append new columns
		fits_append(orig_table,new_dic,new_keys,filename=data_loc+'/'+newfilename,clobber=clobber)
		print '-'*40
		print ''
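
# A minimal usage sketch for cluster_rich, using the example values from its
# docstring (hypothetical output filename):
# cluster_rich('MassRich/TRUTH_CAUSTIC', 'halos.fits',
#              '/nfs/christoq_ls/C4/sdssdr12', 'DR12_GalaxyPhotoData_wabs_wedges.fits',
#              'halos_withrich.fits', write_data=True, clobber=True)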
Example #10
def clus_avg(data_loc,halo_file,chris_data_root,newfilename,write_data=True,clobber=True):

	C4 = CFOUR({'H0':70,'chris_data_root':chris_data_root})
	C = Caustic()

	# Load Halos
	halos = fits.open(data_loc+'/'+halo_file)[1].data
	HaloID = halos['orig_order']
	RA = halos['ra_bcg']
	DEC = halos['dec_bcg']
	Z = halos['z_biwt']
	RVIR = halos['RVIR']
	SINGLE = halos['single']
	SUB = halos['sub']
	NC4 = halos['nc4']

	RA_AVG,DEC_AVG,Z_AVG = [],[],[]

	# Loop Over Halos
	print ''
	print '-'*40
	print '...running average cluster center code'
	for i in range(len(halos)):
		if i % 100 == 0: print '...working on cluster '+str(i)+' out of '+str(len(halos))
		try:
			# Assign Halo Properties
			clus_ra = RA[i]
			clus_dec = DEC[i]
			clus_z = Z[i]

			# Load Galaxies
			galdata = C4.load_chris_gals(HaloID[i])
			gal_ra,gal_dec,gal_z,gal_gmags,gal_rmags,gal_imags = galdata

			# Take Iterative Average, four times
			# vlim = 1500, rlim = 1.5
			clus_ra,clus_dec,clus_z = proj_avg(clus_ra,clus_dec,clus_z,gal_ra,gal_dec,gal_z,1500,1.5,C)
			# vlim = 1000, rlim = 1.5
			clus_ra,clus_dec,clus_z = proj_avg(clus_ra,clus_dec,clus_z,gal_ra,gal_dec,gal_z,1000,1.5,C)
			# vlim = 1000, rlim = 1.5
			clus_ra,clus_dec,clus_z = proj_avg(clus_ra,clus_dec,clus_z,gal_ra,gal_dec,gal_z,1000,1.5,C)
			# vlim = 2000, rlim = 1.5
			clus_ra,clus_dec,clus_z = proj_avg(clus_ra,clus_dec,clus_z,gal_ra,gal_dec,gal_z,2000,1.5,C)

		except:
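			# Fall back to a null center if galaxy loading or iterative averaging fails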
			print i
			clus_ra,clus_dec,clus_z = 0, 0, 0

		RA_AVG.append(clus_ra)
		DEC_AVG.append(clus_dec)
		Z_AVG.append(clus_z)

	RA_AVG,DEC_AVG,Z_AVG = np.array(RA_AVG),np.array(DEC_AVG),np.array(Z_AVG)

	print '...finished average cluster-center calculations'

	## Write Data Out
	if write_data == True:
		print '...writing out cluster catalogue with average centers included'
		# Dictionary of new columns
		new_keys = ['RA_AVG','DEC_AVG','Z_AVG']
		new_dic = ez.create(new_keys,locals())

		# Original fits record file
		orig_table = halos

		# Write own fits file
		keys = ['HaloID','RA','DEC','Z','RVIR','RA_AVG','DEC_AVG','Z_AVG']
		dic = ez.create(keys,locals())
		fits_table(dic,keys,data_loc+'/avg_centers.fits',clobber=True)

		# Append new columns
		fits_append(orig_table,new_dic,new_keys,filename=data_loc+'/'+newfilename,clobber=clobber)
		print '-'*40
		print ''
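
# A minimal usage sketch for clus_avg (hypothetical paths):
# clus_avg('MassRich/TRUTH_CAUSTIC', 'halos.fits',
#          '/nfs/christoq_ls/MILLENNIUM/Henriques/TRUTH_CAUSTIC',
#          'halos_avgcenters.fits', write_data=True, clobber=True)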
Example #16
import astropy.cosmology as cosmo
from causticpy import Caustic
import astropy.stats as astats
from mpl_toolkits.mplot3d.axes3d import Axes3D
import DictEZ as ez
import cPickle as pkl
from sklearn import linear_model
from c4_pairfind import pair_find

# Set Constants
C = Caustic()
H0 = 72.0
c = 2.99792e5
Cosmo = cosmo.LambdaCDM(H0,0.3,0.7)
keys = ['C','H0','c','Cosmo']
varib = ez.create(keys,locals())
root = '/nfs/christoq_ls/nkern'



# Calculate Richnesses
class Richness(object):

	def __init__(self,varib):
		self.__dict__.update(varib)
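		# e.g. after R = Richness(varib), R.H0 == 72.0 and R.C is the Caustic instance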

	def richness_est(self,ra,dec,z,pz,gmags,rmags,imags,abs_rmags,haloid,clus_ra,clus_dec,clus_z,clus_rvir=None,gr_slope=None,gr_width=None,gr_inter=None,shiftgap=True,use_specs=False,use_bcg=False,fit_rs=False,spec_list=None,fixed_aperture=False,fixed_vdisp=False,plot_gr=False,plot_sky=False,plot_phase=False,find_pairs=True):
		''' Richness estimator, magnitudes should be apparent mags except for abs_rmag
			z : spectroscopic redshift
			pz : photometric redshift
			gr_slope : if fed color-mag (g-r vs. r) slope of fit to red sequence
Example #17
	def richness_est(self,ra,dec,z,pz,gmags,rmags,imags,abs_rmags,haloid,clus_ra,clus_dec,clus_z,clus_rvir=None,gr_slope=None,gr_width=None,gr_inter=None,shiftgap=True,use_specs=False,use_bcg=False,fit_rs=False,spec_list=None,fixed_aperture=False,fixed_vdisp=False,plot_gr=False,plot_sky=False,plot_phase=False,find_pairs=True):
		''' Richness estimator, magnitudes should be apparent mags except for abs_rmag
			z : spectroscopic redshift
			pz : photometric redshift
			gr_slope : if fed color-mag (g-r vs. r) slope of fit to red sequence
			gr_width : if fed color-mag (g-r vs. r) width of fit to red sequence
			gr_inter : if fed color-mag (g-r vs. r) intercept of fit to red sequence
			spec_list : indexing array specifying gals w/ spectroscopic redshifts, preferably fed as boolean numpy array
			fixed_aperture : clus_rvir = 1 Mpc
			fixed_vdisp : clus_vdisp = 1000 km/s (members < clus_vdisp*2)
			use_specs : includes all spectro members in richness regardless of their color, subject to counting twice
			use_bcg : use bcg RS_color as cluster RS_color
			fit_rs : fit a line to the member galaxy RS relationship, as of now this does not work and we take a flat RS relationship
			plot_gr : plot r vs g-r color diagram with RS fits
			plot_sky : plot RA and DEC of gals with signal and background annuli
			plot_phase : plot rdata and vdata phase space
			find_pairs : run c4 cluster pair identifier on phase spaces
			- If at least one quarter of the outer annulus contains no galaxies (perhaps because it runs over the observation edge),
				the function returns -99 as the richness
		'''
		# Put Into Class Namespace
		keys = ['ra','dec','z','pz','gmags','rmags','imags','clus_ra','clus_dec','clus_z','clus_rvir','vel_disp','color_data','color_cut','RS_color','RS_sigma','clus_color_cut','signal','background','richness','mems','phot','spec','spec_mems','deg_per_rvir','SA_outer','SA_inner','outer_red_dense','inner_background','Nphot_inner','Nphot_outer','red_inner','red_outer','all','outer_edge','inner_edge','shift_cut','set','pair','Nspec','gr_slope','gr_inter','gr_width']
		self.__dict__.update(ez.create(keys,locals()))

		# Take a rough virial radius measurement; this method is BAD...
		# Note: -19.55 is an absolute-magnitude threshold, so abs_rmags (not apparent
		# rmags) is compared, and rdata/vdata must already exist (e.g. from a prior
		# pass), since they are only computed further below.
		if clus_rvir == None:
			clus_rvir = np.exp(-1.86)*len(np.where((abs_rmags < -19.55) & (rdata < 1.0) & (np.abs(vdata) < 3500))[0])**0.51
			self.clus_rvir = clus_rvir

		# Set clus_rvir = 1.0 Mpc if fixed aperture == True
		if fixed_aperture == True:
			clus_rvir = 1.0

		# Magnitude cut at 19.1 apparent R mag, then at -19.5 absolute R mag, then sort by absolute R mag
		bright = np.where(rmags < 19.1)[0]
		data = np.vstack([ra,dec,z,pz,gmags,rmags,imags,abs_rmags])
		data = data.T[bright].T
		ra,dec,z,pz,gmags,rmags,imags,abs_rmags = data

		bright = np.where(abs_rmags < -19.5)[0]
		data = np.vstack([ra,dec,z,pz,gmags,rmags,imags,abs_rmags])
		data = data.T[bright].T
		ra,dec,z,pz,gmags,rmags,imags,abs_rmags = data

		sorts = np.argsort(abs_rmags)
		data = np.vstack([ra,dec,z,pz,gmags,rmags,imags,abs_rmags])
		data = data.T[sorts].T
		ra,dec,z,pz,gmags,rmags,imags,abs_rmags = data
		self.__dict__.update(ez.create(keys,locals()))

		# Separate Into Spectro Z and Photo Z DataSets
		# Define the Spectro Z catalogue
		if spec_list == None:
			spec_cut = np.abs(z) > 1e-4
		else:
			if type(spec_list) == list: spec_list = np.array(spec_list)
			if spec_list.dtype != 'bool':
				spec_cut = np.array([False]*len(ra))
				spec_cut[spec_list] = True
			else:
				spec_cut = spec_list	# already a boolean mask
		all = {'ra':ra,'dec':dec,'z':z,'pz':pz,'gmags':gmags,'rmags':rmags,'imags':imags,'abs_rmags':abs_rmags}
		spec = {'ra':ra[spec_cut],'dec':dec[spec_cut],'z':z[spec_cut],'pz':pz[spec_cut],'gmags':gmags[spec_cut],'rmags':rmags[spec_cut],'imags':imags[spec_cut],'abs_rmags':abs_rmags[spec_cut]}
		phot = {'ra':ra[~spec_cut],'dec':dec[~spec_cut],'z':z[~spec_cut],'pz':pz[~spec_cut],'gmags':gmags[~spec_cut],'rmags':rmags[~spec_cut],'imags':imags[~spec_cut],'abs_rmags':abs_rmags[~spec_cut]}

		# Project Spectra into radius and velocity
		ang_d,lum_d = C.zdistance(clus_z,self.H0)
		angles = C.findangle(spec['ra'],spec['dec'],clus_ra,clus_dec)
		rdata = angles * ang_d
		vdata = self.c * (spec['z'] - clus_z) / (1 + clus_z)
		spec.update({'rdata':rdata,'vdata':vdata})
		self.__dict__.update(ez.create(keys,locals()))

		# Take Hard Phasespace Limits
		limit = np.where( (rdata < 5) & (np.abs(vdata) < 5000) )[0]
		clus_data = np.vstack([rdata,vdata,spec['ra'],spec['dec'],spec['z'],spec['pz'],spec['gmags'],spec['rmags'],spec['imags'],spec['abs_rmags']])
		clus_data = clus_data.T[limit].T
		rdata,vdata,spec['ra'],spec['dec'],spec['z'],spec['pz'],spec['gmags'],spec['rmags'],spec['imags'],spec['abs_rmags'] = clus_data
		spec.update({'rdata':rdata,'vdata':vdata})
		self.__dict__.update(ez.create(keys,locals()))

		# Shiftgapper for Interlopers
		if shiftgap == True:
			len_before = np.where((rdata < clus_rvir*1.5)&(np.abs(vdata)<4000))[0].size
			clus_data = np.vstack([rdata,vdata,spec['ra'],spec['dec'],spec['z'],spec['pz'],spec['gmags'],spec['rmags'],spec['imags'],spec['abs_rmags']])
			clus_data = C.shiftgapper(clus_data.T).T
			sorts = np.argsort(clus_data[-1])
			clus_data = clus_data.T[sorts].T
			rdata,vdata,spec['ra'],spec['dec'],spec['z'],spec['pz'],spec['gmags'],spec['rmags'],spec['imags'],spec['abs_rmags'] = clus_data
			shift_cut = len_before - np.where((rdata < clus_rvir)&(np.abs(vdata)<4000))[0].size
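			# shift_cut = number of galaxies the shiftgapper removed from the ~rvir region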

		# Measure Velocity Dispersion of all galaxies within 1 * r_vir and np.abs(vdata) < 4000 km/s
		spec.update({'rdata':rdata,'vdata':vdata})
		self.__dict__.update(ez.create(keys,locals()))
		vel_disp = astats.biweight_midvariance(vdata[np.where((rdata < clus_rvir*1)&(np.abs(vdata) < 4000))])
		if fixed_vdisp == True: vel_disp = 1000

		# Run Cluster Pair Finder
		if find_pairs == True:
			pair, d1_chi, d2_chi, d3_chi, s_chi, double1, double2, double3, single, v_range, bins = pair_find(rdata,vdata)

		# Calculate Nspec, Nspec = # of galaxies within RVIR
		try:
			Nspec = np.where(rdata<clus_rvir)[0].size
		except:
			Nspec = 0
		self.__dict__.update(ez.create(keys,locals()))

		# Get members from spectra, get their red sequence color
		mems = np.where((spec['rdata'] < clus_rvir)&(np.abs(spec['vdata'])<2*vel_disp))[0]
		color_data = spec['gmags'] - spec['rmags']
		color_cut = np.where((color_data[mems] < 1.2) & (color_data[mems] > 0.65))[0]
		RS_color = astats.biweight_location(color_data[mems][color_cut])
		RS_shift = color_data[mems][color_cut] - RS_color
		RS_sigma = astats.biweight_midvariance(RS_shift[np.where(np.abs(RS_shift)<.15)])
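		# biweight_location / biweight_midvariance act as robust analogues of the mean
		# and dispersion, largely insensitive to the color outliers cut above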

		if fit_rs == True:
			clf = linear_model.LinearRegression()
			set = np.where(np.abs(color_data[mems]-RS_color)<3*RS_sigma)[0]
			clf.fit(spec['rmags'][mems][set].reshape(set.size,1),color_data[mems][set])

		if use_bcg == True:
			bright = np.argsort(all['abs_rmags'][mems])[0]
			RS_color = color_data[mems][bright]
			RS_shift = color_data[mems][color_cut] - RS_color
			RS_sigma = astats.biweight_midvariance(RS_shift[np.where(np.abs(RS_shift)<.15)])
			
		# spec_mems is # of members that are within 2 sigma of cluster color
		clus_color_cut = np.where(np.abs(color_data[mems] - RS_color) < RS_sigma*2)[0]
		spec_mems = len(clus_color_cut)
		self.__dict__.update(ez.create(keys,locals()))

		# If fed gr fit
		if gr_slope != None:
			def RS_color(r_mag): return r_mag*gr_slope + gr_inter
			RS_sigma = gr_width / 2
			clus_color_cut = np.where(np.abs(color_data[mems] - RS_color(spec['rmags'])[mems]) < RS_sigma*2)[0]
			spec_mems = len(clus_color_cut)
			self.__dict__.update(ez.create(keys,locals()))

		# Get Rdata from PhotoZ & SpecZ Data Set
		angles = C.findangle(all['ra'],all['dec'],clus_ra,clus_dec)
		all['rdata'] = angles * ang_d

		# Get deg per RVIR proper
		deg_per_rvir = self.Cosmo.arcsec_per_kpc_proper(clus_z).value * 1e3 * clus_rvir / 3600

		# Get area of the virial circle out to 1 rvir, and area of the outer annulus (4rvir < R < 6rvir by default, narrower for large rvir)
		if clus_rvir < 2.5:
			outer_edge = 6.0
			inner_edge = 4.0
		elif clus_rvir >= 2.5 and clus_rvir < 3:
			outer_edge = 5.0
			inner_edge = 3.5
		else:
			outer_edge = 3.0
			inner_edge = 2.0	
		SA_inner = np.pi*deg_per_rvir**2
		SA_outer = np.pi * ( (outer_edge*deg_per_rvir)**2 - (inner_edge*deg_per_rvir)**2 )

		# Get Number of Cluster Color Galaxies from Photo Data Set in Inner Circle and Outer Annuli
		RS_color_sig = 2.0
		if gr_slope == None:
			red_inner = np.where(((all['gmags']-all['rmags']) < RS_color + RS_color_sig*RS_sigma)&((all['gmags']-all['rmags']) > RS_color - RS_color_sig*RS_sigma)&(all['rdata']<clus_rvir))[0]
			red_outer = np.where(((all['gmags']-all['rmags']) < RS_color + 1.5*RS_sigma)&((all['gmags']-all['rmags']) > RS_color - 1.5*RS_sigma)&(all['rdata']<outer_edge*clus_rvir)&(all['rdata']>inner_edge*clus_rvir))[0]
		else:
			red_inner = np.where(((all['gmags']-all['rmags']) < RS_color(all['rmags']) + RS_color_sig*RS_sigma)&((all['gmags']-all['rmags']) > RS_color(all['rmags']) - RS_color_sig*RS_sigma)&(all['rdata']<clus_rvir))[0]
			red_outer = np.where(((all['gmags']-all['rmags']) < RS_color(all['rmags']) + 1.5*RS_sigma)&((all['gmags']-all['rmags']) > RS_color(all['rmags']) - 1.5*RS_sigma)&(all['rdata']<outer_edge*clus_rvir)&(all['rdata']>inner_edge*clus_rvir))[0]

		Nphot_inner = len(red_inner)
		Nphot_outer = len(red_outer)

		self.__dict__.update(ez.create(keys,locals()))

		# Get Solid Angle Density of Outer Red Galaxies
		outer_red_dense = Nphot_outer / SA_outer
		inner_background = int(np.ceil(outer_red_dense * SA_inner))

		# If inner_background is less than Nphot_inner, then Nphot_inner -= inner_background, otherwise Nphot_inner = 0
		if inner_background < Nphot_inner:
			Nphot_inner -= inner_background
		else:
			Nphot_inner = 0

		# Richness = spec_mems + Nphot_inner or just Nphot_inner
		if use_specs == True:
			richness = spec_mems + Nphot_inner
		else:
			richness = Nphot_inner
		self.__dict__.update(ez.create(keys,locals()))

		# Plot
		if plot_gr == True:
			fig,ax = mp.subplots()
			ax.plot(rmags,gmags-rmags,'ko',alpha=.8)
			ax.plot(spec['rmags'][mems],color_data[mems],'co')
			if gr_slope == None:
				ax.axhline(RS_color,color='r')
				ax.axhline(RS_color+RS_sigma,color='b')
				ax.axhline(RS_color-RS_sigma,color='b')
			else:
				xdata = np.arange(spec['rmags'][mems].min(),spec['rmags'][mems].max(),.1)
				ax.plot(xdata,RS_color(xdata),color='r')
				ax.plot(xdata,RS_color(xdata)+RS_sigma,color='b')
				ax.plot(xdata,RS_color(xdata)-RS_sigma,color='b')
			ax.set_xlim(13,19)
			ax.set_ylim(0,1.3)
			ax.set_xlabel('Apparent R Mag',fontsize=16)
			ax.set_ylabel('App G Mag - App R Mag',fontsize=16)
			ax.set_title('Color-Mag Diagram, Cluster '+str(haloid))
			fig.savefig('colormag_'+str(haloid)+'.png',bbox_inches='tight')
			mp.close(fig)

		if plot_sky == True:
			fig,ax = mp.subplots()
			ax.plot(all['ra'],all['dec'],'ko')
			ax.plot(all['ra'][red_inner],all['dec'][red_inner],'ro')
			ax.plot(all['ra'][red_outer],all['dec'][red_outer],'yo')
			ax.plot(clus_ra,clus_dec,'co',markersize=9)
			ax.set_xlabel('RA',fontsize=15)
			ax.set_ylabel('Dec.',fontsize=15)
			ax.set_title('Richness Annuli for Halo '+str(haloid))
			fig.savefig('skyplot_'+str(haloid)+'.png',bbox_inches='tight')
			mp.close(fig)

		if plot_phase == True:
			fig,ax = mp.subplots()
			ax.plot(spec['rdata'],spec['vdata'],'ko')
			ax.plot(spec['rdata'][mems],spec['vdata'][mems],'co')
			bcg = np.where(spec['abs_rmags']==spec['abs_rmags'][mems].min())[0][0]
			ax.plot(spec['rdata'][bcg],spec['vdata'][bcg],'ro')
			ax.set_xlim(0,5)
			ax.set_ylim(-5000,5000)
			ax.set_xlabel('Radius (Mpc)',fontsize=15)
			ax.set_ylabel('Velocity (km/s)',fontsize=15)
			ax.set_title('phasespace haloid '+str(haloid))
			fig.savefig('phasespace_'+str(haloid)+'.png',bbox_inches='tight')
			mp.close(fig)

		# Check that galaxies exist (somewhat) uniformly in the outer annulus (i.e. the cluster isn't on the edge of the observation strip)
		# First, project all galaxies into polar coordinates centered on the cluster center
		x = (all['ra']-clus_ra)/np.cos(clus_dec*np.pi/180)		# RA offset (cos(dec)-scaled) centered on cluster center
		y = (all['dec']-clus_dec)
		all['radius'] = np.sqrt( x**2 + y**2 ) / deg_per_rvir	# radius scaled by RVIR
		all['theta'] = np.arctan( y / x )
		# Add corrections to arctan function
		all['theta'][np.where( (x < 0) & (y > 0) )] += np.pi	# Quadrant II
		all['theta'][np.where( (x < 0) & (y < 0) )] += np.pi	# Quadrant III
		all['theta'][np.where( (x > 0) & (y < 0) )] += 2*np.pi	# Quadrant IV
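		# Note: np.arctan2(y, x) % (2*np.pi) yields these quadrant-corrected angles
		# directly; the manual corrections above are kept as originally written.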
		# Then break the outer annulus into 4 sections and check that at least 1 galaxy exists in each section
		sizes1 = np.array([np.where((np.abs(all['theta']-i)<=np.pi/2)&(all['radius']>inner_edge)&(all['radius']<15))[0].size for i in np.linspace(0,2*np.pi,4)])
		# Do it again but shift theta by np.pi/8 this time
		sizes2 = np.array([np.where((np.abs(all['theta']-i)<=np.pi/2)&(all['radius']>inner_edge)&(all['radius']<15))[0].size for i in np.linspace(np.pi/8,17*np.pi/8,4)])
		if 0 in sizes1 or 0 in sizes2:
			mp.plot(all['radius'],all['theta'],'ko')
			mp.xlabel('radius')
			mp.ylabel('theta')
			mp.savefig('rth_'+str(haloid)+'.png')
			mp.close()
			print 'sizes1=',sizes1
			print 'sizes2=',sizes2
			return -99

		return richness
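
# For orientation, a hedged usage sketch of richness_est. Every input below is an
# invented placeholder (random arrays and a made-up cluster), not data or an
# invocation from the original module; it assumes the imports and `varib` defined
# in Example #16 above.
import numpy as np

np.random.seed(0)
N = 500
clus_ra, clus_dec, clus_z = 150.5, 20.5, 0.10
ra = np.random.uniform(149.5, 151.5, N)
dec = np.random.uniform(19.5, 21.5, N)
z = np.random.normal(clus_z, 0.005, N)          # spectroscopic redshifts
pz = z + np.random.normal(0, 0.02, N)           # photometric redshifts
rmags = np.random.uniform(16.0, 19.0, N)
gmags = rmags + np.random.uniform(0.4, 1.0, N)  # g-r colors inside the RS window
imags = rmags - 0.2
abs_rmags = rmags - 38.3                        # rough distance modulus at z ~ 0.1

Rich = Richness(varib)
richness = Rich.richness_est(ra, dec, z, pz, gmags, rmags, imags, abs_rmags,
                             haloid=0, clus_ra=clus_ra, clus_dec=clus_dec,
                             clus_z=clus_z, clus_rvir=1.5, use_specs=True,
                             find_pairs=False)
print 'richness =', richness    # -99 flags an empty outer-annulus quadrant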
Example #18
    def richness_est(self,
                     ra,
                     dec,
                     z,
                     pz,
                     gmags,
                     rmags,
                     imags,
                     abs_rmags,
                     haloid,
                     clus_ra,
                     clus_dec,
                     clus_z,
                     clus_rvir=None,
                     gr_slope=None,
                     gr_width=None,
                     gr_inter=None,
                     shiftgap=True,
                     use_specs=False,
                     use_bcg=False,
                     fit_rs=False,
                     spec_list=None,
                     fixed_aperture=False,
                     fixed_vdisp=False,
                     plot_gr=False,
                     plot_sky=False,
                     plot_phase=False,
                     find_pairs=True):
        ''' Richness estimator, magnitudes should be apparent mags except for abs_rmag
			z : spectroscopic redshift
			pz : photometric redshift
			gr_slope : if fed color-mag (g-r vs. r) slope of fit to red sequence
			gr_width : if fed color-mag (g-r vs. r) width of fit to red sequence
			gr_inter : if fed color-mag (g-r vs. r) intercept of fit to red sequence
			spec_list : indexing array specifying gals w/ spectroscopic redshifts, preferably fed as boolean numpy array
			fixed_aperture : clus_rvir = 1 Mpc
			fixed_vdisp : clus_vdisp = 1000 km/s (members < clus_vdisp*2)
			use_specs : includes all spectro members in richness regardless of their color, subject to counting twice
			use_bcg : use bcg RS_color as cluster RS_color
			fit_rs : fit a line to the member galaxy RS relationship, as of now this does not work and we take a flat RS relationship
			plot_gr : plot r vs g-r color diagram with RS fits
			plot_sky : plot RA and DEC of gals with signal and background annuli
			plot_phase : plot rdata and vdata phase space
			find_pairs : run c4 cluster pair identifier on phase spaces
			- If at least one quarter of the outer annulus contains no galaxies (perhaps because it runs over the observation edge),
				the function returns -99 as the richness
		'''
        # Put Into Class Namespace
        keys = [
            'ra', 'dec', 'z', 'pz', 'gmags', 'rmags', 'imags', 'clus_ra',
            'clus_dec', 'clus_z', 'clus_rvir', 'vel_disp', 'color_data',
            'color_cut', 'RS_color', 'RS_sigma', 'clus_color_cut', 'signal',
            'background', 'richness', 'mems', 'phot', 'spec', 'spec_mems',
            'deg_per_rvir', 'SA_outer', 'SA_inner', 'outer_red_dense',
            'inner_background', 'Nphot_inner', 'Nphot_outer', 'red_inner',
            'red_outer', 'all', 'outer_edge', 'inner_edge', 'shift_cut', 'set',
            'pair', 'Nspec', 'gr_slope', 'gr_inter', 'gr_width', 'obs_tot',
            'obs_back_scaled'
        ]
        self.__dict__.update(ez.create(keys, locals()))

        # Take a rough virial radius measurement; this method is BAD...
        # Note: -19.55 is an absolute-magnitude threshold, so abs_rmags (not
        # apparent rmags) is compared, and rdata/vdata must already exist
        # (e.g. from a prior pass), since they are only computed further below.
        if clus_rvir == None:
            clus_rvir = np.exp(-1.86) * len(
                np.where((abs_rmags < -19.55) & (rdata < 1.0)
                         & (np.abs(vdata) < 3500))[0])**0.51
            self.clus_rvir = clus_rvir

        # Set clus_rvir = 1.0 Mpc if fixed aperture == True
        if fixed_aperture == True:
            clus_rvir = 1.0

        # Magnitude cut at 19.1 apparent R mag, then at -19.5 absolute R mag, then sort by absolute R mag
        bright = np.where(rmags < 19.1)[0]
        data = np.vstack([ra, dec, z, pz, gmags, rmags, imags, abs_rmags])
        data = data.T[bright].T
        ra, dec, z, pz, gmags, rmags, imags, abs_rmags = data

        bright = np.where(abs_rmags < -19.5)[0]
        data = np.vstack([ra, dec, z, pz, gmags, rmags, imags, abs_rmags])
        data = data.T[bright].T
        ra, dec, z, pz, gmags, rmags, imags, abs_rmags = data

        sorts = np.argsort(abs_rmags)
        data = np.vstack([ra, dec, z, pz, gmags, rmags, imags, abs_rmags])
        data = data.T[sorts].T
        ra, dec, z, pz, gmags, rmags, imags, abs_rmags = data
        self.__dict__.update(ez.create(keys, locals()))

        # Separate Into Spectro Z and Photo Z DataSets
        # Define the Spectro Z catalogue
        if spec_list == None:
            spec_cut = np.abs(z) > 1e-4
        else:
            if type(spec_list) == list: spec_list = np.array(spec_list)
            if spec_list.dtype != 'bool':
                spec_cut = np.array([False] * len(ra))
                spec_cut[spec_list] = True
            else:
                spec_cut = spec_list  # already a boolean mask
        all = {
            'ra': ra,
            'dec': dec,
            'z': z,
            'pz': pz,
            'gmags': gmags,
            'rmags': rmags,
            'imags': imags,
            'abs_rmags': abs_rmags
        }
        spec = {
            'ra': ra[spec_cut],
            'dec': dec[spec_cut],
            'z': z[spec_cut],
            'pz': pz[spec_cut],
            'gmags': gmags[spec_cut],
            'rmags': rmags[spec_cut],
            'imags': imags[spec_cut],
            'abs_rmags': abs_rmags[spec_cut]
        }
        phot = {
            'ra': ra[~spec_cut],
            'dec': dec[~spec_cut],
            'z': z[~spec_cut],
            'pz': pz[~spec_cut],
            'gmags': gmags[~spec_cut],
            'rmags': rmags[~spec_cut],
            'imags': imags[~spec_cut],
            'abs_rmags': abs_rmags[~spec_cut]
        }

        # Project Spectra into radius and velocity
        ang_d, lum_d = C.zdistance(clus_z, self.H0)
        angles = C.findangle(spec['ra'], spec['dec'], clus_ra, clus_dec)
        rdata = angles * ang_d
        vdata = self.c * (spec['z'] - clus_z) / (1 + clus_z)
        spec.update({'rdata': rdata, 'vdata': vdata})
        self.__dict__.update(ez.create(keys, locals()))

        # Take Hard Phasespace Limits
        limit = np.where((rdata < 5) & (np.abs(vdata) < 5000))[0]
        clus_data = np.vstack([
            rdata, vdata, spec['ra'], spec['dec'], spec['z'], spec['pz'],
            spec['gmags'], spec['rmags'], spec['imags'], spec['abs_rmags']
        ])
        clus_data = clus_data.T[limit].T
        rdata, vdata, spec['ra'], spec['dec'], spec['z'], spec['pz'], spec[
            'gmags'], spec['rmags'], spec['imags'], spec[
                'abs_rmags'] = clus_data
        spec.update({'rdata': rdata, 'vdata': vdata})
        self.__dict__.update(ez.create(keys, locals()))

        # Shiftgapper for Interlopers
        if shiftgap == True:
            len_before = np.where((rdata < clus_rvir * 1.5)
                                  & (np.abs(vdata) < 4000))[0].size
            clus_data = np.vstack([
                rdata, vdata, spec['ra'], spec['dec'], spec['z'], spec['pz'],
                spec['gmags'], spec['rmags'], spec['imags'], spec['abs_rmags']
            ])
            clus_data = C.shiftgapper(clus_data.T).T
            sorts = np.argsort(clus_data[-1])
            clus_data = clus_data.T[sorts].T
            rdata, vdata, spec['ra'], spec['dec'], spec['z'], spec['pz'], spec[
                'gmags'], spec['rmags'], spec['imags'], spec[
                    'abs_rmags'] = clus_data
            shift_cut = len_before - np.where((rdata < clus_rvir)
                                              & (np.abs(vdata) < 4000))[0].size

        # Measure Velocity Dispersion of all galaxies within 1 * r_vir and np.abs(vdata) < 4000 km/s
        spec.update({'rdata': rdata, 'vdata': vdata})
        self.__dict__.update(ez.create(keys, locals()))
        vel_disp = astats.biweight_midvariance(
            vdata[np.where((rdata < clus_rvir * 1) & (np.abs(vdata) < 4000))])
        if fixed_vdisp == True: vel_disp = 1000

        # Run Cluster Pair Finder
        if find_pairs == True:
            pair, d1_chi, d2_chi, d3_chi, s_chi, double1, double2, double3, single, v_range, bins = pair_find(
                rdata, vdata)

        # Calculate Nspec, Nspec = # of galaxies within RVIR
        try:
            Nspec = np.where(rdata < clus_rvir)[0].size
        except:
            Nspec = 0
        self.__dict__.update(ez.create(keys, locals()))

        # Get members from spectra, get their red sequence color
        mems = np.where((spec['rdata'] < clus_rvir)
                        & (np.abs(spec['vdata']) < 2 * vel_disp))[0]
        color_data = spec['gmags'] - spec['rmags']
        color_cut = np.where((color_data[mems] < 1.2)
                             & (color_data[mems] > 0.65))[0]
        RS_color = astats.biweight_location(color_data[mems][color_cut])
        RS_shift = color_data[mems][color_cut] - RS_color
        RS_sigma = astats.biweight_midvariance(
            RS_shift[np.where(np.abs(RS_shift) < .15)])

        if fit_rs == True:
            clf = linear_model.LinearRegression()
            set = np.where(
                np.abs(color_data[mems] - RS_color) < 3 * RS_sigma)[0]
            clf.fit(spec['rmags'][mems][set].reshape(set.size, 1),
                    color_data[mems][set])

        if use_bcg == True:
            bright = np.argsort(all['abs_rmags'][mems])[0]
            RS_color = color_data[mems][bright]
            RS_shift = color_data[mems][color_cut] - RS_color
            RS_sigma = astats.biweight_midvariance(
                RS_shift[np.where(np.abs(RS_shift) < .15)])

        # spec_mems is # of members that are within 2 sigma of cluster color
        clus_color_cut = np.where(
            np.abs(color_data[mems] - RS_color) < RS_sigma * 2)[0]
        spec_mems = len(clus_color_cut)
        self.__dict__.update(ez.create(keys, locals()))

        # If fed gr fit
        if gr_slope != None:

            def RS_color(r_mag):
                return r_mag * gr_slope + gr_inter

            RS_sigma = gr_width / 2
            clus_color_cut = np.where(
                np.abs(color_data[mems] -
                       RS_color(spec['rmags'])[mems]) < RS_sigma * 2)[0]
            spec_mems = len(clus_color_cut)
            self.__dict__.update(ez.create(keys, locals()))

        # Get Rdata from PhotoZ & SpecZ Data Set
        angles = C.findangle(all['ra'], all['dec'], clus_ra, clus_dec)
        all['rdata'] = angles * ang_d

        # Get deg per RVIR proper
        deg_per_rvir = self.Cosmo.arcsec_per_kpc_proper(
            clus_z).value * 1e3 * clus_rvir / 3600

        # Get area of the virial circle out to 1 rvir, and area of the outer annulus (4rvir < R < 6rvir by default, narrower for large rvir)
        if clus_rvir < 2.5:
            outer_edge = 6.0
            inner_edge = 4.0
        elif clus_rvir >= 2.5 and clus_rvir < 3:
            outer_edge = 5.0
            inner_edge = 3.5
        else:
            outer_edge = 3.0
            inner_edge = 2.0
        SA_inner = np.pi * deg_per_rvir**2
        SA_outer = np.pi * ((outer_edge * deg_per_rvir)**2 -
                            (inner_edge * deg_per_rvir)**2)

        # Get Number of Cluster Color Galaxies from Photo Data Set in Inner Circle and Outer Annuli
        RS_color_sig = 2.0
        if gr_slope == None:
            red_inner = np.where(((all['gmags'] - all['rmags']) < RS_color +
                                  RS_color_sig * RS_sigma)
                                 & ((all['gmags'] - all['rmags']) > RS_color -
                                    RS_color_sig * RS_sigma)
                                 & (all['rdata'] < clus_rvir))[0]
            red_outer = np.where((
                (all['gmags'] - all['rmags']) < RS_color + 1.5 * RS_sigma) & (
                    (all['gmags'] - all['rmags']) > RS_color - 1.5 * RS_sigma)
                                 & (all['rdata'] < outer_edge * clus_rvir)
                                 & (all['rdata'] > inner_edge * clus_rvir))[0]
        else:
            red_inner = np.where((
                (all['gmags'] - all['rmags']) < RS_color(all['rmags']) +
                RS_color_sig * RS_sigma) & (
                    (all['gmags'] - all['rmags']) > RS_color(all['rmags']) -
                    RS_color_sig * RS_sigma) & (all['rdata'] < clus_rvir))[0]
            red_outer = np.where((
                (all['gmags'] -
                 all['rmags']) < RS_color(all['rmags']) + 1.5 * RS_sigma) & (
                     (all['gmags'] - all['rmags']) > RS_color(all['rmags']) -
                     1.5 * RS_sigma) & (all['rdata'] < outer_edge * clus_rvir)
                                 & (all['rdata'] > inner_edge * clus_rvir))[0]

        Nphot_inner = len(red_inner)
        Nphot_outer = len(red_outer)

        self.__dict__.update(ez.create(keys, locals()))

        # Get Solid Angle Density of Outer Red Galaxies
        outer_red_dense = Nphot_outer / SA_outer
        inner_background = int(np.ceil(outer_red_dense * SA_inner))

        # Define obs_tot and obs_back_scaled, where obs_back_scaled is obs_back already scaled to inner aperture (aka. inner_background)
        obs_tot = np.copy(Nphot_inner)
        obs_back_scaled = np.copy(inner_background)

        # If inner_background is less than Nphot_inner, then Nphot_inner -= inner_background, otherwise Nphot_inner = 0
        if inner_background < Nphot_inner:
            Nphot_inner -= inner_background
        else:
            Nphot_inner = 0

        # Richness = spec_mems + Nphot_inner or just Nphot_inner
        if use_specs == True:
            richness = spec_mems + Nphot_inner
        else:
            richness = Nphot_inner
        self.__dict__.update(ez.create(keys, locals()))

        # Plot
        if plot_gr == True:
            fig, ax = mp.subplots()
            ax.plot(rmags, gmags - rmags, 'ko', alpha=.8)
            ax.plot(spec['rmags'][mems], color_data[mems], 'co')
            if gr_slope == None:
                ax.axhline(RS_color, color='r')
                ax.axhline(RS_color + RS_sigma, color='b')
                ax.axhline(RS_color - RS_sigma, color='b')
            else:
                xdata = np.arange(spec['rmags'][mems].min(),
                                  spec['rmags'][mems].max(), .1)
                ax.plot(xdata, RS_color(xdata), color='r')
                ax.plot(xdata, RS_color(xdata) + RS_sigma, color='b')
                ax.plot(xdata, RS_color(xdata) - RS_sigma, color='b')
            ax.set_xlim(13, 19)
            ax.set_ylim(0, 1.3)
            ax.set_xlabel('Apparent R Mag', fontsize=16)
            ax.set_ylabel('App G Mag - App R Mag', fontsize=16)
            ax.set_title('Color-Mag Diagram, Cluster ' + str(haloid))
            fig.savefig('colormag_' + str(haloid) + '.png',
                        bbox_inches='tight')
            mp.close(fig)

        if plot_sky == True:
            fig, ax = mp.subplots()
            ax.plot(all['ra'], all['dec'], 'ko')
            ax.plot(all['ra'][red_inner], all['dec'][red_inner], 'ro')
            ax.plot(all['ra'][red_outer], all['dec'][red_outer], 'yo')
            ax.plot(clus_ra, clus_dec, 'co', markersize=9)
            ax.set_xlabel('RA', fontsize=15)
            ax.set_ylabel('Dec.', fontsize=15)
            ax.set_title('Richness Annuli for Halo ' + str(haloid))
            fig.savefig('skyplot_' + str(haloid) + '.png', bbox_inches='tight')
            mp.close(fig)

        if plot_phase == True:
            fig, ax = mp.subplots()
            ax.plot(spec['rdata'], spec['vdata'], 'ko')
            ax.plot(spec['rdata'][mems], spec['vdata'][mems], 'co')
            bcg = np.where(
                spec['abs_rmags'] == spec['abs_rmags'][mems].min())[0][0]
            ax.plot(spec['rdata'][bcg], spec['vdata'][bcg], 'ro')
            ax.set_xlim(0, 5)
            ax.set_ylim(-5000, 5000)
            ax.set_xlabel('Radius (Mpc)', fontsize=15)
            ax.set_ylabel('Velocity (km/s)', fontsize=15)
            ax.set_title('phasespace haloid ' + str(haloid))
            fig.savefig('phasespace_' + str(haloid) + '.png',
                        bbox_inches='tight')
            mp.close(fig)

        # Check that galaxies exist (somewhat) uniformly in the outer annulus
        # (i.e. the cluster isn't on the edge of the observation strip).
        # First, project all galaxies into polar coordinates centered on the cluster center
        x = (all['ra'] - clus_ra) / np.cos(
            clus_dec * np.pi /
            180)  # RA offset (cos(dec)-scaled) centered on cluster center
        y = (all['dec'] - clus_dec)
        all['radius'] = np.sqrt(x**2 +
                                y**2) / deg_per_rvir  # radius scaled by RVIR
        all['theta'] = np.arctan(y / x)
        # Add corrections to arctan function
        all['theta'][np.where((x < 0) & (y > 0))] += np.pi  # Quadrant II
        all['theta'][np.where((x < 0) & (y < 0))] += np.pi  # Quadrant III
        all['theta'][np.where((x > 0) & (y < 0))] += 2 * np.pi  # Quadrant IV
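        # Note: np.arctan2(y, x) % (2 * np.pi) yields these quadrant-corrected
        # angles directly; the manual corrections above are kept as written.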
        # Then break the outer annulus into 4 sections and check that at least 1 galaxy exists in each section
        sizes1 = np.array([
            np.where((np.abs(all['theta'] - i) <= np.pi / 2)
                     & (all['radius'] > inner_edge)
                     & (all['radius'] < 15))[0].size
            for i in np.linspace(0, 2 * np.pi, 4)
        ])
        # Do it again but shift theta by np.pi/8 this time
        sizes2 = np.array([
            np.where((np.abs(all['theta'] - i) <= np.pi / 2)
                     & (all['radius'] > inner_edge)
                     & (all['radius'] < 15))[0].size
            for i in np.linspace(np.pi / 8, 17 * np.pi / 8, 4)
        ])
        if 0 in sizes1 or 0 in sizes2:
            p = False
            if p == True:
                mp.plot(all['radius'], all['theta'], 'ko')
                mp.xlabel('radius')
                mp.ylabel('theta')
                mp.savefig('rth_' + str(haloid) + '.png')
                mp.close()
            print 'sizes1=', sizes1
            print 'sizes2=', sizes2
            return -99

        return richness
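
# The background correction above reduces to a few lines of arithmetic. A minimal
# standalone sketch of that step; all numbers here are invented for illustration,
# not taken from any real cluster.
import numpy as np

Nphot_inner, Nphot_outer = 40, 90        # red galaxies inside 1 rvir / in outer annulus
SA_inner, SA_outer = 0.8, 4.0            # solid angles in square degrees (made up)
spec_mems = 25                           # spectroscopic members within 2 sigma of RS color

outer_red_dense = Nphot_outer / float(SA_outer)             # 22.5 galaxies per sq. deg
inner_background = int(np.ceil(outer_red_dense * SA_inner)) # ceil(18.0) = 18
corrected = Nphot_inner - inner_background if inner_background < Nphot_inner else 0
richness = spec_mems + corrected         # 25 + 22 = 47 when use_specs == True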