Example #1
File: MassRich.py  Project: nkern/C4
	new_good = np.array(new_good)
	Nspec = np.array(Nspec)
	N200 = N200[new_good]


## Calculate Average Halo Positions from Galaxies
calc_avg = False
if calc_avg:
	from stack_class_c4 import CFOUR
	from causticpy import Caustic
	
	root = '/nfs/christoq_ls/nkern'
	data_loc = 'MassRich/TRUTH_CAUSTIC'
	write_loc = 'individual'

	C4 = CFOUR({'H0':70,'chris_data_root':'/nfs/christoq_ls/MILLENNIUM/Henriques/TRUTH_CAUSTIC'})
	C = Caustic()

	# Load Halos
	halos = fits.open(root+'/C4/'+data_loc+'/halos.fits')[1].data
	HaloID = halos['orig_order']
	RA = halos['halo_ra']
	DEC = halos['halo_dec']
	Z = halos['halo_z']
	Nspec = halos['Nspec']
	N200 = halos['N200']
	HVD = halos['HVD']
	RVIR = halos['RVIR']
	SINGLE = halos['single']
	SUB = halos['sub']
	NC4 = halos['nc4']
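
Both excerpts on this page reference np and fits without showing their imports, and they lean on project-specific helpers (CFOUR, Caustic, ez, fits_table, fits_append, proj_avg) that are defined elsewhere in the repository. A minimal setup sketch, assuming NumPy and astropy.io.fits are the intended providers (the older pyfits exposes the same fits.open interface):

# Imports assumed by the excerpts above and below (an assumption; the full
# MassRich.py and CatPrep.py modules are not shown here).
import numpy as np
from astropy.io import fits   # or: import pyfits as fits, on older installs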
Example #2
File: CatPrep.py  Project: nkern/C4
def clus_avg(data_loc,halo_file,chris_data_root,newfilename,write_data=True,clobber=True):
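	"""
	Iteratively re-center each halo in halo_file on the average position and
	redshift of its member galaxies. If write_data is True, write a standalone
	avg_centers.fits table and append the new RA_AVG/DEC_AVG/Z_AVG columns to a
	copy of the input catalogue (newfilename).
	"""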

	C4 = CFOUR({'H0':70,'chris_data_root':chris_data_root})
	C = Caustic()

	# Load Halos
	halos = fits.open(data_loc+'/'+halo_file)[1].data
	HaloID = halos['orig_order']
	RA = halos['ra_bcg']
	DEC = halos['dec_bcg']
	Z = halos['z_biwt']
	RVIR = halos['RVIR']
	SINGLE = halos['single']
	SUB = halos['sub']
	NC4 = halos['nc4']

	RA_AVG,DEC_AVG,Z_AVG = [],[],[]

	# Loop Over Halos
	print ''
	print '-'*40
	print '...running average cluster center code'
	for i in range(len(halos)):
		if i % 100 == 0: print '...working on cluster '+str(i)+' out of '+str(len(halos))
		try:
			# Assign Halo Properties
			clus_ra = RA[i]
			clus_dec = DEC[i]
			clus_z = Z[i]

			# Load Galaxies
			galdata = C4.load_chris_gals(HaloID[i])
			gal_ra,gal_dec,gal_z,gal_gmags,gal_rmags,gal_imags = galdata

			# Take Iterative Average, four times
			# vlim = 1500, rlim = 1.5
			clus_ra,clus_dec,clus_z = proj_avg(clus_ra,clus_dec,clus_z,gal_ra,gal_dec,gal_z,1500,1.5,C)
			# vlim = 1000, rlim = 1.5
			clus_ra,clus_dec,clus_z = proj_avg(clus_ra,clus_dec,clus_z,gal_ra,gal_dec,gal_z,1000,1.5,C)
			# vlim = 1000, rlim = 1.5
			clus_ra,clus_dec,clus_z = proj_avg(clus_ra,clus_dec,clus_z,gal_ra,gal_dec,gal_z,1000,1.5,C)
			# vlim = 2000, rlim = 1.5
			clus_ra,clus_dec,clus_z = proj_avg(clus_ra,clus_dec,clus_z,gal_ra,gal_dec,gal_z,2000,1.5,C)

		except Exception:
			print '...failed on cluster '+str(i)+', assigning null center'
			clus_ra,clus_dec,clus_z = 0, 0, 0

		RA_AVG.append(clus_ra)
		DEC_AVG.append(clus_dec)
		Z_AVG.append(clus_z)

	RA_AVG,DEC_AVG,Z_AVG = np.array(RA_AVG),np.array(DEC_AVG),np.array(Z_AVG)

	print '...finished average cluster-center calculations'

	## Write Data Out
	if write_data:
		print '...writing out cluster catalogue with average centers included'
		# Dictionary of new columns
		new_keys = ['RA_AVG','DEC_AVG','Z_AVG']
		new_dic = ez.create(new_keys,locals())

		# Original fits record file
		orig_table = halos

		# Write own fits file
		keys = ['HaloID','RA','DEC','Z','RVIR','RA_AVG','DEC_AVG','Z_AVG']
		dic = ez.create(keys,locals())
		fits_table(dic,keys,data_loc+'/avg_centers.fits',clobber=True)

		# Append new columns
		fits_append(orig_table,new_dic,new_keys,filename=data_loc+'/'+newfilename,clobber=clobber)
		print '-'*40
		print ''
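
clus_avg relies on a proj_avg helper that is not included in this excerpt. Judging only by how it is called above, it takes the current cluster center, the member-galaxy positions and redshifts, a line-of-sight velocity cut vlim (km/s), a projected-radius cut rlim (Mpc), and a Caustic instance, and returns a refined (ra, dec, z) center. The sketch below is a guess at that idea, not the project's actual implementation: the membership cuts, the Om0=0.3 cosmology, and the use of astropy in place of the Caustic object are all assumptions (H0=70 matches the CFOUR setup above).

# Hypothetical proj_avg-style helper; signature and cuts inferred from the calls
# in clus_avg above, not taken from the real MassRich/CatPrep code.
import numpy as np
import astropy.units as u
from astropy.coordinates import SkyCoord
from astropy.cosmology import FlatLambdaCDM

def proj_avg_sketch(clus_ra, clus_dec, clus_z, gal_ra, gal_dec, gal_z,
                    vlim, rlim, cosmo=FlatLambdaCDM(H0=70, Om0=0.3)):
    """Return the galaxy-averaged (ra, dec, z) after a velocity and radius cut.

    vlim : line-of-sight velocity cut in km/s
    rlim : projected-radius cut in Mpc
    """
    gal_ra, gal_dec, gal_z = map(np.asarray, (gal_ra, gal_dec, gal_z))
    c_kms = 2.99792458e5

    # Line-of-sight peculiar velocity relative to the current center (km/s)
    v_los = c_kms * (gal_z - clus_z) / (1.0 + clus_z)

    # Projected separation in Mpc at the cluster redshift
    center = SkyCoord(ra=clus_ra * u.deg, dec=clus_dec * u.deg)
    gals = SkyCoord(ra=gal_ra * u.deg, dec=gal_dec * u.deg)
    d_ang = cosmo.angular_diameter_distance(clus_z)               # Mpc
    r_proj = (gals.separation(center).radian * d_ang).value       # Mpc

    # Keep likely members and average their positions and redshifts
    members = (np.abs(v_los) < vlim) & (r_proj < rlim)
    if members.sum() == 0:
        return clus_ra, clus_dec, clus_z
    return (np.mean(gal_ra[members]),
            np.mean(gal_dec[members]),
            np.mean(gal_z[members]))

In clus_avg this kind of refinement is applied four times per halo, re-centering on the surviving members each pass with velocity cuts of 1500, 1000, 1000, and 2000 km/s at a fixed 1.5 Mpc radius cut.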