# NOTE: the routines below assume the module-level imports of the original
# pipeline, e.g. `import numpy as np` and `from astLib import astStats`.
def findvdisp4(self, r, v, r200, maxv):
    "shifting gapper method"
    k = False
    b = 6
    # decrease the bin count until every radial bin holds more than 15 galaxies
    while k == False:
        b -= 1
        (n, bins) = np.histogram(r, bins=b)
        k = np.all([n > 15])
    print 'bin sizes', n
    d = np.digitize(r, bins[:-1])
    v_final = np.array([])
    r_final = np.array([])
    for i in range(n.size):
        velocities_p = np.sort(v[np.where((d == i + 1) & (v > 0))])
        radius_p = (r[np.where((d == i + 1) & (v > 0))])[np.argsort(v[np.where((d == i + 1) & (v > 0))])]
        velocities_n = np.sort(v[np.where((d == i + 1) & (v < 0))])[::-1]
        radius_n = (r[np.where((d == i + 1) & (v < 0))])[np.argsort(v[np.where((d == i + 1) & (v < 0))])[::-1]]
        dv_p = velocities_p[1:] - velocities_p[:-1]
        dv_n = velocities_n[:-1] - velocities_n[1:]
        # keep galaxies out to the first neighbour-to-neighbour gap of >= 1000 km/s,
        # separately for the positive- and negative-velocity halves of the bin
        for j in range(dv_p.size):
            if dv_p[j] >= 1000.0:
                v_final = np.append(v_final, velocities_p[:j + 1])
                r_final = np.append(r_final, radius_p[:j + 1])
                break
        for j in range(dv_n.size):
            if dv_n[j] >= 1000.0:
                v_final = np.append(v_final, velocities_n[:j + 1])
                r_final = np.append(r_final, radius_n[:j + 1])
                break
    # note: the dispersion below is computed on the full input sample,
    # not on the gap-cleaned v_final array built above
    try:
        vvar = (astStats.biweightScale(v, 9.0))**2
    except:
        vvar = np.var(v)
    return vvar
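# --- Hedged illustration (standalone sketch, not part of the pipeline) ------
# The heart of the shifting gapper above is a per-bin velocity-gap cut: within
# one radial bin, velocity-sorted galaxies are kept only out to the first
# neighbour-to-neighbour gap of >= 1000 km/s. Names below are illustrative.
def velocity_gap_cut(velocities, gap=1000.0):
    """Keep sorted velocities up to (and including) the near side of the first gap >= `gap`."""
    import numpy as np
    velocities = np.sort(velocities)
    dv = velocities[1:] - velocities[:-1]
    big = np.where(dv >= gap)[0]
    # if no gap is found, return the full sample (findvdisp4 above instead
    # appends nothing for such a bin)
    return velocities if big.size == 0 else velocities[:big[0] + 1]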
def betaprofile(self, x, y, z, vx, vy, vz, halox, haloy, haloz, halovx, halovy, halovz, radii, rlimit):
    #go to cluster reference frame
    x = x - halox
    y = y - haloy
    z = z - haloz
    #correct for cluster proper motion
    vx = vx - halovx
    vy = vy - halovy
    vz = vz - halovz
    thetavec = np.arccos(z / np.sqrt(x**2.0 + y**2.0 + z**2.0))
    phivec = np.arctan(y / x)
    vrad = vx * np.sin(thetavec) * np.cos(phivec) + vy * np.sin(thetavec) * np.sin(phivec) + vz * np.cos(thetavec)
    vtheta = vx * np.cos(thetavec) * np.cos(phivec) + vy * np.cos(thetavec) * np.sin(phivec) - vz * np.sin(thetavec)
    vphi = -vx * np.sin(phivec) + vy * np.cos(phivec)
    rvec = np.sqrt(x**2.0 + y**2.0 + z**2.0)
    self.beta = np.zeros(radii.size)
    self.beta -= 999.0
    for i in range(radii.size - 1):
        i += 1
        w = np.where((rvec > radii[i - 1]) & (rvec <= radii[i]))
        if w[0].size >= 20:
            self.beta[i] = 1.0 - (astStats.biweightScale(vtheta[w], 9.0)**2.0 + astStats.biweightScale(vphi[w], 9.0)**2.0) / (2.0 * astStats.biweightScale(vrad[w], 9.0)**2.0)
    #fit = np.polyfit(radii[np.where((self.beta>-5))],self.beta[np.where((self.beta>-5))],6)
    #self.yfit = fit[0]*radii**6.0 + fit[1]*radii**5.0 + fit[2]*radii**4.0 + fit[3]*radii**3.0 + fit[4]*radii**2.0 + fit[5]*radii + fit[6]
    return self.beta
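# --- Hedged sketch -----------------------------------------------------------
# betaprofile implements the standard velocity-anisotropy estimator per shell,
#   beta = 1 - (sigma_theta^2 + sigma_phi^2) / (2 * sigma_r^2).
# The helper below shows the same formula for a single shell, with np.std
# standing in for astStats.biweightScale purely for brevity.
def shell_beta(vrad, vtheta, vphi):
    import numpy as np
    return 1.0 - (np.std(vtheta)**2 + np.std(vphi)**2) / (2.0 * np.std(vrad)**2)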
def findvdisp(self, r, v, r200, maxv):
    """
    Use astLib.astStats biweight sigma clipping Scale estimator for the velocity dispersion
    """
    v_cut = v[np.where((r < r200) & (np.abs(v) < maxv))]
    try:
        self.gal_vdisp = astStats.biweightScale(v_cut, 9.0)
    except:
        self.gal_vdisp = np.std(v_cut, ddof=1)
def membervdisp(self, r, v, vi, ri, r200):
    "This function is for the ideal scenario where you know which galaxies are members"
    #print 'standard dev of zone= ',np.std(v[np.where((r<r200))])# & (v>-2000) & (v < 2000))])
    #return np.var(v)
    #return np.var(v[np.where((r<r200) & (v>-2000) & (v < 2000))])
    try:
        vvar = (astStats.biweightScale(v, 9.0))**2.0
    except:
        vvar = np.var(v)
    return vvar
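# --- Hedged note -------------------------------------------------------------
# membervdisp and the findvdisp variants share one guard: fall back to plain
# numpy moments whenever astStats.biweightScale raises (e.g. for very small or
# degenerate samples). A minimal sketch of that pattern:
def robust_vdisp(v):
    import numpy as np
    from astLib import astStats
    try:
        return astStats.biweightScale(v, 9.0)
    except:
        return np.std(v, ddof=1)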
def findvdisp(self, r, v, vi, ri, r200, maxv):
    #print 'average r', np.average(r)
    avgr = r200
    #dispvals = v[np.where((r>np.average(r)-.4) & (r<np.average(r)+.4) & (v<2000) & (v>-2000))]
    for i in range(6):
        v2 = v[np.where((r < avgr) & (v < maxv) & (v > -maxv))]
        r2 = r[np.where((r < avgr) & (v < maxv) & (v > -maxv))]
        stv = 3.5 * np.std(v2)
        print '3.5 sigma of v = ', stv
        v = v2[np.where((v2 > -stv) & (v2 < stv))]
        r = r2[np.where((v2 > -stv) & (v2 < stv))]
    if v.size > 15.0:
        vstd = astStats.biweightScale(v, 9.0)
        vvar = (astStats.biweightScale(v, 9.0))**2
    else:
        vstd = np.std(v)
        vvar = np.var(v)
    #print 'standard dev of zone= ',vstd
    return (np.sqrt(vvar))**2
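# --- Hedged sketch -----------------------------------------------------------
# One pass of the clipping loop in findvdisp above: keep only galaxies with
# |v| < 3.5 * std(v); the function repeats this six times inside r200.
def sigma_clip_once(r, v, nsig=3.5):
    import numpy as np
    keep = np.abs(v) < nsig * np.std(v)
    return r[keep], v[keep]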
def bin_stack_clusters(self, HaloID, HaloData, BinData, Halo_P, Halo_V, Gal_P, Gal_V, G_Mags, R_Mags, I_Mags, k, j):
    ''' Building Ensemble Cluster and Calculating Property Statistics '''

    ## Unpack HaloData array
    M_crit200, R_crit200, Z, SRAD, ESRAD, HVD = HaloData
    BIN_M200, BIN_R200, BIN_HVD = BinData

    ## Define Arrays for Building Ensemble and LOS
    # Ensemble Arrays: [Successive Ensemble Number][Data]
    # Line of Sight Arrays: [Line Of Sight][Data]
    ens_r, ens_v, ens_gmags, ens_rmags, ens_imags, ens_hvd = [], [], [], [], [], []
    ens_caumass, ens_caumass_est, ens_causurf, ens_nfwsurf = [], [], [], []
    los_r, los_v, los_gmags, los_rmags, los_imags, los_hvd = [], [], [], [], [], []
    los_caumass, los_caumass_est, los_causurf, los_nfwsurf = [], [], [], []
    sample_size, pro_pos = [], []
    ens_gal_id, los_gal_id = [], []
    ens_clus_id = []
    gal_id_count = 0

    ## Loop over lines of sight (different clusters)
    for [l, s] in zip(np.arange(self.line_num * j, self.line_num * (j + 1)), self.stack_range[j * self.line_num:(j + 1) * self.line_num]):
        # Define index for to-be-stacked halo as cluster index (k) + line of sight index (l)
        if self.light_cone == True:
            # Configure RA, DEC and Z into cluster-centric radius and velocity
            pass
        else:
            # Line of Sight Calculation for naturally 3D data
            r, v, projected_pos = self.U.line_of_sight(Gal_P[l], Gal_V[l], Halo_P[s], Halo_V[s], s)

        # Create Ensemble and LOS Galaxy ID Array for 3D extraction later on
        en_gal_id = np.arange(gal_id_count, len(r) + gal_id_count)
        gal_id_count += len(r)
        ln_gal_id = np.arange(len(r))
        en_clus_id = np.array([HaloID[s]] * len(r), int)

        # Limit Data in Phase Space
        r, v, en_gal_id, en_clus_id, ln_gal_id, gmags, rmags, imags, samp_size = self.U.limit_gals(r, v, en_gal_id, en_clus_id, ln_gal_id, G_Mags[l], R_Mags[l], I_Mags[l], R_crit200[s], HVD[s])

        # Build LOS and Ensemble, with given method of stacking
        en_r, en_v, en_gal_id, en_clus_id, en_gmags, en_rmags, en_imags, ln_r, ln_v, ln_gal_id, ln_gmags, ln_rmags, ln_imags = self.S.build_ensemble(r, v, en_gal_id, en_clus_id, ln_gal_id, gmags, rmags, imags, HaloData.T[s], l)

        # If Scale data before stack is desired
        if self.scale_data == True:
            en_r = self.U.scale_gals(en_r, R_crit200[s])

        # Build Ensemble Arrays
        ens_r.extend(en_r)
        ens_v.extend(en_v)
        ens_gmags.extend(en_gmags)
        ens_rmags.extend(en_rmags)
        ens_imags.extend(en_imags)
        ens_gal_id.extend(np.array(en_gal_id, int))
        ens_clus_id.extend(np.array(en_clus_id, int))

        # Calculate LOS HVD (this is after shiftgapper) if run_los == True
        if self.run_los == True:
            # Pick out gals within r200
            ln_within = np.where(ln_r < R_crit200[s])[0]
            gal_count = len(ln_within)
            if gal_count <= 3:
                '''biweightScale can't take less than 4 elements'''
                # Calculate hvd with numpy std of galaxies within r200 (b/c this is quoted richness)
                ln_hvd = np.std(np.copy(ln_v)[ln_within])
            else:
                # Calculate hvd with astStats biweightScale (see Beers 1990)
                try:
                    ln_hvd = astStats.biweightScale(np.copy(ln_v)[ln_within], 9.0)
                # Sometimes divide by zero error in biweight function for low gal_num
                except ZeroDivisionError:
                    print 'ZeroDivisionError in biweightfunction, line 140 in caustic_class_stack2D'
                    print 'ln_v[ln_within]=', ln_v[ln_within]
                    ln_hvd = np.std(np.copy(ln_v)[ln_within])
        else:
            ln_hvd = []

        # Run Caustic Technique for LOS mass estimation if run_los == True
        if self.run_los == True:
            ln_caumass, ln_caumass_est, ln_causurf, ln_nfwsurf = self.S.kernel_caustic_masscalc(ln_r, ln_v, HaloData.T[s], BinData.T[k], ln_hvd, k, l)
            print 'j = ' + str(j) + ', k = ' + str(k) + ', l = ' + str(l) + ', s = ' + str(s)
        else:
            ln_caumass, ln_caumass_est, ln_causurf, ln_nfwsurf = [], [], [], []

        # Append LOS Data Arrays
        los_r.append(ln_r)
        los_v.append(ln_v)
        los_gal_id.append(np.array(ln_gal_id, int))
        los_gmags.append(ln_gmags)
        los_rmags.append(ln_rmags)
        los_imags.append(ln_imags)
        los_hvd.append(ln_hvd)
        los_caumass.append(ln_caumass)
        los_caumass_est.append(ln_caumass_est)
        los_causurf.append(ln_causurf)
        los_nfwsurf.append(ln_nfwsurf)
        sample_size.append(samp_size)
        pro_pos.append(projected_pos)

    # If scale data == True, re-scale by ensemble r200
    if self.scale_data == True:
        ens_r = np.array(ens_r) * BIN_R200[k]

    # Shiftgapper for Ensemble Interloper treatment
    ens_r, ens_v, ens_gal_id, ens_clus_id, ens_gmags, ens_rmags, ens_imags = self.C.shiftgapper(np.vstack([ens_r, ens_v, ens_gal_id, ens_clus_id, ens_gmags, ens_rmags, ens_imags]).T).T

    # Sort by R_Mag
    sort = np.argsort(ens_rmags)
    ens_r, ens_v, ens_gal_id, ens_clus_id, ens_gmags, ens_rmags, ens_imags = ens_r[sort], ens_v[sort], ens_gal_id[sort], ens_clus_id[sort], ens_gmags[sort], ens_rmags[sort], ens_imags[sort]

    # Reduce system to gal_num richness within r200
    within = np.where(ens_r <= BIN_R200[k])[0]
    end = within[:self.gal_num * self.line_num + 1][-1]
    ens_r = ens_r[:end]
    ens_v = ens_v[:end]
    ens_gal_id = ens_gal_id[:end]
    ens_clus_id = ens_clus_id[:end]
    ens_gmags = ens_gmags[:end]
    ens_rmags = ens_rmags[:end]
    ens_imags = ens_imags[:end]

    # Calculate HVD
    en_hvd = astStats.biweightScale(np.copy(ens_v)[np.where(ens_r <= BIN_R200[k])], 9.0)

    # Run Caustic Technique!
    en_caumass, en_caumass_est, en_causurf, en_nfwsurf = self.S.kernel_caustic_masscalc(ens_r, ens_v, HaloData.T[k], BinData.T[k], en_hvd, k)

    # Append Ensemble Data Arrays
    ens_hvd.append(en_hvd)
    ens_caumass.append(en_caumass)
    ens_caumass_est.append(en_caumass_est)

    # Turn into numpy arrays
    ens_r, ens_v, ens_gmags, ens_rmags, ens_imags = np.array(ens_r), np.array(ens_v), np.array(ens_gmags), np.array(ens_rmags), np.array(ens_imags)
    ens_hvd, ens_caumass, ens_caumass_est = np.array(ens_hvd), np.array(ens_caumass), np.array(ens_caumass_est)
    ens_causurf, ens_nfwsurf = np.array(en_causurf), np.array(en_nfwsurf)
    los_r, los_v, los_gmags, los_rmags, los_imags = np.array(los_r), np.array(los_v), np.array(los_gmags), np.array(los_rmags), np.array(los_imags)
    los_hvd, los_caumass, los_caumass_est = np.array(los_hvd), np.array(los_caumass), np.array(los_caumass_est)
    los_causurf, los_nfwsurf = np.array(los_causurf), np.array(los_nfwsurf)
    sample_size, pro_pos = np.array(sample_size), np.array(pro_pos)
    ens_gal_id = np.array(ens_gal_id, int)
    los_gal_id = np.array(los_gal_id)
    ens_clus_id = np.array(ens_clus_id, int)

    return ens_r, ens_v, ens_gal_id, ens_clus_id, ens_gmags, ens_rmags, ens_imags, ens_hvd, ens_caumass, ens_caumass_est, ens_causurf, ens_nfwsurf, los_r, los_v, los_gal_id, los_gmags, los_rmags, los_imags, los_hvd, los_caumass, los_caumass_est, los_causurf, los_nfwsurf, self.C.x_range, sample_size, pro_pos
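# --- Hedged sketch -----------------------------------------------------------
# The richness cut in bin_stack_clusters above, isolated: given arrays already
# sorted by r-band magnitude, find the (n_keep+1)-th galaxy inside r200 and
# truncate there, so the ensemble keeps its brightest n_keep members within
# r200 (plus any brighter galaxies outside it). Illustrative names only.
def richness_cut(ens_r, ens_v, r200, n_keep):
    import numpy as np
    within = np.where(ens_r <= r200)[0]  # member indices, already mag-sorted
    end = within[:n_keep + 1][-1]        # truncation index, as computed above
    return ens_r[:end], ens_v[:end]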
def run_caustic(self, data, gal_mags=None, gal_memberflag=None, clus_ra=None, clus_dec=None, clus_z=None, gal_r=None, gal_v=None, r200=None, clus_vdisp=None, rlimit=4.0, vlimit=3500, q=10.0, H0=100.0, xmax=6.0, ymax=5000.0, cut_sample=True, gapper=True, mirror=True, absflag=False):
    self.clus_ra = clus_ra
    self.clus_dec = clus_dec
    self.clus_z = clus_z
    if gal_r == None:
        if self.clus_ra == None:
            #calculate average ra from galaxies
            self.clus_ra = np.average(data[:, 0])
        if self.clus_dec == None:
            #calculate average dec from galaxies
            self.clus_dec = np.average(data[:, 1])
        #Reduce data set to only valid redshifts
        data_spec = data[np.where((np.isfinite(data[:, 2])) & (data[:, 2] > 0.0) & (data[:, 2] < 5.0))]
        if self.clus_z == None:
            #calculate average z from galaxies
            self.clus_z = np.average(data_spec[:, 2])
        #calculate angular diameter distance.
        #Variable self.ang_d
        self.ang_d, self.lum_d = self.zdistance(self.clus_z, H0)
        #calculate the spherical angles of galaxies from cluster center.
        #Variable self.angle
        self.angle = self.findangle(data_spec[:, 0], data_spec[:, 1], self.clus_ra, self.clus_dec)
        self.r = self.angle * self.ang_d
        self.v = c * (data_spec[:, 2] - self.clus_z) / (1 + self.clus_z)
    else:
        data_spec = data[np.where(np.isfinite(gal_v))]
        self.r = gal_r
        self.v = gal_v

    #package galaxy data, USE ASTROPY TABLE HERE!!!!!
    if gal_memberflag is None:
        self.data_table = np.vstack((self.r, self.v, data_spec.T)).T
    else:
        self.data_table = np.vstack((self.r, self.v, data_spec.T, gal_memberflag)).T
    #reduce sample within limits
    if cut_sample == True:
        self.data_set = self.set_sample(self.data_table, rlimit=rlimit, vlimit=vlimit)
    else:
        self.data_set = self.data_table
    #further select sample via shifting gapper
    if gapper == True:
        self.data_set = self.shiftgapper(self.data_set)
    print 'DATA SET SIZE', self.data_set[:, 0].size
    '''
    #tries to identify double groups that slip through the gapper process
    upper_max = np.max(self.data_set[:,1][np.where((self.data_set[:,1]>0.0)&(self.data_set[:,0]<1.0))])
    lower_max = np.min(self.data_set[:,1][np.where((self.data_set[:,1]<0.0)&(self.data_set[:,0]<1.0))])
    if np.max(np.array([upper_max,-lower_max])) > 1000.0+np.min(np.array([upper_max,-lower_max])):
        self.data_set = self.data_set[np.where(np.abs(self.data_set[:,1])<1000.0+np.min(np.array([upper_max,-lower_max])))]
    '''
    if absflag:
        abs_mag = self.data_table[:, 5]
    else:
        abs_mag = self.data_table[:, 7] - magnitudes.distance_modulus(self.clus_z, **fidcosmo)
    self.Ngal_1mpc = self.r[np.where((abs_mag < -20.55) & (self.r < 1.0) & (np.abs(self.v) < 3500))].size
    if r200 == None:
        self.r200 = 0.01 * self.Ngal_1mpc + 0.584  #+np.random.normal(0,0.099)
        #vdisp_prelim = astStats.biweightScale(self.data_set[:,1][np.where(self.data_set[:,0]<3.0)],9.0)
        #r200_mean_prelim = 0.002*vdisp_prelim + 0.40
        #self.r200 = r200_mean_prelim/1.7
        '''
        #original r200 est
        rclip,vclip = self.shiftgapper(np.vstack((self.r[np.where((self.r<3.0) & (np.abs(self.v)<3500.0))],self.v[np.where((self.r<3.0) & (np.abs(self.v)<3500.0))])).T).T
        vdisp_prelim_1 = astStats.biweightClipped(vclip,9.0,3.0)['biweightScale']
        rclip,vclip = self.shiftgapper(np.vstack((self.r[np.where((self.r<1.5) & (np.abs(self.v)<3500.0))],self.v[np.where((self.r<1.5) & (np.abs(self.v)<3500.0))])).T).T
        vdisp_prelim_2 = astStats.biweightClipped(vclip,9.0,3.0)['biweightScale']
        if vdisp_prelim_2 < 0.6*vdisp_prelim_1:
            vdisp_prelim = vdisp_prelim_2
        else:
            vdisp_prelim = vdisp_prelim_1
        r200_mean_prelim = 0.002*vdisp_prelim + 0.40
        self.r200 = r200_mean_prelim/1.7
        '''
        if self.r200 > 3.0:
            self.r200 = 3.0
        if 3.0 * self.r200 < 6.0:
            rlimit = 3.0 * self.r200
        else:
            rlimit = 5.5
    else:
        self.r200 = r200
    print 'Pre_r200=', self.r200

    if mirror == True:
        print 'Calculating Density w/Mirrored Data'
        self.gaussian_kernel(np.append(self.data_set[:, 0], self.data_set[:, 0]), np.append(self.data_set[:, 1], -self.data_set[:, 1]), self.r200, normalization=H0, scale=q, xmax=xmax, ymax=ymax, xres=200, yres=220)
    else:
        print 'Calculating Density'
        self.gaussian_kernel(self.data_set[:, 0], self.data_set[:, 1], self.r200, normalization=H0, scale=q, xmax=xmax, ymax=ymax, xres=200, yres=220)
    self.img_tot = self.img / np.max(np.abs(self.img))
    self.img_grad_tot = self.img_grad / np.max(np.abs(self.img_grad))
    self.img_inf_tot = self.img_inf / np.max(np.abs(self.img_inf))

    if clus_vdisp is None:
        self.pre_vdisp = 9.15 * self.Ngal_1mpc + 350.32
        print 'Pre_vdisp=', self.pre_vdisp
        print 'Ngal<1Mpc=', self.Ngal_1mpc
        v_cut = self.data_set[:, 1][np.where((self.data_set[:, 0] < self.r200) & (np.abs(self.data_set[:, 1]) < 5000.0))]
        try:
            self.pre_vdisp2 = astStats.biweightScale(v_cut, 9.0)
        except:
            self.pre_vdisp2 = np.std(v_cut, ddof=1)
        print 'Vdisp from galaxies=', self.pre_vdisp2
        if self.data_set[:, 0].size < 15:
            self.v_unc = 0.35
            self.c_unc_sys = 0.75
            self.c_unc_int = 0.35
        elif self.data_set[:, 0].size < 25 and self.data_set[:, 0].size >= 15:
            self.v_unc = 0.30
            self.c_unc_sys = 0.55
            self.c_unc_int = 0.22
        elif self.data_set[:, 0].size < 50 and self.data_set[:, 0].size >= 25:
            self.v_unc = 0.23
            self.c_unc_sys = 0.42
            self.c_unc_int = 0.16
        elif self.data_set[:, 0].size < 100 and self.data_set[:, 0].size >= 50:
            self.v_unc = 0.18
            self.c_unc_sys = 0.34
            self.c_unc_int = 0.105
        else:
            self.v_unc = 0.15
            self.c_unc_sys = 0.29
            self.c_unc_int = 0.09
        if self.pre_vdisp2 > 1.75 * self.pre_vdisp:
            self.pre_vdisp_comb = 9.15 * self.Ngal_1mpc + 450.32
        else:
            self.pre_vdisp_comb = self.pre_vdisp2
        '''
        if self.data_set[:,1][np.where(self.data_set[:,0]<self.r200)].size >= 10:
            self.pre_vdisp_comb = astStats.biweightScale(self.data_set[:,1][np.where(self.data_set[:,0]<self.r200)],9.0)
        else:
            self.pre_vdisp_comb = np.std(self.data_set[:,1][np.where(self.data_set[:,0]<self.r200)],ddof=1)
        #self.pre_vdisp_comb = (self.pre_vdisp*(self.pre_vdisp2*self.v_unc)**2+self.pre_vdisp2*118.14**2)/(118.14**2+(self.pre_vdisp2*self.v_unc)**2)
        '''
    else:
        self.pre_vdisp_comb = clus_vdisp
    print 'Combined Vdisp=', self.pre_vdisp_comb

    self.beta = 0.5 * self.x_range / (self.x_range + self.r200 / 4.0)
    #Identify initial caustic surface and members within the surface
    print 'Calculating initial surface'
    if gal_memberflag is None:
        self.Caustics = CausticSurface(self.data_set, self.x_range, self.y_range, self.img_tot, r200=self.r200, halo_vdisp=self.pre_vdisp_comb, beta=None)
    else:
        self.Caustics = CausticSurface(self.data_set, self.x_range, self.y_range, self.img_tot, memberflags=self.data_set[:, -1], r200=self.r200)
    self.caustic_profile = self.Caustics.Ar_finalD
    self.caustic_fit = self.Caustics.vesc_fit
    self.gal_vdisp = self.Caustics.gal_vdisp
    self.memflag = self.Caustics.memflag
    #Estimate the mass based off the caustic profile, beta profile (if given), and concentration (if given)
    if clus_z is not None:
        #self.Mass = MassCalc(self.x_range,self.caustic_profile,self.gal_vdisp,self.clus_z,r200=self.r200,fbr=None,H0=H0)
        #self.Mass2 = MassCalc(self.x_range,self.caustic_profile,self.gal_vdisp,self.clus_z,r200=self.r200,fbr=0.65,H0=H0)
        self.Mass = MassCalc(self.x_range, self.caustic_fit, self.gal_vdisp, self.clus_z, r200=self.r200, fbr=None, H0=H0)
        self.Mass2 = MassCalc(self.x_range, self.caustic_fit, self.gal_vdisp, self.clus_z, r200=self.r200, fbr=0.65, H0=H0)
        self.r200_est = self.Mass.r200_est
        self.r200_est_fbeta = self.Mass2.r200_est
        self.M200_est = self.Mass.M200_est
        self.M200_est_fbeta = self.Mass2.M200_est
        print 'r200 estimate: ', self.Mass.r200_est
        print 'M200 estimate: ', self.Mass.M200_est
        self.Ngal = self.data_set[np.where((self.memflag == 1) & (self.data_set[:, 0] <= self.r200_est))].shape[0]
    #calculate velocity dispersion
    try:
        self.vdisp_gal = astStats.biweightScale(self.data_set[:, 1][self.memflag == 1], 9.0)
    except:
        try:
            self.vdisp_gal = np.std(self.data_set[:, 1][self.memflag == 1], ddof=1)
        except:
            self.vdisp_gal = 0.0
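# --- Hedged usage sketch -----------------------------------------------------
# How run_caustic is typically driven, under the column layout assumed by the
# indexing above (col 0 = RA, col 1 = DEC, col 2 = z, col 7 = apparent mag);
# `Caustic` as the enclosing class name and `galaxy_table` are assumptions.
#
#   c = Caustic()
#   c.run_caustic(galaxy_table, clus_ra=180.0, clus_dec=30.0, clus_z=0.08,
#                 H0=100.0, gapper=True, mirror=True)
#   print c.M200_est, c.r200_est, c.vdisp_gal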
def self_stack_clusters(self, ENC_R, ENC_V, ENC_MAG, ENC_VDISP, ENC_GPX3D, ENC_GPY3D, ENC_GPZ3D, ENC_GVX3D, ENC_GVY3D, ENC_GVZ3D, LINE_VDISP, Gal_P, Gal_V, Halo_P, Halo_V, M_crit200, R_crit200, SRAD, ESRAD, MAGS, k, r_limit, vlimit, gal_num, line_num, method_num, H0, q, c, LINE_DIAMASS, LINE_INFMASS, LINE_DIACAU, LINE_R, LINE_V, LINE_MAG, root, beta):
    '''Not really binning data, but organizing self halo data to mimic binned data from before'''
    #Binning arrays
    enc_r = []
    enc_v = []
    enc_mag = []
    enc_gpx3d = []
    enc_gpy3d = []
    enc_gpz3d = []
    enc_gvx3d = []
    enc_gvy3d = []
    enc_gvz3d = []
    #Line of sight arrays
    line_diamass = []
    line_infmass = []
    line_dia_cau = []
    line_inf_cau = []
    line_inf_nfw = []
    line_vdisp = []
    line_r = []
    line_v = []
    line_mag = []
    for l in range(line_num):
        # Line of Sight Calculation
        r, v = U.line_of_sight(Gal_P[k], Gal_V[k], Halo_P[k], Halo_V[k], H0, c)
        # Limit Data
        r, v, mags, gpx3d, gpy3d, gpz3d, gvx3d, gvy3d, gvz3d, en_r, en_v, en_mags, en_gpx3d, en_gpy3d, en_gpz3d, en_gvx3d, en_gvy3d, en_gvz3d = self.limit_gals(r, v, MAGS[k], R_crit200[k], Gal_P[k], Gal_V[k], r_limit, vlimit, gal_num, line_num, method_num, l)
        # Build Ensemble Data (w/o gapping method per LOS)
        enc_r.extend(en_r)
        enc_v.extend(en_v)
        enc_mag.extend(en_mags)
        enc_gpx3d.extend(en_gpx3d)
        enc_gpy3d.extend(en_gpy3d)
        enc_gpz3d.extend(en_gpz3d)
        enc_gvx3d.extend(en_gvx3d)
        enc_gvy3d.extend(en_gvy3d)
        enc_gvz3d.extend(en_gvz3d)
        # Calculate LOS HVD (after interloper removal)
        gal_count = len(where(r < R_crit200[k])[0])
        if gal_count <= 3:
            '''biweightScale can't take fewer than 4 data points'''
            gal_vdisp = std(copy(v)[where(r < R_crit200[k])])
        if gal_count > 3:
            # This is the best way to calculate vdisp
            gal_vdisp = biweightScale(copy(v)[where(r < R_crit200[k])], 9.0)
        # Running Mass Estimation on Line of Sight
        # NOTE: samp_size is not defined in this function; it is assumed to be
        # set in the enclosing module scope of the original code.
        x_range, line_diamass, line_infmass, line_dia_cau, line_inf_cau, line_inf_nfw = self.self_stack_kernel_caustic_masscalc(r, v, line_diamass, line_infmass, line_dia_cau, line_inf_cau, line_inf_nfw, R_crit200[k], M_crit200[k], SRAD[k], ESRAD[k], gal_vdisp, r_limit, vlimit, H0, q, k, root, beta, l, samp_size=samp_size)
        # Append LOS Data
        line_vdisp.append(gal_vdisp)
        line_r.append(r)
        line_v.append(v)
        line_mag.append(mags)
    # Shift Gapper Method to remove interlopers (for the ensemble)
    enc_r, enc_v, enc_mag, enc_gpx3d, enc_gpy3d, enc_gpz3d, enc_gvx3d, enc_gvy3d, enc_gvz3d = U.shiftgapper(vstack((enc_r, enc_v, enc_mag, enc_gpx3d, enc_gpy3d, enc_gpz3d, enc_gvx3d, enc_gvy3d, enc_gvz3d)).T).T
    # Reduce system to gal_num*line_num gals within r200
    within = where(enc_r < R_crit200[k])[0]
    end = within[:gal_num * line_num][-1] + 1
    enc_r, enc_v, enc_mag, enc_gpx3d, enc_gpy3d, enc_gpz3d, enc_gvx3d, enc_gvy3d, enc_gvz3d = enc_r[:end], enc_v[:end], enc_mag[:end], enc_gpx3d[:end], enc_gpy3d[:end], enc_gpz3d[:end], enc_gvx3d[:end], enc_gvy3d[:end], enc_gvz3d[:end]
    # Calculate Ensemble HVD
    enc_vdisp = biweightScale(copy(enc_v)[where(enc_r < R_crit200[k])], 9.0)
    #Ensemble Arrays
    ENC_R.append(enc_r)
    ENC_V.append(enc_v)
    ENC_MAG.append(enc_mag)
    ENC_VDISP.append(enc_vdisp)
    ENC_GPX3D.append(enc_gpx3d)
    ENC_GPY3D.append(enc_gpy3d)
    ENC_GPZ3D.append(enc_gpz3d)
    ENC_GVX3D.append(enc_gvx3d)
    ENC_GVY3D.append(enc_gvy3d)
    ENC_GVZ3D.append(enc_gvz3d)
    #Line of Sight Arrays
    LINE_DIAMASS.append(line_diamass)
    LINE_INFMASS.append(line_infmass)
    LINE_DIACAU.append(line_dia_cau)
    LINE_VDISP.append(line_vdisp)
    LINE_R.append(line_r)
    LINE_V.append(line_v)
    LINE_MAG.append(line_mag)
    return ENC_R, ENC_V, ENC_MAG, ENC_VDISP, ENC_GPX3D, ENC_GPY3D, ENC_GPZ3D, ENC_GVX3D, ENC_GVY3D, ENC_GVZ3D, LINE_VDISP, LINE_DIAMASS, LINE_INFMASS, LINE_DIACAU, LINE_R, LINE_V, LINE_MAG
def bin_clusters(self, ENC_R, ENC_V, ENC_MAG, ENC_VDISP, ENC_R200, ENC_M200, ENC_SRAD, ENC_ESRAD, ENC_GPX3D, ENC_GPY3D, ENC_GPZ3D, ENC_GVX3D, ENC_GVY3D, ENC_GVZ3D, LINE_VDISP, Gal_P, Gal_V, Halo_P, Halo_V, M_crit200, R_crit200, SRAD, ESRAD, MAGS, k, r_limit, vlimit, gal_num, line_num, H0, q, c, LINE_DIAMASS, LINE_INFMASS, LINE_DIACAU, LINE_DISSECT, root, beta, scale_data):
    #update bin range: list of halo ids who belong in the kth ensemble
    bin_range = arange(k * line_num, k * line_num + line_num, 1, int)
    #Binning arrays
    enc_r = []
    enc_v = []
    enc_mag = []
    enc_gpx3d = []
    enc_gpy3d = []
    enc_gpz3d = []
    enc_gvx3d = []
    enc_gvy3d = []
    enc_gvz3d = []
    #Line of sight arrays
    line_diamass = []
    line_infmass = []
    line_dia_cau = []
    line_inf_cau = []
    line_inf_nfw = []
    line_vdisp = []
    line_r = []
    line_v = []
    line_mag = []
    line_dissect = []
    #Loop over binned halos
    for l in bin_range:
        #Line of Sight Calculation
        r, v = U.line_of_sight(Gal_P[l], Gal_V[l], Halo_P[l], Halo_V[l], H0, c)
        #Limit Data
        r, v, mags, gal_vdisp, gpx3d, gpy3d, gpz3d, gvx3d, gvy3d, gvz3d = self.limit_gals(r, v, MAGS[l], R_crit200[l], Gal_P[l], Gal_V[l], r_limit, vlimit, gal_num, line_num, l)
        line_dissect.append(len(r))
        # Append LOS RV arrays
        line_r.append(r)
        line_v.append(v)
        line_mag.append(mags)
        #Scale Data
        if scale_data == True:
            r, v = self.scale_gals(r, v, R_crit200[l], gal_vdisp)
        # Do Mass Estimation for each Line of Sight
        x_range, line_diamass, line_infmass, line_dia_cau, line_inf_cau, line_inf_nfw = self.kernel_caustic_masscalc(r, v, line_diamass, line_infmass, line_dia_cau, line_inf_cau, line_inf_nfw, R_crit200[l], M_crit200[l], SRAD[l], ESRAD[l], gal_vdisp, r_limit, vlimit, H0, q, k, root, beta, l=l)
        enc_r.extend(r)
        enc_v.extend(v)
        enc_mag.extend(mags)
        enc_gpx3d.extend(gpx3d)
        enc_gpy3d.extend(gpy3d)
        enc_gpz3d.extend(gpz3d)
        enc_gvx3d.extend(gvx3d)
        enc_gvy3d.extend(gvy3d)
        enc_gvz3d.extend(gvz3d)
        line_vdisp.append(gal_vdisp)
    # Shift Gapper Method to remove interlopers
    enc_r, enc_v, enc_mag, enc_gpx3d, enc_gpy3d, enc_gpz3d, enc_gvx3d, enc_gvy3d, enc_gvz3d = U.shiftgapper(vstack((enc_r, enc_v, enc_mag, enc_gpx3d, enc_gpy3d, enc_gpz3d, enc_gvx3d, enc_gvy3d, enc_gvz3d)).T).T
    # Calculated or Average Ensemble Properties
    enc_vdisp = biweightScale(copy(enc_v)[where(copy(enc_r) < R_crit200[l])[0]], 9.0)
    ENC_R200.append(U.bin_meancalc(R_crit200[bin_range]))
    ENC_M200.append(U.bin_medcalc(M_crit200[bin_range]))
    ENC_SRAD.append(U.bin_meancalc(SRAD[bin_range]))
    ENC_ESRAD.append(U.bin_meancalc(ESRAD[bin_range]))
    #Ensemble Arrays
    ENC_R.append(enc_r)
    ENC_V.append(enc_v)
    ENC_MAG.append(enc_mag)
    ENC_VDISP.append(enc_vdisp)
    ENC_GPX3D.append(enc_gpx3d)
    ENC_GPY3D.append(enc_gpy3d)
    ENC_GPZ3D.append(enc_gpz3d)
    ENC_GVX3D.append(enc_gvx3d)
    ENC_GVY3D.append(enc_gvy3d)
    ENC_GVZ3D.append(enc_gvz3d)
    #Line of Sight Arrays
    LINE_DIAMASS.append(line_diamass)
    LINE_INFMASS.append(line_infmass)
    LINE_DIACAU.append(line_dia_cau)
    LINE_VDISP.append(line_vdisp)
    LINE_DISSECT.append(line_dissect)
    return ENC_R, ENC_V, ENC_MAG, ENC_VDISP, ENC_R200, ENC_M200, ENC_SRAD, ENC_ESRAD, ENC_GPX3D, ENC_GPY3D, ENC_GPZ3D, ENC_GVX3D, ENC_GVY3D, ENC_GVZ3D, LINE_VDISP, LINE_DIAMASS, LINE_INFMASS, LINE_DIACAU, LINE_DISSECT
def ss_recover():
    # Preliminary data file upload
    global h, gal_num, line_num, halo_num, r_limit, vlimit, beta
    h, gal_num, line_num, halo_num, r_limit, vlimit, beta = loadtxt(
        "" + root + "/nkern/Documents/MDB_milliMil_halodata/Caustic/stack_data/" + str(run_loc) + "/program_constants.tab",
        unpack=True,
    )
    halo_num = int(halo_num)
    line_num, gal_num = int(line_num), int(gal_num)

    # Second preliminary data file upload
    global HaloID, M_crit200, R_crit200, SRAD, ESRAD, HVD
    HaloID, M_crit200, R_crit200, SRAD, ESRAD, HVD = loadtxt(
        "" + root + "/nkern/Documents/MDB_milliMil_halodata/Caustic/stack_data/" + str(run_loc) + "/simdata.tab",
        unpack=True,
    )
    HaloID = str(HaloID)
    HaloID, M_crit200, R_crit200, SRAD, ESRAD, HVD = (
        HaloID[:halo_num],
        M_crit200[:halo_num],
        R_crit200[:halo_num],
        SRAD[:halo_num],
        ESRAD[:halo_num],
        HVD[:halo_num],
    )

    # First Data file upload
    global ENC_CAUMASS, ENC_INFMASS, ENC_VDISP
    j = 0
    for m in range(halo_num):
        if j == 0:
            # Initialization of arrays
            ENC_CAUMASS, ENC_INFMASS, ENC_VDISP = loadtxt(
                "" + root + "/nkern/Documents/MDB_milliMil_halodata/Caustic/stack_data/" + str(run_loc) + "/halo_" + str(m) + "_constants.tab",
                usecols=(0, 1, 2),
                unpack=True,
            )
        else:
            ENC_CAUMASSt, ENC_INFMASSt, ENC_VDISPt = loadtxt(
                "" + root + "/nkern/Documents/MDB_milliMil_halodata/Caustic/stack_data/" + str(run_loc) + "/halo_" + str(m) + "_constants.tab",
                usecols=(0, 1, 2),
                unpack=True,
            )
            ENC_CAUMASS = hstack([ENC_CAUMASS, ENC_CAUMASSt])
            ENC_INFMASS = hstack([ENC_INFMASS, ENC_INFMASSt])
            ENC_VDISP = hstack([ENC_VDISP, ENC_VDISPt])
        j += 1

    # Second data file upload
    global LINE_CAUMASS, LINE_INFMASS, LINE_VDISP
    j = 0
    for m in range(halo_num):
        if j == 0:
            # Initialization of arrays
            LINE_CAUMASS, LINE_INFMASS, LINE_VDISP = loadtxt(
                "" + root + "/nkern/Documents/MDB_milliMil_halodata/Caustic/stack_data/" + str(run_loc) + "/halo_" + str(m) + "_linenum.tab",
                unpack=True,
            )
        else:
            line_caumass, line_infmass, line_vdisp = loadtxt(
                "" + root + "/nkern/Documents/MDB_milliMil_halodata/Caustic/stack_data/" + str(run_loc) + "/halo_" + str(m) + "_linenum.tab",
                unpack=True,
            )
            LINE_CAUMASS = vstack([LINE_CAUMASS, line_caumass])
            LINE_INFMASS = vstack([LINE_INFMASS, line_infmass])
            LINE_VDISP = vstack([LINE_VDISP, line_vdisp])
        j += 1

    # Third data file upload
    global ENC_CAUSURF, ENC_INFSURF, ENC_INFNFW, x_range
    j = 0
    for m in range(halo_num):
        if j == 0:
            # Initialization of arrays
            ENC_CAUSURF, ENC_INFSURF, ENC_INFNFW, x_range = loadtxt(
                "" + root + "/nkern/Documents/MDB_milliMil_halodata/Caustic/stack_data/" + str(run_loc) + "/halo_" + str(m) + "_profiles.tab",
                unpack=True,
            )
        else:
            enc_causurf, enc_infsurf, enc_infnfw, x_range = loadtxt(
                "" + root + "/nkern/Documents/MDB_milliMil_halodata/Caustic/stack_data/" + str(run_loc) + "/halo_" + str(m) + "_profiles.tab",
                unpack=True,
            )
            ENC_CAUSURF = vstack([ENC_CAUSURF, enc_causurf])
            ENC_INFSURF = vstack([ENC_INFSURF, enc_infsurf])
            ENC_INFNFW = vstack([ENC_INFNFW, enc_infnfw])
        j += 1

    # Fourth data file upload
    global ENC_R, ENC_V, ENC_MAG, ENC_GPX3D, ENC_GPY3D, ENC_GPZ3D, ENC_GVX3D, ENC_GVY3D, ENC_GVZ3D
    ENC_R, ENC_V, ENC_MAG, ENC_GPX3D, ENC_GPY3D, ENC_GPZ3D, ENC_GVX3D, ENC_GVY3D, ENC_GVZ3D = (
        [], [], [], [], [], [], [], [], [],
    )
    j = 0
    for m in range(halo_num):
        enc_r, enc_v, enc_mag, enc_gpx3d, enc_gpy3d, enc_gpz3d, enc_gvx3d, enc_gvy3d, enc_gvz3d = loadtxt(
            "" + root + "/nkern/Documents/MDB_milliMil_halodata/Caustic/stack_data/" + str(run_loc) + "/halo_" + str(m) + "_RVdata.tab",
            unpack=True,
        )
        ENC_R.append(enc_r)
        ENC_V.append(enc_v)
        ENC_MAG.append(enc_mag)
        ENC_GPX3D.append(enc_gpx3d)
        ENC_GPY3D.append(enc_gpy3d)
        ENC_GPZ3D.append(enc_gpz3d)
        ENC_GVX3D.append(enc_gvx3d)
        ENC_GVY3D.append(enc_gvy3d)
        ENC_GVZ3D.append(enc_gvz3d)
        j += 1
    ENC_R, ENC_V, ENC_MAG, ENC_GPX3D, ENC_GPY3D, ENC_GPZ3D, ENC_GVX3D, ENC_GVY3D, ENC_GVZ3D = (
        array(ENC_R),
        array(ENC_V),
        array(ENC_MAG),
        array(ENC_GPX3D),
        array(ENC_GPY3D),
        array(ENC_GPZ3D),
        array(ENC_GVX3D),
        array(ENC_GVY3D),
        array(ENC_GVZ3D),
    )

    # Fifth data file to upload
    global LINE_CAUSURF
    j = 0
    for m in range(halo_num):
        if j == 0:
            line_prof = loadtxt(
                "" + root + "/nkern/Documents/MDB_milliMil_halodata/Caustic/stack_data/" + str(run_loc) + "/halo_" + str(m) + "_losprofile.tab",
                unpack=True,
            )
            LINE_CAUSURF = array([line_prof[0:line_num]])
        else:
            line_prof = loadtxt(
                "" + root + "/nkern/Documents/MDB_milliMil_halodata/Caustic/stack_data/" + str(run_loc) + "/halo_" + str(m) + "_losprofile.tab",
                unpack=True,
            )
            line_causurf = array([line_prof[0:line_num]])
            LINE_CAUSURF = vstack([LINE_CAUSURF, line_causurf])
        j += 1

    # Sixth data set upload (los rv data)
    if get_los == True:
        global LINE_R, LINE_V, LINE_MAG
        LINE_R, LINE_V, LINE_MAG = [], [], []
        j = 0
        for m in range(halo_num):
            line_r, line_v, line_mag = [], [], []
            for l in range(line_num):
                r, v, mag = loadtxt(
                    "" + root + "/nkern/Documents/MDB_milliMil_halodata/Caustic/stack_data/" + str(run_loc) + "/LOS_RV/halo_" + str(m) + "_los_" + str(l) + "_rv.tab",
                    unpack=True,
                )
                line_r.append(r)
                line_v.append(v)
                line_mag.append(mag)
            LINE_R.append(line_r)
            LINE_V.append(line_v)
            LINE_MAG.append(line_mag)
        LINE_R, LINE_V, LINE_MAG = array(LINE_R), array(LINE_V), array(LINE_MAG)

    # Other data arrays to use:
    global avg_mfrac, avg_hvdfrac, stack_mfrac, stack_hvdfrac, maLINE_CAUMASS, maLINE_VDISP
    global stack_mbias, stack_mscat, stack_vbias, stack_vscat, avg_mbias, avg_mscat, avg_vbias, avg_vscat
    maLINE_CAUMASS = ma.masked_array(LINE_CAUMASS, mask=LINE_CAUMASS == 0)  # Mask 0 Values
    maLINE_VDISP = ma.masked_array(LINE_VDISP, mask=LINE_VDISP == 0)  # Mask 0 Values

    ### Mass Fractions ###
    # Note: I was using map() as an iterator, but for N = 5, sometimes there are fewer than 3 non-masked values per los
    # Note: and biweight###() does not take less than 4 unique values. I don't yet know how to incorporate a "try:"
    # Note: statement into an iterator function like map(), so I resort to a "for" loop

    ## Ensemble fractions
    stack_mfrac = ma.log(ENC_CAUMASS / M_crit200)
    stack_hvdfrac = ma.log(ENC_VDISP / HVD)

    ## Averaged fractions
    a_size = halo_num  # This becomes line_num if doing vertical average first!!
    avg_mfrac, avg_hvdfrac = zeros(a_size), zeros(a_size)
    for a in range(a_size):
        try:
            avg_mfrac[a] = astStats.biweightLocation(ma.copy(ma.log(maLINE_CAUMASS[a] / M_crit200[a])), 6.0)
            avg_hvdfrac[a] = astStats.biweightLocation(ma.copy(ma.log(maLINE_VDISP[a] / HVD[a])), 6.0)
        except:
            avg_mfrac[a] = ma.mean(ma.log(maLINE_CAUMASS[a] / M_crit200[a]))
            avg_hvdfrac[a] = ma.mean(ma.log(maLINE_VDISP[a] / HVD[a]))

    # Bias and Scatter for Ensemble and LOS Average Systems
    stack_mbias, stack_mscat = (
        astStats.biweightLocation(ma.copy(stack_mfrac), 6.0),
        astStats.biweightScale(ma.copy(stack_mfrac), 9.0),
    )
    avg_mbias, avg_mscat = (
        astStats.biweightLocation(ma.copy(avg_mfrac), 6.0),
        astStats.biweightScale(ma.copy(avg_mfrac), 9.0),
    )
    stack_vbias, stack_vscat = (
        astStats.biweightLocation(ma.copy(stack_hvdfrac), 6.0),
        astStats.biweightScale(ma.copy(stack_hvdfrac), 9.0),
    )
    avg_vbias, avg_vscat = (
        astStats.biweightLocation(ma.copy(avg_hvdfrac), 6.0),
        astStats.biweightScale(ma.copy(avg_hvdfrac), 9.0),
    )
def stat_calc(self, MASS_EST, MASS_TRUE, HVD_EST, HVD_TRUE, data_set=None, ens=True):
    ''' Does bias and scatter calculations '''
    # Cut data set if necessary
    if data_set == 'cut_low_mass':
        '''Cutting all 'true' mass estimates below 1e14 off'''
        cut = np.where(MASS_TRUE > 1e14)[0]
        MASS_EST = MASS_EST[cut]
        MASS_TRUE = MASS_TRUE[cut]
        HVD_EST = HVD_EST[cut]
        HVD_TRUE = HVD_TRUE[cut]

    # Define a Masked array for sometimes zero terms
    epsilon = 10.0
    use_est = False  # Use MassCalc estimated r200 mass values if true
    maMASS_EST = ma.masked_array(MASS_EST, mask=MASS_EST < epsilon)  # Mask essentially zero values
    maHVD_EST = ma.masked_array(HVD_EST, mask=HVD_EST < epsilon)

    # Mass / HVD Fractions
    if ens == True:
        # Ensemble Arrays
        MFRAC = np.log(maMASS_EST / MASS_TRUE)
        VFRAC = np.log(maHVD_EST / HVD_TRUE)
    else:
        # LOS Mass Fraction Arrays: 0th axis is halo number, 1st axis is line of sight number
        MFRAC, VFRAC = [], []
        for a in range(len(MASS_EST)):
            MFRAC.append(ma.log(maMASS_EST[a] / MASS_TRUE[a]))
            VFRAC.append(ma.log(maHVD_EST[a] / HVD_TRUE[a]))
        MFRAC, VFRAC = np.array(MFRAC), np.array(VFRAC)

    if ens == True:
        mbias, mscat = astStats.biweightLocation(MFRAC, 6.0), astStats.biweightScale(MFRAC, 9.0)
        vbias, vscat = astStats.biweightLocation(VFRAC, 6.0), astStats.biweightScale(VFRAC, 9.0)
        return MFRAC, mbias, mscat, VFRAC, vbias, vscat
    else:
        if self.ss:
            # Create vertically averaged (by halo averaged) arrays, with line_num elements
            # biweightLocation takes only arrays with 4 or more elements
            HORZ_MFRAC, HORZ_VFRAC = [], []
            VERT_MFRAC, VERT_VFRAC = [], []
            for a in range(self.line_num):
                if len(ma.compressed(MFRAC[:, a])) > 4:
                    VERT_MFRAC.append(astStats.biweightLocation(ma.compressed(MFRAC[:, a]), 6.0))
                    VERT_VFRAC.append(astStats.biweightLocation(ma.compressed(VFRAC[:, a]), 6.0))
                else:
                    VERT_MFRAC.append(np.median(ma.compressed(MFRAC[:, a])))
                    VERT_VFRAC.append(np.median(ma.compressed(VFRAC[:, a])))
            VERT_MFRAC, VERT_VFRAC = np.array(VERT_MFRAC), np.array(VERT_VFRAC)
            # Create horizontally averaged (by line of sight) arrays, with halo_num elements
            for a in self.halo_range:
                if len(ma.compressed(MFRAC[a])) > 4:
                    HORZ_MFRAC.append(astStats.biweightLocation(ma.compressed(MFRAC[a]), 6.0))
                    HORZ_VFRAC.append(astStats.biweightLocation(ma.compressed(VFRAC[a]), 6.0))
                else:
                    HORZ_MFRAC.append(np.median(ma.compressed(MFRAC[a])))
                    HORZ_VFRAC.append(np.median(ma.compressed(VFRAC[a])))
            HORZ_MFRAC, HORZ_VFRAC = np.array(HORZ_MFRAC), np.array(HORZ_VFRAC)
            # Bias and Scatter Calculations
            mbias, mscat = astStats.biweightLocation(VERT_MFRAC, 6.0), astStats.biweightScale(VERT_MFRAC, 9.0)
            vbias, vscat = astStats.biweightLocation(VERT_VFRAC, 6.0), astStats.biweightScale(VERT_VFRAC, 9.0)
        else:
            # Bin stack LOS systems need only one average
            mbias, mscat = astStats.biweightLocation(MFRAC, 6.0), astStats.biweightScale(MFRAC, 9.0)
            vbias, vscat = astStats.biweightLocation(VFRAC, 6.0), astStats.biweightScale(VFRAC, 9.0)
        return MFRAC, mbias, mscat, VFRAC, vbias, vscat
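# --- Hedged sketch -----------------------------------------------------------
# The statistic stat_calc reports: bias and scatter of ln(M_est/M_true), here
# summarized with numpy stand-ins (np.median / np.std) for the biweight
# location and scale estimators used above.
def bias_scatter(mass_est, mass_true):
    import numpy as np
    frac = np.log(mass_est / mass_true)
    return np.median(frac), np.std(frac, ddof=1)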
mem_flag = 1  #This is a flag alerting to (1) if members > 0 and (0) if not. Affects number output.
#Get current galaxy info
print 'GETTING GALAXIES'
part_xpos, part_ypos, part_zpos, part_vx, part_vy, part_vz = G.get_particles(HaloID[i], H0)
gal_p = np.array([part_xpos, part_ypos, part_zpos])
gal_v = np.array([part_vx, part_vy, part_vz])

#organize the current halo position and velocity
Halo_P = np.array([Halo_PX[i], Halo_PY[i], Halo_PZ[i]])  #current halo position
Halo_V = np.array([Halo_VX[i], Halo_VY[i], Halo_VZ[i]])  #current halo velocity

#calculate the radial position of every particle we have loaded, as well as the total velocity.
r_pos = np.sqrt((part_xpos - Halo_P[0])**2 + (part_ypos - Halo_P[1])**2 + (part_zpos - Halo_P[2])**2)

#calculate vdisp of particles
HVD = np.sqrt(astStats.biweightScale(part_vx[np.where(r_pos <= HaloR200[i])] - Halo_V[0], 9.0)**2 + astStats.biweightScale(part_vy[np.where(r_pos <= HaloR200[i])] - Halo_V[1], 9.0)**2 + astStats.biweightScale(part_vz[np.where(r_pos <= HaloR200[i])] - Halo_V[2], 9.0)**2) / np.sqrt(3)

#Define the filename(s) to output the results
if use_mems == True:
    f = open('files/' + str(rich_lim) + 'n/' + str(HaloID[i]) + '.minimill_masses_' + str(rich_lim) + 'n_mems.tab', 'w')
elif use_vdisp == True:
    f = open('files/' + str(rich_lim) + 'n/' + str(HaloID[i]) + '.minimill_masses_' + str(rich_lim) + 'n_vdisp_los_part.tab', 'w')
else:
    f = open('files/' + str(rich_lim) + 'n/' + str(HaloID[i]) + '.minimill_masses_' + str(rich_lim) + 'n_los_part.tab', 'w')

line_mass = np.zeros(line_num)  #empty array for different lines of sight masses
vdispersion = np.zeros(line_num)  #empty array for different lines of sight vdisp
#line_error = np.zeros(line_num) #empty array for the errors above

for j in range(line_num):  #loop over different lines of sight
    #define r200 and limits to select our particles for the caustic estimation
    gal_pos_vect = np.zeros((3, gal_dist.size))
    code = """
    int u,w;
    for (u=0;u<n;++u){
        for(w=0;w<3;++w){
            gal_pos_vect(w,u) = (gal_p(w,u)-new_pos(w))/gal_dist(u);
        }
        gal_vlos(u) = gal_pos_vect(0,u)*gal_v(0,u)+gal_pos_vect(1,u)*gal_v(1,u)+gal_pos_vect(2,u)*gal_v(2,u);
    }
    """
    fast = weave.inline(code, ['gal_pos_vect', 'n', 'gal_dist', 'gal_vlos', 'gal_v', 'new_pos', 'gal_p'], type_converters=converters.blitz, compiler='gcc')
    angles = np.arccos(np.dot(halo_pos_vect, gal_pos_vect))
    '''
    r = gal_radius#angles*halo_dist
    v = gal_velocity#gal_vlos-halo_vlos*np.dot(halo_pos_vect,gal_pos_vect)
    gal_vdisp3d[i] = np.sqrt(astStats.biweightScale(gal_v[0][np.where(gal_radius<=HaloR200[i])]-Halo_V[0],9.0)**2+astStats.biweightScale(gal_v[1][np.where(gal_radius<=HaloR200[i])]-Halo_V[1],9.0)**2+astStats.biweightScale(gal_v[2][np.where(gal_radius<=HaloR200[i])]-Halo_V[2],9.0)**2)/np.sqrt(3)
    #print 'MY VELOCITY OF GALAXIES', gal_vdisp3d[i]
    particle_vdisp3d[i] = HVD*np.sqrt(3)
    gal_rmag_new = gal_abs_rmag# + 5*np.log10(gal_dist*1e6/10.0)
    '''
    rand_r200 = findr200(r, v, gal_rmag_new, angles, gal_lumdist, HaloAD[i], H0) * 0.615  #*1.1
    vlimit = 3500
    rlimit = rand_r200 * 1.25
    '''
    #import average beta profile and create a fit. Apply fit to your xvalues later in code
    xbeta,abeta = np.loadtxt('data/average_betaprofile.tab',dtype='float',usecols=(0,1),unpack=True)
    fit = np.polyfit((xbeta*rand_r200)[xbeta<4],abeta[xbeta<4],6)
    '''

###################################
#End of line of sight calculations#
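# --- Hedged sketch -----------------------------------------------------------
# A NumPy-vectorized equivalent of the weave.inline loop above: build unit
# vectors from the observer position to each galaxy, then project the 3D
# velocities onto them to get line-of-sight velocities. Same math, no C.
def los_velocities(gal_p, gal_v, new_pos):
    import numpy as np
    sep = gal_p - new_pos[:, None]                 # (3, n) separation vectors
    gal_dist = np.sqrt((sep**2).sum(axis=0))       # distance to each galaxy
    gal_pos_vect = sep / gal_dist                  # unit vectors, shape (3, n)
    gal_vlos = (gal_pos_vect * gal_v).sum(axis=0)  # LOS velocity per galaxy
    return gal_pos_vect, gal_vlos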
def __init__(self, data, ri, vi, Zi, memberflags=None, r200=2.0, maxv=5000.0, halo_scale_radius=None, halo_scale_radius_e=0.01, halo_vdisp=None, bin=None, plotphase=False, beta=None):
    kappaguess = np.max(Zi)  #first guess at the level
    self.levels = np.linspace(0.00001, kappaguess, 100)[::-1]  #create levels (kappas) to try out
    fitting_radii = np.where((ri >= r200 / 3.0) & (ri <= r200))  #when fitting an NFW (later), this defines the r range to fit within
    self.r200 = r200

    if halo_scale_radius is None:
        self.halo_scale_radius = self.r200 / 5.0
    else:
        self.halo_scale_radius = halo_scale_radius
    self.halo_scale_radius_e = halo_scale_radius_e

    if beta is None:
        self.beta = 0.2 + np.zeros(ri.size)
    else:
        self.beta = beta
    self.gb = (3 - 2.0 * self.beta) / (1 - self.beta)

    #Calculate velocity dispersion with either members, fed value, or estimate using 3.5sigma clipping
    if memberflags is not None:
        vvarcal = data[:, 1][np.where(memberflags == 1)]
        try:
            self.gal_vdisp = astStats.biweightScale(vvarcal[np.where(np.isfinite(vvarcal))], 9.0)
            print 'O ya! membership calculation!'
        except:
            self.gal_vdisp = np.std(vvarcal, ddof=1)
        self.vvar = self.gal_vdisp**2
    elif halo_vdisp is not None:
        self.gal_vdisp = halo_vdisp
        self.vvar = self.gal_vdisp**2
    else:
        #Variable self.gal_vdisp
        try:
            self.findvdisp(data[:, 0], data[:, 1], r200, maxv)
        except:
            self.gal_vdisp = np.std(data[:, 1][np.where((data[:, 0] < r200) & (np.abs(data[:, 1]) < maxv))], ddof=1)
        self.vvar = self.gal_vdisp**2

    #initilize arrays
    self.vesc = np.zeros(self.levels.size)
    Ar_final_opt = np.zeros((self.levels.size, ri[np.where((ri < r200) & (ri >= 0))].size))

    #find the escape velocity for all level (kappa) guesses
    for i in range(self.vesc.size):
        self.vesc[i], Ar_final_opt[i] = self.findvesc(self.levels[i], ri, vi, Zi, r200)

    #difference equation to search for minimum value
    self.skr = (self.vesc - 4.0 * self.vvar)**2

    try:
        self.level_elem = np.where(self.skr == np.min(self.skr[np.isfinite(self.skr)]))[0][0]
        self.level_final = self.levels[self.level_elem]
        self.Ar_finalD = np.zeros(ri.size)
        for k in range(self.Ar_finalD.size):
            self.Ar_finalD[k] = self.findAofr(self.level_final, Zi[k], vi)
            if k != 0:
                self.Ar_finalD[k] = self.restrict_gradient2(np.abs(self.Ar_finalD[k - 1]), np.abs(self.Ar_finalD[k]), ri[k - 1], ri[k])
    #This exception occurs if self.skr is entirely NAN. A flag should be raised for this in the output table
    except ValueError:
        self.Ar_finalD = np.zeros(ri.size)

    #fit an NFW to the resulting caustic profile.
    self.NFWfit(ri[fitting_radii], self.Ar_finalD[fitting_radii] * np.sqrt(self.gb[fitting_radii]), self.halo_scale_radius, ri, self.gb)
    #self.NFWfit(ri[fitting_radii],self.Ar_finalD[fitting_radii],self.halo_scale_radius,ri,self.gb)

    plotphase = False
    if plotphase == True:
        s = figure()
        ax = s.add_subplot(111)
        ax.plot(data[:, 0], data[:, 1], 'k.')
        for t in range(Ar_final_opt.shape[0]):
            ax.plot(ri[:Ar_final_opt[t].size], Ar_final_opt[t], c='0.4', alpha=0.5)
            ax.plot(ri[:Ar_final_opt[t].size], -Ar_final_opt[t], c='0.4', alpha=0.5)
        ax.plot(ri, self.Ar_finalD, c='blue')
        ax.plot(ri, -self.Ar_finalD, c='blue')
        ax.set_ylim(-3500, 3500)
        s.savefig('/nfs/christoq_ls/giffordw/plotphase.png')
        close()

    ##Output galaxy membership
    kpc2km = 3.09e16
    try:
        fitfunc = lambda x, a, b: np.sqrt(2 * 4 * np.pi * 6.67e-20 * a * (b * kpc2km)**2 * np.log(1 + x / b) / (x / b))
        popt, pcov = curve_fit(fitfunc, ri, self.Ar_finalD)
        self.Arfit = fitfunc(ri, popt[0], popt[1])
    except:
        fitfunc = lambda x, a: np.sqrt(2 * 4 * np.pi * 6.67e-20 * a * (30.0 * kpc2km)**2 * np.log(1 + x / 30.0) / (x / 30.0))
        popt, pcov = curve_fit(fitfunc, ri, self.Ar_finalD)
        self.Arfit = fitfunc(ri, popt[0])

    self.memflag = np.zeros(data.shape[0])
    #fcomp = interp1d(ri,self.Ar_finalD)
    # print ri.size, self.vesc_fit.size
    fcomp = interp1d(ri, self.vesc_fit)
    for k in range(self.memflag.size):
        vcompare = fcomp(data[k, 0])
        if np.abs(vcompare) >= np.abs(data[k, 1]):
            self.memflag[k] = 1
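# --- Hedged sketch -----------------------------------------------------------
# Vectorized form of the membership loop above: a galaxy is a member when it
# lies at or below the interpolated caustic (escape-velocity) surface in
# projected phase space. Assumes ri covers the range of the galaxy radii.
def caustic_member_flags(r_gal, v_gal, ri, vesc_fit):
    import numpy as np
    from scipy.interpolate import interp1d
    fcomp = interp1d(ri, vesc_fit)
    return (np.abs(fcomp(r_gal)) >= np.abs(v_gal)).astype(int)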
def self_stack_clusters(self, HaloID, HaloData, Halo_P, Halo_V, Gal_P, Gal_V, G_Mags, R_Mags, I_Mags, k, j):
    ''' Building Ensemble Cluster and Calculating Property Statistics '''

    ## Unpack HaloData array
    M_crit200, R_crit200, Z, SRAD, ESRAD, HVD = HaloData

    ## Define Arrays for Building Ensemble and LOS
    # Ensemble Arrays: [Successive Ensemble Number][Data]
    # Line of Sight Arrays: [Line Of Sight][Data]
    ens_r, ens_v, ens_gmags, ens_rmags, ens_imags, ens_hvd = [], [], [], [], [], []
    ens_caumass, ens_caumass_est, ens_causurf, ens_nfwsurf = [], [], [], []
    los_r, los_v, los_gmags, los_rmags, los_imags, los_hvd = [], [], [], [], [], []
    los_caumass, los_caumass_est, los_causurf, los_nfwsurf = [], [], [], []
    sample_size, pro_pos = [], []

    ## Loop over lines of sight
    for l in range(self.line_num):
        if self.light_cone == True:
            # Configure RA, DEC and Z into cluster-centric radius and velocity
            pass
        else:
            # Line of Sight Calculation for naturally 3D data
            r, v, projected_pos = self.U.line_of_sight(Gal_P[j], Gal_V[j], Halo_P[k], Halo_V[k], k)

        # Limit Data in Phase Space
        r, v, gmags, rmags, imags, samp_size = self.U.limit_gals(r, v, G_Mags[j], R_Mags[j], I_Mags[j], R_crit200[k], HVD[k])

        # Build LOS and Ensemble, with given method of stacking
        en_r, en_v, en_gmags, en_rmags, en_imags, ln_r, ln_v, ln_gmags, ln_rmags, ln_imags = self.S.build_ensemble(r, v, gmags, rmags, imags, HaloData.T[k], l)

        # Build Ensemble Arrays
        ens_r.extend(en_r)
        ens_v.extend(en_v)
        ens_gmags.extend(en_gmags)
        ens_rmags.extend(en_rmags)
        ens_imags.extend(en_imags)

        # Calculate LOS HVD (this is after shiftgapper) if run_los == True
        if self.run_los == True:
            ln_within = np.where(ln_r < R_crit200[k])[0]
            gal_count = len(ln_within)
            if gal_count <= 3:
                '''biweightScale can't take less than 4 elements'''
                # Calculate hvd with numpy std of galaxies within r200 (b/c this is quoted richness)
                ln_hvd = np.std(np.copy(ln_v)[ln_within])
            else:
                # Calculate hvd with astStats biweightScale (see Beers 1990)
                try:
                    ln_hvd = astStats.biweightScale(np.copy(ln_v)[ln_within], 9.0)
                # Sometimes divide by zero error in biweight function for low gal_num
                except ZeroDivisionError:
                    print 'ZeroDivisionError in biweightfunction, line 140 in caustic_class_stack2D'
                    print 'ln_v[ln_within]=', ln_v[ln_within]
                    ln_hvd = np.std(np.copy(ln_v)[ln_within])
        else:
            ln_hvd = []

        # Run Caustic Technique for LOS mass estimation if run_los == True
        if self.run_los == True:
            ln_caumass, ln_caumass_est, ln_causurf, ln_nfwsurf = self.S.kernel_caustic_masscalc(ln_r, ln_v, HaloData.T[k], np.zeros(2), ln_hvd, k, l)
            print 'j = ' + str(j) + ', k = ' + str(k) + ', l = ' + str(l)
        else:
            ln_caumass, ln_caumass_est, ln_causurf, ln_nfwsurf = [], [], [], []

        # Append LOS Data Arrays
        los_r.append(ln_r)
        los_v.append(ln_v)
        los_gmags.append(ln_gmags)
        los_rmags.append(ln_rmags)
        los_imags.append(ln_imags)
        los_hvd.append(ln_hvd)
        los_caumass.append(ln_caumass)
        los_caumass_est.append(ln_caumass_est)
        los_causurf.append(ln_causurf)
        los_nfwsurf.append(ln_nfwsurf)
        sample_size.append(samp_size)
        pro_pos.append(projected_pos)

    # Shiftgapper for Ensemble Interloper treatment
    ens_r, ens_v, ens_gmags, ens_rmags, ens_imags = self.C.shiftgapper(np.vstack([ens_r, ens_v, ens_gmags, ens_rmags, ens_imags]).T).T

    # Sort by R_Mag
    sort = np.argsort(ens_rmags)
    ens_r, ens_v, ens_gmags, ens_rmags, ens_imags = ens_r[sort], ens_v[sort], ens_gmags[sort], ens_rmags[sort], ens_imags[sort]

    # Reduce system to gal_num richness within r200
    within = np.where(ens_r <= R_crit200[k])[0]
    end = within[:self.gal_num * self.line_num + 1][-1]
    ens_r = ens_r[:end]
    ens_v = ens_v[:end]
    ens_gmags = ens_gmags[:end]
    ens_rmags = ens_rmags[:end]
    ens_imags = ens_imags[:end]

    # Calculate HVD
    en_hvd = astStats.biweightScale(np.copy(ens_v)[np.where(ens_r <= R_crit200[k])], 9.0)

    # Run Caustic Technique!
    en_caumass, en_caumass_est, en_causurf, en_nfwsurf = self.S.kernel_caustic_masscalc(ens_r, ens_v, HaloData.T[k], np.zeros(2), en_hvd, k)

    # Append Ensemble Data Arrays
    ens_hvd.append(en_hvd)
    ens_caumass.append(en_caumass)
    ens_caumass_est.append(en_caumass_est)

    # Turn into numpy arrays
    ens_r, ens_v, ens_gmags, ens_rmags, ens_imags = np.array(ens_r), np.array(ens_v), np.array(ens_gmags), np.array(ens_rmags), np.array(ens_imags)
    ens_hvd, ens_caumass, ens_caumass_est = np.array(ens_hvd), np.array(ens_caumass), np.array(ens_caumass_est)
    ens_causurf, ens_nfwsurf = np.array(en_causurf), np.array(en_nfwsurf)
    los_r, los_v, los_gmags, los_rmags, los_imags = np.array(los_r), np.array(los_v), np.array(los_gmags), np.array(los_rmags), np.array(los_imags)
    los_hvd, los_caumass, los_caumass_est = np.array(los_hvd), np.array(los_caumass), np.array(los_caumass_est)
    los_causurf, los_nfwsurf = np.array(los_causurf), np.array(los_nfwsurf)
    sample_size, pro_pos = np.array(sample_size), np.array(pro_pos)

    return ens_r, ens_v, ens_gmags, ens_rmags, ens_imags, ens_hvd, ens_caumass, ens_caumass_est, ens_causurf, ens_nfwsurf, los_r, los_v, los_gmags, los_rmags, los_imags, los_hvd, los_caumass, los_caumass_est, los_causurf, los_nfwsurf, self.C.x_range, sample_size, pro_pos