#!/usr/bin/env python
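# ----------------------------------------------------------------------
# NOTE: the original import block and the enclosing class statement are not
# part of this excerpt. The method below assumes module-level names roughly
# like the following (a sketch inferred from the calls it makes, not the
# author's verbatim header):
#
#   import numpy as np
#   from astLib import astStats
#   from cosmolopy import fidcosmo, magnitudes
#
#   c = 300000.0        # speed of light in km/s (assumed value)
#
#   class Caustic(object):
#       ...              # zdistance, findangle, set_sample, shiftgapper,
#                        # gaussian_kernel, etc. are methods of this class
# ----------------------------------------------------------------------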
    def run_caustic(self, data, gal_mags=None, gal_memberflag=None, clus_ra=None,
                    clus_dec=None, clus_z=None, gal_r=None, gal_v=None, r200=None,
                    clus_vdisp=None, rlimit=4.0, vlimit=3500, q=10.0, H0=100.0,
                    xmax=6.0, ymax=5000.0, cut_sample=True, gapper=True, mirror=True,
                    absflag=False):
        self.clus_ra = clus_ra
        self.clus_dec = clus_dec
        self.clus_z = clus_z

        if gal_r is None:
            if self.clus_ra is None:
                # calculate average ra from galaxies
                self.clus_ra = np.average(data[:, 0])
            if self.clus_dec is None:
                # calculate average dec from galaxies
                self.clus_dec = np.average(data[:, 1])
            # reduce data set to only valid redshifts
            data_spec = data[np.where((np.isfinite(data[:, 2])) &
                                      (data[:, 2] > 0.0) & (data[:, 2] < 5.0))]
            if self.clus_z is None:
                # calculate average z from galaxies
                self.clus_z = np.average(data_spec[:, 2])
            # calculate angular diameter distance (sets self.ang_d and self.lum_d)
            self.ang_d, self.lum_d = self.zdistance(self.clus_z, H0)
            # calculate the angular separation of each galaxy from the cluster center
            self.angle = self.findangle(data_spec[:, 0], data_spec[:, 1],
                                        self.clus_ra, self.clus_dec)
            self.r = self.angle * self.ang_d
            self.v = c * (data_spec[:, 2] - self.clus_z) / (1 + self.clus_z)
        else:
            data_spec = data[np.where(np.isfinite(gal_v))]
            self.r = gal_r
            self.v = gal_v

        # package galaxy data (TODO: use an astropy Table here)
        if gal_memberflag is None:
            self.data_table = np.vstack((self.r, self.v, data_spec.T)).T
        else:
            self.data_table = np.vstack((self.r, self.v, data_spec.T, gal_memberflag)).T

        # reduce sample to within the radius/velocity limits
        if cut_sample:
            self.data_set = self.set_sample(self.data_table, rlimit=rlimit, vlimit=vlimit)
        else:
            self.data_set = self.data_table

        # further select the sample via the shifting gapper
        if gapper:
            self.data_set = self.shiftgapper(self.data_set)
        print 'DATA SET SIZE', self.data_set[:, 0].size
        '''
        #tries to identify double groups that slip through the gapper process
        upper_max = np.max(self.data_set[:,1][np.where((self.data_set[:,1]>0.0)&(self.data_set[:,0]<1.0))])
        lower_max = np.min(self.data_set[:,1][np.where((self.data_set[:,1]<0.0)&(self.data_set[:,0]<1.0))])
        if np.max(np.array([upper_max,-lower_max])) > 1000.0+np.min(np.array([upper_max,-lower_max])):
            self.data_set = self.data_set[np.where(np.abs(self.data_set[:,1])<1000.0+np.min(np.array([upper_max,-lower_max])))]
        '''

        if absflag:
            abs_mag = self.data_table[:, 5]
        else:
            abs_mag = self.data_table[:, 7] - magnitudes.distance_modulus(self.clus_z, **fidcosmo)
        self.Ngal_1mpc = self.r[np.where((abs_mag < -20.55) & (self.r < 1.0) &
                                         (np.abs(self.v) < 3500))].size

        if r200 is None:
            # richness-based r200 estimate from the bright-galaxy count within 1 Mpc
            self.r200 = 0.01 * self.Ngal_1mpc + 0.584  # +np.random.normal(0,0.099)
            #vdisp_prelim = astStats.biweightScale(self.data_set[:,1][np.where(self.data_set[:,0]<3.0)],9.0)
            #r200_mean_prelim = 0.002*vdisp_prelim + 0.40
            #self.r200 = r200_mean_prelim/1.7
            '''
            #original r200 est
            rclip,vclip = self.shiftgapper(np.vstack((self.r[np.where((self.r<3.0) & (np.abs(self.v)<3500.0))],self.v[np.where((self.r<3.0) & (np.abs(self.v)<3500.0))])).T).T
            vdisp_prelim_1 = astStats.biweightClipped(vclip,9.0,3.0)['biweightScale']
            rclip,vclip = self.shiftgapper(np.vstack((self.r[np.where((self.r<1.5) & (np.abs(self.v)<3500.0))],self.v[np.where((self.r<1.5) & (np.abs(self.v)<3500.0))])).T).T
            vdisp_prelim_2 = astStats.biweightClipped(vclip,9.0,3.0)['biweightScale']
            if vdisp_prelim_2 < 0.6*vdisp_prelim_1:
                vdisp_prelim = vdisp_prelim_2
            else:
                vdisp_prelim = vdisp_prelim_1
            r200_mean_prelim = 0.002*vdisp_prelim + 0.40
            self.r200 = r200_mean_prelim/1.7
            '''
            if self.r200 > 3.0:
                self.r200 = 3.0
            if 3.0 * self.r200 < 6.0:
                rlimit = 3.0 * self.r200
            else:
                rlimit = 5.5
        else:
            self.r200 = r200
        print 'Pre_r200=', self.r200

        if mirror:
            print 'Calculating Density w/Mirrored Data'
            self.gaussian_kernel(np.append(self.data_set[:, 0], self.data_set[:, 0]),
                                 np.append(self.data_set[:, 1], -self.data_set[:, 1]),
                                 self.r200, normalization=H0, scale=q,
                                 xmax=xmax, ymax=ymax, xres=200, yres=220)
        else:
            print 'Calculating Density'
            self.gaussian_kernel(self.data_set[:, 0], self.data_set[:, 1], self.r200,
                                 normalization=H0, scale=q, xmax=xmax, ymax=ymax,
                                 xres=200, yres=220)
        self.img_tot = self.img / np.max(np.abs(self.img))
        self.img_grad_tot = self.img_grad / np.max(np.abs(self.img_grad))
        self.img_inf_tot = self.img_inf / np.max(np.abs(self.img_inf))

        if clus_vdisp is None:
            # richness-based velocity-dispersion estimate
            self.pre_vdisp = 9.15 * self.Ngal_1mpc + 350.32
            print 'Pre_vdisp=', self.pre_vdisp
            print 'Ngal<1Mpc=', self.Ngal_1mpc
            v_cut = self.data_set[:, 1][np.where((self.data_set[:, 0] < self.r200) &
                                                 (np.abs(self.data_set[:, 1]) < 5000.0))]
            try:
                self.pre_vdisp2 = astStats.biweightScale(v_cut, 9.0)
            except:
                self.pre_vdisp2 = np.std(v_cut, ddof=1)
            print 'Vdisp from galaxies=', self.pre_vdisp2

            # velocity and caustic-surface uncertainties as a function of sample size
            if self.data_set[:, 0].size < 15:
                self.v_unc = 0.35
                self.c_unc_sys = 0.75
                self.c_unc_int = 0.35
            elif self.data_set[:, 0].size < 25:
                self.v_unc = 0.30
                self.c_unc_sys = 0.55
                self.c_unc_int = 0.22
            elif self.data_set[:, 0].size < 50:
                self.v_unc = 0.23
                self.c_unc_sys = 0.42
                self.c_unc_int = 0.16
            elif self.data_set[:, 0].size < 100:
                self.v_unc = 0.18
                self.c_unc_sys = 0.34
                self.c_unc_int = 0.105
            else:
                self.v_unc = 0.15
                self.c_unc_sys = 0.29
                self.c_unc_int = 0.09

            if self.pre_vdisp2 > 1.75 * self.pre_vdisp:
                self.pre_vdisp_comb = 9.15 * self.Ngal_1mpc + 450.32
            else:
                self.pre_vdisp_comb = self.pre_vdisp2
            '''
            if self.data_set[:,1][np.where(self.data_set[:,0]<self.r200)].size >= 10:
                self.pre_vdisp_comb = astStats.biweightScale(self.data_set[:,1][np.where(self.data_set[:,0]<self.r200)],9.0)
            else:
                self.pre_vdisp_comb = np.std(self.data_set[:,1][np.where(self.data_set[:,0]<self.r200)],ddof=1)
            #self.pre_vdisp_comb = (self.pre_vdisp*(self.pre_vdisp2*self.v_unc)**2+self.pre_vdisp2*118.14**2)/(118.14**2+(self.pre_vdisp2*self.v_unc)**2)
            '''
        else:
            self.pre_vdisp_comb = clus_vdisp
        print 'Combined Vdisp=', self.pre_vdisp_comb

        self.beta = 0.5 * self.x_range / (self.x_range + self.r200 / 4.0)

        # identify the initial caustic surface and the members within it
        print 'Calculating initial surface'
        if gal_memberflag is None:
            self.Caustics = CausticSurface(self.data_set, self.x_range, self.y_range,
                                           self.img_tot, r200=self.r200,
                                           halo_vdisp=self.pre_vdisp_comb, beta=None)
        else:
            self.Caustics = CausticSurface(self.data_set, self.x_range, self.y_range,
                                           self.img_tot, memberflags=self.data_set[:, -1],
                                           r200=self.r200)
        self.caustic_profile = self.Caustics.Ar_finalD
        self.caustic_fit = self.Caustics.vesc_fit
        self.gal_vdisp = self.Caustics.gal_vdisp
        self.memflag = self.Caustics.memflag

        # estimate the mass from the caustic profile, beta profile (if given),
        # and concentration (if given)
        if clus_z is not None:
            #self.Mass = MassCalc(self.x_range,self.caustic_profile,self.gal_vdisp,self.clus_z,r200=self.r200,fbr=None,H0=H0)
            #self.Mass2 = MassCalc(self.x_range,self.caustic_profile,self.gal_vdisp,self.clus_z,r200=self.r200,fbr=0.65,H0=H0)
            self.Mass = MassCalc(self.x_range, self.caustic_fit, self.gal_vdisp,
                                 self.clus_z, r200=self.r200, fbr=None, H0=H0)
            self.Mass2 = MassCalc(self.x_range, self.caustic_fit, self.gal_vdisp,
                                  self.clus_z, r200=self.r200, fbr=0.65, H0=H0)
            self.r200_est = self.Mass.r200_est
            self.r200_est_fbeta = self.Mass2.r200_est
            self.M200_est = self.Mass.M200_est
            self.M200_est_fbeta = self.Mass2.M200_est
            print 'r200 estimate: ', self.Mass.r200_est
            print 'M200 estimate: ', self.Mass.M200_est

        self.Ngal = self.data_set[np.where((self.memflag == 1) &
                                           (self.data_set[:, 0] <= self.r200_est))].shape[0]

        # calculate the member velocity dispersion
        try:
            self.vdisp_gal = astStats.biweightScale(self.data_set[:, 1][self.memflag == 1], 9.0)
        except:
            try:
                self.vdisp_gal = np.std(self.data_set[:, 1][self.memflag == 1], ddof=1)
            except:
                self.vdisp_gal = 0.0
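# ----------------------------------------------------------------------
# Example usage (a minimal sketch, not part of the original module). The
# module/class names, the file name, and the cluster values below are
# assumptions for illustration; the column layout follows the code above
# (data[:,0]=RA, data[:,1]=Dec, data[:,2]=z; later columns carry the
# magnitudes referenced through data_table):
#
#   import numpy as np
#   from CausticMass import Caustic                  # assumed module/class names
#
#   gal_data = np.loadtxt('cluster_galaxies.dat')    # hypothetical catalog
#   cc = Caustic()
#   cc.run_caustic(gal_data, clus_ra=180.0, clus_dec=30.0, clus_z=0.10,
#                  H0=100.0, q=10.0, rlimit=4.0, vlimit=3500)
#   print cc.r200_est, cc.M200_est, cc.vdisp_gal
# ----------------------------------------------------------------------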
n_bulge float default -999, r_bulge float default -999, m_bulge float default -999,
ba_bulge float default -999, pa_bulge float default -999,
r_disk float default -999, m_disk float default -999,
ba_disk float default -999, pa_disk float default -999);""".format(**all_info)
try:
    print cmd
    cursor.execute(cmd)
except:
    # ignore errors (e.g. the table already exists)
    pass

for model in all_info['model_list']:
    for redshift in np.arange(all_info['z_range']['start'],
                              all_info['z_range']['stop'],
                              all_info['z_range']['scale']):
        # kpc per arcsec at this redshift
        kpc_scale = distance.angular_diameter_distance(
            redshift, **all_info['cosmo']) * 1000.0 * np.pi / (180.0 * 3600.0)
        dismod = magnitudes.distance_modulus(redshift, **all_info['cosmo'])
        print "redshift:%.2f, scale:%.1f, DM:%.1f" % (redshift, kpc_scale, dismod)
        cmd = """insert into {dba}.{out_table}
                     (model, galcount, name, kpc_per_arcsec, dismod, zeropoint, z, BT,
                      n_bulge, ba_bulge, pa_bulge, ba_disk, pa_disk)
                 select '{model}', b.galcount, b.name, {kpc_scale}, {dismod},
                        -1.0*c.aa_r - c.kk_r*c.airmass_r, {z}, b.BT, b.n,
                        b.eb, b.bpa+90.0, b.ed, b.dpa+90.0
                 from {dba}.CAST as c, {dba}.{in_table} as b, {dba}.DERT as d
                 where d.galcount = b.galcount and b.galcount = c.galcount;""".format(
            model=model, kpc_scale=kpc_scale, dismod=dismod, z=redshift,
            **all_info).format(model=model)
        # note: the second .format pass re-applies 'model', filling any '{model}'
        # placeholders that survive the first pass (e.g. ones embedded in all_info values)
        print cmd
        cursor.execute(cmd)
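# ----------------------------------------------------------------------
# For reference, a sketch of the configuration dictionary this script expects.
# Only the key names are taken from the code above; every value shown here is
# an illustrative assumption:
#
#   all_info = {
#       'dba': 'mydb',                          # schema used as {dba}
#       'in_table': 'bulge_disk_fits',          # source table, {in_table}
#       'out_table': 'derived_quantities',      # destination table, {out_table}
#       'model_list': ['ser', 'serexp'],
#       'z_range': {'start': 0.005, 'stop': 0.30, 'scale': 0.005},
#       'cosmo': {'omega_M_0': 0.3, 'omega_lambda_0': 0.7,
#                 'omega_k_0': 0.0, 'h': 0.7},  # cosmolopy-style keywords
#   }
# ----------------------------------------------------------------------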