def createBFITsample():

    # load the truth
    truth_array = tabletools.loadTable(table_name='truth_array', filepath=args.filepath_truth, dtype=dtype_table_truth)
    logger.info('truth array has %d galaxies' % len(truth_array))

    # load the results
    results_array = tabletools.loadTable(table_name='results_array', filepath=args.filepath_results, dtype=dtype_table_results)
    logger.info('results array has %d galaxies' % len(results_array))

    # load the stats - they contain the galaxies which passed
    stats_array = tabletools.loadTable(table_name='stats_array', filepath=args.filepath_stats, dtype=dtype_table_stats)
    logger.info('stats array has %d galaxies' % len(stats_array))

    results_array_bfit = numpy.zeros(1, dtype=dtype_table_results)
    truth_array_bfit = numpy.zeros(1, dtype=dtype_table_truth)

    for ig, g in enumerate(stats_array['cosmos_id']):

        select = truth_array['id_cosmos'] == g
        results_rows = results_array[select]
        truth_rows = truth_array[select]

        if ig % 100 == 0:
            logger.info('passing galaxy %10d results id %10d truth id %10d' % (ig, results_rows['identifier'][0], truth_rows['id_unique'][0]))

        results_array_bfit = numpy.append(results_array_bfit, results_rows)
        truth_array_bfit = numpy.append(truth_array_bfit, truth_rows)

    # remove the placeholder first row
    results_array_bfit = results_array_bfit[1:]
    truth_array_bfit = truth_array_bfit[1:]

    n_gals = len(truth_array_bfit)
    logger.info('number of galaxies in bfit sample %d' % n_gals)
    n_gals = len(stats_array)

    filename_results_bfit = args.filepath_results.replace('results', 'bfit').replace('pp', 'fits')
    filename_truth_bfit = 'truth.%d.fits' % n_gals

    fits_results = tabletools.getFITSTable(results_array_bfit)
    fits_truth = tabletools.getFITSTable(truth_array_bfit)
    n_gals_fits = fits_truth[1].data.shape[0]
    logger.info('got fits table from numpy, with %d rows' % n_gals_fits)

    fits_results.writeto(filename_results_bfit, clobber=True)
    fits_truth.writeto(filename_truth_bfit, clobber=True)
    logger.info('saved %s %s' % (filename_truth_bfit, filename_results_bfit))

    # check
    results_array_bfit_loaded = pyfits.open(filename_results_bfit)
    n_loaded = len(results_array_bfit_loaded[1].data)
    logger.info('loaded n_gals %d' % n_loaded)
    logger.info('first ids %10d %10d' % (results_array_bfit_loaded[1].data[0]['identifier'], results_array_bfit['identifier'][0]))
    logger.info('last ids  %10d %10d' % (results_array_bfit_loaded[1].data[-1]['identifier'], results_array_bfit['identifier'][-1]))
    logger.info('first e1  % f % f' % (results_array_bfit_loaded[1].data['e1'][0], results_array_bfit['e1'][0]))
    logger.info('last e1   % f % f' % (results_array_bfit_loaded[1].data['e1'][-1], results_array_bfit['e1'][-1]))
def main():

    description = 'filaments_fit'
    parser = argparse.ArgumentParser(description=description, add_help=True)
    parser.add_argument('-v', '--verbosity', type=int, action='store', default=2, choices=(0, 1, 2, 3), help='integer verbosity level: min=0, max=3 [default=2]')
    # parser.add_argument('-o', '--filename_output', default='test2.cat', type=str, action='store', help='name of the output catalog')
    # parser.add_argument('-c', '--filename_config', default='test2.yaml', type=str, action='store', help='name of the yaml config file')
    # parser.add_argument('-d', '--dry', default=False, action='store_true', help='Dry run, dont generate data')
    args = parser.parse_args()

    # Parse the integer verbosity level from the command line args into a logging level
    logging_levels = {0: logging.CRITICAL, 1: logging.WARNING, 2: logging.INFO, 3: logging.DEBUG}
    logging_level = logging_levels[args.verbosity]
    logging.basicConfig(format="%(message)s", level=logging_level, stream=sys.stdout)
    log = logging.getLogger("filaments_fit")
    log.setLevel(logging_level)

    id_pair = 7
    filename_shears = 'shears_bcc_g.%03d.fits' % id_pair
    filename_pairs = 'pairs_bcc.fits'
    filename_halo1 = 'pairs_bcc.halos1.fits'

    pairs_table = tabletools.loadTable(filename_pairs)
    shears_info = tabletools.loadTable(filename_shears)
    halo1_table = tabletools.loadTable(filename_halo1)

    fitobj = filaments_model_1hmc.modelfit()
    fitobj.sigma_g = 0.01
    fitobj.shear_g1 = shears_info['g1sc'] + np.random.randn(len(shears_info['g1sc'])) * fitobj.sigma_g
    fitobj.shear_g2 = shears_info['g2sc'] + np.random.randn(len(shears_info['g1sc'])) * fitobj.sigma_g
    fitobj.shear_u_arcmin = shears_info['u_arcmin']
    fitobj.shear_v_arcmin = shears_info['v_arcmin']
    fitobj.halo_u_arcmin = pairs_table['u1_arcmin'][id_pair]
    fitobj.halo_v_arcmin = pairs_table['v1_arcmin'][id_pair]
    fitobj.halo_z = pairs_table['z'][id_pair]
    pair_info = pairs_table[id_pair]

    fitobj.run_mcmc()
    print fitobj.sampler
    print halo1_table['m200'][id_pair]

    pl.figure()
    pl.hist(fitobj.sampler.flatchain[:, 0], 100, color="k", histtype="step")
    pl.figure()
    pl.hist(fitobj.sampler.flatchain[:, 1], 100, color="k", histtype="step")
    pl.show()

    import pdb; pdb.set_trace()
def main():

    global log, config, args

    description = 'Get statistics and plot results of noise bias calibration runs'
    parser = argparse.ArgumentParser(description=description, add_help=True)
    parser.add_argument('-v', '--verbosity', type=int, action='store', default=2, choices=(0, 1, 2, 3), help='integer verbosity level: min=0, max=3 [default=2]')
    parser.add_argument('-i', '--filename_input', default='sha1-O1.cat', type=str, action='store', help='name of the input catalog')
    parser.add_argument('-c', '--filename_config', default='sha1.yaml', type=str, action='store', help='name of the yaml config file')
    parser.add_argument('-m', '--method_id', default='hsm', type=str, action='store', help='name of the shape measurement method')
    args = parser.parse_args()

    # Parse the integer verbosity level from the command line args into a logging level
    logging_levels = {0: logging.CRITICAL, 1: logging.WARNING, 2: logging.INFO, 3: logging.DEBUG}
    logging_level = logging_levels[args.verbosity]
    log = logging.getLogger("nbc1_plots")
    log.setLevel(logging_level)
    log_formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s ")
    stream_handler = logging.StreamHandler(sys.stdout)
    stream_handler.setFormatter(log_formatter)
    log.addHandler(stream_handler)

    log.info(time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime()))

    config = yaml.load(open(args.filename_config))

    filename_results_sv_example = '/home/tomek/projects/131118_nbc1/sv-clusters-shears/results_sv_clusters.all.fits'
    filename_cal_all = '/home/tomek/projects/131118_nbc1/run-tiled-002/cleaned_calib.all.fits'

    global results_filename_fmt
    # results_filename_fmt = 'calib.v4.2013.12.20/cleaned_calib.v4.2013.12.20/nbc_%03d.fits.im3.cleaned.cat'
    results_filename_fmt = 'calib.v4.2014.01.24/cleaned_calib.v4.2014.01.24/nbc_%03d.fits.im3.cleaned.cat'

    global results_sv_example
    results_sv_example = tabletools.loadTable(filename_results_sv_example)
    global results_cal_all
    results_cal_all = tabletools.loadTable(filename_cal_all)

    # get_stats()
    # get_weights_with_histograms()
    plot_mc_in_fwhm_ratio_bins()
    # plot_ellipticity_variance()
    # get_mc()
    # plot_mc()
    # plot_mc_cuts()

    log.info(time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime()))
def estimate_snr():

    pairs_table = hp.get_pairs(Dxy=[6, 18], Mstar=[3e13, 1e16], zbin=[0.01, 0.9], filename_halos='wide.fits', n_sigma=3)
    n_pairs = len(pairs_table)
    print 'n_pairs', n_pairs

    n_pairs_sdss = 200000
    n_eff_sdss = 0.5
    n_eff_cfht = 15
    n_sigma_sdss = 10
    kernel_gain_cfht = 3

    n_eff_pairs_sdss = n_pairs_sdss * n_eff_sdss
    print 'n_eff_pairs_sdss', n_eff_pairs_sdss
    n_eff_pairs_cfht = n_pairs * n_eff_cfht * kernel_gain_cfht
    print 'n_eff_pairs_cfht', n_eff_pairs_cfht

    sigma_single_filament = np.sqrt(n_eff_pairs_sdss) / n_sigma_sdss
    n_sigma_cfht = np.sqrt(n_eff_pairs_cfht) / sigma_single_filament
    print 'n_sigma_cfht', n_sigma_cfht

    filename_durret = 'wide.fits'
    durret_clusters = tabletools.loadTable(filename_durret)
    pl.hist(durret_clusters['snr'], bins=range(1, 10))
    pl.show()
def get_shear_files_catalog():

    filelist_shears = np.loadtxt('filelist_shears.txt', dtype='a1024')

    list_shearbase = []
    total_n_gals = 0

    for ix, fs in enumerate(filelist_shears):

        logger.info('%3d\t%s\t%1.2e' % (ix, fs, float(total_n_gals)))
        shear_cat_full = tabletools.loadTable(fs)
        # use every 100th shear for speed
        shear_cat = shear_cat_full[::100]
        radius = shear_cat['ra'] * 0 + 1  # set to 1
        # xyz = cosmology.get_euclidian_coords(shear_cat['ra'], shear_cat['dec'], radius)
        xs, ys, zs = cosmology.spherical_to_cartesian_deg(shear_cat['ra'], shear_cat['dec'], radius)
        x, y, z = np.mean(xs), np.mean(ys), np.mean(zs)
        del(xs); del(ys); del(zs)

        row = np.array([(ix, len(shear_cat_full), fs, x, y, z)], dtype=dtype_shearbase)
        total_n_gals += len(shear_cat_full)
        list_shearbase.append(row)
        del(shear_cat)
        del(shear_cat_full)

    logger.info('total gals %d', total_n_gals)
    shearbase = np.concatenate(list_shearbase)
    tabletools.saveTable(filename_shearbase, shearbase)
def get_bcc_pz(self, filename_lenscat):

    if self.prob_z is None:

        # filename_lenscat = os.environ['HOME'] + '/data/BCC/bcc_a1.0b/aardvark_v1.0/lenscats/s2n10cats/aardvarkv1.0_des_lenscat_s2n10.351.fit'

        if 'fits' in filename_lenscat:

            lenscat = tabletools.loadTable(filename_lenscat)

            if 'z' in lenscat.dtype.names:
                self.prob_z, _ = pl.histogram(lenscat['z'], bins=self.grid_z_edges, normed=True)
            elif 'z-phot' in lenscat.dtype.names:
                self.prob_z, _ = pl.histogram(lenscat['z-phot'], bins=self.grid_z_edges, normed=True)

            if 'e1' in lenscat.dtype.names:
                select = lenscat['star_flag'] == 0
                lenscat = lenscat[select]
                select = lenscat['fitclass'] == 0
                lenscat = lenscat[select]
                select = (lenscat['e1'] != 0.0) * (lenscat['e2'] != 0.0)
                lenscat = lenscat[select]
                self.sigma_ell = np.std(lenscat['e1'] * lenscat['weight'], ddof=1)

        elif 'pp2' in filename_lenscat:

            pickle = tabletools.loadPickle(filename_lenscat, log=0)
            self.prob_z = pickle['prob_z']
            self.grid_z_centers = pickle['bins_z']
            self.grid_z_edges = plotstools.get_bins_edges(self.grid_z_centers)
def get_bcc_pz(self):

    if self.prob_z is None:
        filename_lenscat = os.environ['HOME'] + '/data/BCC/bcc_a1.0b/aardvark_v1.0/lenscats/s2n10cats/aardvarkv1.0_des_lenscat_s2n10.351.fit'
        lenscat = tabletools.loadTable(filename_lenscat)
        self.prob_z, _ = pl.histogram(lenscat['z'], bins=self.grid_z_edges, normed=True)
def get_calibration_bins():

    results_calib = tabletools.loadTable(filename_results_calib)

    n_all = 0
    for isize, vsize in enumerate(bins_size[:-1]):
        for isnr, vsnr in enumerate(bins_snr[:-1]):

            bin_size_min = bins_size[isize]
            bin_size_max = bins_size[isize + 1]
            bin_snr_min = bins_snr[isnr]
            bin_snr_max = bins_snr[isnr + 1]

            select_size = (results_calib['size'] < bin_size_max) * (results_calib['size'] > bin_size_min)
            select_snr = (results_calib['snr'] < bin_snr_max) * (results_calib['snr'] > bin_snr_min)
            select = select_size * select_snr
            current_results = results_calib[select]

            # pl.subplot(1,3,1)
            # pl.hist(current_results['size'])
            # pl.subplot(1,3,2)
            # pl.hist(current_results['snr'])
            # pl.subplot(1,3,3)
            # pl.hist(current_results['isnr_true'])
            # pl.show()

            n_gals = len(current_results)
            n_all += n_gals

            g1_tru = current_results['g1_true']
            g2_tru = current_results['g2_true']
            g1_est = current_results['g1']
            g2_est = current_results['g2']
            g1_err = np.ones(n_gals) * np.std(current_results['g1'], ddof=1)
            g2_err = np.ones(n_gals) * np.std(current_results['g2'], ddof=1)

            [c1, m1, C1cm] = fitting.get_line_fit(g1_tru, g1_est, g1_err)
            [c2, m2, C2cm] = fitting.get_line_fit(g2_tru, g2_est, g2_err)

            m1_std = np.sqrt(C1cm[1, 1])
            m2_std = np.sqrt(C2cm[1, 1])
            c1_std = np.sqrt(C1cm[0, 0])
            c2_std = np.sqrt(C2cm[0, 0])

            m_mean = (m1 + m2) / 2.
            m_mean_std = np.sqrt((m1_std**2 + m2_std**2) / 2.)

            calib_struct['bias_m'][isize][isnr] = m_mean
            calib_struct['bias_m_std'][isize][isnr] = m_mean_std

            del(select_size)
            del(select_snr)
            del(current_results)

            log.info('size=%d [%5.2f,% 5.2f]\tsnr=%d [%5.2f,%5.2f]\tn_gals=%d\tm_mean=%2.4f\t(% 2.4f)' % (isize, bin_size_min, bin_size_max, isnr, bin_snr_min, bin_snr_max, n_gals, m_mean, m_mean_std))

    file_calibration = open(filename_calibration, 'w')
    pickle.dump(calib_struct, file_calibration)
    file_calibration.close()
    log.info('pickled %s' % filename_calibration)
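
# For reference: fitting.get_line_fit is assumed here to perform a weighted straight-line fit
# of g_est against g_true and return [intercept c, slope m, covariance C], with C[0,0]=var(c)
# and C[1,1]=var(m) as indexed above. A minimal numpy stand-in under that assumption
# (hypothetical helper, not the project's implementation):
import numpy as np

def line_fit_with_cov(x, y, sigma):
    # weighted least squares for y = c + m*x with per-point errors sigma
    w = 1.0 / sigma**2
    A = np.vstack([np.ones_like(x), x]).T       # design matrix [1, x]
    Aw = A * w[:, None]
    cov = np.linalg.inv(np.dot(A.T, Aw))        # (A^T W A)^-1
    c, m = np.dot(cov, np.dot(Aw.T, y))         # best-fit intercept and slope
    return c, m, cov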
def calibrate_results():

    filelist = np.loadtxt(filelist_svclusters, dtype='a')

    for ifile, vfile in enumerate(filelist):

        results_sv = tabletools.loadTable(vfile, dtype=dtype_table_results_sv, log=1)
        results_sv = add_calibration_columns(results_sv)
        filename_calibrated = vfile.replace('.cat', '.nbc.cat')
        tabletools.saveTable(filename_calibrated, results_sv)
        log.info('calibrated file %3d %s' % (ifile, filename_calibrated))
def get_bcc_pz(self, filename_lenscat):

    if self.prob_z is None:

        # filename_lenscat = os.environ['HOME'] + '/data/BCC/bcc_a1.0b/aardvark_v1.0/lenscats/s2n10cats/aardvarkv1.0_des_lenscat_s2n10.351.fit'
        lenscat = tabletools.loadTable(filename_lenscat)

        if 'z' in lenscat.dtype.names:
            self.prob_z, _ = pl.histogram(lenscat['z'], bins=self.grid_z_edges, normed=True)
        elif 'z-phot' in lenscat.dtype.names:
            self.prob_z, _ = pl.histogram(lenscat['z-phot'], bins=self.grid_z_edges, normed=True)

        if 'e1' in lenscat.dtype.names:
            self.sigma_ell = np.std(lenscat['e1'], ddof=1)
def get_shears_for_single_pair(halo1, halo2, idp=0):

    global cfhtlens_shear_catalog
    if cfhtlens_shear_catalog is None:

        filename_cfhtlens_shears = config['filename_cfhtlens_shears']
        cfhtlens_shear_catalog = tabletools.loadTable(filename_cfhtlens_shears)

        if 'star_flag' in cfhtlens_shear_catalog.dtype.names:
            select = cfhtlens_shear_catalog['star_flag'] == 0
            cfhtlens_shear_catalog = cfhtlens_shear_catalog[select]
            select = cfhtlens_shear_catalog['fitclass'] == 0
            cfhtlens_shear_catalog = cfhtlens_shear_catalog[select]
            logger.info('removed stars, remaining %d', len(cfhtlens_shear_catalog))
            select = (cfhtlens_shear_catalog['e1'] != 0.0) * (cfhtlens_shear_catalog['e2'] != 0.0)
            cfhtlens_shear_catalog = cfhtlens_shear_catalog[select]
            logger.info('removed zeroed shapes, remaining %d', len(cfhtlens_shear_catalog))

    # correcting additive systematics
    if 'e1corr' in cfhtlens_shear_catalog.dtype.names:
        shear_g1, shear_g2 = cfhtlens_shear_catalog['e1corr'], -cfhtlens_shear_catalog['e2corr']
        shear_ra_deg, shear_de_deg, shear_z = cfhtlens_shear_catalog['ALPHA_J2000'], cfhtlens_shear_catalog['DELTA_J2000'], cfhtlens_shear_catalog['Z_B']
    else:
        shear_g1, shear_g2 = cfhtlens_shear_catalog['e1'], -(cfhtlens_shear_catalog['e2'] - cfhtlens_shear_catalog['c2'])
        shear_ra_deg, shear_de_deg, shear_z = cfhtlens_shear_catalog['ra'], cfhtlens_shear_catalog['dec'], cfhtlens_shear_catalog['z']

    halo1_ra_deg, halo1_de_deg = halo1['ra'], halo1['dec']
    halo2_ra_deg, halo2_de_deg = halo2['ra'], halo2['dec']

    pair_ra_deg, pair_de_deg = cosmology.get_midpoint(halo1_ra_deg, halo1_de_deg, halo2_ra_deg, halo2_de_deg, unit='deg')
    pair_z = np.mean([halo1['z'], halo2['z']])

    pairs_shear, halos_coords, pairs_shear_full = filaments_tools.create_filament_stamp(halo1_ra_deg, halo1_de_deg, halo2_ra_deg, halo2_de_deg, shear_ra_deg, shear_de_deg, shear_g1, shear_g2, shear_z, pair_z, lenscat=cfhtlens_shear_catalog, shear_bias_m=cfhtlens_shear_catalog['m'], shear_weight=cfhtlens_shear_catalog['weight'])

    if len(pairs_shear) < 100:
        logger.error('found only %d shears' % len(pairs_shear))
        return None, None, None

    return pairs_shear, halos_coords, pairs_shear_full
def get_psf_index(ifwhm, ie1, ie2):

    filename_key = 'psf_key.fits'
    psf_table = tabletools.loadTable(filename_key)
    key_all, key_fwhm, key_e1, key_e2 = psf_table['id_psf'], psf_table['id_psf_fwhm'], psf_table['id_psf_e1'], psf_table['id_psf_e2']

    # for ii,vv in enumerate(psf_table):
    #     print 'psf_table index=%3d ifwhm=%d ie1=%d ie2=%d' % (key_all[ii], key_fwhm[ii], key_e1[ii], key_e2[ii])

    indices = np.ones_like(ifwhm)
    for ii, vv in enumerate(ifwhm):
        select = (ifwhm[ii] == key_fwhm) * (ie1[ii] == key_e1) * (ie2[ii] == key_e2)
        nz = np.nonzero(select)[0][0]
        indices[ii] = key_all[nz]
        # print 'index=%3d ifwhm=%d ie1=%d ie2=%d' % (indices[ii], ifwhm[ii], ie1[ii], ie2[ii])

    # import pdb; pdb.set_trace()

    return indices
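
# The loop above scans the whole PSF key table once per galaxy. For large input arrays a
# dictionary keyed on (id_psf_fwhm, id_psf_e1, id_psf_e2) does the same lookup in one pass.
# Minimal sketch of that alternative (hypothetical helper, not part of the original module;
# assumes numpy imported as np and the same psf_key.fits columns):
def get_psf_index_dict(ifwhm, ie1, ie2, psf_table):
    # build the lookup once: (fwhm index, e1 index, e2 index) -> global psf index
    key = dict(zip(zip(psf_table['id_psf_fwhm'], psf_table['id_psf_e1'], psf_table['id_psf_e2']), psf_table['id_psf']))
    return np.array([key[(f, e1, e2)] for f, e1, e2 in zip(ifwhm, ie1, ie2)])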
def run_all():

    bins_z = np.arange(0.025, 3.5, 0.05)

    filename_gals = '/home/kacprzak/data/CFHTLens/CFHTLens_2014-06-14.normalised.fits'
    filename_clusters = os.environ['HOME'] + '/data/CFHTLens/ClusterZ/clustersz.fits'
    cat_clusters = tabletools.loadTable(filename_clusters)
    cat_gals = tabletools.loadTable(filename_gals)

    gals_ra_deg = cat_gals['ALPHA_J2000']
    gals_de_deg = cat_gals['DELTA_J2000']
    gals_ra_rad, gals_de_rad = cosmology.deg_to_rad(gals_ra_deg, gals_de_deg)

    cylinder_radius_mpc = 1

    pz_all = np.sum(cat_gals['PZ_full'], axis=0)
    pz_all = pz_all / np.sum(pz_all)

    n_brightest = 40
    n_bins_hires = 10000
    bins_z_hires = np.linspace(bins_z.min(), bins_z.max(), n_bins_hires)

    new_z = np.zeros(len(cat_clusters))

    for ic in range(len(cat_clusters)):
    # for ic in range(2):

        cluster_ra_rad, cluster_de_rad = cosmology.deg_to_rad(cat_clusters[ic]['ra'], cat_clusters[ic]['dec'])
        cluster_z = cat_clusters['z_bad'][ic]

        gals_u_rad, gals_v_rad = cosmology.get_gnomonic_projection(gals_ra_rad, gals_de_rad, cluster_ra_rad, cluster_de_rad)
        gals_u_mpc, gals_v_mpc = cosmology.rad_to_mpc(gals_u_rad, gals_v_rad, cluster_z)

        # select galaxies inside the cylinder and close to the cluster redshift
        select = (np.sqrt(gals_u_mpc**2 + gals_v_mpc**2) < cylinder_radius_mpc) * (np.abs(cat_gals['Z_B'] - cluster_z) < 0.1)
        # print 'selected %d gals in cylinder' % len(np.nonzero(select)[0])
        cylinder_gals = cat_gals[select]
        gals_u_mpc = gals_u_mpc[select]
        gals_v_mpc = gals_v_mpc[select]

        # keep only galaxies with sensible magnitudes
        select1 = (cylinder_gals['MAG_i'] > 10) * (cylinder_gals['MAG_i'] < 27)
        select2 = (cylinder_gals['MAG_r'] > 10) * (cylinder_gals['MAG_r'] < 27)
        select3 = (cylinder_gals['MAG_g'] > 10) * (cylinder_gals['MAG_g'] < 27)
        select4 = (cylinder_gals['MAG_u'] > 10) * (cylinder_gals['MAG_u'] < 27)
        select5 = (cylinder_gals['MAG_y'] > 10) * (cylinder_gals['MAG_y'] < 27)
        select6 = (cylinder_gals['MAG_z'] > 10) * (cylinder_gals['MAG_z'] < 27)
        select = select2 * select1 * select3 * select4 * select6
        # print 'selected %d with good mags' % len(np.nonzero(select)[0])

        if len(np.nonzero(select)[0]) == 0:
            print '%d not enough gals' % ic
            new_z[ic] = cat_clusters['z'][ic]
            continue

        cylinder_gals = cylinder_gals[select]
        gals_u_mpc = gals_u_mpc[select]
        gals_v_mpc = gals_v_mpc[select]

        # colours with respect to MAG_i
        x1 = cylinder_gals['MAG_r'] - cylinder_gals['MAG_i']
        x2 = cylinder_gals['MAG_g'] - cylinder_gals['MAG_i']
        x3 = cylinder_gals['MAG_u'] - cylinder_gals['MAG_i']
        x4 = cylinder_gals['MAG_y'] - cylinder_gals['MAG_i']
        x5 = cylinder_gals['MAG_z'] - cylinder_gals['MAG_i']
        X = np.concatenate([x1.astype('f4')[:, None], x2.astype('f4')[:, None], x3.astype('f4')[:, None], x5.astype('f4')[:, None]], axis=1)

        # weight galaxies by the cube of their colour-space density
        from scipy.stats import gaussian_kde
        kde = gaussian_kde(X.T, bw_method=0.3)
        w = kde(X.T)**3
        w = w / np.max(w)

        # pl.figure()
        # pl.scatter(X[:,0],X[:,1],s=50,c=w); pl.colorbar()
        # pl.figure()
        # pl.scatter(X[:,1],X[:,2],s=50,c=w); pl.colorbar()
        # pl.figure()
        # pl.scatter(X[:,0],X[:,2],s=50,c=w); pl.colorbar()
        # pl.figure()
        # pl.scatter(X[:,0],X[:,3],s=50,c=w); pl.colorbar()
        # # pl.figure()
        # # pl.scatter(X[:,0],X[:,4],s=50,c=w); pl.colorbar()
        # pl.show()

        # pl.figure()
        # pl.scatter(cylinder_gals[select_brightest]['MAG_r']-cylinder_gals[select_brightest]['MAG_i'], cylinder_gals[select_brightest]['MAG_g']-cylinder_gals[select_brightest]['MAG_i'],c=cylinder_gals[select_brightest]['Z_B'])
        # pl.colorbar()
        # pl.figure()
        # pl.scatter(cylinder_gals[select_brightest]['MAG_i'],cylinder_gals[select_brightest]['MAG_r'],c=cylinder_gals[select_brightest]['Z_B'])
        # pl.colorbar()
        # pl.figure()
        # pl.scatter(cylinder_gals[select_brightest]['MAG_g'],cylinder_gals[select_brightest]['MAG_r'],c=cylinder_gals[select_brightest]['Z_B'])
        # pl.colorbar()
        # pl.figure()
        # pl.scatter(cylinder_gals[select_brightest]['MAG_u'],cylinder_gals[select_brightest]['MAG_i'],c=cylinder_gals[select_brightest]['Z_B'])
        # pl.colorbar()
        # pl.figure()
        # pl.scatter(cylinder_gals[select_brightest]['MAG_r'],cylinder_gals[select_brightest]['Z_B'],c=cylinder_gals[select_brightest]['Z_B'])
        # pl.colorbar()
        # pl.figure()
        # pl.scatter(gals_u_mpc,gals_v_mpc,c=cylinder_brightest['Z_B'],s=cylinder_brightest['MAG_r']*2)
        # pl.colorbar()
        # pl.show()

        # alternative: stack the full p(z) of the cylinder galaxies on a fine grid
        # pz_hires = np.zeros([len(cylinder_gals),n_bins_hires])
        # for ib in range(len(cylinder_gals)):
        #     fz = interp1d(bins_z,cylinder_gals['PZ_full'][ib],'cubic')
        #     pz_hires[ib,:] = fz(bins_z_hires)
        #     pz_this = pz_hires[ib,:]/np.sum(pz_hires[ib,:])*w[ib]
        #     pl.plot(bins_z_hires,pz_this)
        #     # pl.plot(bins_z,cylinder_gals['PZ_full'][ib],'-')
        #     print 'interp', ib, np.sum(pz_this)
        # pz_hires[pz_hires<0] = 1e-10
        # pz_prod = np.sum(np.log(pz_hires),axis=0)
        # pz_prod = pz_prod - pz_prod.max()
        # pz_cylinder = np.exp(pz_prod)
        # pz_cylinder = pz_cylinder/np.sum(pz_cylinder)
        # new_z[ic] = bins_z_hires[pz_cylinder.argmax()]

        # density-weighted mean of the photo-z point estimates
        new_z[ic] = np.sum(cylinder_gals['Z_B'] * w) / np.sum(w)
        std_z = np.std(np.sqrt(((cylinder_gals['Z_B'] * w - new_z[ic])**2) / np.sum(w)))

        print '%3d new_z=%.4f bad_z=%.4f naomi_z=%.4f n_eff=%2.4f n_cylinder_gals=%d std_z=%2.5f' % (ic, new_z[ic], cluster_z, cat_clusters['z'][ic], np.sum(w), len(cylinder_gals), std_z)

    tabletools.appendColumn(arr=new_z, rec=cat_clusters, dtype='f4', name='z_est')
    filename_clusters_est = filename_clusters.replace('.fits', '.update.fits')
    tabletools.saveTable(filename_clusters_est, cat_clusters)
def fit_single_filament():

    id_pair = 7

    # fitobj.parameters[0]['box']['min'] = 0.01
    # fitobj.parameters[0]['box']['max'] = 0.03
    # fitobj.parameters[1]['box']['min'] = 0.1
    # fitobj.parameters[1]['box']['max'] = 2.5

    filename_shears = 'shears_bcc_g.%03d.fits' % id_pair
    filename_pairs = 'pairs_bcc.fits'
    filename_halo1 = 'pairs_bcc.halos1.fits'
    filename_halo2 = 'pairs_bcc.halos2.fits'

    pairs_table = tabletools.loadTable(filename_pairs)
    shears_info = tabletools.loadTable(filename_shears)
    halo1_table = tabletools.loadTable(filename_halo1)
    halo2_table = tabletools.loadTable(filename_halo2)

    true_M200_h1 = np.log10(halo1_table['m200'][id_pair])
    true_M200_h2 = np.log10(halo2_table['m200'][id_pair])
    halo1_conc = halo1_table['r200'][id_pair] / halo1_table['rs'][id_pair] * 1000.
    halo2_conc = halo2_table['r200'][id_pair] / halo2_table['rs'][id_pair] * 1000.
    log.info('halo1 M200 %5.2e', halo1_table['m200'][id_pair])
    log.info('halo2 M200 %5.2e', halo2_table['m200'][id_pair])
    log.info('halo1 conc %5.2f', halo1_conc)
    log.info('halo2 conc %5.2f', halo2_conc)

    fitobj = filaments_model_1f.modelfit()
    fitobj.get_bcc_pz()
    fitobj.sigma_g = 0.02
    fitobj.shear_g1 = shears_info['g1'] + np.random.randn(len(shears_info['g1'])) * fitobj.sigma_g
    fitobj.shear_g2 = shears_info['g2'] + np.random.randn(len(shears_info['g2'])) * fitobj.sigma_g
    fitobj.shear_u_arcmin = shears_info['u_arcmin']
    fitobj.shear_v_arcmin = shears_info['v_arcmin']

    fitobj.halo1_u_arcmin = pairs_table['u1_arcmin'][id_pair]
    fitobj.halo1_v_arcmin = pairs_table['v1_arcmin'][id_pair]
    fitobj.halo1_z = pairs_table['z'][id_pair]
    fitobj.halo1_M200 = halo1_table['m200'][id_pair]
    fitobj.halo1_conc = halo1_conc

    fitobj.halo2_u_arcmin = pairs_table['u2_arcmin'][id_pair]
    fitobj.halo2_v_arcmin = pairs_table['v2_arcmin'][id_pair]
    fitobj.halo2_z = pairs_table['z'][id_pair]
    fitobj.halo2_M200 = halo2_table['m200'][id_pair]
    fitobj.halo2_conc = halo2_conc

    fitobj.parameters[0]['box']['min'] = 0.0001
    fitobj.parameters[0]['box']['max'] = 0.1
    fitobj.parameters[1]['box']['min'] = 0.0001
    fitobj.parameters[1]['box']['max'] = 10

    # fitobj.plot_shears_mag(fitobj.shear_g1,fitobj.shear_g2)
    # pl.show()
    # fitobj.save_all_models=False

    log.info('running grid search')
    log_post, params, grid_kappa0, grid_radius = fitobj.run_gridsearch(n_grid=100)
    vmax_post, best_model_g1, best_model_g2, limit_mask, vmax_kappa0, vmax_radius = fitobj.get_grid_max(log_post, params)

    scatter_size = 10
    pl.figure()
    pl.subplot(1, 2, 1)
    prob_post = get_post_from_log(log_post)
    pl.scatter(params[:, 0], params[:, 1], scatter_size, log_post, lw=0)
    pl.colorbar()
    pl.subplot(1, 2, 2)
    pl.scatter(params[:, 0], params[:, 1], scatter_size, prob_post, lw=0)
    pl.plot(vmax_kappa0, vmax_radius, 'ro')
    pl.colorbar()
    filename_fig = 'post.png'
    pl.savefig(filename_fig, dpi=1000)

    # fitobj.plot_residual_whisker(best_model_g1, best_model_g2)
    # pl.suptitle('model post=% 10.4e kappa0=%5.2e radius=%2.4f' % (vmax_post,vmax_kappa0,vmax_radius))
    # fitobj.plot_residual_g1g2(best_model_g1, best_model_g2)
    # pl.suptitle('model post=% 10.4e kappa0=%5.2e radius=%2.4f' % (vmax_post,vmax_kappa0,vmax_radius))

    log.info('running mcmc')
    import pdb; pdb.set_trace()
    fitobj.n_samples = 5000
    fitobj.run_mcmc()
    samples = fitobj.sampler.flatchain
    import pdb; pdb.set_trace()
    pl.figure()
    plotstools.plot_dist(samples)
    pl.show()
import pyfits
import numpy as np
import pylab as pl
import scipy.interpolate as interp
import cosmology
import tabletools
import yaml, argparse, sys, logging
from sklearn.neighbors import BallTree as BallTree
import galsim

filename_big = 'big_halos.fits'
halocat = tabletools.loadTable(filename_big, table_name='big')

# pick the third most massive halo and centre it at (ra, dec) = (0, 0)
sorting = np.argsort(halocat['M200'])
biggest_halo = halocat[sorting[-3]]
biggest_halo['RA'], biggest_halo['DEC'] = 0., 0.

# random source positions in a small box around the halo, all at twice the halo redshift
dtheta = 0.1
lenscat = {}
lenscat['ra'] = np.random.uniform(biggest_halo['RA'] - dtheta, biggest_halo['RA'] + dtheta, 1000)
lenscat['dec'] = np.random.uniform(biggest_halo['DEC'] - dtheta, biggest_halo['DEC'] + dtheta, 1000)
lenscat['z'] = lenscat['dec'] * 0 + biggest_halo['Z'] * 2.

conc = biggest_halo['RVIR'] / (biggest_halo['RS'] / 1e3)

halo1_ra_arcsec, halo1_de_arcsec = cosmology.deg_to_arcsec(biggest_halo['RA'], biggest_halo['DEC'])
shear_ra_arcsec, shear_de_arcsec = cosmology.deg_to_arcsec(lenscat['ra'], lenscat['dec'])

nfw1 = galsim.NFWHalo(conc=conc, redshift=biggest_halo['Z'], mass=biggest_halo['M200'], omega_m=cosmology.cospars.omega_m, halo_pos=galsim.PositionD(x=halo1_ra_arcsec, y=halo1_de_arcsec))
(g1, g2, _) = nfw1.getLensing(pos=(shear_ra_arcsec, shear_de_arcsec), z_s=lenscat['z'])
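
# The snippet above returns per-galaxy (g1, g2); a common sanity check is the tangential shear
# about the halo centre. Below is a minimal, self-contained sketch (not part of the original
# script), using the usual convention g_t = -(g1*cos(2*phi) + g2*sin(2*phi)) with phi the
# position angle of each source relative to the centre.
def tangential_shear(g1, g2, x, y, x0, y0):
    phi = np.arctan2(y - y0, x - x0)
    gt = -(g1 * np.cos(2 * phi) + g2 * np.sin(2 * phi))
    gx = g1 * np.sin(2 * phi) - g2 * np.cos(2 * phi)   # cross component, should average to ~0
    return gt, gx

# example with the arrays computed above:
# gt, gx = tangential_shear(g1, g2, shear_ra_arcsec, shear_de_arcsec, halo1_ra_arcsec, halo1_de_arcsec)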
def self_fit():

    fixed_kappa = 0.05
    fixed_radius = 2
    fixed_m200 = 14

    filename_pairs = 'pairs_cfhtlens_null1.fits'
    filename_halo1 = 'pairs_cfhtlens_null1.halos1.fits'
    filename_halo2 = 'pairs_cfhtlens_null1.halos2.fits'
    filename_shears = 'shears_cfhtlens_g_null1.fits'
    filename_selffit = 'shears_selftest_kappa%2.2f.fits' % fixed_kappa

    pairs_table = tabletools.loadTable(filename_pairs)
    halo1_table = tabletools.loadTable(filename_halo1)
    halo2_table = tabletools.loadTable(filename_halo2)

    sigma_g_add = 0.

    fitobj = filaments_model_2hf.modelfit()
    pz = fitobj.get_bcc_pz('cfhtlens_cat_sample.fits')
    prob_z = fitobj.prob_z

    id_pair = 0
    shears_info = tabletools.loadTable(filename_shears, hdu=id_pair + 1)

    fitobj = filaments_model_2hf.modelfit()
    fitobj.prob_z = prob_z
    fitobj.halo1_z = 0.2
    fitobj.halo2_z = 0.2
    fitobj.halo1_u_arcmin = 20
    fitobj.halo1_v_arcmin = 0
    fitobj.halo2_u_arcmin = -20
    fitobj.halo2_v_arcmin = 0
    fitobj.shear_v_arcmin = shears_info['v_arcmin']
    fitobj.shear_u_mpc = shears_info['u_mpc']
    fitobj.shear_v_mpc = shears_info['v_mpc']

    fitobj.halo1_u_arcmin = pairs_table['u1_arcmin'][id_pair]
    fitobj.halo1_v_arcmin = pairs_table['v1_arcmin'][id_pair]
    fitobj.halo1_u_mpc = pairs_table['u1_mpc'][id_pair]
    fitobj.halo1_v_mpc = pairs_table['v1_mpc'][id_pair]
    fitobj.halo1_z = pairs_table['z'][id_pair]

    fitobj.halo2_u_arcmin = pairs_table['u2_arcmin'][id_pair]
    fitobj.halo2_v_arcmin = pairs_table['v2_arcmin'][id_pair]
    fitobj.halo2_u_mpc = pairs_table['u2_mpc'][id_pair]
    fitobj.halo2_v_mpc = pairs_table['v2_mpc'][id_pair]
    fitobj.halo2_z = pairs_table['z'][id_pair]

    fitobj.pair_z = (fitobj.halo1_z + fitobj.halo2_z) / 2.

    fitobj.filam = filament.filament()
    fitobj.filam.pair_z = fitobj.pair_z
    fitobj.filam.grid_z_centers = fitobj.grid_z_centers
    fitobj.filam.prob_z = fitobj.prob_z
    fitobj.filam.set_mean_inv_sigma_crit(fitobj.filam.grid_z_centers, fitobj.filam.prob_z, fitobj.filam.pair_z)

    fitobj.nh1 = nfw.NfwHalo()
    fitobj.nh1.z_cluster = fitobj.halo1_z
    fitobj.nh1.theta_cx = fitobj.halo1_u_arcmin
    fitobj.nh1.theta_cy = fitobj.halo1_v_arcmin
    fitobj.nh1.set_mean_inv_sigma_crit(fitobj.grid_z_centers, fitobj.prob_z, fitobj.pair_z)

    fitobj.nh2 = nfw.NfwHalo()
    fitobj.nh2.z_cluster = fitobj.halo2_z
    fitobj.nh2.theta_cx = fitobj.halo2_u_arcmin
    fitobj.nh2.theta_cy = fitobj.halo2_v_arcmin
    fitobj.nh2.set_mean_inv_sigma_crit(fitobj.grid_z_centers, fitobj.prob_z, fitobj.pair_z)

    fitobj.shear_u_arcmin = shears_info['u_arcmin']
    fitobj.shear_v_arcmin = shears_info['v_arcmin']

    shear_model_g1, shear_model_g2, limit_mask, _, _ = fitobj.draw_model([fixed_kappa, fixed_radius, fixed_m200, fixed_m200])

    fitobj.shear_g1 = shear_model_g1 + np.random.randn(len(shears_info['g1'])) * sigma_g_add
    fitobj.shear_g2 = shear_model_g2 + np.random.randn(len(shears_info['g2'])) * sigma_g_add
    fitobj.sigma_g = np.std(shear_model_g2, ddof=1)
    # fitobj.inv_sq_sigma_g = 1./sigma_g_add**2
    # logger.info('using sigma_g=%2.5f', fitobj.sigma_g)

    fitobj.parameters[0]['box']['min'] = 0
    fitobj.parameters[0]['box']['max'] = 1
    fitobj.parameters[1]['box']['min'] = 1
    fitobj.parameters[1]['box']['max'] = 10
    fitobj.parameters[2]['box']['min'] = 14
    fitobj.parameters[2]['box']['max'] = 15
    fitobj.parameters[3]['box']['min'] = 14
    fitobj.parameters[3]['box']['max'] = 15

    # print 'halo1 m200', halo1_table['m200'][id_pair]
    # print 'halo2 m200', halo2_table['m200'][id_pair]

    shears_info['g1'] = fitobj.shear_g1
    shears_info['g2'] = fitobj.shear_g2

    fitobj.plot_shears(shears_info['g1'], shears_info['g2'], quiver_scale=0.1)
    pl.show()
    pl.scatter(shears_info['u_mpc'], shears_info['v_mpc'], c=np.abs(shears_info['g1'] + 1j * shears_info['g2']))
    pl.colorbar()
    pl.show()

    tabletools.saveTable(filename_selffit, shears_info)
def add_nfw_to_random_points():

    import filaments_model_2hf, filament, nfw

    halo1 = tt.load(config['filename_pairs'].replace('.fits', '.halos1.fits'))
    halo2 = tt.load(config['filename_pairs'].replace('.fits', '.halos2.fits'))
    pairs_table = tabletools.loadTable(config['filename_pairs'])

    filename_shears_nfw = config['filename_shears'].replace('.pp2', '.nfw.pp2')
    if os.path.isfile(filename_shears_nfw):
        os.remove(filename_shears_nfw)
        logger.warning('overwriting file %s', filename_shears_nfw)

    fitobj = filaments_model_2hf.modelfit()
    fitobj.get_bcc_pz(config['filename_pz'])
    prob_z = fitobj.prob_z
    grid_z_centers = fitobj.grid_z_centers
    grid_z_edges = fitobj.grid_z_edges

    for id_pair in range(len(pairs_table)):

        id_shear = pairs_table[id_pair]['ipair']
        logger.info('--------- pair %d shear %d --------', id_pair, id_shear)
        # now we use that
        id_pair_in_catalog = id_pair

        shears_info = tabletools.loadPickle(config['filename_shears'], pos=id_shear)

        fitobj = filaments_model_2hf.modelfit()
        fitobj.kappa_is_K = config['kappa_is_K']
        fitobj.prob_z = prob_z
        fitobj.grid_z_centers = grid_z_centers
        fitobj.grid_z_edges = grid_z_edges
        fitobj.shear_u_arcmin = shears_info['u_arcmin']
        fitobj.shear_v_arcmin = shears_info['v_arcmin']
        fitobj.shear_u_mpc = shears_info['u_mpc']
        fitobj.shear_v_mpc = shears_info['v_mpc']
        fitobj.shear_g1 = shears_info['g1']
        fitobj.shear_g2 = shears_info['g2']
        fitobj.shear_w = shears_info['weight']
        fitobj.Dlos = pairs_table[id_pair]['Dlos']
        fitobj.Dtot = np.sqrt(pairs_table[id_pair]['Dxy']**2 + pairs_table[id_pair]['Dlos']**2)
        fitobj.boost = fitobj.Dtot / pairs_table[id_pair]['Dxy']
        fitobj.use_boost = config['use_boost']
        fitobj.R_start = config['R_start']

        # choose a method to add and account for noise
        if config['sigma_method'] == 'add':
            sigma_g_add = config['sigma_add']
            fitobj.shear_g1 = shears_info['g1'] + np.random.randn(len(shears_info['g1'])) * sigma_g_add
            fitobj.shear_g2 = shears_info['g2'] + np.random.randn(len(shears_info['g2'])) * sigma_g_add
            fitobj.sigma_g = np.std(fitobj.shear_g2, ddof=1)
            fitobj.sigma_ell = fitobj.sigma_g
            fitobj.inv_sq_sigma_g = 1. / fitobj.sigma_g**2
            logger.info('added noise with level %f , using sigma_g=%2.5f', sigma_g_add, fitobj.sigma_g)
        elif config['sigma_method'] == 'orig':
            fitobj.shear_n_gals = shears_info['n_gals']
            fitobj.inv_sq_sigma_g = fitobj.shear_w
            logger.info('using different sigma_g per pixel mean(inv_sq_sigma_g)=%2.5f len(inv_sq_sigma_g)=%d', np.mean(fitobj.inv_sq_sigma_g), len(fitobj.inv_sq_sigma_g))
        elif type(config['sigma_method']) == float:
            fitobj.shear_n_gals = shears_info['n_gals']
            fitobj.inv_sq_sigma_g = shears_info['weight']**2 / (shears_info['weight_sq'] * config['sigma_method']**2)
            # remove infs
            fitobj.inv_sq_sigma_g[shears_info['weight_sq'] < 1e-8] = 0
            logger.info('using constant sigma_g per pixel: sigma_e=%2.5f, mean(sigma_gp)=%2.5f n_zeros=%d len(inv_sq_sigma_g)=%d n_nan=%d n_inf=%d', config['sigma_method'], np.mean(fitobj.inv_sq_sigma_g), len(np.nonzero(shears_info['weight_sq'] < 1e-8)[0]), len(fitobj.inv_sq_sigma_g), len(np.nonzero(np.isnan(fitobj.inv_sq_sigma_g))[0]), len(np.nonzero(np.isinf(fitobj.inv_sq_sigma_g))[0]))

        fitobj.halo1_u_arcmin = pairs_table['u1_arcmin'][id_pair_in_catalog]
        fitobj.halo1_v_arcmin = pairs_table['v1_arcmin'][id_pair_in_catalog]
        fitobj.halo1_u_mpc = pairs_table['u1_mpc'][id_pair_in_catalog]
        fitobj.halo1_v_mpc = pairs_table['v1_mpc'][id_pair_in_catalog]
        fitobj.halo1_z = pairs_table['z'][id_pair_in_catalog]

        fitobj.halo2_u_arcmin = pairs_table['u2_arcmin'][id_pair_in_catalog]
        fitobj.halo2_v_arcmin = pairs_table['v2_arcmin'][id_pair_in_catalog]
        fitobj.halo2_u_mpc = pairs_table['u2_mpc'][id_pair_in_catalog]
        fitobj.halo2_v_mpc = pairs_table['v2_mpc'][id_pair_in_catalog]
        fitobj.halo2_z = pairs_table['z'][id_pair_in_catalog]

        fitobj.n_model_evals = 0
        fitobj.pair_z = (fitobj.halo1_z + fitobj.halo2_z) / 2.

        fitobj.filam = filament.filament()
        fitobj.filam.pair_z = fitobj.pair_z
        fitobj.filam.grid_z_centers = fitobj.grid_z_centers
        fitobj.filam.prob_z = fitobj.prob_z
        fitobj.filam.set_mean_inv_sigma_crit(fitobj.filam.grid_z_centers, fitobj.filam.prob_z, fitobj.filam.pair_z)

        fitobj.nh1 = nfw.NfwHalo()
        fitobj.nh1.z_cluster = fitobj.halo1_z
        fitobj.nh1.theta_cx = fitobj.halo1_u_arcmin
        fitobj.nh1.theta_cy = fitobj.halo1_v_arcmin
        fitobj.nh1.set_mean_inv_sigma_crit(fitobj.grid_z_centers, fitobj.prob_z, fitobj.pair_z)

        fitobj.nh2 = nfw.NfwHalo()
        fitobj.nh2.z_cluster = fitobj.halo2_z
        fitobj.nh2.theta_cx = fitobj.halo2_u_arcmin
        fitobj.nh2.theta_cy = fitobj.halo2_v_arcmin
        fitobj.nh2.set_mean_inv_sigma_crit(fitobj.grid_z_centers, fitobj.prob_z, fitobj.pair_z)

        # add the NFW signal of the two fitted halos (no filament) to the random-points shears
        mock_m200_h1 = pairs_table['m200_h1_fit'][id_pair_in_catalog] / 1e14
        mock_m200_h2 = pairs_table['m200_h2_fit'][id_pair_in_catalog] / 1e14
        mock_kappa0 = 0
        mock_radius = 2

        shear_model_g1, shear_model_g2, limit_mask, _, _ = fitobj.draw_model([mock_kappa0, mock_radius, mock_m200_h1, mock_m200_h2])
        # pl.scatter(fitobj.shear_u_mpc,fitobj.shear_v_mpc,c=shear_model_g2); pl.colorbar(); pl.show()

        shears_info['g1'] = shears_info['g1'] + shear_model_g1
        shears_info['g2'] = shears_info['g2'] + shear_model_g2

        tabletools.savePickle(filename_shears_nfw, shears_info, append=True)
        logger.info('noise mean_g1=%2.2f mean_g2=%2.2f std_g1=%2.2f std_g2=%2.2f', np.mean(shears_info['g1']), np.mean(shears_info['g2']), np.std(shears_info['g1'], ddof=1), np.std(shears_info['g2'], ddof=1))
def get_weights_with_histograms():

    truth_cat = np.loadtxt(args.filename_input, dtype=dtype_table_cat)
    n_gals_total = sum(truth_cat['n_gals'])
    log.info('opened %s with %d rows and %d galaxies total' % (args.filename_input, len(truth_cat), n_gals_total))

    iall = 0
    for ipsf_fwhm, vpsf_fwhm in enumerate(config['grid']['psf_fwhm']):
        for isnr, vsnr in enumerate(config['grid']['snr']):

            select = np.array(truth_cat['ipsf_fwhm'] == ipsf_fwhm) * np.array(truth_cat['isnr'] == isnr)
            current_truth_cat = truth_cat[select]

            list_results = []
            for itruth, vtruth in enumerate(current_truth_cat):
                filename_result = results_filename_fmt % (vtruth['index'])
                current_res = tabletools.loadTable(filename_result, dtype=dtype_table_results_calib)
                list_results.append(current_res)
            results_ca_current = np.concatenate(list_results)

            current_snr = current_truth_cat['snr'][0]
            current_psf_fwhm = current_truth_cat['psf_fwhm'][0]

            # match SV galaxies within +/- 0.05 arcsec in PSF FWHM and +/- 2.5 in SNR
            select1 = (results_sv_example['fwhm_psf'] * DES_PIXEL_SCALE < current_psf_fwhm + 0.05) * (results_sv_example['fwhm_psf'] * DES_PIXEL_SCALE > current_psf_fwhm - 0.05)
            select2 = (results_sv_example['snr'] < current_snr + 2.5) * (results_sv_example['snr'] > current_snr - 2.5)
            results_sv_current = results_sv_example[select1 * select2]
            n_current_gals = len(results_sv_current)

            log.info('snr=%2.2f psf_fwhm=%2.2f n_gals in that bin=%d' % (current_snr, current_psf_fwhm, n_current_gals))

            n_bins = 20
            bins_size = np.linspace(1.2, 3, n_bins)
            # log.info('size of size bin %2.2f' % (bins_size[1]-bins_size[0]))
            title = 'snr=%2.2f psf_fwhm=%2.2f' % (current_snr, current_psf_fwhm)

            if n_current_gals > 2000:

                pl.subplot(1, 2, 1)
                h1, b1, _ = pl.hist(results_ca_current['size'], bins=bins_size, histtype='step', normed=True, color='r', label='cal')
                h2, b2, _ = pl.hist(results_sv_current['size'], bins=bins_size, histtype='step', normed=True, color='b', label='sv')
                pl.ylim([0, 1.1 * max([max(h1), max(h2)])])
                pl.legend()
                pl.xlabel('measured fwhm_ratio')

                pl.subplot(1, 2, 2)
                # abs_g_ca = np.abs(results_ca_current['g1']+1j*results_ca_current['g2'])
                # abs_g_sv = np.abs(results_sv_current['g1']+1j*results_sv_current['g2'])
                h1, b1, _ = pl.hist(results_ca_current['g1'], bins=np.linspace(-1, 1, n_bins), histtype='step', normed=True, color='r', label='cal')
                h2, b2, _ = pl.hist(results_sv_current['g1'], bins=np.linspace(-1, 1, n_bins), histtype='step', normed=True, color='b', label='sv')
                pl.legend()
                pl.xlabel('measured ellipticity')
                pl.ylim([0, 1.1 * max([max(h1), max(h2)])])

                pl.suptitle(title)
                filename_fig = 'figs/fig.hists.snr%02d.psf%02d.png' % (isnr, ipsf_fwhm)
                pl.savefig(filename_fig)
                pl.close()
                log.info('saved %s' % filename_fig)

            else:
                log.info('not enough gals to bother')
def self_fit():

    filename_pairs = 'pairs_bcc.fits'
    filename_halo1 = 'pairs_bcc.halos1.fits'
    filename_halo2 = 'pairs_bcc.halos2.fits'
    filename_shears = 'shears_bcc_g.fits'

    pairs_table = tabletools.loadTable(filename_pairs)
    halo1_table = tabletools.loadTable(filename_halo1)
    halo2_table = tabletools.loadTable(filename_halo2)

    sigma_g_add = 0.1
    id_pair = 48
    shears_info = tabletools.loadTable(filename_shears, hdu=id_pair + 1)

    fitobj = modelfit()
    fitobj.get_bcc_pz('aardvarkv1.0_des_lenscat_s2n10.351.fit')
    fitobj.halo1_z = 0.2
    fitobj.halo2_z = 0.2
    fitobj.halo1_u_arcmin = 20
    fitobj.halo1_v_arcmin = 0
    fitobj.halo2_u_arcmin = -20
    fitobj.halo2_v_arcmin = 0
    fitobj.shear_v_arcmin = shears_info['v_arcmin']
    fitobj.shear_u_mpc = shears_info['u_mpc']
    fitobj.shear_v_mpc = shears_info['v_mpc']

    fitobj.halo1_u_arcmin = pairs_table['u1_arcmin'][id_pair]
    fitobj.halo1_v_arcmin = pairs_table['v1_arcmin'][id_pair]
    fitobj.halo1_u_mpc = pairs_table['u1_mpc'][id_pair]
    fitobj.halo1_v_mpc = pairs_table['v1_mpc'][id_pair]
    fitobj.halo1_z = pairs_table['z'][id_pair]

    fitobj.halo2_u_arcmin = pairs_table['u2_arcmin'][id_pair]
    fitobj.halo2_v_arcmin = pairs_table['v2_arcmin'][id_pair]
    fitobj.halo2_u_mpc = pairs_table['u2_mpc'][id_pair]
    fitobj.halo2_v_mpc = pairs_table['v2_mpc'][id_pair]
    fitobj.halo2_z = pairs_table['z'][id_pair]

    fitobj.pair_z = (fitobj.halo1_z + fitobj.halo2_z) / 2.

    fitobj.filam = filament.filament()
    fitobj.filam.pair_z = fitobj.pair_z
    fitobj.filam.grid_z_centers = fitobj.grid_z_centers
    fitobj.filam.prob_z = fitobj.prob_z
    fitobj.filam.set_mean_inv_sigma_crit(fitobj.filam.grid_z_centers, fitobj.filam.prob_z, fitobj.filam.pair_z)

    fitobj.nh1 = nfw.NfwHalo()
    fitobj.nh1.z_cluster = fitobj.halo1_z
    fitobj.nh1.theta_cx = fitobj.halo1_u_arcmin
    fitobj.nh1.theta_cy = fitobj.halo1_v_arcmin
    fitobj.nh1.set_mean_inv_sigma_crit(fitobj.grid_z_centers, fitobj.prob_z, fitobj.pair_z)

    fitobj.nh2 = nfw.NfwHalo()
    fitobj.nh2.z_cluster = fitobj.halo2_z
    fitobj.nh2.theta_cx = fitobj.halo2_u_arcmin
    fitobj.nh2.theta_cy = fitobj.halo2_v_arcmin
    fitobj.nh2.set_mean_inv_sigma_crit(fitobj.grid_z_centers, fitobj.prob_z, fitobj.pair_z)

    fitobj.shear_u_arcmin = shears_info['u_arcmin']

    shear_model_g1, shear_model_g2, limit_mask, _, _ = fitobj.draw_model([0., 2., 14.5, 14.5])
    fitobj.plot_shears(shear_model_g1, shear_model_g2, quiver_scale=0.5)
    pl.show()

    fitobj.shear_g1 = shear_model_g1 + np.random.randn(len(shears_info['g1'])) * sigma_g_add
    fitobj.shear_g2 = shear_model_g2 + np.random.randn(len(shears_info['g2'])) * sigma_g_add
    fitobj.sigma_g = np.std(shear_model_g2, ddof=1)
    fitobj.inv_sq_sigma_g = 1. / sigma_g_add**2
    log.info('using sigma_g=%2.5f', fitobj.sigma_g)

    fitobj.parameters[0]['box']['min'] = 0
    fitobj.parameters[0]['box']['max'] = 1
    fitobj.parameters[1]['box']['min'] = 1
    fitobj.parameters[1]['box']['max'] = 10
    fitobj.parameters[2]['box']['min'] = 14
    fitobj.parameters[2]['box']['max'] = 15
    fitobj.parameters[3]['box']['min'] = 14
    fitobj.parameters[3]['box']['max'] = 15

    print 'halo1 m200', halo1_table['m200'][id_pair]
    print 'halo2 m200', halo2_table['m200'][id_pair]

    # import pdb; pdb.set_trace()
    # fitobj.plot_shears_mag(fitobj.shear_g1,fitobj.shear_g2)
    # pl.show()

    fitobj.save_all_models = False
    log.info('running grid search')
    fitobj.run_gridsearch()
def test():

    filename_pairs = 'pairs_bcc.fits'
    filename_halo1 = 'pairs_bcc.halos1.fits'
    filename_halo2 = 'pairs_bcc.halos2.fits'
    filename_shears = 'shears_bcc_g.fits'

    pairs_table = tabletools.loadTable(filename_pairs)
    halo1_table = tabletools.loadTable(filename_halo1)
    halo2_table = tabletools.loadTable(filename_halo2)

    sigma_g_add = 0.0001
    id_pair = 48
    shears_info = tabletools.loadTable(filename_shears, hdu=id_pair + 1)

    fitobj = modelfit()
    fitobj.get_bcc_pz()
    fitobj.shear_u_arcmin = shears_info['u_arcmin']
    fitobj.shear_v_arcmin = shears_info['v_arcmin']
    fitobj.shear_u_mpc = shears_info['u_mpc']
    fitobj.shear_v_mpc = shears_info['v_mpc']
    fitobj.shear_g1 = shears_info['g1'] + np.random.randn(len(shears_info['g1'])) * sigma_g_add
    fitobj.shear_g2 = shears_info['g2'] + np.random.randn(len(shears_info['g2'])) * sigma_g_add
    fitobj.sigma_g = np.std(shears_info['g2'], ddof=1)
    # fitobj.save_all_models = True
    log.info('using sigma_g=%2.5f', fitobj.sigma_g)

    fitobj.halo1_u_arcmin = pairs_table['u1_arcmin'][id_pair]
    fitobj.halo1_v_arcmin = pairs_table['v1_arcmin'][id_pair]
    fitobj.halo1_u_mpc = pairs_table['u1_mpc'][id_pair]
    fitobj.halo1_v_mpc = pairs_table['v1_mpc'][id_pair]
    fitobj.halo1_z = pairs_table['z'][id_pair]

    fitobj.halo2_u_arcmin = pairs_table['u2_arcmin'][id_pair]
    fitobj.halo2_v_arcmin = pairs_table['v2_arcmin'][id_pair]
    fitobj.halo2_u_mpc = pairs_table['u2_mpc'][id_pair]
    fitobj.halo2_v_mpc = pairs_table['v2_mpc'][id_pair]
    fitobj.halo2_z = pairs_table['z'][id_pair]

    fitobj.parameters[0]['box']['min'] = 0
    fitobj.parameters[0]['box']['max'] = 1
    fitobj.parameters[1]['box']['min'] = 1
    fitobj.parameters[1]['box']['max'] = 10
    fitobj.parameters[2]['box']['min'] = 14
    fitobj.parameters[2]['box']['max'] = 15
    fitobj.parameters[3]['box']['min'] = 14
    fitobj.parameters[3]['box']['max'] = 15

    print 'halo1 m200', halo1_table['m200'][id_pair]
    print 'halo2 m200', halo2_table['m200'][id_pair]

    # import pdb; pdb.set_trace()
    # fitobj.plot_shears_mag(fitobj.shear_g1,fitobj.shear_g2)
    # pl.show()

    fitobj.save_all_models = False
    log.info('running grid search')
    n_grid = 10
    log_post, params, grids = fitobj.run_gridsearch(n_grid=n_grid)
    vmax_post, best_model_g1, best_model_g2, limit_mask, vmax_params = fitobj.get_grid_max(log_post, params)
def get_selection_split(selection_string, cols_res, cols_tru):

    results_filename_fmt = config['methods'][args.method]['filename_results']
    truth_filename_fmt = config['filename_truth']

    list_shears = []
    list_all_res = []
    list_all_tru = []
    n_all_loaded = 0
    n_all_selected = 0
    ia = 0
    n_missing = 0

    for ig, vg in enumerate(config['shear']):

        list_results = []
        id_first = args.first
        id_last = id_first + args.num
        # if id_last > 200:
        #     id_last = 200
        #     warnings.warn('hard coded max number of files 200')

        for ip in range(id_first, id_last):

            filename_tru = truth_filename_fmt % (ip, ig)
            filename_res = results_filename_fmt % (ip, ig)

            try:
                cat_tru_all = tabletools.loadTable(filename_tru, log=1, remember=False)
                cat_tru = cat_tru_all
                logger.debug('loaded %05d galaxies from file: %s' % (len(cat_tru_all), filename_tru))
            except Exception, errmsg:
                logger.error('file %s : %s' % (filename_tru, errmsg))
                continue

            try:
                cat_res_all = tabletools.loadTable(filename_res, log=1, remember=False)
                cat_res = cat_res_all
                logger.debug('loaded %05d galaxies from file: %s' % (len(cat_res_all), filename_res))
            except Exception, errmsg:
                logger.debug('sth wrong with file %s errmsg %s' % (filename_res, errmsg))
                n_missing += 1
                continue

            if ('e1' in cols_res) & (args.method == 'im3shape'):
                # cat_res['e1'] = cat_res['e1']*config['methods'][args.method]['flip_g1']
                cat_tru['g1_true'] = -1 * cat_tru['g1_true']
                warnings.warn('flipping g1 in truth cat for method %s' % args.method)

            if len(cat_tru) != len(cat_res):
                cat_tru = cat_tru[cat_res['coadd_objects_id']]

            if args.method == 'ngmix':
                cat_res = rename_ngmix_cols(cat_res, cat_tru)
            elif args.method == 'im3shape':
                cat_res = rename_im3shape_cols(cat_res, cat_tru)
            cat_tru = rename_cols_truth(cat_tru)

            for col in cols_tru:
                if col not in cat_tru.dtype.names:
                    raise Exception('column %s not found in truth catalog %s' % (col, filename_tru))
            for col in cols_res:
                if col not in cat_res.dtype.names:
                    raise Exception('column %s not found in results catalog %s' % (col, filename_res))

            n_all_loaded += len(cat_res)

            try:
                exec selection_string
            except Exception, errmsg:
                print errmsg
                import pdb; pdb.set_trace()
def update_truth_table(update_snr=True, update_cosmos=True, update_hsm=True, update_fwhm=True):

    log.info('getting snr, flux and fwhm for the truth table')

    noise_std = config['des_pixel_noise_sigma']
    id_first = args.first
    id_last = id_first + args.num
    psf_images = None

    filename_cosmos_catalog = os.path.join(config['input']['real_catalog']['dir'], config['input']['real_catalog']['file_name'])
    filename_cosmos_catalog_fits = filename_cosmos_catalog.replace('.fits', '_fits.fits')
    cosmos_catalog = pyfits.getdata(filename_cosmos_catalog)
    cosmos_catalog_fits = pyfits.getdata(filename_cosmos_catalog_fits)
    n_cosmos_gals = len(cosmos_catalog)
    log.info('opened %s with %d images', filename_cosmos_catalog, n_cosmos_gals)

    filename_great3_info = os.path.join(config['input']['real_catalog']['dir'], 'real_galaxy_selection_info.fits')
    great3_info = np.array(pyfits.getdata(filename_great3_info))

    for ip in range(id_first, id_last):

        # all_snr=[]
        for il, vl in enumerate(config['shear']):

            list_normsq = []
            filename_cat = os.path.join(args.out_dir, 'nbc.truth.%03d.g%02d.fits' % (ip, il))
            filename_meds = os.path.join(args.out_dir, 'nbc.meds.%03d.g%02d.noisefree.fits' % (ip, il))
            log.info('part %d shear %d : getting snr, flux, hsm, and fwhm', ip, il)
            log.debug('using %s %s', filename_meds, filename_cat)

            cat = tabletools.loadTable(filename_cat)
            n_gals = len(cat)

            # assure backwards compatibility: add any columns missing from older truth tables
            float_cols = ['hsm_obs_g1', 'hsm_obs_g2', 'hsm_cor_g1', 'hsm_cor_g2', 'hsm_obs_sigma', 'hsm_cor_sigma',
                          'hsm_centroid_x', 'hsm_centroid_y', 'hsm_mom_amp', 'fwhm', 'sf_i', 'sf_hlr', 'sf_sersicn',
                          'sf_q', 'sf_boxiness', 'sf_phi', 'zphot', 'psf_fwhm_measured', 'cosmos_mag_auto',
                          'cosmos_flux_radius', 'mean_rgpp_rp']
            for col in float_cols:
                if col not in cat.dtype.names:
                    cat = tabletools.appendColumn(rec=cat, name=col, arr=np.zeros(len(cat)), dtype='f8')
            if 'to_use' not in cat.dtype.names:
                cat = tabletools.appendColumn(rec=cat, name='to_use', arr=np.zeros(len(cat)), dtype='i4')

            for ig in range(n_gals):

                if update_snr == True:
                    noisless_gals = meds.MEDS(filename_meds)
                    n_gals = len(noisless_gals._cat)
                    img_gal = noisless_gals.get_cutout(ig, 0)
                    normsq = np.sum(img_gal.flatten()**2)
                    snr = np.sqrt(normsq) / noise_std
                    flux = np.sum(img_gal.flatten())
                    cat[ig]['snr'] = snr
                    cat[ig]['flux'] = flux

                if update_cosmos == True:
                    current_id_cosmos = cat[ig]['id_cosmos']
                    cat[ig]['sf_i'] = cosmos_catalog_fits[current_id_cosmos]['sersicfit'][0]
                    cat[ig]['sf_hlr'] = cosmos_catalog_fits[current_id_cosmos]['sersicfit'][1] * ACS_PIXEL_SCALE
                    cat[ig]['sf_sersicn'] = cosmos_catalog_fits[current_id_cosmos]['sersicfit'][2]
                    cat[ig]['sf_q'] = cosmos_catalog_fits[current_id_cosmos]['sersicfit'][3]
                    cat[ig]['sf_boxiness'] = cosmos_catalog_fits[current_id_cosmos]['sersicfit'][4]
                    cat[ig]['sf_phi'] = cosmos_catalog_fits[current_id_cosmos]['sersicfit'][7]
                    cat[ig]['zphot'] = cosmos_catalog_fits[current_id_cosmos]['zphot']
                    cat[ig]['cosmos_mag_auto'] = cosmos_catalog_fits[current_id_cosmos]['mag_auto']
                    cat[ig]['cosmos_flux_radius'] = cosmos_catalog_fits[current_id_cosmos]['flux_radius']
                    cat[ig]['to_use'] = great3_info[current_id_cosmos]['to_use']

                if update_hsm == True:
                    img_gal = noisless_gals.get_cutout(ig, 0)
                    img_psf = pyfits.getdata(os.path.join(args.out_dir, 'nbc.psf.lores.fits'), cat[ig]['id_psf'])
                    gs_img_gal = image_array_to_galsim(img_gal)
                    gs_img_psf = image_array_to_galsim(img_psf)
                    try:
                        shearobj1 = galsim.hsm.EstimateShear(gs_img_gal, gs_img_psf)
                        cat[ig]['hsm_cor_g1'] = shearobj1.corrected_e1 / 2.
                        cat[ig]['hsm_cor_g2'] = shearobj1.corrected_e2 / 2.
                        cat[ig]['hsm_mom_amp'] = shearobj1.moments_amp
                    except:
                        log.error('HSM failed for object ig=%d ip=%d id_cosmos=%d psf_fwhm=%2.2f', ig, ip, cat['id_cosmos'][ig], cat['psf_fwhm'][ig])
                        cat[ig]['hsm_cor_g1'] = -99
                        cat[ig]['hsm_cor_g2'] = -99
                        cat[ig]['hsm_mom_amp'] = -99
                    try:
                        shearobj2 = galsim.hsm.FindAdaptiveMom(gs_img_gal)
                        cat[ig]['hsm_obs_g1'] = shearobj2.observed_shape.g1
                        cat[ig]['hsm_obs_g2'] = shearobj2.observed_shape.g2
                        cat[ig]['hsm_obs_sigma'] = shearobj2.moments_sigma
                        cat[ig]['hsm_cor_sigma'] = shearobj2.moments_sigma
                        cat[ig]['hsm_centroid_x'] = shearobj2.moments_centroid.x
                        cat[ig]['hsm_centroid_y'] = shearobj2.moments_centroid.y
                    except:
                        cat[ig]['hsm_obs_g1'] = -99
                        cat[ig]['hsm_obs_g2'] = -99
                        cat[ig]['hsm_obs_sigma'] = -99
                        cat[ig]['hsm_cor_sigma'] = -99
                        cat[ig]['hsm_centroid_x'] = -99
                        cat[ig]['hsm_centroid_y'] = -99

                if update_fwhm == True:
                    try:
                        noisless_gals = meds.MEDS(filename_meds)
                        img_gal = noisless_gals.get_cutout(ig, 0)
                        import mathstools
                        cat[ig]['fwhm'] = mathstools.get_2D_fwhm(img_gal)
                    except:
                        log.error('getting FWHM failed for galaxy %d in %s', ig, filename_meds)
                        cat[ig]['fwhm'] = 666
                    try:
                        # if psf_images == None: psf_images = pyfits.open(os.path.join(args.out_dir,'nbc.psf.hires.fits'))
                        # img_hires_psf = psf_images[cat[ig]['id_psf']].data
                        # cat[ig]['psf_fwhm_measured'] = mathstools.get_2D_fwhm(img_hires_psf)
                        cat[ig]['psf_fwhm_measured'] = cat[ig]['psf_fwhm']
                    except:
                        log.error('getting PSF FWHM failed for galaxy %d in %s', ig, filename_meds)
                        cat[ig]['psf_fwhm_measured'] = 666

                    if (cat[ig]['fwhm'] != 666) & (cat[ig]['psf_fwhm_measured'] != 666):
                        cat[ig]['mean_rgpp_rp'] = cat[ig]['fwhm'] / cat[ig]['psf_fwhm_measured']
                    else:
                        cat[ig]['mean_rgpp_rp'] = 666

                if ig % 100 == 0:
                    log.debug('getting snr, flux, hsm and fwhm of galaxy %d', ig)

            tabletools.saveTable(filename_cat, cat)
def get_shears_for_single_pair(halo1, halo2, idp=0):

    logger.debug('ra=(%2.2f,%2.2f) dec=(%2.2f,%2.2f)' % (halo1['ra'], halo2['ra'], halo1['dec'], halo2['dec']))

    shear_base = tabletools.loadTable(filename_shearbase, dtype=dtype_shearbase)
    redshift_offset = 0.2

    # pair_dra = np.abs(halo1['ra'] - halo2['ra'])
    # pair_ddec = np.abs(halo1['dec'] - halo2['dec'])

    halo1_ra_deg, halo1_de_deg = halo1['ra'], halo1['dec']
    halo2_ra_deg, halo2_de_deg = halo2['ra'], halo2['dec']
    pair_ra_deg, pair_de_deg = cosmology.get_midpoint_deg(halo1_ra_deg, halo1_de_deg, halo2_ra_deg, halo2_de_deg)
    pair_z = np.mean([halo1['z'], halo2['z']])

    # find the shear files closest to the pair midpoint on the unit sphere
    radius = 1.
    # pair_xyz = cosmology.get_euclidian_coords(pair_ra_deg, pair_de_deg, radius)
    x, y, z = cosmology.spherical_to_cartesian_deg(pair_ra_deg, pair_de_deg, radius)
    pair_xyz = np.array([x, y, z])

    box_coords_x = shear_base['x']
    box_coords_y = shear_base['y']
    box_coords_z = shear_base['z']
    box_coords_xyz = np.concatenate([box_coords_x[:, None], box_coords_y[:, None], box_coords_z[:, None]], axis=1)

    logger.info('getting Ball Tree for 3D')
    BT = BallTree(box_coords_xyz, leaf_size=5)
    n_connections = 5
    bt_dx, bt_id = BT.query(pair_xyz, k=n_connections)

    list_set = []
    for iset, vset in enumerate(shear_base[bt_id]['file'][0]):
        # vset = vset.replace('kacprzak','tomek')
        lenscat = tabletools.loadTable(vset)
        # prelim cut on z
        select = lenscat['z'] > (pair_z + redshift_offset)
        lenscat = lenscat[select]
        list_set.append(lenscat)
        logger.debug('opened %s with %d gals mean_ra=%2.2f, mean_de=%2.2f' % (vset, len(lenscat), np.mean(lenscat['ra']), np.mean(lenscat['dec'])))

    shear1_col = config['shear1_col']
    shear2_col = config['shear2_col']

    lenscat_all = np.concatenate(list_set)
    shear_g1, shear_g2 = -lenscat_all[shear1_col], lenscat_all[shear2_col]
    shear_ra_deg, shear_de_deg, shear_z = lenscat_all['ra'], lenscat_all['dec'], lenscat_all['z']

    # import pylab as pl
    # pl.figure()
    # pl.scatter(pair_ra_deg, pair_de_deg, 100, 'r', marker='x')
    # pl.scatter(halo1_ra_deg, halo1_de_deg, 100, 'c', marker='o')
    # pl.scatter(halo2_ra_deg, halo2_de_deg, 100, 'm', marker='o')
    # select = np.random.permutation(len(shear_ra_deg))[:10000]
    # pl.scatter(shear_ra_deg[select], shear_de_deg[select], 1, 'm', marker='.')
    # pl.show()

    pairs_shear, halos_coords, pairs_shear_full = filaments_tools.create_filament_stamp(halo1_ra_deg, halo1_de_deg, halo2_ra_deg, halo2_de_deg, shear_ra_deg, shear_de_deg, shear_g1, shear_g2, shear_z, pair_z, lenscat_all)

    if len(pairs_shear) < 100:
        logger.error('found only %d shears' % len(pairs_shear))
        return None, None, None

    return pairs_shear, halos_coords, pairs_shear_full
def get_closeby_shear(shear_ra_arcmin, shear_de_arcmin, pair):

    filename_pairs = config['filename_pairs']                                   # pairs_bcc.fits
    filename_halo1 = config['filename_pairs'].replace('.fits', '.halos1.fits')  # pairs_bcc.halos1.fits
    filename_halo2 = config['filename_pairs'].replace('.fits', '.halos2.fits')  # pairs_bcc.halos2.fits
    filename_shears = config['filename_shears']
    filename_halos = config['filename_halos']

    pairs_table = tabletools.loadTable(filename_pairs)
    halo1_table = tabletools.loadTable(filename_halo1)
    halo2_table = tabletools.loadTable(filename_halo2)
    halos_table = tabletools.loadTable(filename_halos)

    limit_deg = 2
    shear_g1_removed, shear_g2_removed = np.zeros_like(shear_ra_arcmin), np.zeros_like(shear_ra_arcmin)

    n_close = 0
    for ih, vh in enumerate(halos_table):

        # skip the two halos forming the pair itself and anything below 1e13
        if (ih == pair['ih1']) | (ih == pair['ih2']):
            continue
        if vh['m200_fit'] < 1e13:
            continue

        x, y = cosmology.get_gnomonic_projection(vh['ra'], vh['dec'], pair['ra_mid'], pair['dec_mid'], unit='deg')
        dist_sky_deg1 = cosmology.get_angular_separation(x, y, pair['u1_arcmin'] / 60., 0, unit='deg')
        dist_sky_deg2 = cosmology.get_angular_separation(x, y, pair['u2_arcmin'] / 60., 0, unit='deg')

        if (dist_sky_deg1 > limit_deg) | (dist_sky_deg2 > limit_deg):
            continue
        else:
            logger.info('closeby halo % 5d\tdist_sky1=%7.2f\tdist_sky2=%7.2f\tm200=%2.2e sig=%2.2f', ih, dist_sky_deg1, dist_sky_deg2, vh['m200_fit'], vh['m200_sig'])

        fitobj = filaments_model_1h.modelfit()
        fitobj.shear_u_arcmin = shear_ra_arcmin
        fitobj.shear_v_arcmin = shear_de_arcmin
        fitobj.halo_u_arcmin = x * 60.
        fitobj.halo_v_arcmin = y * 60.
        fitobj.halo_z = vh['z']
        fitobj.get_bcc_pz(config['filename_pz'])
        fitobj.nh = nfw.NfwHalo()
        fitobj.nh.z_cluster = fitobj.halo_z
        fitobj.nh.theta_cx = fitobj.halo_u_arcmin
        fitobj.nh.theta_cy = fitobj.halo_v_arcmin
        fitobj.nh.set_mean_inv_sigma_crit(fitobj.grid_z_centers, fitobj.prob_z, fitobj.halo_z)

        model_g1, model_g2, limit_mask, Delta_Sigma, kappa = fitobj.draw_model([vh['m200_fit']])
        shear_g1_removed, shear_g2_removed = shear_g1_removed + model_g1, shear_g2_removed + model_g2
        n_close += 1

    # pl.figure()
    # pl.scatter(shear_ra_arcmin,shear_de_arcmin,c=shear_g1_removed,lw=0)
    # # pl.hist(shear_g1-shear_g1_removed,100); pl.show()
    # pl.colorbar()
    # pl.scatter(pair['u1_arcmin'],pair['v1_arcmin'],s=100)
    # pl.scatter(pair['u2_arcmin'],pair['v2_arcmin'],s=100)
    # # pl.xlim([pair['u2_arcmin'],pair['u1_arcmin']])
    # pl.show()

    logger.info('n_close=%d', n_close)
    return shear_g1_removed, shear_g2_removed
def select_halos():

    halocat = tabletools.loadTable('DR7-Full.fits')

    # for now just save all LRGs
    tabletools.saveTable(filename_halos, halocat)
import tabletools
import pylab as pl
import scipy.stats
import numpy as np
import plotstools

cosmos = tabletools.loadTable('cosmos_acs_shera_may2011.fits.gz')
cos = cosmos[(cosmos['FWHM_IMAGE'] < 100) * (cosmos['ZPHOT'] > 0) * (cosmos['ZPHOT'] < 3)]

npix = 50
pixel_size = 0.05

plotstools.plot_dist(X=[cos['FWHM_IMAGE']*pixel_size, cos['ZPHOT'], cos['MODD']], bins=[30, 30, range(0, 31)], labels=['FWHM [arcsec]', 'ZPHOT', 'MODD'])

filename_fig = 'histograms_size_zphot_modd.eps'

left   = 0.1  # the left side of the subplots of the figure
right  = 0.9  # the right side of the subplots of the figure
bottom = 0.1  # the bottom of the subplots of the figure
top    = 0.9  # the top of the subplots of the figure
wspace = 0.0  # the amount of width reserved for blank space between subplots
hspace = 0.0  # the amount of height reserved for white space between subplots
pl.subplots_adjust(left=left, bottom=bottom, right=right, top=top, wspace=wspace, hspace=hspace)

pl.savefig(filename_fig)
print 'saved', filename_fig
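# --- illustrative sketch (not part of the pipeline) ---
# The quality cut and pixel-to-arcsec conversion above in a minimal form: COSMOS FWHM_IMAGE
# is stored in pixels, so it is multiplied by the 0.05 arcsec/pixel ACS scale before plotting.
# Column names follow the catalogue loaded above; the helper name is hypothetical.

import numpy as np

ACS_PIXEL_SCALE = 0.05   # arcsec per pixel, as used above

def _select_and_convert(fwhm_image_pix, zphot):
    # keep well-measured galaxies with a sane photo-z and return FWHM in arcsec
    fwhm_image_pix = np.asarray(fwhm_image_pix, dtype=float)
    zphot = np.asarray(zphot, dtype=float)
    keep = (fwhm_image_pix < 100) & (zphot > 0) & (zphot < 3)
    return fwhm_image_pix[keep] * ACS_PIXEL_SCALE, zphot[keep]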
def runIm3shape(): # open the RGC if 'real_catalog' in config['input']: filepath_rgc = os.path.join(config['input']['real_catalog']['dir'],config['input']['real_catalog']['file_name']) rgc = pyfits.open(filepath_rgc)[1] # open the ring test catalog filename_cat = os.path.join(config['input']['catalog']['dir'],config['input']['catalog']['file_name']) # truth_cat = numpy.loadtxt(filename_cat,dtype=dtype_table_truth) truth_cat = tabletools.loadTable(filepath=filename_cat,table_name='truth_cat',dtype=dtype_table_truth,logger=logger) logger.info('loaded %s' % filename_cat) n_objects = truth_cat.shape[0] # get im3shape dirpath_im3shape = os.path.join(os.environ['IM3SHAPE'],'python') sys.path.append(dirpath_im3shape) import im3shape # get images img_gals = getGalaxyImages() # get options n_pix = config['image']['size'] pixel_scale = config['image']['pixel_scale'] i3_options = im3shape.I3_options() i3_options.read_ini_file(config['args'].filepath_ini) logger.info('loaded im3shape ini file %s' % config['args'].filepath_ini) i3_options.stamp_size = n_pix # get the file filename_results = 'results.%s.%012d.cat' % (config['args'].name_config,config['args'].obj_num) file_results = open(filename_results,'w') # create PSF from Moffat parameters # get i3 images - get first i3_galaxy to initialise the PSF - kind of strange, but hey.. i3_galaxy = im3shape.I3_image(n_pix, n_pix) psf_beta = float(config['psf']['beta']) psf_fwhm = float(config['psf']['fwhm'])/float(pixel_scale) psf_e1 = float(config['psf']['ellip']['g1']) psf_e2 = float(config['psf']['ellip']['g2']) i3_psf = i3_galaxy.make_great10_psf(psf_beta, psf_fwhm, psf_e1, psf_e2, i3_options) obj_num = config['args'].obj_num # loop over all created images for ig,img_gal in enumerate(img_gals): # get i3 images i3_galaxy = im3shape.I3_image(n_pix, n_pix) scale = img_gal.array.sum() i3_galaxy.from_array(img_gal.array) i3_galaxy.scale(1.0/scale) # this is a workaround - the array for model bias real images had id_unique, # but the results files have 'identifier' - so we use one or the other id they exist # get the unique_id if 'id_unique' in truth_cat.dtype.names: id_global = ig id_object = (obj_num+ig) % n_objects id_cosmos = truth_cat['id_cosmos'][ id_object ] id_unique = truth_cat['id_unique'][ id_object ] elif 'identifier' in truth_cat.dtype.names: id_global = ig id_object = (obj_num+ig) % n_objects id_unique = truth_cat['identifier'][ id_object ] id_cosmos = int(truth_cat['identifier'][ id_object ]//10000) i3_result, i3_best_fit = im3shape.i3_analyze(i3_galaxy, i3_psf, i3_options, ID=id_unique) saveResult(file_results,i3_result,id_global,id_object,id_unique,id_cosmos) printResult(i3_result,id_global) if 'e1' in truth_cat.dtype.names: printTruth(i3_result,truth_cat[ig]) # save residual plots if config['args'].verbosity > 2: i1 = i3_best_fit.array/sum(i3_best_fit.array.flatten()) i2 = img_gal.array/sum(img_gal.array.flatten()) import pylab pylab.subplot(1,5,1) pylab.imshow(i1,interpolation='nearest') pylab.title('best fit') pylab.subplot(1,5,2) pylab.imshow(i2,interpolation='nearest') pylab.title('galaxy') pylab.subplot(1,5,3) pylab.imshow(i1-i2,interpolation='nearest') pylab.title('residuals') pylab.subplot(1,5,4) pylab.imshow(i3_psf.array,interpolation='nearest') pylab.title('PSF') pylab.subplot(1,5,5) pylab.imshow(img_gal.array,interpolation='nearest') pylab.colorbar() pylab.title('best fit') filename_fig = 'debug/fig.residual.%09d.png' % id_unique pylab.savefig(filename_fig) logger.info('saved %s' % filename_fig) pylab.close() file_results.close() 
logger.info('saved %s' % filename_results)
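# --- illustrative sketch (not part of the pipeline) ---
# Two conventions used in runIm3shape above, written out on their own: the position in the
# truth table wraps modulo the catalogue length (ring-test style), the COSMOS id is packed
# into the upper digits of the unique identifier (identifier // 10000), and each postage
# stamp is normalised to unit flux before being handed to the fitter. Helper names are
# hypothetical.

import numpy as np

def _decode_ids(identifier, obj_num, ig, n_objects):
    id_object = (obj_num + ig) % n_objects     # wrap around the truth catalogue
    id_cosmos = int(identifier // 10000)       # COSMOS id packed into the identifier
    return id_object, id_cosmos

def _normalise_stamp(stamp):
    # unit-flux normalisation, as done via i3_galaxy.scale(1.0/scale) above
    stamp = np.asarray(stamp, dtype=float)
    return stamp / stamp.sum()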
def test_overlap(): filename_pairs = config['filename_pairs'] # pairs_bcc.fits' filename_halo1 = config['filename_pairs'].replace('.fits' , '.halos1.fits') # pairs_bcc.halos1.fits' filename_halo2 = config['filename_pairs'].replace('.fits' , '.halos2.fits') # pairs_bcc.halos2.fits' filename_shears = config['filename_shears'] # args.filename_shears filename_shears_overlap = filename_shears.replace('.pp2','.overlap.pp2') filename_pairs_overlap = filename_pairs.replace('.fits','.overlap.fits') filename_halo1_overlap = filename_halo1.replace('.halos1.fits','.overlap.halos1.fits') filename_halo2_overlap = filename_halo2.replace('.halos2.fits','.overlap.halos2.fits') if os.path.isfile(filename_shears_overlap): os.remove(filename_shears_overlap) logger.warning('overwriting file %s' , filename_shears_overlap) pairs_table = tabletools.loadTable(filename_pairs) halo1_table = tabletools.loadTable(filename_halo1) halo2_table = tabletools.loadTable(filename_halo2) sigma_g_add = 0.0001 id_pair = 3 shears_info = tabletools.loadPickle(filename_shears,id_pair) overlapping_halo_m200 = 2 # x 1e14 overlapping_halo_z = 0.3 no_m200 = 1e-8 n_pairs = len(grid_x) * len(grid_y) pairs_table_overlap = pairs_table[np.ones([n_pairs],dtype=np.int32)*id_pair] halo1_table_overlap = halo1_table[np.ones([n_pairs],dtype=np.int32)*id_pair] halo2_table_overlap = halo2_table[np.ones([n_pairs],dtype=np.int32)*id_pair] pairs_table_overlap['ipair'] = range(len(pairs_table_overlap)) tabletools.saveTable(filename_pairs_overlap,pairs_table_overlap) tabletools.saveTable(filename_halo1_overlap,halo1_table_overlap) tabletools.saveTable(filename_halo2_overlap,halo2_table_overlap) for x_pos in grid_x: for y_pos in grid_y: logger.info('dx = %2.2f dy = %2.2f' % (x_pos,y_pos)) fitobj = filaments_model_2hf.modelfit() fitobj.get_bcc_pz(config['filename_pz']) fitobj.kappa_is_K = False fitobj.R_start = config['R_start'] fitobj.Dlos = pairs_table[id_pair]['Dlos'] fitobj.Dtot = np.sqrt(pairs_table[id_pair]['Dxy']**2+pairs_table[id_pair]['Dlos']**2) fitobj.boost = fitobj.Dtot/pairs_table[id_pair]['Dxy'] fitobj.use_boost = config['use_boost'] fitobj.shear_v_arcmin = shears_info['v_arcmin'] fitobj.shear_u_arcmin = shears_info['u_arcmin'] fitobj.shear_u_mpc = shears_info['u_mpc'] fitobj.shear_v_mpc = shears_info['v_mpc'] fitobj.halo1_u_arcmin = pairs_table['u1_arcmin'][id_pair] fitobj.halo1_v_arcmin = pairs_table['v1_arcmin'][id_pair] fitobj.halo1_u_mpc = pairs_table['u1_mpc'][id_pair] fitobj.halo1_v_mpc = pairs_table['v1_mpc'][id_pair] fitobj.halo1_z = pairs_table['z'][id_pair] fitobj.halo2_u_arcmin = pairs_table['u2_arcmin'][id_pair] fitobj.halo2_v_arcmin = pairs_table['v2_arcmin'][id_pair] fitobj.halo2_u_mpc = pairs_table['u2_mpc'][id_pair] fitobj.halo2_v_mpc = pairs_table['v2_mpc'][id_pair] fitobj.halo2_z = pairs_table['z'][id_pair] fitobj.pair_z = (fitobj.halo1_z + fitobj.halo2_z) / 2. 
fitobj.filam = filament.filament() fitobj.filam.pair_z =fitobj.pair_z fitobj.filam.grid_z_centers = fitobj.grid_z_centers fitobj.filam.prob_z = fitobj.prob_z fitobj.filam.set_mean_inv_sigma_crit(fitobj.filam.grid_z_centers,fitobj.filam.prob_z,fitobj.filam.pair_z) fitobj.nh1 = nfw.NfwHalo() fitobj.nh1.z_cluster= fitobj.halo1_z fitobj.nh1.theta_cx = fitobj.halo1_u_arcmin fitobj.nh1.theta_cy = fitobj.halo1_v_arcmin fitobj.nh1.set_mean_inv_sigma_crit(fitobj.grid_z_centers,fitobj.prob_z,fitobj.pair_z) fitobj.nh2 = nfw.NfwHalo() fitobj.nh2.z_cluster= fitobj.halo2_z fitobj.nh2.theta_cx = fitobj.halo2_u_arcmin fitobj.nh2.theta_cy = fitobj.halo2_v_arcmin fitobj.nh2.set_mean_inv_sigma_crit(fitobj.grid_z_centers,fitobj.prob_z,fitobj.pair_z) shear_model_g1, shear_model_g2, limit_mask , _ , _ = fitobj.draw_model([filament_ds, filament_radius, no_m200, no_m200]) fitobj.nh2.theta_cy = fitobj.halo2_v_arcmin fitobj.nh2.theta_cx = fitobj.halo2_u_arcmin # second fitobj ---------- overlapping halo fitobj2 = filaments_model_2hf.modelfit() fitobj2.get_bcc_pz(config['filename_pz']) fitobj2.use_boost = True fitobj2.kappa_is_K = False fitobj2.R_start = config['R_start'] fitobj2.Dlos = pairs_table[id_pair]['Dlos'] fitobj2.Dtot = np.sqrt(pairs_table[id_pair]['Dxy']**2+pairs_table[id_pair]['Dlos']**2) fitobj2.boost = fitobj.Dtot/pairs_table[id_pair]['Dxy'] fitobj2.use_boost = config['use_boost'] fitobj2.shear_v_arcmin = shears_info['v_arcmin'] fitobj2.shear_u_arcmin = shears_info['u_arcmin'] fitobj2.shear_u_mpc = shears_info['u_mpc'] fitobj2.shear_v_mpc = shears_info['v_mpc'] fitobj2.halo1_z = overlapping_halo_z fitobj2.halo1_u_arcmin = x_pos/cosmology.get_ang_diam_dist(overlapping_halo_z) * 180. / np.pi * 60. fitobj2.halo1_v_arcmin = y_pos/cosmology.get_ang_diam_dist(overlapping_halo_z) * 180. / np.pi * 60. fitobj2.halo1_u_mpc = x_pos fitobj2.halo1_v_mpc = y_pos fitobj2.halo2_u_arcmin = pairs_table['u2_arcmin'][id_pair] fitobj2.halo2_v_arcmin = pairs_table['v2_arcmin'][id_pair] fitobj2.halo2_u_mpc = pairs_table['u2_mpc'][id_pair] fitobj2.halo2_v_mpc = pairs_table['v2_mpc'][id_pair] fitobj2.halo2_z = pairs_table['z'][id_pair] fitobj2.pair_z = (fitobj2.halo1_z + fitobj2.halo2_z) / 2. 
fitobj2.filam = filament.filament() fitobj2.filam.pair_z =fitobj2.pair_z fitobj2.filam.grid_z_centers = fitobj2.grid_z_centers fitobj2.filam.prob_z = fitobj2.prob_z fitobj2.filam.set_mean_inv_sigma_crit(fitobj2.filam.grid_z_centers,fitobj2.filam.prob_z,fitobj2.filam.pair_z) fitobj2.nh1 = nfw.NfwHalo() fitobj2.nh1.z_cluster= fitobj2.halo1_z fitobj2.nh1.theta_cx = fitobj2.halo1_u_arcmin fitobj2.nh1.theta_cy = fitobj2.halo1_v_arcmin fitobj2.nh1.set_mean_inv_sigma_crit(fitobj2.grid_z_centers,fitobj2.prob_z,fitobj2.pair_z) fitobj2.nh2 = nfw.NfwHalo() fitobj2.nh2.z_cluster= fitobj2.halo2_z fitobj2.nh2.theta_cx = fitobj2.halo2_u_arcmin fitobj2.nh2.theta_cy = fitobj2.halo2_v_arcmin fitobj2.nh2.set_mean_inv_sigma_crit(fitobj2.grid_z_centers,fitobj2.prob_z,fitobj2.pair_z) shear_model_g1_neighbour, shear_model_g2_neighbour, limit_mask , _ , _ = fitobj2.draw_model([0.0, 0.5, overlapping_halo_m200, no_m200 ]) do_plot=False if do_plot: cmax = np.max([ np.abs(shear_model_g1_neighbour.min()),np.abs(shear_model_g1_neighbour.min()) , np.abs(shear_model_g1_neighbour.max()),np.abs(shear_model_g1_neighbour.max()),np.abs(shear_model_g1.max()),np.abs(shear_model_g1.max()),np.abs(shear_model_g1.min()),np.abs(shear_model_g1.min())]) pl.figure(figsize=(20,10)) pl.scatter( fitobj.shear_u_mpc , fitobj.shear_v_mpc , s=100, c=shear_model_g1+shear_model_g1_neighbour, lw=0) pl.clim(-cmax,cmax) pl.colorbar() pl.scatter( fitobj2.halo1_u_mpc , fitobj2.halo1_v_mpc , s=100, lw=0) pl.scatter( fitobj.halo1_u_mpc , fitobj.halo1_v_mpc , s=100) pl.scatter( fitobj.halo2_u_mpc , fitobj.halo2_v_mpc , s=100) pl.axis('equal') pl.show() fitobj.shear_g1 = shear_model_g1 + np.random.randn(len(fitobj.shear_u_arcmin))*sigma_g_add fitobj.shear_g2 = shear_model_g2 + np.random.randn(len(fitobj.shear_u_arcmin))*sigma_g_add fitobj.sigma_g = np.std(shear_model_g2,ddof=1) fitobj.inv_sq_sigma_g = 1./sigma_g_add**2 shears_info['g1'] = fitobj.shear_g1 shears_info['g2'] = fitobj.shear_g2 shears_info['weight'] = fitobj.inv_sq_sigma_g tabletools.savePickle(filename_shears_overlap,shears_info,append=True)
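# --- illustrative sketch (not part of the pipeline) ---
# Two pieces of geometry from the overlap test above, isolated for clarity: the conversion of
# a transverse offset in Mpc at the lens redshift into arcminutes (used to place the
# overlapping halo on the grid), and the line-of-sight boost factor Dtot/Dxy applied to the
# filament model. The angular diameter distance is passed in rather than computed, so no
# cosmology code is assumed; the numbers in the example comment are illustrative only.

import numpy as np

def _mpc_to_arcmin(offset_mpc, ang_diam_dist_mpc):
    # small-angle conversion: theta [arcmin] = (d / D_A) * (180/pi) * 60
    return offset_mpc / ang_diam_dist_mpc * 180.0 / np.pi * 60.0

def _los_boost(Dxy, Dlos):
    # ratio of the full 3D pair separation to its projected (plane-of-sky) part
    return np.sqrt(Dxy**2 + Dlos**2) / Dxy

# e.g. _mpc_to_arcmin(1.0, 940.0) ~ 3.7 arcmin ; _los_boost(10.0, 5.0) ~ 1.12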
def test_calibration_procedure(): filelist = np.loadtxt(filelist_svclusters,dtype='a') calib_struct = pickle.load(open(filename_calibration)) for ifile,filename_results in enumerate(filelist): filename_results_calibrated = filename_results.replace('.cat','.nbc.cat') results_sv = tabletools.loadTable(filename_results_calibrated,dtype=dtype_table_results_sv_calibrated,log=1) select = results_sv['flag'] == 0 results_sv = results_sv[select] # analyse_population(results_sv) g1_mean = np.mean(results_sv['g1']) g1_bias_m = np.mean(results_sv['nbc_m']) g1_bias_m_std = get_m_std_for_sample(results_sv) g1_mean_calibrated = g1_mean / g1_bias_m log.info( 'g1_mean %10.4f' % g1_mean ) log.info( 'g1_mean_calibrated %10.4f' % g1_mean_calibrated ) log.info( 'g1_bias_m %10.4f' % g1_bias_m ) log.info( 'g1_bias_m_std %10.4f' % g1_bias_m_std ) marg_size_m, marg_size_m_std, marg_size_centers = get_marg('size',results_sv) marg_snr_m, marg_snr_m_std, marg_snr_centers = get_marg('snr',results_sv) pl.figure() pl.suptitle('%s\ntotal_calibration = %0.4f +/- %0.4f' % (filename_results,g1_bias_m,g1_bias_m_std)) pl.subplot(1,2,1) hh,bh,_=pl.hist(results_sv['size'],bins=calib_struct['bins_size'],histtype='step') # hh,bh,_=pl.hist(results_sv['size'],bins=np.linspace(1.2,3,20),histtype='step') pl.xlabel('FWHM_RATIO') pl.ylabel('histogram') ax2=pl.gca().twinx() ax2.errorbar(marg_size_centers,marg_size_m,yerr=marg_size_m_std,fmt='md') ax2.set_ylabel('multiplicative bias m') pl.subplot(1,2,2) # pl.hist(results_sv['snr'],bins=calib_struct['bins_snr'],histtype='step') pl.hist(results_sv['snr'],bins=np.linspace(0,60,20),histtype='step') pl.xlabel('SNR') pl.ylabel('histogram') ax2=pl.gca().twinx() ax2.errorbar(marg_snr_centers,marg_snr_m,yerr=marg_snr_m_std,fmt='cd') ax2.set_ylabel('multiplicative bias m') left = 0.125 # the left side of the subplots of the figure right = 0.9 # the right side of the subplots of the figure bottom = 0.1 # the bottom of the subplots of the figure top = 0.9 # the top of the subplots of the figure wspace = 0.7 # the amount of width reserved for blank space between subplots hspace = 0.5 # t pl.subplots_adjust(left=left, bottom=bottom, right=right, top=top, wspace=wspace, hspace=hspace) filename_fig = ('figs/fig.marg.%s.png' % filename_results ).replace('.fits.im3.cleaned.wcs.cat','').replace('sv-clusters-shears/','') pl.savefig(filename_fig) pl.close() log.info('saved %s' % filename_fig) # pl.show() # n_gals = len(results_sv) # n_gals_select = int(n_gals*0.25) # select = np.random.permutation(n_gals)[0:n_gals_select] # n_selected = len(select) select = results_sv['snr'] < 15 g1_mean = np.mean(results_sv[select]['g1']) g1_bias_m = np.mean(results_sv[select]['nbc_m']) g1_bias_m_std = get_m_std_for_sample(results_sv[select]) g1_mean_calibrated = g1_mean / g1_bias_m print 'g1_mean' , g1_mean print 'g1_mean_calibrated' , g1_mean_calibrated print 'g1_bias_m' , g1_bias_m print 'g1_bias_m_std' , g1_bias_m_std # import pdb; pdb.set_trace() # h,b,_=pl.hist(results_sv['nbc_m'][select],histtype='step') # pl.ylim([-10,max(h)]) # pl.yscale('log') # pl.hist(results_sv['nbc_m'],histtype='step') # results_sv['nbc_m'].sort() # pl.plot(results_sv['nbc_m']) # pl.figure() # pl.scatter(results_sv['nbc_m'],results_sv['size']) # pl.plot() # pl.figure() # pl.scatter(results_sv['nbc_m'],results_sv['snr']) pl.show()
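# --- illustrative sketch (not part of the pipeline) ---
# The population-level calibration applied above, in one function: the mean measured shear is
# divided by the mean multiplicative bias factor of the selected sample, and the uncertainty
# on that factor is propagated as a simple error on the mean. The error estimate here is a
# stand-in for get_m_std_for_sample, whose definition is not shown in this file.

import numpy as np

def _calibrate_mean_shear(g1, nbc_m):
    g1 = np.asarray(g1, dtype=float)
    m = np.asarray(nbc_m, dtype=float)
    g1_mean = g1.mean()
    m_mean = m.mean()
    m_err = m.std(ddof=1) / np.sqrt(len(m))   # error on the mean bias (assumed estimator)
    return g1_mean / m_mean, m_mean, m_err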
def run_test(): bins_z = np.arange(0.025,3.5,0.05) filename_gals = '/home/kacprzak/data/CFHTLens/CFHTLens_2014-06-14.normalised.fits' filename_clusters = os.environ['HOME'] + '/data/CFHTLens/ClusterZ/clustersz.fits' filename_lrgclus = 'halos_cfhtlens_lrgsclus.fits' cat_clusters = tabletools.loadTable(filename_clusters) cat_lrgclus = tabletools.loadTable(filename_lrgclus) cat_gals = tabletools.loadTable(filename_gals) cat_clusters = cat_clusters[cat_lrgclus['id']] # i=1 # x=bins_z[20:30] # y=cat_gals['PZ_full'][i][20:30] # pl.plot(x,y,'o-'); # pl.plot(bins_z,cat_gals['PZ_full'][i],'rx-'); # xx=np.linspace(x.min(), x.max(),100) # f=interp1d(x,y,'cubic') # yy=f(xx) # pl.plot(xx,yy,'d'); pl.show() # import pdb; pdb.set_trace() # perm = np.random.permutation(len(cat_gals))[:10000] # pl.scatter(cat_gals['ALPHA_J2000'][perm],cat_gals['DELTA_J2000'][perm]) gals_ra_deg = cat_gals['ALPHA_J2000'] gals_de_deg = cat_gals['DELTA_J2000'] gals_ra_rad , gals_de_rad = cosmology.deg_to_rad(gals_ra_deg, gals_de_deg) cylinder_radius_mpc=1 pz_all=np.sum(cat_gals['PZ_full'],axis=0) pz_all=pz_all/np.sum(pz_all) n_brigthest = 40 n_bins_hires = 10000 bins_z_hires=np.linspace(bins_z.min(), bins_z.max(),n_bins_hires) new_z = np.zeros(len(cat_lrgclus)) print 'len(cat_lrgclus)', len(cat_lrgclus) for ic in range(len(cat_lrgclus)): cluster_ra_rad , cluster_de_rad = cosmology.deg_to_rad( cat_clusters[ic]['ra'] , cat_clusters[ic]['dec'] ) cluster_z = cat_clusters['z'][ic] cluster_zspec = cat_lrgclus['z'][ic] gals_u_rad , gals_v_rad = cosmology.get_gnomonic_projection(gals_ra_rad , gals_de_rad , cluster_ra_rad , cluster_de_rad) gals_u_mpc , gals_v_mpc = cosmology.rad_to_mpc(gals_u_rad,gals_v_rad,cluster_z) select = (np.sqrt(gals_u_mpc**2 + gals_v_mpc**2) < cylinder_radius_mpc)*( np.abs(cat_gals['Z_B']-cluster_z) < 0.1 ) # print 'selected %d gals in cylinder' % len(np.nonzero(select)[0]) cylinder_gals = cat_gals[select] gals_u_mpc = gals_u_mpc[select] gals_v_mpc = gals_v_mpc[select] select1 = (cylinder_gals['MAG_i'] > 10) * (cylinder_gals['MAG_i'] < 27) select2 = (cylinder_gals['MAG_r'] > 10) * (cylinder_gals['MAG_r'] < 27) select3 = (cylinder_gals['MAG_g'] > 10) * (cylinder_gals['MAG_g'] < 27) select4 = (cylinder_gals['MAG_u'] > 10) * (cylinder_gals['MAG_u'] < 27) select5 = (cylinder_gals['MAG_y'] > 10) * (cylinder_gals['MAG_y'] < 27) select6 = (cylinder_gals['MAG_z'] > 10) * (cylinder_gals['MAG_z'] < 27) select = select2*select1*select3*select4*select6 # print 'selected %d with good mags' % len(np.nonzero(select)[0]) if len(np.nonzero(select)[0]) == 0: continue cylinder_gals = cylinder_gals[select] gals_u_mpc = gals_u_mpc[select] gals_v_mpc = gals_v_mpc[select] # select_brightest_i = np.ones(len(cylinder_gals))[np.argsort(cylinder_gals['MAG_i'])[:n_brigthest]] == True # select_brightest_r = np.ones(len(cylinder_gals))[np.argsort(cylinder_gals['MAG_r'])[:n_brigthest]] == True # select_brightest_u = np.ones(len(cylinder_gals))[np.argsort(cylinder_gals['MAG_u'])[:n_brigthest]] == True # select_brightest_g = np.ones(len(cylinder_gals))[np.argsort(cylinder_gals['MAG_g'])[:n_brigthest]] == True # select_brightest = select_brightest_r # cylinder_brightest = cylinder_gals[select_brightest] # gals_u_mpc = gals_u_mpc[select_brightest] # gals_v_mpc = gals_v_mpc[select_brightest] # print 'using %d gals' % len(cylinder_brightest) x1=cylinder_gals['MAG_r']-cylinder_gals['MAG_i'] x2=cylinder_gals['MAG_g']-cylinder_gals['MAG_i'] x3=cylinder_gals['MAG_u']-cylinder_gals['MAG_i'] x4=cylinder_gals['MAG_y']-cylinder_gals['MAG_i'] 
x5=cylinder_gals['MAG_z']-cylinder_gals['MAG_i'] X=np.concatenate( [x1.astype('f4')[:,None], x2.astype('f4')[:,None], x3.astype('f4')[:,None], x5.astype('f4')[:,None]] ,axis=1) from scipy.stats import gaussian_kde kde = gaussian_kde(X.T,bw_method=0.3) w = kde(X.T)**3 w = w/np.max(w) # pl.figure() # pl.scatter(X[:,0],X[:,1],s=50,c=w) ; pl.colorbar() # pl.figure() # pl.scatter(X[:,1],X[:,2],s=50,c=w) ; pl.colorbar() # pl.figure() # pl.scatter(X[:,0],X[:,2],s=50,c=w) ; pl.colorbar() # pl.figure() # pl.scatter(X[:,0],X[:,3],s=50,c=w) ; pl.colorbar() # # pl.figure() # # pl.scatter(X[:,0],X[:,4],s=50,c=w) ; pl.colorbar() # pl.show() # pl.figure() # pl.scatter(cylinder_gals[select_brightest]['MAG_r']-cylinder_gals[select_brightest]['MAG_i'], cylinder_gals[select_brightest]['MAG_g']-cylinder_gals[select_brightest]['MAG_i'],c=cylinder_gals[select_brightest]['Z_B']) # pl.colorbar() # pl.figure() # pl.scatter(cylinder_gals[select_brightest]['MAG_i'],cylinder_gals[select_brightest]['MAG_r'],c=cylinder_gals[select_brightest]['Z_B']) # pl.colorbar() # pl.figure() # pl.scatter(cylinder_gals[select_brightest]['MAG_g'],cylinder_gals[select_brightest]['MAG_r'],c=cylinder_gals[select_brightest]['Z_B']) # pl.colorbar() # pl.figure() # pl.scatter(cylinder_gals[select_brightest]['MAG_u'],cylinder_gals[select_brightest]['MAG_i'],c=cylinder_gals[select_brightest]['Z_B']) # pl.colorbar() # pl.figure() # pl.scatter(cylinder_gals[select_brightest]['MAG_r'],cylinder_gals[select_brightest]['Z_B'],c=cylinder_gals[select_brightest]['Z_B']) # pl.colorbar() # pl.figure() # pl.scatter(gals_u_mpc,gals_v_mpc,c=cylinder_brightest['Z_B'],s=cylinder_brightest['MAG_r']*2) # pl.colorbar() # pl.show() # pz_hires = np.zeros([len(cylinder_gals),n_bins_hires]) # for ib in range(len(cylinder_gals)): # fz=interp1d(bins_z,cylinder_gals['PZ_full'][ib],'cubic') # pz_hires[ib,:] = fz(bins_z_hires) # pz_this = pz_hires[ib,:]/np.sum(pz_hires[ib,:])*w[ib] # pl.plot(bins_z_hires,pz_this) # pl.plot(bins_z,cylinder_gals['PZ_full'][ib],'x'); # print 'interp' , ib, np.sum(pz_this) # pz_hires[pz_hires<0] = 1e-10 # pz_prod = np.sum(np.log(pz_hires),axis=0) # pz_prod = pz_prod - pz_prod.max() # pz_cylinder=np.exp(pz_prod) # pz_cylinder=pz_cylinder/np.sum(pz_cylinder) # new_z[ic] = bins_z_hires[pz_cylinder.argmax()] new_z[ic] = np.sum(cylinder_gals['Z_B']*w)/np.sum(w) std_z=np.std(np.sqrt(((cylinder_gals['Z_B']*w - new_z[ic])**2)/np.sum(w))) print '%3d new_z=%.4f zspec=%.4f bad_z=%.4f naomi_z=%.4f new-zpec=% .4f naomi-zspec=% .4f n_eff=%2.4f n_cylinder_gals=%d std_z=%2.5f' % (ic,new_z[ic],cluster_zspec,cluster_z,cat_clusters['z'][ic],new_z[ic]-cluster_zspec,cat_clusters['z'][ic]-cluster_zspec,np.sum(w),len(cylinder_gals),std_z) # pl.figure() # pl.plot(bins_z_hires,pz_cylinder,'kd'); # # pl.plot(bins_z,pz_all); # pl.axvline(cluster_z,color='b') # pl.axvline(cluster_zspec,color='c') # pl.axvline(cluster_z+0.1,color='r') # pl.axvline(cluster_z-0.1,color='r') # pl.xlim([0,1]) # pl.show() pl.hist(new_z-cat_lrgclus['z'],np.linspace(-0.1,0.1,20),histtype='step',label='new z',normed=True); pl.hist(cat_clusters['z']-cat_lrgclus['z'],np.linspace(-0.1,0.1,20),histtype='step',label='naomi z',normed=True); pl.hist(cat_clusters['z_bad']-cat_lrgclus['z'],np.linspace(-0.1,0.1,20),histtype='step',label='old z',normed=True); pl.xlabel('z_estimated - z_spec') pl.legend() pl.show() import pdb; pdb.set_trace()
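# --- illustrative sketch (not part of the pipeline) ---
# The colour-density weighting used for the cluster redshift estimate above, as a standalone
# function: galaxies in the cylinder are weighted by the kernel density of their colours
# (raised to a power, 3 as above) so that the dominant colour locus drives a weighted mean of
# Z_B. Input is a plain dict of magnitude arrays; the y band is left out, matching the colour
# set concatenated above. The function name and interface are hypothetical.

import numpy as np
from scipy.stats import gaussian_kde

def _kde_weighted_redshift(mags, z_b, bw=0.3, power=3):
    colours = np.column_stack([mags['r'] - mags['i'],
                               mags['g'] - mags['i'],
                               mags['u'] - mags['i'],
                               mags['z'] - mags['i']]).astype('f8')
    kde = gaussian_kde(colours.T, bw_method=bw)   # density in 4-colour space
    w = kde(colours.T) ** power
    w = w / w.max()
    return np.sum(np.asarray(z_b) * w) / np.sum(w)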