def arrange_alhambraHDF5list_byfield(lista, save='yes'):
    """
    It creates a global P(z) by co-adding the P(z)'s from a list of files.
    ---------------------------------
    import alhambrahdf5
    from alhambrahdf5 import *
    mat = arrange_alhambraHDF5list_byfield(lista)
    """
    ims = U.get_str(lista, 0)
    basez = U.arange(0.001, 7.001, 0.001)
    dim = len(ims)
    for ii in range(dim):
        print '%i/%i' % (ii + 1, dim)
        infile = ims[ii]
        data = U.get_data(infile, 0)
        if ii < 1:
            datos = data
        else:
            datos += data
    if save == 'yes':
        finaldata = decapfile(lista) + '.global.mat'
        U.put_data(finaldata, (datos, basez))
    return datos
def select_galaxies_JPLUS_CATBPZcatalog(catalog):
    tile_num, obj_id, ra, dec, fw = U.get_data(catalog, (0, 1, 2, 3, 4))
    u_m, u_em, g_m, g_em, r_m, r_em = U.get_data(catalog, (5, 6, 7, 8, 9, 10))
    i_m, i_em, z_m, z_em, f395_m, f395_em = U.get_data(
        catalog, (11, 12, 13, 14, 15, 16))
    f410_m, f410_em, f430_m, f430_em, f515_m, f515_em = U.get_data(
        catalog, (17, 18, 19, 20, 21, 22))
    f660_m, f660_em, f861_m, f861_em = U.get_data(catalog, (23, 24, 25, 26))
    zb, zb1, zb2, tb, ods = U.get_data(catalog, (27, 28, 29, 30, 31))
    gs = select_jplus_galaxies(fw, r_m)
    out_cat = catalog[:-3] + 'gal.cat'
    header_cat = '# 1.tile_id 2.object_id 3.ra 4.dec 5.fwhm 6.uJAVA 7.uJAVA_err 8.gSDSS \
9.gSDSS_err 10.rSDSS 11.rSDSS_err 12.iSDSS 13.iSDSS_err 14.zSDSS 15.zSDSS_err \
16.J0395 17.J0395_err 18.J0410 19.J0410_err 20.J0430 21.J0430_err \
22.J0515 23.J0515_err 24.J0660 25.J0660_err 26.J0861 27.J0861_err \
28.zb_peak 29.zb_min_1sig 30.zb_max_1sig 31.Spectral-class 32.Odds'
    U.put_data(out_cat,
               (tile_num[gs], obj_id[gs], ra[gs], dec[gs], fw[gs],
                u_m[gs], u_em[gs], g_m[gs], g_em[gs], r_m[gs], r_em[gs],
                i_m[gs], i_em[gs], z_m[gs], z_em[gs], f395_m[gs], f395_em[gs],
                f410_m[gs], f410_em[gs], f430_m[gs], f430_em[gs],
                f515_m[gs], f515_em[gs], f660_m[gs], f660_em[gs],
                f861_m[gs], f861_em[gs], zb[gs], zb1[gs], zb2[gs], tb[gs],
                ods[gs]), header_cat)
def select_galaxies_JPLUS_catalog(catalog):
    """
    import sys
    sys.path.append('/Users/albertomolino/doctorado/photo/programas/')
    import useful as U
    import numpy as N
    import jplus_calib_tools
    from jplus_calib_tools import select_jplus_galaxies, num_tiles
    catalog = '/Users/albertomolino/jplus_data_download/SV02_March07/SV02_March07.clean.cat'
    """
    tile_num, obj_id, ra, dec, fw = U.get_data(catalog, (0, 1, 2, 3, 4))
    u_m, u_em, g_m, g_em, r_m, r_em = U.get_data(catalog, (5, 6, 7, 8, 9, 10))
    i_m, i_em, z_m, z_em, f395_m, f395_em = U.get_data(
        catalog, (11, 12, 13, 14, 15, 16))
    f410_m, f410_em, f430_m, f430_em, f515_m, f515_em = U.get_data(
        catalog, (17, 18, 19, 20, 21, 22))
    f660_m, f660_em, f861_m, f861_em = U.get_data(catalog, (23, 24, 25, 26))

    header_cat = '# 1.tile_id 2.object_id 3.ra 4.dec 5.fwhm 6.uJAVA 7.uJAVA_err 8.gSDSS \
9.gSDSS_err 10.rSDSS 11.rSDSS_err 12.iSDSS 13.iSDSS_err 14.zSDSS 15.zSDSS_err \
16.J0395 17.J0395_err 18.J0410 19.J0410_err 20.J0430 21.J0430_err \
22.J0515 23.J0515_err 24.J0660 25.J0660_err 26.J0861 27.J0861_err '

    only_tiles = num_tiles(tile_num)  # tiles w/o duplications
    n_tiles = len(only_tiles)         # number of Tiles
    for ii in range(n_tiles):
        ref_tile = only_tiles[ii]
        g1 = N.less(abs(tile_num - ref_tile), 1)
        tile_cat = catalog[:-3] + '%i.gal.cat' % (ref_tile)
        tile_num_r, obj_id_r, ra_r, dec_r, fw_r = U.multicompress(
            g1, (tile_num, obj_id, ra, dec, fw))
        u_m_r, u_em_r, g_m_r, g_em_r, r_m_r, r_em_r = U.multicompress(
            g1, (u_m, u_em, g_m, g_em, r_m, r_em))
        i_m_r, i_em_r, z_m_r, z_em_r, f395_m_r, f395_em_r = U.multicompress(
            g1, (i_m, i_em, z_m, z_em, f395_m, f395_em))
        f410_m_r, f410_em_r, f430_m_r, f430_em_r, f515_m_r, f515_em_r = U.multicompress(
            g1, (f410_m, f410_em, f430_m, f430_em, f515_m, f515_em))
        f660_m_r, f660_em_r, f861_m_r, f861_em_r = U.multicompress(
            g1, (f660_m, f660_em, f861_m, f861_em))
        gs = select_jplus_galaxies(fw_r, r_m_r)
        # out_cat = maincat[:-3]+'gal.cat'
        U.put_data(
            tile_cat,
            (tile_num_r[gs], obj_id_r[gs], ra_r[gs], dec_r[gs], fw_r[gs],
             u_m_r[gs], u_em_r[gs], g_m_r[gs], g_em_r[gs], r_m_r[gs],
             r_em_r[gs], i_m_r[gs], i_em_r[gs], z_m_r[gs], z_em_r[gs],
             f395_m_r[gs], f395_em_r[gs], f410_m_r[gs], f410_em_r[gs],
             f430_m_r[gs], f430_em_r[gs], f515_m_r[gs], f515_em_r[gs],
             f660_m_r[gs], f660_em_r[gs], f861_m_r[gs], f861_em_r[gs]),
            header_cat)
def master_global_PDF(hdf5list, m_max):
    """
    This routine serves to extract the final P(z) from a list of HDF5 files.
    A magnitude cut (m < m_max) is applied.
    :param hdf5list: list of HDF5 files
    :param m_max: faintest magnitude included in the selection
    :return: zz, p_red, p_blue, p_global
    """
    plots = 1
    hdf5files = U.get_str(hdf5list, 0)
    n_fields = len(hdf5files)
    for ii in range(n_fields):
        if ii < 1:
            zz, p_red, p_blue, p_global = global_PDZ(hdf5files[ii], m_max)
        else:
            zz, p_red_temp, p_blue_temp, p_global_temp = global_PDZ(
                hdf5files[ii], m_max)
            p_red += p_red_temp
            p_blue += p_blue_temp
            p_global += p_global_temp

    if plots:
        plt.figure(12, figsize=(8.5, 10.), dpi=80, facecolor='w', edgecolor='k')
        plt.clf()
        plt.plot(zz, p_red, 'r-', lw=5, alpha=0.7)
        plt.plot(zz, p_blue, 'b-', lw=5, alpha=0.7)
        plt.plot(zz, p_global, 'k--', lw=5, alpha=0.7)
        plt.grid()
        plt.xlim(0., zz.max())
        plt.ylabel('P(z)', size=20, labelpad=+1)
        plt.legend(['early', 'late', 'all'], loc='upper right', fontsize=20)
        plt.xlabel('$z$', size=30)

    output_filename = hdf5list[:-4] + 'master.PDF.mmax%.2fAB.mat' % (m_max)
    U.put_data(output_filename, (zz, p_red, p_blue, p_global), 'z P_r P_b P_a')
    return zz, p_red, p_blue, p_global
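# --- Hedged usage sketch (not part of the original code) ---------------------
# Illustrates how master_global_PDF() above might be called, assuming this
# module is importable as 'splus_s82_hdf5_tools' (the alias used elsewhere in
# these scripts) and that 'hdf5.list' is an ASCII list of HDF5 files, one per
# row, as read by U.get_str(). The path and the 21-mag cut are illustrative.
#
#   import splus_s82_hdf5_tools as to
#   root = '/Users/albertomolino/Postdoc/T80S_Pipeline/Commisioning/S82/Dec2017/splus_cats_NGSL/'
#   zz, p_red, p_blue, p_global = to.master_global_PDF(root + 'hdf5.list', 21.)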
def alhambra_get2Dmatrix_HDF5(inputfile, cond, finalname=None, normed=1):
    """
    Given a probs class, plot z versus T density plot.

    import alhambrahdf5
    from alhambrahdf5 import *
    inputfile = '/Volumes/CLASH/ALHAMBRA/f02p02_colorproext_1_ISO_phz_eB10.hdf5'
    bpz = '/Volumes/CLASH/ALHAMBRA/f02p02_colorproext_1_ISO_phz_eB11.Prior1peak.bpz'
    ids,mo = U.get_data(bpz,(0,11))
    good = U.greater_equal(mo,18.) * U.less_equal(mo,23.)
    finalname = '/Users/amb/Desktop/testmat/f02p02c01.18m25.norm.mat'
    mat = alhambra_get2Dmatrix_HDF5(inputfile,good,finalname,1)
    -----------
    """
    p = h5py.File(inputfile, mode='r')
    pdz = p.get('FullProbability')
    tt = p.get('type')
    z = p.get('redshift')
    # pdz = p.get('/Probs_z_T/Full_Probability')
    # z = p.get('/Probs_z_T/redshift')
    # tt = p.get('/Probs_z_T/type')
    # Example: pdz.shape = (9651, 7000, 81)
    no = pdz.shape[0]  # Number of galaxies
    nz = pdz.shape[1]  # Redshift base
    nt = pdz.shape[2]  # Template base
    kk = 0
    for ii in range(no):
        # print '%i out of %i' % (ii + 1, no - 1)
        if cond[ii] == True:
            if kk < 1:
                if normed == 1:
                    pepe = U.sum(pdz[ii, :, :], axis=1)
                    xx = pepe / U.sum(pepe)
                elif normed == 2:
                    for ss in range(nt):
                        a = pdz[ii, :, ss]
                        if a.sum() > 1.0e-30:
                            b = a / sum(a)
                else:
                    xx = pdz[ii, :, :]
            else:
                if normed == 1:
                    pepe = U.sum(pdz[ii, :, :], axis=1)
                    xx += pepe / U.sum(pepe)
                elif normed == 2:
                    for ss in range(nt):
                        a = pdz[ii, :, ss]
                        if a.sum() > 1.0e-30:
                            b += a / sum(a)
                else:
                    xx += pdz[ii, :, :]
            kk += 1

    if normed == 2:
        xx = b / b.sum()

    if finalname == None:
        outname = inputfile + '.mat'
    else:
        outname = finalname

    if normed == 0:
        U.put_2Darray(outname, xx)
    if normed == 1:
        U.put_data(outname, (xx, xx))
    if normed == 2:
        U.put_data(outname, (xx, xx))
    return xx
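# --- Hedged helper (not part of the original code) ---------------------------
# Minimal sketch to visualise the z-vs-T matrix written by
# alhambra_get2Dmatrix_HDF5() when normed=0, assuming U.put_2Darray() writes a
# plain ASCII 2D array that numpy.loadtxt can read back. Axis labels and the
# logarithmic colour scale are illustrative choices.
def show_zT_matrix(matfile):
    import numpy as np
    import matplotlib.pyplot as plt
    mat = np.loadtxt(matfile)  # rows: redshift bins, columns: templates (assumed)
    plt.figure()
    plt.imshow(np.log10(mat + 1.0e-30), origin='lower', aspect='auto')
    plt.xlabel('Template (T)')
    plt.ylabel('Redshift bin (z)')
    plt.colorbar()
    return mat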
                   dpi=80, facecolor='w', edgecolor='k')
        plt.clf()
        plt.subplot(211)
        plt.title('SED: %s' % (sed[ss]), size=15)
        plt.plot(rf_wavel[clean_sample], delta_f[clean_sample], '+', alpha=0.2)
        plt.plot(base_wavel, average_corr, '-ro', lw=3)
        plt.grid()
        plt.ylim(0.5, 1.5)
        plt.xlim(min_wave_corr * 0.9, max_wave_corr * 1.1)
        plt.ylabel('$F_{th}/F_{ob}$', size=20)

    outfilename = final_sed_root_data + sed[ss] + 'S82SPLUS_crf_res%iAA.dat' % (new_delta_lbda)
    U.put_data(outfilename, (base_wavel, average_corr),
               '# base_wavel average_corr')

    ## Here it applies the corrections to the original templates
    sed_wavel, sed_flux_orig = U.get_data(root_to_seds + sed[ss], (0, 1))
    corr_ori_wavel = U.match_resol(base_wavel, average_corr, sed_wavel)
    if new_dim > min_ng:
        sed_flux_new = sed_flux_orig / (1. * corr_ori_wavel)
    else:
        sed_flux_new = sed_flux_orig / 1.

    if plots:
        plt.subplot(212)
        pepe = sct.lookcloser(sed_wavel, normal_wavel)
        plt.semilogy(sed_wavel, sed_flux_orig / sed_flux_orig[pepe - 1],
    cmd += '-ABSOLUTE_MAGNITUDE yes ABSOLUTE_MAGNITUDE_FILTER %s ' % (filters[ii][:-4])
    cmd += '-ZP_ERRORS "0.02,0.02,0.02,0.02,0.02,0.02,0.02,0.02,0.02,0.02,0.02,0.02" '
    print cmd
    os.system(cmd)

filtros = ['uJAVA', 'F0378W', 'F0395W', 'F0410W', 'F0430W', 'gSDSS',
           'F0515W', 'rSDSS', 'F0660W', 'iSDSS', 'F0861W', 'zSDSS']
ra, dec = U.get_data(catalog, (2, 3))
MA1 = U.get_data(root2 + '%s/Master.Mabs%s.bpz' % (filtros[0], filtros[0]), 6)
MA2 = U.get_data(root2 + '%s/Master.Mabs%s.bpz' % (filtros[1], filtros[1]), 6)
MA3 = U.get_data(root2 + '%s/Master.Mabs%s.bpz' % (filtros[2], filtros[2]), 6)
MA4 = U.get_data(root2 + '%s/Master.Mabs%s.bpz' % (filtros[3], filtros[3]), 6)
MA5 = U.get_data(root2 + '%s/Master.Mabs%s.bpz' % (filtros[4], filtros[4]), 6)
MA6 = U.get_data(root2 + '%s/Master.Mabs%s.bpz' % (filtros[5], filtros[5]), 6)
MA7 = U.get_data(root2 + '%s/Master.Mabs%s.bpz' % (filtros[6], filtros[6]), 6)
MA8 = U.get_data(root2 + '%s/Master.Mabs%s.bpz' % (filtros[7], filtros[7]), 7)
MA9 = U.get_data(root2 + '%s/Master.Mabs%s.bpz' % (filtros[8], filtros[8]), 6)
MA10 = U.get_data(root2 + '%s/Master.Mabs%s.bpz' % (filtros[9], filtros[9]), 6)
MA11 = U.get_data(root2 + '%s/Master.Mabs%s.bpz' % (filtros[10], filtros[10]), 6)
MA12 = U.get_data(root2 + '%s/Master.Mabs%s.bpz' % (filtros[11], filtros[11]), 6)
Mst, zs, Tb, Odds, mo = U.get_data(root2 + '%s/Master.Mabs%s.bpz' % (filtros[7], filtros[7]),
                                   (6, 11, 4, 5, 12))

heada = '# RA Dec M_uJAVA M_J0378 M_J0395 M_J0410 M_J0430 M_gSDSS M_J0515 M_rSDSS '
heada += 'M_J0660 M_iSDSS M_J0861 M_zSDSS StellMass Specz SpT Odds rSDSS '
new_file = root2 + 'SPLUS_Master_MagAbs.cat'
U.put_data(new_file, (ra, dec, MA1, MA2, MA3, MA4, MA5, MA6, MA7, MA8, MA9,
                      MA10, MA11, MA12, Mst, zs, Tb, Odds, mo), heada)
def check_rms_SPLUS(segmentation, photometry, minrad, maxrad, totnum, plots, verbose):
    """
    It describes the photometric noise in images by launching apertures in blank
    areas and describing the area-vs-rms dependency.
    Philosophy: Several apertures (of random radius and position) will be created
    (over blank areas) on a 'segmentation' image to estimate the real photometric
    error of an input 'photometry' image.
    --------
    segmentation: Segmentation-like image (SExtractor output) used to select 'blank' areas.
    photometry:   scientific image over which to estimate the real photometric errors.
    minrad, maxrad = minimum & maximum radius of the used apertures (pixels).
    totnum = total number of apertures.
    area, rms = final outputs.
    ---------
    USAGE:
    ---------
    import script_dcluster_tools as to
    segmentation = 'f814.seg.fits'
    photometry = 'f814.fits'
    apertures, finalbackg, finalmeans, fluxes = to.check_rms_SPLUS(segmentation, photometry, 1, 21, 5.0e+04, 'yes', False)
    ----
    """
    if not os.path.exists(segmentation):
        print
        print 'Image %s does not exist!' % (segmentation)
        sys.exit()
    if not os.path.exists(photometry):
        print
        print 'Image %s does not exist!' % (photometry)
        sys.exit()

    if verbose == True:
        verba = 1
    else:
        verba = 0

    # Reading data from images.
    if photometry[-2:] == 'fz':
        photima = fits.open(photometry)[1].data
    else:
        photima = fits.open(photometry)[0].data
    if segmentation[-2:] == 'fz':
        segima = fits.open(segmentation)[1].data
    else:
        segima = fits.open(segmentation)[0].data

    # Final root where to save the data.
    final_path = os.path.dirname(photometry)
    base_name = os.path.basename(photometry)
    len_extension = len(base_name.split('.')[-1]) + 1
    file_root = final_path + '/Apertures/%s' % (base_name[:-len_extension])

    # Physical limits (pixel) for the segmentation image.
    xGC = N.shape(segima)[1] / 2.
    yGC = N.shape(segima)[0] / 2.
    # For CLASH, due to the rotating frames, a maximum radius is set.
    min_maxim_radius = min([xGC, yGC])
    radialmax = min_maxim_radius  # Maximum radial distance to the center [pixels]

    # Here the random positions (X,Y) are limited in range.
    minpix_X = 1500
    maxpix_X = N.shape(segima)[1] - 1500
    minpix_Y = 1500
    maxpix_Y = N.shape(segima)[0] - 1500
    binhisto = 100

    # Final vector with positions.
    x_values = N.arange(minpix_X, maxpix_X, 1)
    y_values = N.arange(minpix_Y, maxpix_Y, 1)

    # Total dimension for the input variables.
    maxdim = int(20 * (10. + (4 * (((maxrad) * (maxrad + 1)) / 2.))))

    # Defining other variables.
    XX = N.zeros((maxdim), float)
    YY = N.zeros((maxdim), float)
    XO = N.zeros((maxdim), float)
    YO = N.zeros((maxdim), float)
    RR = N.zeros(totnum)

    # Range of apertures to be launched.
    apertures = N.arange(minrad, maxrad, 1)
    n_apertures = len(apertures)

    # Length definition for the final outputs.
    finalbackg = N.zeros(n_apertures, 'float64')
    finalmeans = N.zeros(n_apertures, 'float64')
    gausshisto = N.zeros((binhisto, 2 * n_apertures), dtype='float64')

    # Starting the analysis.
    mmm = 0
    for app in range(n_apertures):
        raper = apertures[app]
        # minrad = maxrad = raper
        if verba:
            print 'Iteration %i out of %i ' % (app + 1, n_apertures)
            print '-------------------------'
        # New temporal variables (erased in every loop).
        fluxes = N.zeros(totnum, dtype='float64')
        sbackg = N.zeros(totnum, dtype='float64')
        ff = -1
        # Now it runs until it gets "totnum" measurements.
        hh = -1
        contador = 0
        while contador < (totnum):
            kk = -1
            # Random x,y numbers to estimate the position
            # to place the aperture.
            xo = N.random.random_integers(minpix_X, maxpix_X)
            yo = N.random.random_integers(minpix_Y, maxpix_Y)
            if verba:
                print 'xo,yo', xo, yo
            # Corresponding radial distance to the center.
            tempradii = N.sqrt((xo - xGC) * (xo - xGC) + (yo - yGC) * (yo - yGC))
            if tempradii < (radialmax + 1):
                # Now it is computed the shape of the aperture.
                Xr = N.zeros((raper * raper), float)
                Yr = N.zeros((raper * raper), float)
                for ii in range(raper):
                    xvalue = xo + ii
                    for jj in range(raper):
                        kk += 1
                        yvalue = yo + jj
                        Xr[kk] = xvalue
                        Yr[kk] = yvalue
                # Here it checks the blankness of the aperture.
                tempflux = area2noise(segima, photima, Xr, Yr)
                if raper < 2:
                    if tempflux != -999.:
                        fluxes[hh] = tempflux
                        if verba:
                            print 'Adding flux: ', tempflux
                        hh += 1
                        contador += 1
                if raper > 1:
                    if tempflux[0] != -999.:
                        fluxes[hh] = tempflux.sum() - (finalmeans[0] * raper ** 2)  # why?
                        contador += 1
                        hh += 1
                        if verba:
                            print 'hh', hh

        # Computing values from the sample.
        sigfluxes = U.std_robust(fluxes)
        good = U.less_equal(abs(fluxes), 5. * sigfluxes)
        fluxes = U.compress(good, fluxes)
        # Storing the background dispersion & mean inside that aperture.
        finalbackg[app] = U.std(fluxes)
        finalmeans[app] = U.mean(fluxes)

        if plots == 'yes':
            plt.figure(1, figsize=(7, 6), dpi=70, facecolor='w', edgecolor='k')
            plt.clf()
            # va1,va2 = N.histogram(fluxes,binhisto,normed=1)
            va1, va2, va3 = plt.hist(fluxes, binhisto, normed=1,
                                     facecolor='black', alpha=0.5, linewidth=1.5)
            baseh = va2[0:-1] + ((va2[1] - va2[0]) / 2.)
            nele = len(fluxes)
            mu = U.mean(fluxes)
            sig = U.std(fluxes)
            # yh = U.normpdf(va2,mu,sig)
            # plt.plot(va2,yh,'r-',linewidth=3,alpha=0.7)
            mu = U.mean_robust(fluxes)    # repeated
            sig = U.std_robust(fluxes)    # repeated
            # yh = U.normpdf(va2,mu,sig)  # repeated
            # plt.plot(va2,yh,'r--',linewidth=3,alpha=0.7)  # repeated
            plt.legend([('MEAN: %.4f ''\n'' RMS: %.4f ' % (mu, sig)),
                        'Aperture: %i $pix$' % (raper * raper)],
                       numpoints=1, loc='upper right', fontsize=14)
            plt.xlim(mu - 4 * sig, mu + 4 * sig)
            plt.xlabel('Aperture Flux [ADU]', size=20)
            plt.ylabel('Number Counts', size=20)
            plt.xticks(fontsize=17), plt.yticks(fontsize=17)
            nameima = photometry.split('/')[-1:][0]
            plt.ylim()
            figure2name = file_root + '_hfaper_%i.png' % (raper)
            plt.savefig(figure2name, dpi=150)
            plt.close()

        # Here it saves the info from the histogram.
        ind1 = app * 2
        ind2 = app * 2 + 1
        if verba:
            print 'ind1,ind2', ind1, ind2
        gausshisto[:, ind1] = baseh  # va2
        gausshisto[:, ind2] = va1    # yh

    # At this point all apertures have been computed.
    # Now it will represent the sigma_vs_area dependency.
    sigmas = finalbackg  # -abs(finalmeans)
    aa, bb = sigmafit(sigmas, sigmas[0], apertures)
    if plots == 'yes':
        plt.figure(2, figsize=(7, 6), dpi=80, facecolor='w', edgecolor='k')
        plt.clf()
        plt.plot(apertures, sigmas[0] * apertures * (aa + bb * apertures), 'k-',
                 apertures, apertures * sigmas[0], 'r-')
        plt.legend([('%.3f$\sqrt{N}$ (%.3f + %.3f$\sqrt{N}$)' % (sigmas[0], aa, bb)),
                    '%.3f$\sqrt{N}$ | Poisson Distribution ' % (sigmas[0])],
                   numpoints=1, loc='upper left')
        plt.plot(apertures, sigmas, 'ko')
        plt.xlim(0., max(apertures) + 1)
        plt.xlabel('$\sqrt{N}$', size=18)
        plt.ylabel('$\sigma$', size=20)
        plt.xticks(fontsize=15)
        plt.yticks(fontsize=15)
        nick2 = photometry.split('/')[-1:][0]
        figure1name = file_root + '_apersigma.png'
        plt.savefig(figure1name, dpi=150)
        plt.close()

    # Saving outputs in ASCII files.
    fileout = file_root + '.apertures.txt'
    header = '# AREA[pix] RMS(std[counts]) MEAN(mean_robust[counts])'
    U.put_data(fileout, (apertures, sigmas, finalmeans), header)
    # print 'Saving data... in %s' %(fileout)
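# --- Hedged helper (not part of the original code) ---------------------------
# Evaluates the empirical noise model plotted above by check_rms_SPLUS(),
# sigma(N) = sigma1 * sqrt(N) * (a + b * sqrt(N)), where N is the aperture area
# in pixels, sigma1 the single-pixel rms and (a, b) the coefficients returned by
# sigmafit(). A minimal sketch, useful to rescale the measured rms to an
# arbitrary aperture size.
def aperture_rms(area_pix, sigma1, a, b):
    import numpy as np
    sqrt_n = np.sqrt(np.asarray(area_pix, dtype=float))
    return sigma1 * sqrt_n * (a + b * sqrt_n)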
def appending_ids2catalogues(field, pointing, ccd):
    """
    import alhambra_3arcs as A3
    A3.appending_ids2catalogues(2,1,1)
    """
    catalhambra = root + 'f0%i/alhambra.F0%iP0%iC0%i.ColorProBPZ.cat' % (
        field, field, pointing, ccd)
    idalh = U.get_str(catalhambra, 0)
    idalh2 = U.arange(len(idalh)) + 1
    xalh, yalh = U.get_data(catalhambra, (6, 7))

    cat3arcs = finalroot + 'f0%i/alhambra.f0%ip0%ic0%i.3arcs.cat' % (
        field, field, pointing, ccd)
    id3arcs, x3arcs, y3arcs = U.get_data(cat3arcs, (0, 3, 4))
    print len(id3arcs)

    matchfile = cat3arcs[:-3] + 'idsfrommatch.txt'
    if not os.path.exists(matchfile):
        idcol = idalh2
        xcol = xalh
        ycol = yalh
        idsp = id3arcs
        xsp = x3arcs
        ysp = y3arcs
        pepe = CT.matching_vects(idcol, xcol, ycol, idsp, xsp, ysp, 5)

        # Compressing matches for ColorPro...
        print 'Compressing matches...'
        matchidcol = pepe[:, 0].astype(int)
        gdet_col = U.greater(matchidcol, 0)  # Excluding 0's (non matched detections)
        matchidcol = U.compress(gdet_col, (matchidcol))
        # Compressing matches for Spectroscopic...
        matchidsp = pepe[:, 1].astype(int)
        gdet_spz = U.greater(matchidsp, 0)  # Excluding 0's (non matched detections)
        matchidsp = U.compress(gdet_spz, (matchidsp))
        print 'len(idcol)', len(idcol)
        print 'len(idsp)', len(idsp)
        if len(matchidcol) == len(matchidsp):
            print 'Creating idredu & zsredu '
            print 'Dimension of matchidsp ', len(matchidsp)
            idredu = U.zeros(len(matchidsp))
            idspredu = U.zeros(len(matchidsp))
            for ii in range(len(matchidsp)):
                colindex = A.id2pos(idcol, matchidcol[ii])  # Position for Index idcol
                spzindex = A.id2pos(idsp, matchidsp[ii])    # Position for Index idsp
                idredu[ii] = idcol[colindex]    # ID for ColorPro
                idspredu[ii] = idsp[spzindex]   # ID for the 3-arcsec catalogue

            matchfile = cat3arcs[:-3] + 'idsfrommatch.txt'
            U.put_data(matchfile, (idredu, idspredu))

    if os.path.exists(matchfile):
        pepa = open(matchfile[:-3] + 'bis.cat', 'w')
        idredu, idspredu = U.get_data(matchfile, (0, 1))
        i11 = idredu.astype(int) - 1
        i22 = idspredu.astype(int)
        lista = []
        for ii in range(len(i11)):
            lista.append(idalh[i11[ii]])
            pepa.write('%s %s \n' % (idalh[i11[ii]], i22[ii]))
        pepa.close()

        finalfinal = cat3arcs[:-3] + 'final.cat'
        if os.path.exists(finalfinal):
            A.deletefile(finalfinal)
        if not os.path.exists(finalfinal):
            print 'Preparing ', finalfinal
            idsa = U.get_str(matchfile[:-3] + 'bis.cat', 0)
            append_IDs2_3arcs_catalogues(cat3arcs, idsa)
z_AB_1, f_AB_1 = U.get_data(sed_ab_filter_1, (0, 1))
z_AB_2, f_AB_2 = U.get_data(sed_ab_filter_2, (0, 1))
good_z_range = N.less_equal(z_AB_1, zmax)
good_z_range *= N.greater(f_AB_1, 0) * N.greater(f_AB_2, 0)
f_AB_redu_1 = f_AB_1[good_z_range]
z_AB_redu_1 = z_AB_1[good_z_range]
f_AB_redu_2 = f_AB_2[good_z_range]
z_AB_redu_2 = z_AB_2[good_z_range]
mag_AB_redu_1 = B.flux2mag(abs(f_AB_redu_1))
mag_AB_redu_2 = B.flux2mag(abs(f_AB_redu_2))
outfilename = root + 'tracks_sed%s_0.0z%.1f_%s%s.txt' % (sed, zmax, filtro1, filtro2)
U.put_data(outfilename, (z_AB_redu_2, mag_AB_redu_1 - mag_AB_redu_2),
           'z %s-%s' % (filtro1, filtro2))

"""
z, gr_ell = U.get_data('/Users/albertomolino/doctorado/articulos/SPLUS/StarGalaxy/AB/tracks_sedEll3_A_0_0.0z0.5_gSDSSrSDSS.txt', (0, 1))
z, rz_ell = U.get_data('/Users/albertomolino/doctorado/articulos/SPLUS/StarGalaxy/AB/tracks_sedEll3_A_0_0.0z0.5_rSDSSzSDSS.txt', (0, 1))
z, gr_Sa = U.get_data('/Users/albertomolino/doctorado/articulos/SPLUS/StarGalaxy/AB/tracks_sedSa_A_1_0.0z0.5_gSDSSrSDSS.txt', (0, 1))
z, rz_Sa = U.get_data('/Users/albertomolino/doctorado/articulos/SPLUS/StarGalaxy/AB/tracks_sedEll3_A_0_0.0z0.5_rSDSSzSDSS.txt', (0, 1))
plt.plot(gr_Sa[::10], rz_Sa[::10], '-bo')
plt.plot(gr_ell[::10], rz_ell[::10], '-ro')

z, gr_ell = U.get_data('/Users/albertomolino/doctorado/articulos/SPLUS/StarGalaxy/AB/tracks_sedEll3_A_0_0.0z2.0_gSDSSrSDSS.txt', (0, 1))
z, rz_ell = U.get_data('/Users/albertomolino/doctorado/articulos/SPLUS/StarGalaxy/AB/tracks_sedEll3_A_0_0.0z2.0_rSDSSzSDSS.txt', (0, 1))
z, gr_Sa = U.get_data('/Users/albertomolino/doctorado/articulos/SPLUS/StarGalaxy/AB/tracks_sedSa_A_1_0.0z2.0_gSDSSrSDSS.txt', (0, 1))
z, rz_Sa = U.get_data('/Users/albertomolino/doctorado/articulos/SPLUS/StarGalaxy/AB/tracks_sedSa_A_1_0.0z2.0_rSDSSzSDSS.txt', (0, 1))
plt.plot(gr_Sa[::10], rz_Sa[::10], '-bo')
plt.plot(gr_ell[::10], rz_ell[::10], '-ro')

plt.xlabel('$g-r$', size=25, labelpad=1)
plt.ylabel('$r-z$', size=25, labelpad=1)
plt.grid()
plt.legend(['early', 'late'], loc='best', fontsize=30, numpoints=1)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.title('0.0$<z<$2.0', size=20)
plt.savefig('/Users/albertomolino/doctorado/articulos/SPLUS/StarGalaxy/AB/tracks.png', dpi=90)
"""
                x_sel = N.greater_equal(x, min_x + (hh * dx))
                x_sel *= N.less_equal(x, min_x + (hh + 1) * dx)
                if jj < 1:
                    y_sel = N.less_equal(y, min_y + dy)
                else:
                    y_sel = N.greater_equal(y, min_y + dy)
                # Counting sources.
                counts[jj, hh] += len(x[x_sel * y_sel])

    max_value = counts.max()
    cc = N.reshape(counts / (1. * max_value), (1, 16))
    base = N.arange(16) + 1
    # Saving data.
    U.put_data(out_filename + '.txt', (base, cc[0] / (1. * n_cats)),
               '# amp norm_counts')
else:
    base, cc = U.get_data(out_filename + '.txt', (0, 1))

# Starting plot.
plt.figure(2, figsize=(10, 8), dpi=80, facecolor='w', edgecolor='k')
plt.clf()
plt.bar(base - 0.2, cc[0] / (1.), 0.4, color='grey', alpha=0.8, linewidth=2)
plt.xlabel('Amplifier', size=24)
plt.ylabel('Relative Number Counts', size=26, labelpad=6)
plt.xticks(fontsize=22)
plt.yticks(fontsize=20)
plt.xlim(0.1, 16.9)
plt.ylim(0.9, 1.0)
plt.grid()
        plt.legend([label1, label2, label3, label4, label5],
                   loc='upper left', fontsize=25)
        plt.plot(baseci2, baseci2, 'k--')
        plt.title('CONV_GAUSS_KERNEL = %s' % (gauss_factor), size=20)
        plt.grid()
        plt.xlabel('$C$', size=28, labelpad=3)
        plt.ylabel('$F(C)$', size=28, labelpad=3)
        plt.yticks(fontsize=18)
        plt.xticks(fontsize=18)
        final_plot_name = root + 'SC/' + 'Wittman.CGK%s.AB.png' % (gauss_factor)
        plt.savefig(final_plot_name, dpi=100)
        final_file_name = root + 'SC/' + 'Wittman.CGK%s.AB.txt' % (gauss_factor)
        U.put_data(final_file_name, (baseci2, v1, w1, y1, s1, r1),
                   '# C F17 F18 F19 F20 F21')

    if check_types:
        good1 = N.less_equal(TBB2, 5.5)     # tt[0:36] # red
        good2 = N.greater_equal(TBB2, 5.5)  # tt[36:]  # blue
        plt.figure(11)
        v1, v2, v3 = plt.hist(CIS2[good1], baseci, color='red', alpha=0.3,
                              cumulative=1)
        w1, w2, w3 = plt.hist(CIS2[good2], baseci, color='blue', alpha=0.3,
def get_PDZerrDistribution_byTemplates(hdf5file, bpzfile, m_max):
    """
    It returns the error distribution based on PDZs.
    ---
    import splus_s82_hdf5_tools as to
    root = '/Users/albertomolino/Postdoc/T80S_Pipeline/Commisioning/'
    root += 'S82/Dec2017/splus_cats_NGSL/'
    hdf5list = root+'hdf5.list'
    bpzlist = root+'bpz/master.STRIPE82_Photometry.m21.bpz.list'
    hdf5_files = U.get_str(hdf5list,0)
    n_hdf5 = len(hdf5_files)
    bpz_files = U.get_str(bpzlist,0)
    n_bpz = len(bpz_files)
    for ii in range(n_bpz):
        name = os.path.basename(hdf5_files[ii])
        print name
        try:
            z,dp,df = to.get_PDZerrDistribution_byTemplates(hdf5_files[ii],bpz_files[ii],19)
        except:
            print 'Impossible to run on ',name
    """
    plots = 1

    # Starting plots if necessary.
    if plots:
        plt.figure(12, figsize=(8.5, 10.), dpi=80, facecolor='w', edgecolor='k')

    try:
        ids, zb, zs, mo, tb, odd = U.get_data(bpzfile, (0, 1, 11, 12, 4, 5))
    except:
        ids, zb, zs, mo, tb, odd = U.get_data(bpzfile, (0, 1, 9, 10, 4, 5))
    good = N.less_equal(mo, m_max)
    ids, zb, zs, mo, tb, odd = U.multicompress(good, (ids, zb, zs, mo, tb, odd))
    ng = len(ids)

    # Reading the PDZs...
    p = h5py.File(hdf5file, mode='r')
    # pdzo = p.get('FullProbability')
    pdz = p.get('Likelihood')
    pdz = pdz[good, :, :]
    zz = p.get('redshift')[:]
    dz = (zz[2] - zz[1]) * 100.
    basez2 = N.arange(-0.2, 0.2, dz)
    basez2b = basez2[:-1] + ((basez2[1] - basez2[0]) / 2.)
    nz = len(basez2)
    res = 1

    # Computing the z error distribution function
    # based on peak values.
    delta_z_peaks = (zb - zs) / (1. + zs)
    a1, a2 = N.histogram(delta_z_peaks, basez2)

    delta_z_pdzs = N.zeros(nz - 1)
    for ii in range(ng):
        pdz_mot = U.sum(pdz[ii, :, :], axis=1)
        pdz_mot_peak = pdz_mot / float(max(pdz_mot))
        # To get rid of long tails in PDFs with low probabilities.
        pdz_mot_peak = N.where(pdz_mot_peak < 1.0e-4, 0., pdz_mot_peak)
        pdz_mot_norm = pdz_mot_peak / float(sum(pdz_mot_peak))
        pdz_mot_norm = N.where(pdz_mot_norm < 0., 0., pdz_mot_norm)
        # pdz_mot_norm = pdz_mot/float(sum(pdz_mot))
        pdz_mot_norm_resample = U.match_resol(zz - zs[ii], pdz_mot_norm, basez2b)
        pdz_mot_norm_resample = N.where(pdz_mot_norm_resample < 0., 0.,
                                        pdz_mot_norm_resample)
        delta_z_pdzs += pdz_mot_norm_resample[:]
        """
        if plots:
            plt.clf()
            plt.subplot(121)
            peak_zb_pos = N.argmax(pdz_mot_norm[::res])
            print zz[peak_zb_pos]
            plt.plot(zz[::res]-zs[ii],pdz_mot_norm[::res],'-',lw=5,alpha=0.6)
            #plt.plot(zz[::res]-zz[peak_zb_pos],pdz_mot_norm[::res],'-',lw=5,alpha=0.6)
            plt.grid()
            plt.xlim(-0.2,0.2)
            #plt.ylim(0.001,0.1)
            plt.xlabel('$\delta_{z}$',size=30)
            plt.ylabel('P(z)',size=20,labelpad=+1)
            plt.legend(['R=%.2f''\n''T=%.1f''\n''O=%.1f'%(mo[ii],tb[ii],odd[ii])],loc='upper right')
            plt.title('zb = %.2f, zs = %.2f, dz/1+z = %.2f'%(zb[ii],zs[ii],delta_z_peaks[ii]),size=20)
            plt.subplot(122)
            plt.plot(basez2b,delta_z_pdzs,'k-',lw=5)
            plt.grid()
            plt.xlim(-0.2,0.2)
            #plt.ylim(0.001,0.1)
            plt.xlabel('$\delta_{z}$',size=30)
            plt.ylabel('P(z)',size=20,labelpad=+1)
            pausa = raw_input('press a button to continue')
        """

    # New variables to handle data easily.
    # It scales the normalized PDFs by the ng!
    norm_dz_peaks = a1 / float(sum(a1))
    norm_dz_pdfs = delta_z_pdzs / float(sum(delta_z_pdzs))

    if plots:
        plt.figure(11, figsize=(8.5, 10.), dpi=80, facecolor='w', edgecolor='k')
        plt.clf()
        # plt.subplot(212)
        plt.plot(basez2b, norm_dz_peaks, 'b-', lw=8, alpha=0.6)
        plt.plot(basez2b, norm_dz_pdfs, 'r-', lw=5, alpha=0.9)
        plt.grid()
        plt.xlim(-0.2, 0.2)
        plt.ylabel('P(z)', size=20, labelpad=+1)
        plt.legend(['peaks', 'pdfs'], loc='upper left', fontsize=20)
        plt.xlabel('$\delta_{z}$', size=30)
        plot_filename = hdf5file[:-4] + 'deltaz.mmax%.2fAB.png' % (m_max)
        plt.savefig(plot_filename, dpi=80)

    # Saving data into a file.
    output_filename = hdf5file[:-4] + 'deltaz.mmax%.2fAB.mat' % (m_max)
    U.put_data(output_filename, (basez2b, norm_dz_peaks, norm_dz_pdfs),
               'z dz_peak dz_PDFs')
    return basez2b, norm_dz_peaks, norm_dz_pdfs
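# --- Hedged helper (not part of the original code) ---------------------------
# Reads back the '.deltaz.mmax*.mat' file written by
# get_PDZerrDistribution_byTemplates() and returns the mean and standard
# deviation of each normalised delta_z distribution, treating the columns as
# discrete PDFs. Column order (z, dz_peak, dz_PDFs) follows the header above;
# the first row of the file is assumed to be that header line.
def deltaz_stats(deltaz_file):
    import numpy as np
    z, p_peak, p_pdf = np.genfromtxt(deltaz_file, unpack=True, skip_header=1)
    stats = []
    for p in (p_peak, p_pdf):
        p = p / p.sum()
        mean = np.sum(z * p)
        sigma = np.sqrt(np.sum(p * (z - mean) ** 2))
        stats.append((mean, sigma))
    return stats  # [(mean_peaks, sigma_peaks), (mean_pdfs, sigma_pdfs)]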
i_m, i_em, z_m, z_em, f395_m, f395_em = U.get_data(maincat, (11, 12, 13, 14, 15, 16))
f410_m, f410_em, f430_m, f430_em, f515_m, f515_em = U.get_data(maincat, (17, 18, 19, 20, 21, 22))
f660_m, f660_em, f861_m, f861_em, zpec = U.get_data(maincat, (23, 24, 25, 26, 28))

only_tiles = jct.num_tiles(tile_num)  # tiles w/o duplications
n_tiles = len(only_tiles)             # number of Tiles
print 'Total number of Tiles to calibrate: ', n_tiles

for ii in range(n_tiles):
    ref_tile = only_tiles[ii]
    gs = N.less(abs(tile_num - ref_tile), 1)  # GoodSample
    tile_cat = tile_root + '%i.cat' % (ref_tile)
    U.put_data(tile_cat,
               (tile_num[gs], obj_id[gs], ra[gs], dec[gs], fw[gs],
                u_m[gs], u_em[gs], g_m[gs], g_em[gs], r_m[gs], r_em[gs],
                i_m[gs], i_em[gs], z_m[gs], z_em[gs], f395_m[gs], f395_em[gs],
                f410_m[gs], f410_em[gs], f430_m[gs], f430_em[gs],
                f515_m[gs], f515_em[gs], f660_m[gs], f660_em[gs],
                f861_m[gs], f861_em[gs], zpec[gs]),
               header_cat)

    if os.path.exists(tile_cat):
        ids = U.get_data(tile_cat, 0)
        ngals = len(ids)
        seeing = jct.get_seeing_from_data(fw[gs], r_m[gs])  # Estimates the seeing
        tiles_info.write('%s %i %.3f \n' % (tile_cat, ngals, seeing))
        if ngals >= ngal_min:
            cali_columns = tile_cat[:-3] + 'cal.columns'
            if not os.path.exists(cali_columns):
                cmd1 = 'python %sfullcalibrator_amb.py %s ' % (root2bpz, tile_cat)
                cmd1 += '-cols %s -outcol %s ' % (columns11b, cali_columns)
                print cmd1
def spurious_detect_threshold(image, sexfile):
    """
    Starting with an input configuration.sex file, it runs SExtractor on both
    versions of the image (direct and inverted) while varying the detection
    threshold across a certain range. By doing this, it is possible to establish
    a lower threshold for which the fraction of spurious detections is no larger
    than a few per cent.
    ======
    MAKE SURE THERE IS NOT A BLANK LINE AT THE END OF THE SEx FILE !!!!
    ----
    image = '/Volumes/amb/imagenes/f02/f02p01_F814W_4.swp.fits'
    sexfile = '/Volumes/amb/catalogos/reduction_v4/f02/f02p01_colorpro_4.sex'
    fspd,base = spurious_detect_threshold(image,sexfile)
    """
    verbose = 1
    if os.path.exists(image) and os.path.exists(sexfile):
        print
        print 'MAKE SURE THERE IS NOT A BLANK LINE AT THE END OF THE SEx FILE !!!!'
        print
        min_value = 0.9
        max_value = 1.5
        interv = 0.05
        base = N.arange(min_value, max_value + interv, interv)
        dim = len(base)
        spd = N.zeros((dim, 2), float)
        fspd = N.zeros(dim)
        newvals = N.zeros(2)

        imageinv = sdt.decapfile(image) + '_inv.fits'
        if not os.path.exists(imageinv):
            print 'Creating an inverse image...'
            coeff = -1.
            sdt.multiply_image_bya_number(image, coeff, imageinv)

        print 'Modifying SExtractor input file...'
        if os.path.exists(imageinv):
            for ii in range(dim):
                newsexcat = sdt.decapfile(sexfile) + '_thr%.2f.cat' % (base[ii])
                newsexfile = sdt.decapfile(sexfile) + '_thr%.2f.sex' % (base[ii])
                param = ['ANALYSIS_THRESH', 'DETECT_THRESH', 'CATALOG_NAME']
                newvals = [base[ii], base[ii], newsexcat]
                # newvals[0] = base[ii]
                # newvals[1] = base[ii]
                if verbose:
                    print 'base[%i]' % (ii), base[ii]
                    print 'sexfile', sexfile
                    print 'newsexfile', newsexfile
                    print 'param', param
                    print 'newvals', newvals

                # Modifying THRESHOLD in conf.sex.
                sdt.modifyingSExfiles(sexfile, param, newvals, newsexfile)

                print 'Running SExtractor...'
                for ss in range(2):
                    if ss == 0:
                        image2 = image
                    else:
                        image2 = imageinv
                    if os.path.exists(newsexfile):
                        cmd2 = ''
                        cmd2 = 'sex %s -c %s' % (image2, newsexfile)
                        print cmd2
                        try:
                            os.system(cmd2)
                        except:
                            print 'Impossible to run SExtractor !!'
                        print
                        print 'Measuring detections...'
                        catout = newsexcat
                        if os.path.exists(catout):
                            # print 'YES, the catout exists...'
                            id, x, y = U.get_data(catout, (0, 1, 2))
                            good = N.greater(x, 1500.) * N.less(x, 10300.) * \
                                   N.greater(y, 1100.) * N.less(y, 10000.)
                            id_redu = N.compress(good, id)
                            print 'Compressing the sample..'
                            spd[ii, ss] = len(id_redu)
                        else:
                            print '%s does not exists!!' % (catout)
                            print 'Impossible to quantify percentage of spurious detections!!'

        print 'Estimating spurious detections...'
        for jj in range(dim):
            fspd[jj] = ((spd[jj, 1] * 1.) / (spd[jj, 0] * 1.)) * 100.
        print 'fspd', fspd

        print 'Plotting results....'
        plt.figure(1, figsize=(12, 7), dpi=80, facecolor='w', edgecolor='k')
        plt.clf()
        plt.plot(base, fspd, '-ko', linewidth=2)
        plt.plot(base, base * 0. + 3., 'm--', linewidth=1.5)
        plt.xlabel('Threshold ($\sigma$)'), plt.ylabel('% Spurious detections')
        plt.xlim(min_value - interv, max_value + interv)
        plt.grid()
        outname = sdt.decapfile(image) + '_thranal.png'
        plt.savefig(outname, dpi=150)

        plt.figure(2, figsize=(12, 7), dpi=80, facecolor='w', edgecolor='k')
        plt.clf()
        plt.plot(base, spd[:, 0], '-ko', linewidth=5)
        plt.xlabel('Threshold ($\sigma$)', size=15), plt.ylabel('Number Detected Sources', size=15)
        plt.xlim(min_value - interv, max_value + interv)
        plt.grid()
        figname = sdt.decapfile(image) + '_numdet.png'
        plt.savefig(figname, dpi=150)

        outname2 = sdt.decapfile(image) + '_thranal.txt'
        U.put_data(outname2, (fspd, base), '# fspd base ', '%.3f %.3f')
        plt.close()
        return fspd, base
    else:
        print 'Input image or input SExfile does not exist!'
        print image
        print sexfile
__author__ = 'albertomolino'

import os, sys
sys.path.append('/Users/albertomolino/doctorado/photo/programas/')
import useful as U
import numpy as N

root_to_bpz = '/Users/albertomolino/codigos/bpz-1.99.2/'
root_to_filters = root_to_bpz + 'FILTER/'
final_root = root_to_filters + 'SPLUS_cutted/'
if not os.path.exists(final_root):
    cmd8 = '/bin/mkdir %s ' % (final_root)
    os.system(cmd8)

filters = U.get_str(root_to_filters + 'laura_splus2.list', 0)
nf = len(filters)
for ii in range(nf):
    xf, yf = U.get_data(root_to_filters + filters[ii], (0, 1))
    yfr = N.where(yf < 1.0e-4, 0.0000, yf)
    new_file_name = final_root + filters[ii][:-4] + '.cut.res'
    U.put_data(new_file_name, (xf, yfr), '# w T')
    good_photo *= np.less_equal(x_p, ymax)
    # Compress the photometric sample.
    ra_p, dec_p = U.multicompress(good_photo, (ra_p, dec_p))

    # Common area
    area_ra = np.greater_equal(ra_s, min(ra_p))
    area_ra *= np.less_equal(ra_s, max(ra_p))
    area_dec = np.greater_equal(dec_s, min(dec_p))
    area_dec *= np.less_equal(dec_s, max(dec_p))
    good_area = area_ra * area_dec
    # Compressing
    ra_s, dec_s, z_s, r_s = U.multicompress(good_area, (ra_s, dec_s, z_s, r_s))
    # Saving
    redu_spec = cats_names[ggg][:-3] + 'spez.areacommon.cat'
    U.put_data(redu_spec, (ra_s, dec_s, z_s, r_s), '# ra dec zs mr')

    # Here we find the missing galaxies.
    cmd_cross_match = "java -jar /Users/albertomolino/codigos/Stilts/stilts.jar "
    cmd_cross_match += "tmatch2 ifmt1=ascii ifmt2=ascii in1=%s " % (redu_spec)
    cmd_cross_match += "in2=%s out=%s ofmt=ascii matcher=sky values1='$1 $2' " % (
        cats_names[ggg], splus_missing_speczcat)
    cmd_cross_match += "values2='$2 $3' params=3 join=1not2 find=best progress=log"
    os.system(cmd_cross_match)

    # Here we find the detected galaxies.
    cmd_cross_match = "java -jar /Users/albertomolino/codigos/Stilts/stilts.jar "
    cmd_cross_match += "tmatch2 ifmt1=ascii ifmt2=ascii in1=%s " % (redu_spec)
    cmd_cross_match += "in2=%s out=%s ofmt=ascii matcher=sky values1='$1 $2' " % (
__author__ = 'albertomolino'

import sys
sys.path.append('/Users/albertomolino/doctorado/photo/programas/')
import useful as U
import numpy as np
import matplotlib.pyplot as plt

root = '/Users/albertomolino/Postdoc/T80S_Pipeline/Commisioning/'
root += 'S82/Dec2017/data_quality/depth/'
cat = root + 'mr_petro_gals.cat'
outfilename = cat[:-3] + 'depth.cat'

total_area = 1. / 80.
mmin = 14.
mmax = 22.
dm = 0.5
base = np.arange(mmin - (dm / 2.), mmax + (dm / 2.), dm)
base2 = base[:-1] + (base[1] - base[0]) / 2.

mr = U.get_data(cat, 0)
m1, m2 = np.histogram(mr, base, density=False)
plt.clf()
plt.semilogy(base2, m1 * total_area, '-ko')
U.put_data(outfilename, (base2, m1), '# basem mr', '%.2f %.2f')
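# --- Hedged helper (not part of the original code) ---------------------------
# A rough depth estimate from the number-count histogram saved above: the
# magnitude bin where the counts peak before incompleteness sets in. Column
# order follows the '# basem mr' header written by U.put_data; the header line
# is assumed to be the first row of the file.
def depth_from_counts(depth_file):
    import numpy as np
    mag, counts = np.genfromtxt(depth_file, unpack=True, skip_header=1)
    return mag[np.argmax(counts)]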
# Starting loop.
for ii in range(n_mags):
    print 'Processing detections with R<%i: ' % (mag_bins[ii])
    final_pdf_file = final_root_hdf5 + 'master_R%i_PDF.txt' % (mag_bins[ii])
    if not os.path.exists(final_pdf_file):
        for ss in range(n_cats):
            print 'reading file %i/%i ' % (ss + 1, n_cats)
            # 1. Select potential galaxies with R<Ri.
            catalog = global_cats[ss]
            p_gal = U.get_data(catalog, 132)  # Probability of being a Galaxy.
            # Reading P(z) from HDF5 file.
            zz, pr, pb, pg = to.getPDF_by_mag_and_weights(
                hdf5_files[ss], mag_bins[ii], p_gal)
            # Storing P(z)
            if ss < 1:
                base_z = zz
                final_pdf_red = pr
                final_pdf_blue = pb
                final_pdf_global = pg
            else:
                final_pdf_red += pr
                final_pdf_blue += pb
                final_pdf_global += pg
        # Saving P(z|R<Ri)
        U.put_data(final_pdf_file,
                   (zz, final_pdf_red, final_pdf_blue, final_pdf_global),
                   '# z Pr Pb Pg ')

### Here we can do some plots.
import os, sys
sys.path.append('/Users/albertomolino/doctorado/photo/programas/')
import useful as U
import phz_plots as P
import bpz_tools as B

# roots
root_to_seds = '/Users/albertomolino/Desktop/laura/'
final_root = root_to_seds + 'SimMags/'
if not os.path.exists(final_root):
    cmd8 = '/bin/mkdir %s ' % (final_root)
    os.system(cmd8)

# SEDs
seds = U.get_str(root_to_seds + 'seds.list', 0)
nsed = len(seds)

# Filters
# filter_list = 'laura_splus2.list'
filter_list = 'laura_splus3.list'

# Redshift
z = 0.  # stars

# Getting magnitudes
for ii in range(nsed):
    a, b = P.see_sed2resolution_AB(seds[ii], z, filter_list)
    aa = B.AB(a)
    newfile = final_root + 'BPZ.%s.mags.txt' % (seds[ii])
    U.put_data(newfile, (b, aa - aa[7]), '# lambda mag')
def match_spz_sample(cluster):  # TO CHECK
    finalcat1 = catalog2[:-3] + 'CLASH.redu.cat'
    finalcat2 = catalog2[:-3] + 'nada.cat'
    # if not os.path.exists(finalcat1):
    if not os.path.exists(finalcat2):
        # print 'Final catalog does not exist yet.'
        if os.path.exists(catalog1) and os.path.exists(catalog2):
            # It matches up detections to its Spectroscopic Sample.
            # Reading specz catalog.
            print 'Reading info1 before matching...'
            speczsample = catalog1
            idsp, xsp, ysp = U.get_data(speczsample, (0, 3, 4))
            goodsp = U.greater_equal(xsp, 1500) * U.less_equal(xsp, 3500)
            goodsp *= U.greater_equal(ysp, 1500) * U.less_equal(ysp, 3500)
            idsp, xsp, ysp = U.multicompress(goodsp, (idsp, xsp, ysp))
            print 'New dimension for specz catalogue: ', len(xsp)
            # rasp,decsp,xsp,ysp,zsp = get_data(speczsample,(0,1,2,3,4))
            # xsp,ysp,zsp = get_data(speczsample,(1,2,7)) #######
            idsp = U.arange(len(xsp)) + 1
            # idsp = arange(len(rasp))+1

            # Reading ColorPro catalog.
            print 'Reading info2 before matching...'
            idcol, xcol, ycol = U.get_data(catalog2, (0, 3, 4))
            print 'Dimension for input catalogue before compressing: ', len(idcol)
            gsp = U.greater_equal(xcol, 1500) * U.less_equal(xcol, 3500)
            gsp *= U.greater_equal(ycol, 1500) * U.less_equal(ycol, 3500)
            idcol, xcol, ycol = U.multicompress(gsp, (idcol, xcol, ycol))
            print 'Dimension for input catalogue after compressing: ', len(idcol)

            # Using "matching_vects" to match up samples...
            print 'Matching samples....'
            pepe = CT.matching_vects(idcol, xcol, ycol, idsp, xsp, ysp, 1.1)  # We now use X,Y instead of RA,Dec

            # Compressing matches for ColorPro...
            print 'Compressing matches...'
            matchidcol = pepe[:, 0].astype(int)
            gdet_col = U.greater(matchidcol, 0)  # Excluding 0's (non matched detections)
            matchidcol = U.compress(gdet_col, (matchidcol))
            # Compressing matches for Spectroscopic...
            matchidsp = pepe[:, 1].astype(int)
            gdet_spz = U.greater(matchidsp, 0)  # Excluding 0's (non matched detections)
            matchidsp = U.compress(gdet_spz, (matchidsp))
            print 'len(idcol)', len(idcol)
            print 'len(idsp)', len(idsp)
            if len(matchidcol) == len(matchidsp):
                print 'Creating idredu & zsredu '
                print 'Dimension of matchidsp ', len(matchidsp)
                idredu = U.zeros(len(matchidsp))
                idspredu = U.zeros(len(matchidsp))
                for ii in range(len(matchidsp)):
                    colindex = A.id2pos(idcol, matchidcol[ii])  # Position for Index idcol
                    spzindex = A.id2pos(idsp, matchidsp[ii])    # Position for Index idsp
                    idredu[ii] = idcol[colindex]    # ID for ColorPro
                    idspredu[ii] = idsp[spzindex]   # ID for Specz

                # A new smaller catalog will be created containing specz info as an extra column.
                print 'Selecting by rows... '
                finalcat1 = catalog2[:-3] + 'UDF.redu.cat'
                finalcat2 = catalog2[:-3] + 'CLASH.redu.cat'
                U.put_data(catalog2[:-3] + 'idsfrommatch.txt', (idredu, idspredu))
                A.select_rows_bylist_sorted(catalog1, idspredu, finalcat1)
                A.select_rows_bylist_sorted(catalog2, idredu, finalcat2)
list_cali_columns = root2cats + 'calicolumns.list'
if not os.path.exists(list_cali_columns):
    cmd = 'ls %s*.auto_cali.columns > %scalicolumns.list' % (root2cats, root2cats)
    os.system(cmd)

# Reading list
if os.path.exists(list_cali_columns):
    cols = U.get_str(list_cali_columns, 0)
    n_c = len(cols)
    zp_val = N.zeros((12, n_c), float)
    zp_final = N.zeros(12)
    for ss in range(n_c):
        vars, evars, posref, zpe, zpc = A.get_usefulcolumns(cols[ss])
        # print zpc[:]
        # pausa = raw_input('paused')
        for ii in range(12):
            zp_val[ii, ss] = zpc[ii]
    for hh in range(12):
        zp_final[hh] = U.mean_robust(zp_val[hh, :])

    # Saving results
    master_cali_columns = root2cats + 'master_calicolumns.txt'
    U.put_data(master_cali_columns, (N.arange(12) + 1, zp_final), '# Filter ZPc')
else:
    print 'File %s does not exist!' % (list_cali_columns)
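# --- Hedged helper (not part of the original code) ---------------------------
# Applies the averaged zero-point corrections stored in 'master_calicolumns.txt'
# (one value per filter, in the same order) to a vector of magnitudes. The sign
# convention (m + zp versus m - zp) depends on how the .columns files define the
# offsets, so it is left as an explicit argument rather than assumed.
def apply_master_zeropoints(mags, master_cali_file, sign=+1.0):
    import numpy as np
    filt_id, zp_corr = np.genfromtxt(master_cali_file, unpack=True, skip_header=1)
    mags = np.asarray(mags, dtype=float)
    if len(mags) != len(zp_corr):
        raise ValueError('Expected %i magnitudes, got %i' % (len(zp_corr), len(mags)))
    return mags + sign * zp_corr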