# Generic imports. NOTE: the project-internal helper modules used below
# (U, coeio, alh, A, ap, CT) and the base paths 'root' / 'finalroot' are
# assumed to be defined/imported elsewhere in the package.
import os
import sys
import numpy as N
import matplotlib.pyplot as plt
import pyfits
from astropy.io import fits


def get_SExt_assoc_files(pepe):
    """
    It creates the associated catalogues with the detections
    to be included in the analysis. (The 'pepe' argument is unused.)
    """
    for ii in range(7):
        for jj in range(4):
            for kk in range(4):
                cat = root + '/f0%i/alhambra.F0%iP0%iC0%i.ColorProBPZ.cat' % (
                    ii + 2, ii + 2, jj + 1, kk + 1)
                if os.path.exists(cat):
                    ids = U.get_str(cat, 0)
                    x, y, ar, ra, dec, mm = U.get_data(cat, (6, 7, 8, 4, 5, 65))
                    nameout = root + '/f0%i/alhambra.F0%iP0%iC0%i.ColorProBPZ.coo' % (
                        ii + 2, ii + 2, jj + 1, kk + 1)
                    # Keep only sources brighter than F814W = 23 mag.
                    good = U.less_equal(abs(mm), 23.0)
                    ids = U.compress(good, ids)
                    x, y, ar, ra, dec, mm = U.multicompress(
                        good, (x, y, ar, ra, dec, mm))
                    ne = len(x)
                    fileout = open(nameout, 'w')
                    fileout.write('# X Y AREA ID RA DEC F814W \n')
                    print 'Analyzing ', cat
                    for ss in range(ne):
                        linea = '%.3f %.3f %i %s %f %f %.2f \n' % (
                            x[ss], y[ss], ar[ss], ids[ss], ra[ss], dec[ss], mm[ss])
                        fileout.write(linea)
                    fileout.close()

def remove_detections_bysegmmaps(field, pointing, ccd):
    """
    It uses the segmentation maps to remove fake detections
    when masking out saturated stars.
    ----
    import alhambra_fakedets as AF
    AF.remove_detections_bysegmmaps(2,1,1)
    """
    root = '/Volumes/amb22/catalogos/reduction_v4f/f0%i/' % (field)
    root2images = '/Volumes/amb22/imagenes/f0%i/' % (field)
    catalog = root + 'f0%ip0%i_colorproext_%i_ISO.irmsF814W.free.cat' % (
        field, pointing, ccd)
    ids, x, y, area = U.get_data(catalog, (0, 3, 4, 5))
    dim = len(ids)
    valor = U.zeros(dim)
    ima1 = root2images + 'f0%ip0%i_F814W_%i.swp.seg.fits' % (field, pointing, ccd)
    ima2 = root2images + 'f0%ip0%i_F814W_%i.swp.segnomask.fits' % (
        field, pointing, ccd)
    segm1 = pyfits.open(ima1)[0].data
    segm2 = pyfits.open(ima2)[0].data
    # NOTE: the original loop body referenced undefined variables
    # ('datos', 'size', 'perc'), apparently copy-pasted from
    # remove_fakeabsorptions_F814W. The reconstruction below follows
    # the docstring: a detection is kept only if it is present in both
    # the masked and the unmasked segmentation maps.
    for ii in range(dim):
        xo = int(round(x[ii]))
        yo = int(round(y[ii]))
        if segm1[yo, xo] > 0 and segm2[yo, xo] > 0:
            valor[ii] = 1
    # Defining the sample to be kept.
    good = U.greater(valor, 0)
    idr = U.compress(good, ids)
    dim2 = len(idr)
    print 'Dimensions: Original: %i, Final: %i, Excluded: %i detections. ' % (
        dim, dim2, dim - dim2)
    finalcat = root + 'f0%ip0%i_colorproext_%i_ISO.irmsF814W.free.cat' % (
        field, pointing, ccd)
    data1 = coeio.loaddata(catalog)   # Loading the whole catalog content.
    head = coeio.loadheader(catalog)
    data2 = data1[good, :]
    # Saving & creating a new catalog.
    coeio.savedata(data2, finalcat, dir="", header=head)

def remove_fakeabsorptions_F814W(field, pointing, ccd):
    """
    Using the rms-weight images, it gets rid of detections
    with irms_F814W < 0.5.
    -------------------------------------------
    import alhambra_fakedets as AF
    AF.remove_fakeabsorptions_F814W(2,1,1)
    """
    root = '/Volumes/amb22/catalogos/reduction_v4f/f0%i/' % (field)
    catalog = root + 'f0%ip0%i_colorproext_%i_ISO.cat' % (field, pointing, ccd)
    ids, x, y, area = U.get_data(catalog, (0, 3, 4, 5))
    dim = len(ids)
    perc = U.zeros(dim)
    # Opening the F814W weight image.
    ima = alh.alhambra_invrmsimagelist(field, pointing, ccd)[-1]
    datos = pyfits.open(ima)[0].data
    for ii in range(dim):
        if area[ii] > 1:
            size = int(round(U.sqrt(area[ii]) / 2.))
            xo = x[ii]
            yo = y[ii]
            # Mean inverse-rms weight inside a box matched to the source size.
            dimx = U.shape(datos[yo - size:yo + size, xo - size:xo + size])[1]
            dimy = U.shape(datos[yo - size:yo + size, xo - size:xo + size])[0]
            perc[ii] = (datos[yo - size:yo + size, xo - size:xo + size].sum() /
                        (dimx * dimy * 1.))
    # Defining the sample to be kept.
    good = U.greater(perc, 0.5)
    idr = U.compress(good, ids)
    dim2 = len(idr)
    print 'Dimensions: Original: %i, Final: %i, Excluded: %i detections. ' % (
        dim, dim2, dim - dim2)
    finalcat = root + 'f0%ip0%i_colorproext_%i_ISO.irmsF814W.free.cat' % (
        field, pointing, ccd)
    data1 = coeio.loaddata(catalog)   # Loading the whole catalog content.
    head = coeio.loadheader(catalog)
    data2 = data1[good, :]
    # Saving & creating a new catalog.
    coeio.savedata(data2, finalcat, dir="", header=head)

def match_spz_sample(cluster):   # TO CHECK
    # NOTE: 'catalog1' (spec-z sample) and 'catalog2' (ColorPro catalog)
    # are assumed to be defined at module level; 'cluster' is unused here.
    finalcat1 = catalog2[:-3] + 'CLASH.redu.cat'
    finalcat2 = catalog2[:-3] + 'nada.cat'
    # if not os.path.exists(finalcat1):
    if not os.path.exists(finalcat2):
        # print 'Final catalog does not exist yet.'
        if os.path.exists(catalog1) and os.path.exists(catalog2):
            # It matches up detections to the spectroscopic sample.
            # Reading specz catalog.
            print 'Reading info1 before matching...'
            speczsample = catalog1
            idsp, xsp, ysp = U.get_data(speczsample, (0, 3, 4))
            goodsp = U.greater_equal(xsp, 1500) * U.less_equal(xsp, 3500)
            goodsp *= U.greater_equal(ysp, 1500) * U.less_equal(ysp, 3500)
            idsp, xsp, ysp = U.multicompress(goodsp, (idsp, xsp, ysp))
            print 'New dimension for specz catalogue: ', len(xsp)
            # rasp,decsp,xsp,ysp,zsp = get_data(speczsample,(0,1,2,3,4))
            # xsp,ysp,zsp = get_data(speczsample,(1,2,7))
            idsp = U.arange(len(xsp)) + 1   # IDs are re-generated after the cut.
            # idsp = arange(len(rasp))+1
            # Reading ColorPro catalog.
            print 'Reading info2 before matching...'
            idcol, xcol, ycol = U.get_data(catalog2, (0, 3, 4))
            print 'Dimension for input catalogue before compressing: ', len(idcol)
            gsp = U.greater_equal(xcol, 1500) * U.less_equal(xcol, 3500)
            gsp *= U.greater_equal(ycol, 1500) * U.less_equal(ycol, 3500)
            idcol, xcol, ycol = U.multicompress(gsp, (idcol, xcol, ycol))
            print 'Dimension for input catalogue after compressing: ', len(idcol)
            # Using "matching_vects" to match up both samples,
            # in X,Y (pixel) space instead of RA,Dec.
            print 'Matching samples....'
            pepe = CT.matching_vects(idcol, xcol, ycol, idsp, xsp, ysp, 1.1)
            # Compressing matches for ColorPro...
            print 'Compressing matches...'
            matchidcol = pepe[:, 0].astype(int)
            # Excluding 0's (non-matched detections).
            gdet_col = U.greater(matchidcol, 0)
            matchidcol = U.compress(gdet_col, matchidcol)
            # Compressing matches for Spectroscopic...
            matchidsp = pepe[:, 1].astype(int)
            gdet_spz = U.greater(matchidsp, 0)
            matchidsp = U.compress(gdet_spz, matchidsp)
            print 'len(idcol)', len(idcol)
            print 'len(idsp)', len(idsp)
            if len(matchidcol) == len(matchidsp):
                print 'Creating idredu & zsredu '
                print 'Dimension of matchidsp ', len(matchidsp)
                idredu = U.zeros(len(matchidsp))
                idspredu = U.zeros(len(matchidsp))
                for ii in range(len(matchidsp)):
                    colindex = A.id2pos(idcol, matchidcol[ii])  # Position for index idcol
                    spzindex = A.id2pos(idsp, matchidsp[ii])    # Position for index idsp
                    idredu[ii] = idcol[colindex]    # ID for ColorPro
                    idspredu[ii] = idsp[spzindex]   # ID for Specz
                # A new smaller catalog will be created containing
                # specz info as an extra column.
                print 'Selecting by rows... '
                finalcat1 = catalog2[:-3] + 'UDF.redu.cat'
                finalcat2 = catalog2[:-3] + 'CLASH.redu.cat'
                U.put_data(catalog2[:-3] + 'idsfrommatch.txt', (idredu, idspredu))
                A.select_rows_bylist_sorted(catalog1, idspredu, finalcat1)
                A.select_rows_bylist_sorted(catalog2, idredu, finalcat2)

def check_rms_SPLUS(segmentation, photometry, minrad, maxrad, totnum, plots, verbose):
    """
    It describes the photometric noise in images by launching apertures
    in blank areas and describing the area vs rms dependency.

    Philosophy: Several apertures (of random radius and position) are
    placed over blank areas of a 'segmentation' image to estimate the
    real photometric error of an input 'photometry' image.
    --------
    segmentation: segmentation-like image (SExtractor output) used to
                  select 'blank' areas.
    photometry:   scientific image over which to estimate the real
                  photometric errors.
    minrad,maxrad: minimum & maximum radius of the apertures (pixels).
    totnum:       total number of apertures.
    area,rms:     final outputs.
    ---------
    USAGE:
    ---------
    import script_dcluster_tools as to
    segmentation = 'f814.seg.fits'
    photometry = 'f814.fits'
    apertures,finalbackg,finalmeans,fluxes = to.check_rms_SPLUS(
            segmentation,photometry,1,21,5.0e+04,'yes',False)
    ----
    """
    if not os.path.exists(segmentation):
        print
        print 'Image %s does not exist!' % (segmentation)
        sys.exit()
    if not os.path.exists(photometry):
        print
        print 'Image %s does not exist!' % (photometry)
        sys.exit()
    if verbose == True:
        verba = 1
    else:
        verba = 0

    # Reading data from images ('.fz' images keep their data in extension 1).
    if photometry[-2:] == 'fz':
        photima = fits.open(photometry)[1].data
    else:
        photima = fits.open(photometry)[0].data
    if segmentation[-2:] == 'fz':
        segima = fits.open(segmentation)[1].data
    else:
        segima = fits.open(segmentation)[0].data

    # Final root where to save the data.
    final_path = os.path.dirname(photometry)
    base_name = os.path.basename(photometry)
    len_extension = len(base_name.split('.')[-1]) + 1
    file_root = final_path + '/Apertures/%s' % (base_name[:-len_extension])

    # Physical limits (pixel) for the segmentation image.
    xGC = N.shape(segima)[1] / 2.
    yGC = N.shape(segima)[0] / 2.
    # For CLASH, due to the rotating frames, a maximum radius is set.
    min_maxim_radius = min([xGC, yGC])
    radialmax = min_maxim_radius  # Maximum radial distance to the center [pixels].
    # Here the random positions (X,Y) are limited in range.
    minpix_X = 1500
    maxpix_X = N.shape(segima)[1] - 1500
    minpix_Y = 1500
    maxpix_Y = N.shape(segima)[0] - 1500
    binhisto = 100
    # Final vectors with positions.
    x_values = N.arange(minpix_X, maxpix_X, 1)
    y_values = N.arange(minpix_Y, maxpix_Y, 1)
    # Total dimension for the input variables.
    maxdim = int(20 * (10. + (4 * (((maxrad) * (maxrad + 1)) / 2.))))
    # Defining other variables.
    totnum = int(totnum)  # it may be passed as a float (e.g. 5.0e+04)
    XX = N.zeros((maxdim), float)
    YY = N.zeros((maxdim), float)
    XO = N.zeros((maxdim), float)
    YO = N.zeros((maxdim), float)
    RR = N.zeros(totnum)
    # Range of apertures to be launched.
    apertures = N.arange(minrad, maxrad, 1)
    n_apertures = len(apertures)
    # Length definition for the final outputs.
    finalbackg = N.zeros(n_apertures, 'float64')
    finalmeans = N.zeros(n_apertures, 'float64')
    gausshisto = N.zeros((binhisto, 2 * n_apertures), dtype='float64')

    # Starting the analysis.
    mmm = 0
    for app in range(n_apertures):
        raper = apertures[app]
        # minrad = maxrad = raper
        if verba:
            print 'Iteration %i out of %i ' % (app + 1, n_apertures)
            print '-------------------------'
        # New temporal variables (erased in every loop).
        fluxes = N.zeros(totnum, dtype='float64')
        sbackg = N.zeros(totnum, dtype='float64')
        ff = -1
        # Now it runs until it gets "totnum" measurements.
        hh = -1
        contador = 0
        while contador < (totnum):
            kk = -1
            # Random x,y position at which to place the aperture.
            xo = N.random.random_integers(minpix_X, maxpix_X)
            yo = N.random.random_integers(minpix_Y, maxpix_Y)
            if verba:
                print 'xo,yo', xo, yo
            # Corresponding radial distance to the center.
            tempradii = N.sqrt((xo - xGC) * (xo - xGC) + (yo - yGC) * (yo - yGC))
            if tempradii < (radialmax + 1):
                # Now the shape (pixel grid) of the aperture is computed.
                Xr = N.zeros((raper * raper), float)
                Yr = N.zeros((raper * raper), float)
                for ii in range(raper):
                    xvalue = xo + ii
                    for jj in range(raper):
                        kk += 1
                        yvalue = yo + jj
                        Xr[kk] = xvalue
                        Yr[kk] = yvalue
                # Here it checks the blankness of the aperture.
                tempflux = area2noise(segima, photima, Xr, Yr)
                # NOTE: in the original, 'hh' was increased after the
                # assignment, so the first measurement went to fluxes[-1];
                # incrementing first fixes the indexing.
                if raper < 2:
                    if tempflux != -999.:
                        hh += 1
                        fluxes[hh] = tempflux
                        if verba:
                            print 'Adding flux: ', tempflux
                        contador += 1
                if raper > 1:
                    if tempflux[0] != -999.:
                        hh += 1
                        fluxes[hh] = tempflux.sum() - (finalmeans[0] * raper ** 2)  # why?
                        contador += 1
                        if verba:
                            print 'hh', hh

        # Computing values from the sample (5-sigma clipping).
        sigfluxes = U.std_robust(fluxes)
        good = U.less_equal(abs(fluxes), 5. * sigfluxes)
        fluxes = U.compress(good, fluxes)
        # Storing the background dispersion & mean inside that aperture.
        finalbackg[app] = U.std(fluxes)
        finalmeans[app] = U.mean(fluxes)

        if plots == 'yes':
            plt.figure(1, figsize=(7, 6), dpi=70, facecolor='w', edgecolor='k')
            plt.clf()
            # va1,va2 = N.histogram(fluxes,binhisto,normed=1)
            va1, va2, va3 = plt.hist(fluxes, binhisto, normed=1,
                                     facecolor='black', alpha=0.5, linewidth=1.5)
            baseh = va2[0:-1] + ((va2[1] - va2[0]) / 2.)
            nele = len(fluxes)
            mu = U.mean(fluxes)
            sig = U.std(fluxes)
            # yh = U.normpdf(va2,mu,sig)
            # plt.plot(va2,yh,'r-',linewidth=3,alpha=0.7)
            mu = U.mean_robust(fluxes)
            sig = U.std_robust(fluxes)
            # yh = U.normpdf(va2,mu,sig)
            # plt.plot(va2,yh,'r--',linewidth=3,alpha=0.7)
            plt.legend([('MEAN: %.4f ''\n'' RMS: %.4f ' % (mu, sig)),
                        'Aperture: %i $pix$' % (raper * raper)],
                       numpoints=1, loc='upper right', fontsize=14)
            plt.xlim(mu - 4 * sig, mu + 4 * sig)
            plt.xlabel('Aperture Flux [ADU]', size=20)
            plt.ylabel('Number Counts', size=20)
            plt.xticks(fontsize=17), plt.yticks(fontsize=17)
            nameima = photometry.split('/')[-1:][0]
            plt.ylim()
            figure2name = file_root + '_hfaper_%i.png' % (raper)
            plt.savefig(figure2name, dpi=150)
            plt.close()
            # Here it saves the info from the histogram. (Kept inside
            # the plotting block since 'baseh' & 'va1' are defined there.)
            ind1 = app * 2
            ind2 = app * 2 + 1
            if verba:
                print 'ind1,ind2', ind1, ind2
            gausshisto[:, ind1] = baseh  # va2
            gausshisto[:, ind2] = va1    # yh

    # At this point all apertures have been computed.
    # Now the sigma-vs-area dependency is represented.
    sigmas = finalbackg  # -abs(finalmeans)
    aa, bb = sigmafit(sigmas, sigmas[0], apertures)
    if plots == 'yes':
        plt.figure(2, figsize=(7, 6), dpi=80, facecolor='w', edgecolor='k')
        plt.clf()
        plt.plot(apertures, sigmas[0] * apertures * (aa + bb * apertures), 'k-',
                 apertures, apertures * sigmas[0], 'r-')
        plt.legend([('%.3f$\sqrt{N}$ (%.3f + %.3f$\sqrt{N}$)' % (sigmas[0], aa, bb)),
                    '%.3f$\sqrt{N}$ | Poisson Distribution ' % (sigmas[0])],
                   numpoints=1, loc='upper left')
        plt.plot(apertures, sigmas, 'ko')
        plt.xlim(0., max(apertures) + 1)
        plt.xlabel('$\sqrt{N}$', size=18)
        plt.ylabel('$\sigma$', size=20)
        plt.xticks(fontsize=15)
        plt.yticks(fontsize=15)
        nick2 = photometry.split('/')[-1:][0]
        figure1name = file_root + '_apersigma.png'
        plt.savefig(figure1name, dpi=150)
        plt.close()

    # Saving outputs in ASCII files.
    fileout = file_root + '.apertures.txt'
    header = '# AREA[pix] RMS(std[counts]) MEAN(mean_robust[counts])'
    U.put_data(fileout, (apertures, sigmas, finalmeans), header)
    # print 'Saving data... in %s' %(fileout)
    # Returning the outputs announced in the docstring.
    return apertures, finalbackg, finalmeans, fluxes

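# ---------------------------------------------------------------------
# Hypothetical helpers (NOT part of the original module): 'area2noise'
# and 'sigmafit' are called by check_rms_SPLUS above but never defined
# in this file. The minimal sketches below are reconstructions inferred
# from how they are used; the real implementations may differ.
def area2noise(segima, photima, Xr, Yr):
    """Return the photometric pixel values inside the aperture defined
    by (Xr, Yr) when it falls on blank segmentation pixels; a -999.
    flag otherwise (assumed behaviour)."""
    xi = Xr.astype(int)
    yi = Yr.astype(int)
    if N.any(segima[yi, xi] != 0):
        return N.array([-999.])  # aperture touches a detected source
    return photima[yi, xi]       # blank-sky pixel values


def sigmafit(sigmas, sigma1, apertures):
    """Fit sigma(N) = sigma1 * N * (a + b * N), the model plotted in
    check_rms_SPLUS, and return (a, b). Assumes scipy is available."""
    from scipy.optimize import curve_fit
    model = lambda nn, a, b: sigma1 * nn * (a + b * nn)
    (a, b), cov = curve_fit(model, apertures.astype(float), sigmas)
    return a, b
# ---------------------------------------------------------------------
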
def get_alhambra_GOLD(field, pointing, ccd):
    """
    import alhambragold as alhgold
    alhgold.get_alhambra_GOLD(2,1,1)
    """
    root_catalogs = '/Volumes/amb22/catalogos/reduction_v4f/f0%i/' % (field)
    root_gold = '/Volumes/amb22/catalogos/reduction_v4f/GOLD/'
    catalog = root_catalogs + 'alhambra.F0%iP0%iC0%i.ColorProBPZ.cat' % (
        field, pointing, ccd)
    if os.path.exists(catalog):
        data1 = coeio.loaddata(catalog)    # Loading the whole catalog content.
        head1 = coeio.loadheader(catalog)  # Loading the original header.
        nc1 = len(data1.T)
        dim1 = len(data1[:, 0])
        nh = len(head1)
        # Final catalog.
        catout = root_gold + 'alhambra.gold.F0%iP0%iC0%i.ColorProBPZ.cat' % (
            field, pointing, ccd)
        outfile = open(catout, 'w')
        # Reducing the length of the catalogs according to input ids.
        ids = U.get_str(catalog, 0)
        mo = U.get_data(catalog, 65)
        cond1 = U.less(mo, 23.000)
        data2 = data1[cond1, :]
        nraws = U.shape(data2)[0]
        ncols = U.shape(data2)[1]
        # Setting the IDs to their final values (including F814W+field+pointing+ccd).
        finalids = alh.getalhambrafinalids(field, pointing, ccd, 'ISO')
        finalids2 = U.compress(cond1, finalids)
        # Restoring header...
        for ii in range(nh):
            outfile.write('%s \n' % (head1[ii]))
        # Here it defines the format to be used (one conversion per column).
        formato = '%s %i %i %i %.4f %.4f %.3f %.3f %i %.2f %.2f %.4f %.3f %.3f %.1f %.2f %.3f %.2f %i '
        formato += '%.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f '
        formato += '%.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f '
        formato += '%.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f '
        formato += '%.3f %.3f %.3f '
        formato += '%i %i %.3f %i %.2f %i '
        formato += '%.3f %.3f %.3f %.3f %.3f %.3f %.3f '
        formato += '%.3f %.3f %.3f %.3f '
        formato += '%.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f '
        formato += '%.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f '
        formato += '%.3f %.3f %.3f %.3f %i %i '
        form = formato.split()
        for jj in range(nraws):
            for ss in range(ncols):
                goodform = form[ss] + ' '
                if ss == 0:
                    outfile.write(goodform % (int(finalids2[jj])))
                else:
                    outfile.write(goodform % (data2[jj, ss]))
            outfile.write(' \n')
        outfile.close()

def appending_ids2catalogues(field, pointing, ccd):
    """
    import alhambra_3arcs as A3
    A3.appending_ids2catalogues(2,1,1)
    """
    catalhambra = root + 'f0%i/alhambra.F0%iP0%iC0%i.ColorProBPZ.cat' % (
        field, field, pointing, ccd)
    idalh = U.get_str(catalhambra, 0)
    idalh2 = U.arange(len(idalh)) + 1
    xalh, yalh = U.get_data(catalhambra, (6, 7))
    cat3arcs = finalroot + 'f0%i/alhambra.f0%ip0%ic0%i.3arcs.cat' % (
        field, field, pointing, ccd)
    id3arcs, x3arcs, y3arcs = U.get_data(cat3arcs, (0, 3, 4))
    print len(id3arcs)

    matchfile = cat3arcs[:-3] + 'idsfrommatch.txt'
    if not os.path.exists(matchfile):
        idcol = idalh2
        xcol = xalh
        ycol = yalh
        idsp = id3arcs
        xsp = x3arcs
        ysp = y3arcs
        pepe = CT.matching_vects(idcol, xcol, ycol, idsp, xsp, ysp, 5)
        # Compressing matches for ColorPro...
        print 'Compressing matches...'
        matchidcol = pepe[:, 0].astype(int)
        # Excluding 0's (non-matched detections).
        gdet_col = U.greater(matchidcol, 0)
        matchidcol = U.compress(gdet_col, matchidcol)
        # Compressing matches for the 3-arcsec catalogue...
        matchidsp = pepe[:, 1].astype(int)
        gdet_spz = U.greater(matchidsp, 0)
        matchidsp = U.compress(gdet_spz, matchidsp)
        print 'len(idcol)', len(idcol)
        print 'len(idsp)', len(idsp)
        if len(matchidcol) == len(matchidsp):
            print 'Creating idredu & zsredu '
            print 'Dimension of matchidsp ', len(matchidsp)
            idredu = U.zeros(len(matchidsp))
            idspredu = U.zeros(len(matchidsp))
            for ii in range(len(matchidsp)):
                colindex = A.id2pos(idcol, matchidcol[ii])  # Position for index idcol
                spzindex = A.id2pos(idsp, matchidsp[ii])    # Position for index idsp
                idredu[ii] = idcol[colindex]     # ID for ColorPro
                idspredu[ii] = idsp[spzindex]    # ID for the 3-arcsec catalogue
            matchfile = cat3arcs[:-3] + 'idsfrommatch.txt'
            U.put_data(matchfile, (idredu, idspredu))

    if os.path.exists(matchfile):
        pepa = open(matchfile[:-3] + 'bis.cat', 'w')
        idredu, idspredu = U.get_data(matchfile, (0, 1))
        i11 = idredu.astype(int) - 1
        i22 = idspredu.astype(int)
        lista = []
        for ii in range(len(i11)):
            lista.append(idalh[i11[ii]])
            pepa.write('%s %s \n' % (idalh[i11[ii]], i22[ii]))
        pepa.close()

        finalfinal = cat3arcs[:-3] + 'final.cat'
        if os.path.exists(finalfinal):
            A.deletefile(finalfinal)
        if not os.path.exists(finalfinal):
            print 'Preparing ', finalfinal
            idsa = U.get_str(matchfile[:-3] + 'bis.cat', 0)
            append_IDs2_3arcs_catalogues(cat3arcs, idsa)

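# ---------------------------------------------------------------------
# Hypothetical helper (NOT part of the original module):
# 'append_IDs2_3arcs_catalogues' is called above but not defined in this
# file. A minimal sketch inferred from its usage: it writes a copy of
# the 3-arcsec catalogue with the matched ALHAMBRA ID appended as a
# last column. The real implementation may differ.
def append_IDs2_3arcs_catalogues(cat3arcs, idsa):
    outname = cat3arcs[:-3] + 'final.cat'
    out = open(outname, 'w')
    kk = 0
    for line in open(cat3arcs):
        if line.startswith('#'):
            out.write(line)  # header lines are copied verbatim
        else:
            out.write(line.rstrip('\n') + '  %s \n' % (idsa[kk]))
            kk += 1
    out.close()
# ---------------------------------------------------------------------
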
def flagging_dobledetections(cat1, cat2):
    """
    This serves to append an extra column to both input catalogs
    indicating whether a detection was repeated and had the lowest
    S/N of the two. Sources flagged as 1 are the detections to be
    excluded when combining both catalogs into a single one.
    --------
    import alhambra_overlap as alhov
    cat1 = '/Volumes/amb22/catalogos/reduction_v4e/f02/f02p02_colorproext_1_ISO.cat'
    cat2 = '/Volumes/amb22/catalogos/reduction_v4e/f02/f02p01_colorproext_1_ISO.cat'
    alhov.flagging_dobledetections(cat1,cat2)
    """
    id1, ra1, dec1, x1, y1, s2n1 = U.get_data(cat1, (0, 1, 2, 3, 4, 14))
    id2, ra2, dec2, x2, y2, s2n2 = U.get_data(cat2, (0, 1, 2, 3, 4, 14))
    ne1 = len(id1)
    ne2 = len(id2)
    # Keeping only the sources inside the overlapping region.
    g1 = U.greater_equal(ra1, min(ra2))
    g2 = U.less_equal(ra2, max(ra1))
    id1r, ra1r, dec1r, x1r, y1r, s2n1r = U.multicompress(g1, (id1, ra1, dec1, x1, y1, s2n1))
    id2r, ra2r, dec2r, x2r, y2r, s2n2r = U.multicompress(g2, (id2, ra2, dec2, x2, y2, s2n2))
    flag1 = U.zeros(ne1)
    flag2 = U.zeros(ne2)
    dim1 = len(id1r)
    dim2 = len(id2r)
    print 'dim1,dim2', dim1, dim2
    if dim1 > 0 and dim2 > 0:
        print 'Matching samples....'
        # Matching is done in RA,Dec with a 0.000312 deg (~1.1 arcsec) radius.
        pepe = matching_vects_ddet(id1r, ra1r, dec1r, id2r, ra2r, dec2r, 0.000312)
        # Purging null elements.
        matchidcol = pepe[:, 0].astype(int)
        # Excluding 0's (non-matched detections).
        good_det1 = U.greater(matchidcol, 0)
        matchidcol = U.compress(good_det1, matchidcol)
        matchidsp = pepe[:, 1].astype(int)
        good_det2 = U.greater(matchidsp, 0)
        matchidsp = U.compress(good_det2, matchidsp)
        if len(matchidcol) == len(matchidsp) and len(matchidcol) > 0:
            newdim = len(matchidsp)
            print 'Dimension of matching', newdim
            idr1 = U.zeros(newdim)
            idr2 = U.zeros(newdim)
            s2nr1 = U.zeros(newdim)
            s2nr2 = U.zeros(newdim)
            for ii in range(newdim):
                idr1index = ap.id2pos(id1r, matchidcol[ii])
                idr2index = ap.id2pos(id2r, matchidsp[ii])
                idr1[ii] = id1r[idr1index]
                s2nr1[ii] = s2n1r[idr1index]
                idr2[ii] = id2r[idr2index]
                s2nr2[ii] = s2n2r[idr2index]
            # Select/Purge detections according to their S/N.
            marcador1 = U.zeros(newdim)
            marcador2 = U.zeros(newdim)
            for ss in range(newdim):
                cociente = s2nr1[ss] / s2nr2[ss]
                if cociente >= 1.:
                    marcador1[ss] = 1.
                else:
                    marcador2[ss] = 1.
            cond1 = U.less(marcador1, 1)
            cond2 = U.less(marcador2, 1)
            idr1b = U.compress(cond1, idr1)   # cat1 sources with the lower S/N
            dim1rr = len(idr1b)
            idr2b = U.compress(cond2, idr2)   # cat2 sources with the lower S/N
            dim2rr = len(idr2b)
            # Flagging the lower-S/N member of every matched pair.
            for hh1 in range(ne1):
                if id1[hh1] in idr1b:
                    flag1[hh1] = 1
            for hh2 in range(ne2):
                if id2[hh2] in idr2b:
                    flag2[hh2] = 1
            # New catalogs are created containing the flag as an extra column.
            outcat1 = ap.decapfile(cat1) + '.doubledetect.cat'
            outcat2 = ap.decapfile(cat2) + '.doubledetect.cat'
            print 'outcat1', outcat1
            print 'outcat2', outcat2
            ap.appendcol(cat1, flag1, 'Flag2Detected', outcat1)
            ap.appendcol(cat2, flag2, 'Flag2Detected', outcat2)
            # Renaming files.
            ap.renamefile(cat1, cat1 + '.old.cat')
            if not os.path.exists(cat1):
                ap.renamefile(outcat1, cat1)
            ap.renamefile(cat2, cat2 + '.old.cat')
            if not os.path.exists(cat2):
                ap.renamefile(outcat2, cat2)
    else:
        print 'No common sources between the catalogs'
        # The (all-zero) flag column is still appended for consistency.
        outcat1 = ap.decapfile(cat1) + '.doubledetect.cat'
        outcat2 = ap.decapfile(cat2) + '.doubledetect.cat'
        print 'outcat1', outcat1
        print 'outcat2', outcat2
        ap.appendcol(cat1, flag1 * 0, 'Flag2Detected', outcat1)
        ap.appendcol(cat2, flag2 * 0, 'Flag2Detected', outcat2)
        # Renaming files.
        ap.renamefile(cat1, cat1 + '.old.cat')
        if not os.path.exists(cat1):
            ap.renamefile(outcat1, cat1)
        ap.renamefile(cat2, cat2 + '.old.cat')
        if not os.path.exists(cat2):
            ap.renamefile(outcat2, cat2)

def purging_dobledetections(cat1, cat2):
    """
    It removes repeated detections (keeping the highest-S/N one of
    each pair) from two overlapping catalogs.
    --------
    import alhambra_overlap
    from alhambra_overlap import *
    cat1 = '/Volumes/amb22/catalogos/reduction_v4e/f02/f02p02_colorproext_1_ISO.cat'
    cat2 = '/Volumes/amb22/catalogos/reduction_v4e/f02/f02p01_colorproext_1_ISO.cat'
    purging_dobledetections(cat1,cat2)
    """
    id1, ra1, dec1, x1, y1, s2n1 = U.get_data(cat1, (0, 1, 2, 3, 4, 14))
    id2, ra2, dec2, x2, y2, s2n2 = U.get_data(cat2, (0, 1, 2, 3, 4, 14))
    ne1 = len(id1)
    ne2 = len(id2)
    # Keeping only the sources inside the overlapping region.
    g1 = U.greater_equal(ra1, min(ra2))
    g2 = U.less_equal(ra2, max(ra1))
    id1r, ra1r, dec1r, x1r, y1r, s2n1r = U.multicompress(g1, (id1, ra1, dec1, x1, y1, s2n1))
    id2r, ra2r, dec2r, x2r, y2r, s2n2r = U.multicompress(g2, (id2, ra2, dec2, x2, y2, s2n2))
    dim1 = len(id1r)
    dim2 = len(id2r)
    print 'dim1,dim2', dim1, dim2
    if dim1 > 0 and dim2 > 0:
        print 'Matching samples....'
        # Matching is done in RA,Dec with a 0.000312 deg (~1.1 arcsec) radius.
        pepe = matching_vects_ddet(id1r, ra1r, dec1r, id2r, ra2r, dec2r, 0.000312)
        # Purging null elements.
        matchidcol = pepe[:, 0].astype(int)
        good_det1 = U.greater(matchidcol, 0)  # Excluding 0's (non-matched detections)
        matchidcol = U.compress(good_det1, matchidcol)
        matchidsp = pepe[:, 1].astype(int)
        good_det2 = U.greater(matchidsp, 0)
        matchidsp = U.compress(good_det2, matchidsp)
        if len(matchidcol) == len(matchidsp) and len(matchidcol) > 0:
            newdim = len(matchidsp)
            print 'Dimension of matching', newdim
            idr1 = U.zeros(newdim)
            idr2 = U.zeros(newdim)
            s2nr1 = U.zeros(newdim)
            s2nr2 = U.zeros(newdim)
            for ii in range(newdim):
                idr1index = ap.id2pos(id1r, matchidcol[ii])
                idr2index = ap.id2pos(id2r, matchidsp[ii])
                idr1[ii] = id1r[idr1index]
                s2nr1[ii] = s2n1r[idr1index]
                idr2[ii] = id2r[idr2index]
                s2nr2[ii] = s2n2r[idr2index]
            # Select/Purge detections according to their S/N.
            marcador1 = U.zeros(newdim)
            marcador2 = U.zeros(newdim)
            for ss in range(newdim):
                cociente = s2nr1[ss] / s2nr2[ss]
                if cociente >= 1.:
                    marcador1[ss] = 1.
                else:
                    marcador2[ss] = 1.
            cond1 = U.less(marcador1, 1)
            cond2 = U.less(marcador2, 1)
            idr1b = U.compress(cond1, idr1)
            dim1rr = len(idr1b)
            idr2b = U.compress(cond2, idr2)
            dim2rr = len(idr2b)
            print ''
            print 'Number of detections to be removed from cat1: ', dim1rr
            print 'Number of detections to be removed from cat2: ', dim2rr
            print ''
            # Two new ID lists (finalid1 & finalid2) are generated with
            # the final elements to be included in the output catalogs.
            finalid1 = U.zeros((ne1 - dim1rr))
            finalid2 = U.zeros((ne2 - dim2rr))
            kk1 = 0
            for hh1 in range(ne1):
                if id1[hh1] not in idr1b:
                    finalid1[kk1] = id1[hh1]
                    kk1 += 1
            print 'kk1', kk1
            kk2 = 0
            for hh2 in range(ne2):
                if id2[hh2] not in idr2b:
                    if kk2 <= (ne2 - dim2rr - 1):
                        finalid2[kk2] = id2[hh2]
                        kk2 += 1
            print 'kk2', kk2
            # New, smaller catalogs are created without the repeated detections.
            outcat1 = ap.decapfile(cat1) + '.wo2detect.cat'
            outcat2 = ap.decapfile(cat2) + '.wo2detect.cat'
            print 'outcat1', outcat1
            print 'outcat2', outcat2
            ap.select_rows_bylist(cat1, finalid1, outcat1)
            ap.select_rows_bylist(cat2, finalid2, outcat2)
    else:
        print 'No common sources between the catalogs'

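# ---------------------------------------------------------------------
# Hypothetical helper (NOT part of the original module):
# 'matching_vects_ddet' is used by both overlap routines above but is
# not defined in this file. A minimal brute-force sketch inferred from
# its usage: for every source of sample 1 it takes the closest source
# of sample 2 within 'tol' (same units as the coordinates, here deg)
# and returns an (n1, 2) array of matched ID pairs, with zeros for
# unmatched rows. The real implementation may differ.
def matching_vects_ddet(id1, x1, y1, id2, x2, y2, tol):
    n1 = len(id1)
    out = N.zeros((n1, 2))
    for ii in range(n1):
        dist = N.sqrt((x2 - x1[ii]) ** 2 + (y2 - y1[ii]) ** 2)
        jj = dist.argmin()
        if dist[jj] < tol:
            out[ii, 0] = id1[ii]
            out[ii, 1] = id2[jj]
    return out
# ---------------------------------------------------------------------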