def filter_catalog(singlecat, matchedcat, fitsimage, outname, auxcatname, options=None):
    """Filter a crossmatched catalogue by field-centre distance, source size
    and isolation, writing the surviving sources to ``outname``.

    Args:
        singlecat: filename of the single-band catalogue (loaded but not
            otherwise used by the current filtering steps).
        matchedcat: filename of the crossmatched catalogue to filter.
        fitsimage: FITS image whose header supplies the field centre
            (CRVAL1/CRVAL2).
        outname: output filename for the filtered catalogue.
        auxcatname: short name of the auxiliary catalogue; only referenced by
            the not-yet-implemented extended-source filter below.
        options: options dict; falls back to the module-level ``o`` when None.
    """
    if options is None:
        options = o
    # Honour restart mode: skip the work if the output already exists.
    if options['restart'] and os.path.isfile(outname):
        warn('File ' + outname + ' already exists, skipping source filtering step')
    else:
        matchedcat = Table.read(matchedcat)
        singlecat = Table.read(singlecat)
        # Field centre comes from the image's WCS reference values; use a
        # context manager so the file is closed even on error.
        with fits.open(fitsimage) as hdulist:
            fieldra = hdulist[0].header['CRVAL1']
            fielddec = hdulist[0].header['CRVAL2']
        print('Originally', len(matchedcat), 'sources')
        matchedcat = filter_catalogue(matchedcat, fieldra, fielddec, 3.0)
        print('%i sources after filtering for 3.0 deg from centre' % len(matchedcat))
        # NOTE(review): the message says "over 10arcsec" but DC_Maj is
        # compared against 10.0 directly -- the original carries an
        # "# ERROR!" marker here. Units need confirming against the
        # catalogue; behaviour is deliberately kept as-is.
        matchedcat = matchedcat[matchedcat['DC_Maj'] < 10.0]  # ERROR!
        print('%i sources after filtering for sources over 10arcsec in LOFAR' % len(matchedcat))
        # not implemented yet!
        #tooextendedsources_aux = np.array(np.where(matchedcat[1].data[options['%s_match_majkey2'%auxcatname]] > options['%s_filtersize'%auxcatname])).flatten()
        #print '%s out of %s sources filtered out as over %sarcsec in %s'%(np.size(tooextendedsources_aux),len(allsources),options['%s_filtersize'%auxcatname],auxcatname)
        matchedcat = select_isolated_sources(matchedcat, 30.0)
        print('%i sources after filtering for isolated sources in LOFAR' % len(matchedcat))
        matchedcat.write(outname)
def filter_catalog(singlecat, matchedcat, fitsimage, outname, auxcatname, options=None):
    """Apply positional, size and isolation cuts to a crossmatched catalogue.

    Reads the matched catalogue, keeps sources within 3 degrees of the image
    centre (taken from CRVAL1/CRVAL2 of ``fitsimage``), drops sources with
    DC_Maj >= 10.0, keeps only isolated sources, and writes the result to
    ``outname``. Does nothing if restart mode is on and ``outname`` exists.
    """
    if options is None:
        options = o
    # Restart shortcut: the output is already on disk, so bail out early.
    if options['restart'] and os.path.isfile(outname):
        warn('File ' + outname + ' already exists, skipping source filtering step')
        return
    cat = Table.read(matchedcat)
    singlecat = Table.read(singlecat)
    hdus = fits.open(fitsimage)
    fieldra = hdus[0].header['CRVAL1']
    fielddec = hdus[0].header['CRVAL2']
    hdus.close()
    print('Originally', len(cat), 'sources')
    cat = filter_catalogue(cat, fieldra, fielddec, 3.0)
    print('%i sources after filtering for 3.0 deg from centre' % len(cat))
    cat = cat[cat['DC_Maj'] < 10.0]  # ERROR!
    print('%i sources after filtering for sources over 10arcsec in LOFAR' % len(cat))
    # not implemented yet!
    #tooextendedsources_aux = np.array(np.where(matchedcat[1].data[options['%s_match_majkey2'%auxcatname]] > options['%s_filtersize'%auxcatname])).flatten()
    #print '%s out of %s sources filtered out as over %sarcsec in %s'%(np.size(tooextendedsources_aux),len(allsources),options['%s_filtersize'%auxcatname],auxcatname)
    cat = select_isolated_sources(cat, 30.0)
    print('%i sources after filtering for isolated sources in LOFAR' % len(cat))
    cat.write(outname)
def make_catalogue(name, c_ra, c_dec, radius, cats, outnameprefix=''):
    """Crossmatch a pybdsm source catalogue against reference flux catalogues.

    ``cats`` needs to be a list of catalogues, each given as a tuple of
    (filename, short name, group ID, matching radius in arcsec). Group IDs
    are a set of objects -- can be anything -- such that we require at least
    one flux from each group. Each catalogue needs RA, DEC, Total_flux and
    E_Total_flux columns.

    Writes the matched table to ``<outnameprefix>crossmatch-1.fits``.

    Raises:
        RuntimeError: if any filtering or matching step leaves no sources.
    """
    t = Table.read(name, format='ascii.commented_header', header_start=-1)
    # Python 3 print() calls replace the original Python 2 print
    # statements, which are syntax errors under Python 3.
    print('Total table length is', len(t))
    if len(t) == 0:
        raise RuntimeError('No sources in table from pybdsm')
    t = filter_catalogue(t, c_ra, c_dec, radius)
    print('Filtered within', radius, 'degrees:', len(t))
    if len(t) == 0:
        raise RuntimeError('No sources in central part of image')
    # Keep only bright sources (Total_flux above 0.15).
    t = t[t['Total_flux'] > 0.15]
    print('Bright sources:', len(t))
    if len(t) == 0:
        raise RuntimeError('No bright sources for crossmatching')
    # Filter for isolated sources
    t = select_isolated_sources(t, 100)
    print('Remove close neighbours:', len(t))
    if len(t) == 0:
        raise RuntimeError('No sources in table before crossmatching')
    ctab = []
    groups = []
    for n, sh, group, cmrad in cats:
        tab = Table.read(n)
        ctab.append(filter_catalogue(tab, c_ra, c_dec, radius))
        groups.append(group)
        print('Table', sh, 'has', len(ctab[-1]), 'entries')
    groups = set(groups)
    # now do cross-matching; one match-counter column per group, which
    # match_catalogues is expected to increment.
    for g in groups:
        t['g_count_' + str(g)] = 0
    for i, (n, sh, group, cmrad) in enumerate(cats):
        tab = ctab[i]
        match_catalogues(t, tab, cmrad, sh, group=group)
    # Now reject sources that have no match in a given group
    for g in groups:
        t = t[t['g_count_' + str(g)] > 0]
    if len(t) == 0:
        raise RuntimeError('No crossmatches exist after group matching')
    t.write(outnameprefix + 'crossmatch-1.fits', overwrite=True)
def do_dr_checker(tname, imname, peak=0.1, majlimit=0.003, cutout=60, blank=10, verbose=False, write_subims=False):
    """Estimate the local dynamic range around bright, compact, isolated sources.

    For each selected source, a small sub-image is extracted around its
    position, the central region is blanked, and the source peak flux is
    compared to the largest remaining absolute value (the off-source
    artefact level).

    Args:
        tname: filename of the source catalogue (astropy-readable).
        imname: filename of the FITS image to extract sub-images from.
        peak: minimum Peak_flux for a source to be used.
        majlimit: maximum DC_Maj for a source -- presumably degrees, TODO
            confirm units against the catalogue.
        cutout: sub-image size in arcsec (also used as the isolation radius).
        blank: half-width, in pixels, of the central square to blank.
        verbose: print per-step and per-source diagnostics.
        write_subims: write each blanked sub-image to testim-<i>.fits.

    Returns:
        numpy array of peak/off-peak dynamic-range estimates, one per source.
    """
    drlist = []
    t = Table.read(tname)
    t = select_isolated_sources(t, cutout)
    # 'mask' rather than 'filter' -- avoid shadowing the builtin.
    mask = (t['Peak_flux'] > peak)
    if verbose:
        print(np.sum(mask))
    mask &= (t['DC_Maj'] < majlimit)
    t = t[mask]
    if verbose:
        print(len(t))
    for i, r in enumerate(t):
        h = extract_subim(imname, r['RA'], r['DEC'], cutout/3600.0, verbose=False)
        ys, xs = h[0].data.shape
        # Blank the source itself so the maximum below measures artefacts.
        h[0].data[int(ys/2)-blank:int(ys/2)+blank, int(xs/2)-blank:int(xs/2)+blank] = np.nan
        offpeak = np.nanmax(np.abs(h[0].data))
        if verbose:
            print(r['Peak_flux'], offpeak, r['Peak_flux']/offpeak)
        drlist.append(r['Peak_flux']/offpeak)
        if write_subims:
            # 'clobber' was removed from astropy; 'overwrite' is the
            # supported keyword with the same meaning.
            h.writeto('testim-%i.fits' % i, overwrite=True)
    return np.array(drlist)
def make_catalogue(name, c_ra, c_dec, radius, cats, outnameprefix=''):
    """Crossmatch a pybdsm catalogue against a set of reference catalogues.

    ``cats`` is a list of (filename, short name, group ID, matching radius
    in arcsec) tuples. Group IDs are a set of objects -- can be anything --
    such that we require at least one flux from each group. Each catalogue
    needs RA, DEC, Total_flux and E_Total_flux columns. The matched table
    is written to ``<outnameprefix>crossmatch-1.fits``; a RuntimeError is
    raised if any step leaves the table empty.
    """
    t = Table.read(name, format='ascii.commented_header', header_start=-1)
    print('Total table length is', len(t))
    if not len(t):
        raise RuntimeError('No sources in table from pybdsm')
    t = filter_catalogue(t, c_ra, c_dec, radius)
    print('Filtered within', radius, 'degrees:', len(t))
    if not len(t):
        raise RuntimeError('No sources in central part of image')
    t = t[t['Total_flux'] > 0.15]
    print('Bright sources:', len(t))
    if not len(t):
        raise RuntimeError('No bright sources for crossmatching')
    # Drop sources with close neighbours before attempting any matching.
    t = select_isolated_sources(t, 100)
    print('Remove close neighbours:', len(t))
    if not len(t):
        raise RuntimeError('No sources in table before crossmatching')

    # Load and spatially pre-filter each reference catalogue.
    filtered_tabs = []
    group_ids = []
    for fname, shortname, group, cmrad in cats:
        aux = Table.read(fname)
        filtered_tabs.append(filter_catalogue(aux, c_ra, c_dec, radius))
        group_ids.append(group)
        print('Table', shortname, 'has', len(filtered_tabs[-1]), 'entries')
    group_ids = set(group_ids)

    # Per-group match counters, incremented by match_catalogues.
    for g in group_ids:
        t['g_count_' + str(g)] = 0
    for aux, (fname, shortname, group, cmrad) in zip(filtered_tabs, cats):
        match_catalogues(t, aux, cmrad, shortname, group=group)

    # Require at least one crossmatch from every group.
    for g in group_ids:
        t = t[t['g_count_' + str(g)] > 0]
    if not len(t):
        raise RuntimeError('No crossmatches exist after group matching')
    t.write(outnameprefix + 'crossmatch-1.fits', overwrite=True)