Example #1
 def onclick(self,event):
     xp=event.xdata
     yp=event.ydata
     ra,dec=self.f.pixel2world(xp,yp)
     if event.button==2:
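         # button 2 is the middle mouse button in matplotlib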
         # do something which depends on mode
         if self.mode=='o':
             self.optra=ra
             self.optdec=dec
             print('Optical ID at',ra,dec)
             self.redraw()
         elif self.mode=='m':
             sep=separation(ra,dec,self.ots['RA'],self.ots['DEC'])
             index=np.argmin(sep)
             name=self.ots[index]['Source_Name']
             if name in self.components:
                 self.components.remove(name)
                 print('removed component',name)
             else:
                 self.components.append(name)
                 print('added component',name)
             self.redraw()
         elif self.mode=='z':
             if not(np.isnan(self.oldra)):
                 self.size=separation(ra,dec,self.oldra,self.olddec)*3600
                 print('Size measured as',self.size,'arcsec')
             self.oldra=ra
             self.olddec=dec
         elif self.mode=='i':
             sep=separation(ra,dec,self.ots['RA'],self.ots['DEC'])
             index=np.argmin(sep)
             print(self.ots[index])
         else:
             raise NotImplementedError('Mode not recognised')
Example #2
def select_multi(i):
    if np.isnan(optras[i]): return []
    thishp = hd[hps[i]]  # the ones with matching healpix
    if len(thishp) == 1: return []
    dist = separation(optras[i], optdecs[i], thishp[1], thishp[2])
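    # separation() returns degrees, so 1.5/3600 deg is a 1.5 arcsec match radius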
    filt = dist < 1.5 / 3600.0
    sm = np.sum(filt)
    if sm > 1:
        return list(thishp[0][filt])
    else:
        return []
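Most of the astronomy examples on this page assume a vectorized helper with the signature separation(ra, dec, ras, decs) that returns the great-circle distance in degrees between one position and an array of positions (hence the repeated *3600 conversions to arcsec). A minimal sketch of such a helper, assuming a haversine implementation rather than the project's actual code:

import numpy as np

def separation(ra1, dec1, ra2, dec2):
    # All inputs in degrees; ra2/dec2 may be scalars or numpy arrays.
    ra1, dec1, ra2, dec2 = [np.radians(np.asarray(v, dtype=float))
                            for v in (ra1, dec1, ra2, dec2)]
    # Haversine formula, numerically stable for small separations.
    a = (np.sin((dec2 - dec1) / 2.0)**2
         + np.cos(dec1) * np.cos(dec2) * np.sin((ra2 - ra1) / 2.0)**2)
    return np.degrees(2.0 * np.arcsin(np.sqrt(np.clip(a, 0.0, 1.0))))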
Example #3
 def find(self, ra, dec, returnhdu=False):
     dist = separation(ra, dec, self.ras, self.decs)
     ranks = sorted(range(len(dist)), key=lambda i: dist[i])
     for r in ranks:
         w = self.wcs[r]
         xsize, ysize = self.sizes[r]
         try:
             x, y = w.wcs_world2pix(ra, dec, 0)
         except TypeError:
             x, y, _, _ = w.wcs_world2pix(ra, dec, 0, 0, 0)
         if x >= 0 and y >= 0 and x < xsize and y < ysize:
             break
     else:
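         # for/else: only reached if the loop never breaks, i.e. no map contains the position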
         raise RuntimeError('Cannot find suitable map')
     if returnhdu:
         return self.hdus[r]
     else:
         return self.files[r]
Example #4
 def clickon(self, ra, dec):
     # do something which depends on mode
     if self.mode == 'o' or self.mode == 'O':
         self.ga.set_optid(self.c, ra, dec)
         print '\nOptical ID at', ra, dec
         if self.mode == 'o': self.redraw()
     if self.mode == 'm' or self.mode == 'O':
         sep = separation(ra, dec, self.ots['RA'], self.ots['DEC'])
         index = np.argmin(sep)
         name = self.ots[index]['Gaus_id']
         if self.ga.ismember(self.c, name):
             self.ga.remove(name)
             print '\nremoved component', name, 'from source', self.c
         else:
             self.ga.add(self.c, name)
             print '\nadded component', name, 'to source', self.c
         self.redraw()
         if self.mode == 'O':
             self.set_mode('m')
Example #5
t=Table.read('HETDEX-LGZ-cat-v0.7-filtered.fits')
t2m=Table.read('/data/lofar/mjh/hetdex_v4/2MASX_hetdex_fix.fits') # Wendy's table
sep2m=60 # arcsec

count=0
problems=np.zeros(len(t),dtype=bool)
for i in range(len(t)):
    r=t[i]
    if r['Hostbroken_prob']<0.4:
        continue
    print r['Source_Name']
    count+=1
    if np.isnan(r['optRA']):
        # host broken up but no host. Hmm...
        print 'No host!'
        sep=3600.0*separation(r['RA'],r['DEC'],t2m['ra'],t2m['dec'])
        j=np.argmin(sep)
        if sep[j]>sep2m:
            print 'No 2MASS source within',sep2m,'arcsec'
            problems[i]=True
            continue
    else:
        sep=3600.0*separation(r['optRA'],r['optDec'],t2m['ra'],t2m['dec'])
        j=np.argmin(sep)
        if sep[j]>sep2m:
            print 'No 2MASS source within',sep2m,'arcsec'
            problems[i]=True
            continue

    print '2MASS source found:',t2m[j]['designation']
    r['optRA']=t2m[j]['ra']
Example #6
        if size > maxsize:
            # revert just to original
            ra, dec = r['RA'], r['DEC']
            tcopy = ctable
            ra, dec, size = find_bbox(tcopy, scale=scale)

        if size > maxsize:
            size = maxsize
        if size < minsize:
            size = minsize
        size = (int(0.5 + size / 10)) * 10
        print 'size is', size

        size /= 3600.0

        seps = separation(ra, dec, ot['RA'], ot['DEC'])
        ots = ot[seps < size]
        print ra, dec
        print ots['RA', 'DEC']

        ls = []
        cs = []
        for nr in ots:
            if nr['Parent_Source'] == sourcename:
                ls.append('solid')
                cs.append('green')
            else:
                ls.append('dashed')
                cs.append('red')

        # here we use Montage to make a regridded Spitzer image so that
Example #7
    def __init__(self,comps,ncomps,t):
        
        self.polys=[]
        self.colors=[]
        self.alphas=[]
        self.error=False

        # construct shape of old components
        ra=np.mean(comps['Comp_RA'])
        dec=np.mean(comps['Comp_DEC'])


        ellist=[]
        for r in comps:
            n_ra=r['Comp_RA']
            n_dec=r['Comp_DEC']
            x=3600*np.cos(dec*np.pi/180.0)*(ra-n_ra)
            y=3600*(n_dec-dec)
            newp=ellipse(x,y,r['Comp_Maj'],r['Comp_Min'],r['Comp_PA'])
            ellist.append(newp)
        cp=cascaded_union(ellist)

        minx,miny,maxx,maxy=cp.bounds
        field=np.max([[maxx-minx],[maxy-miny]])
        self.field=field

        # find and remove small old components _not_ in the field --
        # catches compact sources in extended emission

        sep=3600.0*separation(ra,dec,ncomps['RA'],ncomps['DEC'])
        ncomps=ncomps[sep<field]
        for r in ncomps:
            if r['Maj']>24:
                continue
            n_ra=r['RA']
            n_dec=r['DEC']
            x=3600*np.cos(dec*np.pi/180.0)*(ra-n_ra)
            y=3600*(n_dec-dec)
            newp=ellipse(x,y,r['Maj'],r['Min'],r['PA'])
            self.add_poly(newp,'green',0.1)
            cp=cp.difference(newp)

        self.add_poly(cp,'red',0.5)
        
        # compute nearby new components
        sep=3600.0*separation(ra,dec,t['RA'],t['DEC'])
        t=t[sep<field]
        print '        .... total nearby components',len(t)
        # set up ellipses for association. units are arcsec

        result=[]
        for r in t:
            n_ra=r['RA']
            n_dec=r['DEC']
            x=3600*np.cos(dec*np.pi/180.0)*(ra-n_ra)
            y=3600*(n_dec-dec)
            newp=ellipse(x,y,r['Maj'],r['Min'],r['PA'])
            error=False
            try:
                inter=cp.intersection(newp)
            except shapely.geos.TopologicalError:
                print 'Error!',x,y,r['Maj'],r['Min'],r['PA']
                error=True    
            self.add_poly(newp,'blue',0.2)
            if error:
                result.append(True)
                self.error=True
            elif inter.area>(0.4*newp.area):
                # do new sources overlap with old
                result.append(True)
                self.add_poly(newp,'yellow',0.2)
            else:
                # is old source subsumed by one big new source
                inter=newp.intersection(cp)
                if inter.area>(0.9*cp.area):
                    result.append(True)
                    self.add_poly(newp,'yellow',0.2)
                else:
                    result.append(False)
        self.filtered=t[result]
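The ellipse() calls above are assumed to produce shapely polygons, so that cascaded_union and intersection work on them. A rough sketch of such a helper; the axis-length and position-angle conventions here are assumptions, not taken from the original code:

from shapely.geometry import Point
from shapely import affinity

def ellipse(x, y, maj, minor, pa):
    # Unit circle at the component position, stretched to the (assumed)
    # semi-axes maj/2 and minor/2, then rotated by the position angle in
    # degrees; the sign/zero-point of pa depends on the catalogue convention.
    circ = Point(x, y).buffer(1.0)
    ell = affinity.scale(circ, maj / 2.0, minor / 2.0)
    return affinity.rotate(ell, pa)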
Example #8
dups = []
for n, r in zip(zn, zr):
    if r == 'Duplicate ID non-zoom source':
        dups.append(n)

print(len(dups))

t = Table.read('sources-v0.1.fits')
filt = np.zeros_like(t, dtype=bool)

for d in dups:
    filt |= t['Source_Name'] == d

dt = t[filt]
print(len(dt))

dt.write('duplicates.fits', overwrite=True)
keynames = set([])
for r in dt:
    dist = separation(r['optRA'], r['optDec'], dt['optRA'], dt['optDec'])
    st = dt[dist < 1.5 / 3600.0]
    print(r['Source_Name'], len(st))
    # If there are two (and only two) sources in st, then we have a pair that
    # we can send to automatic processing. The first source will be the lead
    # source and we store the name of the paired source in the table.
    if len(st) == 2 and st[0]['Source_Name'] not in keynames:
        cur.execute('insert into %s values ("%s","%s",NULL)' %
                    (table, st[0]['Source_Name'], st[1]['Source_Name']))
        keynames.add(st[0]['Source_Name'])

con.close()
Example #9
                elif command == 'i':
                    display_mode = 'optical'
                    break
                elif command == 'r':
                    del (ga.optids[I.c])
                    ga.unchanged = False
                    I.redraw()
                elif command == 'S':
                    display_mode = 'Spitzer'
                    break
                elif command in ['m', 'o']:
                    I.set_mode(command)
                elif command == 't':
                    # Automatically select the other component of two
                    if len(mlt) != 2:
                        print 'There are more than two possibilities!'
                    else:
                        cra, cdec = ga.optpos(I.c)
                        if cra is None or cdec is None:
                            print 'No existing optical ID!'
                        else:
                            sep = separation(cra, cdec, mlt['ra'], mlt['dec'])
                            index = np.argmax(sep)
                            ra = mlt[index]['ra']
                            dec = mlt[index]['dec']
                            I.c = 2
                            I.set_mode('O')
                            I.clickon(ra, dec)

            plt.close()
Example #10
from separation import separation

a = [1, 2, 3, 2, 1]
print(a)
a = separation(a)
print(a)
Example #11
                                od[p]=newopt
                                print('Dynamically loaded healpix',p)
                                tables.append(newopt)
                            else:
                                print('Healpix',p,'not found')
                    gals=vstack(tables)
                    if 'RA' in gals.colnames:
                        gals['RA'].name='ra'
                    if 'DEC' in gals.colnames:
                        gals['DEC'].name='dec'

                pwg=gals[(np.abs(gals['ra']-ra)<rsize) & (np.abs(gals['dec']-dec)<dsize)]
            else:
                pwg=None

            ots=ot[separation(ra,dec,ot['RA'],ot['DEC'])<(size*scalefactor)]

            #pshdu=extract_subim(imagedir+'/downloads/'+psmaps[i],ra,dec,size*2,hduid=1)
            print('Lofarfile is',lofarfile)
            lhdu=extract_subim(lofarfile,ra,dec,size*scalefactor)
            if mode=='wise':
                whdu=extract_subim(wisefile,ra,dec,size*scalefactor)
            else:
                if legacyfile is None:
                    print('Legacy image does not exist, using WISE')
                    mode='wise'
                    whdu=extract_subim(wisefile,ra,dec,size*scalefactor)
                else:
                    whdu=extract_subim(legacyfile,ra,dec,size*scalefactor)
                    if np.all(np.isnan(whdu[0].data)):
                        print('Legacy image is blanked, using WISE')
Example #12
        # zoom out
        size *= 2

        size /= 3600.0
        '''
        gals=Table.read(imagedir+'/tier1_ps1_wise_hetdex.fits')
        gals['raMean'].name='ra'
        gals['decMean'].name='dec'
        pg=gals[(np.abs(gals['ra']-ra)<size) & (np.abs(gals['dec']-dec)<size)]
        del(gals)

        '''
        pwg = gals[(np.abs(gals['ra'] - ra) < size)
                   & (np.abs(gals['dec'] - dec) < size)]

        ots = ot[separation(ra, dec, ot['RA'], ot['DEC']) < (size * 3)]

        #pshdu=extract_subim(imagedir+'/downloads/'+psmaps[i],ra,dec,size*2,hduid=1)
        print 'Lofarfile is', lofarfile
        lhdu = extract_subim(lofarfile, ra, dec, size * 3)
        firsthdu = extract_subim(imagedir + '/downloads/' + firstmaps[i], ra,
                                 dec, size * 3)
        whdu = extract_subim(imagedir + '/downloads/' + wisemaps[i], ra, dec,
                             size)
        try:
            peak = r['Peak_flux'] / 1000.0
        except:
            peak = None

        f = show_overlay(lhdu,
                         whdu,
Example #13
                r['Source_Name'] = sname
                r['E_RA'] = np.sqrt(np.mean(clist['E_RA']**2.0))
                r['E_DEC'] = np.sqrt(np.mean(clist['E_DEC']**2.0))
                r['Total_flux'] = np.sum(clist['Total_flux'])
                r['E_Total_flux'] = np.sqrt(np.sum(clist['E_Total_flux']**2.0))
                maxpk = np.argmax(clist['Peak_flux'])
                r['Peak_flux'] = clist[maxpk]['Peak_flux']
                r['E_Peak_flux'] = clist[maxpk]['E_Peak_flux']
                r['S_Code'] = 'M'
                r['Isl_rms'] = np.mean(clist['Isl_rms'])
                r['Mosaic_ID'] = clist[maxpk]['Mosaic_ID']
                seps = []
                for c in clist:
                    seps.append(
                        separation(c['RA'], c['DEC'], clist['RA'],
                                   clist['DEC']))
                maxsep = np.max(seps) * scale
                maxsize = np.max(clist['Maj'])
                maxsize = max((maxsep, maxsize))
                if size is not None:
                    if size > maxsize:
                        maxsize = size
                    if size > r['New_size']:
                        r['New_size'] = size

                print '      sizes:', maxsep, maxsize
                r['Size'] = maxsize

            if ora is not None and do_optical:
                # check opt position
                if oldr is not None:
Example #14
import numpy as np
import pandas as pd
from separation import separation
from sklearn import svm

[
    complete, january, february, march, april, may, june, july, august,
    september, october, november, december
] = separation()

x_train = []
x_test = []
y_train = []
y_test = []
## For jan
for i in range(1, len(january) - 20):
    data = january[i]
    x_train.append(data[0].replace('-', ''))
    y_train.append(data[1])

for i in range(len(january) - 20, len(january)):
    data = january[i]
    x_test.append(data[0].replace('-', ''))
    y_test.append(data[1])

x = np.asarray(x_train).astype(float)
y = np.asarray(y_train).astype(float)
# xt = np.reshape(x_test,(-1,1)).astype(np.float)
# yt = np.reshape(y_test,(-1,1)).astype(np.float)

#clf = svm.SVR(kernel='rbf', C=1, gamma=0.1)
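The snippet stops at the commented-out regressor; a plausible continuation (assumed, not part of the original) would reshape the 1-D features and fit the SVR:

clf = svm.SVR(kernel='rbf', C=1, gamma=0.1)
clf.fit(x.reshape(-1, 1), y)
xt = np.asarray(x_test).astype(float).reshape(-1, 1)
print(clf.predict(xt))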
Example #15
def make_structure(field, warn=False):
    print 'Reading data...'

    if field == 'bootes':
        fluxscale = 0.859
        ct = Table.read(
            '/beegfs/lofar/deepfields/Bootes_LR/new_fdeep_matches/Bootes_ML_RUN_fin_overlap_srl_workflow_th.fits'
        )
        ct_nt = Table.read(
            '/beegfs/lofar/deepfields/Bootes_LR/Bootes_ML_RUN_fin_overlap_srl_workflow_fixed.fits'
        )
        allgals = Table.read(
            '/beegfs/lofar/deepfields/Bootes_merged_optical/Bootes_merged_pos.fits'
        )
        gt = Table.read(
            '/beegfs/lofar/deepfields/Bootes_LR/new_fdeep_matches/Bootes_ML_RUN_fin_overlap_gaul_workflow_th.fits'
        )
        preselect_dir = '/beegfs/lofar/deepfields/Bootes_preselect'
        lgz_dir = '/beegfs/lofar/deepfields/lgz/bootes'
        blend_dirs = [
            '/beegfs/lofar/deepfields/Bootes_blend',
            '/beegfs/lofar/deepfields/preselect_blend/bootes/blend',
            lgz_dir + '/blend'
        ]
        noid_files = [
            '/beegfs/lofar/deepfields/lgz/bootes/noid/noid.txt',
            '/beegfs/lofar/deepfields/lgz/bootes/noid2/noid2.txt',
            '/beegfs/lofar/deepfields/lgz/bootes/noid10/noid10.txt'
        ]
    elif field == 'lockman':
        fluxscale = 0.920
        ct = Table.read(
            '/beegfs/lofar/deepfields/Lockman_LR/updated_LR_cols/LH_ML_RUN_fin_overlap_srl_workflow_th.fits'
        )
        ct_nt = Table.read(
            '/beegfs/lofar/deepfields/Lockman_LR/LH_ML_RUN_fin_overlap_srl_workflow.fits'
        )
        allgals = Table.read(
            '/beegfs/lofar/deepfields/Lockman_edited_cats/optical/Lockman_merged_pos.fits'
        )
        gt = Table.read(
            '/beegfs/lofar/deepfields/Lockman_LR/updated_LR_cols/LH_ML_RUN_fin_overlap_gaul_workflow_th.fits'
        )
        preselect_dir = '/beegfs/lofar/deepfields/Lockman_preselect'
        lgz_dir = '/beegfs/lofar/deepfields/lgz/lockman'
        blend_dirs = [
            '/beegfs/lofar/deepfields/Lockman_blend',
            '/beegfs/lofar/deepfields/preselect_blend/lockman/blend',
            lgz_dir + '/blend'
        ]
        noid_files = [
            '/beegfs/lofar/deepfields/lgz/lockman/noid/noid.txt',
            '/beegfs/lofar/deepfields/lgz/lockman/noid2/noid2.txt',
            '/beegfs/lofar/deepfields/lgz/lockman/noid10/noid10.txt'
        ]
    elif field == 'en1':
        fluxscale = None
        ct = Table.read(
            '/beegfs/lofar/deepfields/ELAIS_N1_LR/new_optcat_matches/EN1_ML_RUN_fin_overlap_srl_workflow_th.fits'
        )
        ct_nt = Table.read(
            '/beegfs/lofar/deepfields/ELAIS_N1_LR/EN1_ML_RUN_fin_overlap_srl_workflow_fixed.fits'
        )
        allgals = Table.read(
            '/beegfs/lofar/deepfields/ELAIS_N1_optical/catalogues/EN1_merged_pos_all.fits'
        )
        gt = Table.read(
            '/beegfs/lofar/deepfields/ELAIS_N1_LR/new_optcat_matches/EN1_ML_RUN_fin_overlap_gaul_workflow_th.fits'
        )
        preselect_dir = '/beegfs/lofar/deepfields/ELAIS-N1_preselect'
        lgz_dir = '/beegfs/lofar/deepfields/lgz/en1'
        blend_dirs = [
            '/beegfs/lofar/deepfields/ELAIS-N1_blend',
            '/beegfs/lofar/deepfields/preselect_blend/en1/blend',
            lgz_dir + '/blend'
        ]
        noid_files = [
            '/beegfs/lofar/deepfields/lgz/en1/noid/noid.txt',
            '/beegfs/lofar/deepfields/lgz/en1/noid2/noid2.txt',
            '/beegfs/lofar/deepfields/lgz/en1/noid10/noid10.txt'
        ]
    else:
        print 'Not in correct working directory'
        sys.exit(1)

    s = Source()
    if fluxscale is not None:
        s.set_stage('Rescale fluxes')
        for t in [ct, ct_nt, gt]:
            for c in t.colnames:
                if 'flux' in c or 'rms' in c or 'mean' in c:
                    t[c] *= fluxscale

    s.set_stage('Ingest components')
    for r in ct:
        name = r['Source_Name']
        s.create_component(name, r)
        s.cd[name]['Children'] = []

    s.set_stage('Ingest Gaussians')
    for r in gt:
        name = r['Source_Name']
        s.create_gaussian(name, r)
        cname = ct[r['Source_id']]['Source_Name']
        s.gd[name]['Parent'] = cname
        s.cd[cname]['Children'].append(name)

    s.set_stage('Create initial sources')
    for component in s.cd:
        s.promote_component(component)

    # Apply pre-filter

    s.set_stage('Prefilter')
    group = []
    source = []
    wc = preselect_dir + '/*/workflow.txt'
    g = glob.glob(wc)
    for f in g:
        print f
        lines = open(f).readlines()
        for l in lines:
            l = l.rstrip()
            bits = l.split(',')
            group.append(int(bits[0]))
            source.append(bits[1])

    for sname, g in zip(source, group):
        s.sd[sname]['Prefilter'] = g
        if g == 5:
            s.delete_source(sname, 'Artefact')  # Artefact
        elif g == 3:  # Don't accept ID
            print 'Removing ID from', sname
            s.sd[sname]['lr_fin'] = np.nan
            s.sd[sname]['lr_index_fin'] = np.nan
            s.sd[sname]['lr_ra_fin'] = np.nan
            s.sd[sname]['lr_dec_fin'] = np.nan
        elif g == 2:
            print 'Looking up', sname, 'in no-threshold table'
            filt = (ct_nt['Source_Name'] == sname)
            if np.sum(filt) == 0:
                raise RuntimeError('No entry for this source!')
            else:
                print 'Assigning position...'
                r = ct_nt[filt][0]
                s.sd[sname]['lr_fin'] = r['lr_fin']
                index = int(r['lr_index_fin'])
                s.sd[sname]['optRA'] = allgals[index]['ra']
                s.sd[sname]['optDec'] = allgals[index]['dec']
        elif g == 4:
            s.sd[sname]['Zoom_prob'] = 1.0

    s.set_stage('Ingest LGZ')
    lgz_source = Table.read(lgz_dir + '/LGZ-cat.fits')
    lgz_source['Dec'].name = 'DEC'
    lgz_comps = Table.read(lgz_dir + '/LGZ-comps.fits')
    # create LGZ entries
    for r in lgz_source:
        sname = r['Source_Name']
        s.create_source(sname, r)  # will add to what's on record for
        # the source, if it already exists
        if 'lr_ra_fin' in s.sd[sname]:
            s.sd[sname]['old_ra'] = s.sd[sname]['lr_ra_fin']
            s.sd[sname]['old_dec'] = s.sd[sname]['lr_dec_fin']
        s.sd[sname]['lr_ra_fin'] = np.nan
        s.sd[sname]['lr_dec_fin'] = np.nan
        s.sd[sname]['Children'] = []
        # Mark this source for LGZ assembly, which we'll do after TZI
        s.sd[sname]['LGZ_assembly_required'] = True
    for r in lgz_comps:
        name = r['Comp_Name']
        sname = r['Source_Name']
        s.sd[sname]['Children'].append(name)
        s.cd[name]['Parent'] = sname
        if r['Assoc'] != 0:
            # remove sources that are now part of an association
            s.delete_source(name, 'LGZ association', descend=False)

    # Blends
    s.set_stage('Blend processing')

    for bd in blend_dirs:
        patches = glob.glob(bd + '/ILT*.txt')
        if len(patches) == 0:
            raise RuntimeError(
                'No patches found in directory %s, did you get the pathname wrong?'
                % bd)
        for f in patches:
            name = f.replace(bd + '/', '').replace('.txt', '')
            print 'Blend file for', name, 'is', f
            if name not in s.sd:
                print name, 'not in source list'
                sys.exit(2)
            # whatever happens to the source, these issues are dealt with...
            s.sd[name]['Blend_prob'] = 0
            s.sd[name]['Hostbroken_prob'] = 0
            s.sd[name]['Imagemissing_prob'] = 0
            s.sd[name]['Blend_file'] = f
            lines = [l.rstrip() for l in open(f).readlines()]
            # parse the file
            if lines[0] == '## Flagged':
                s.sd[name]['Zoom_prob'] = 1.0  # send to TZI
            elif lines[0] == '## Unchanged':
                # source stays as it is, BUT lr must be accepted if present
                if not np.isnan(s.sd[name]['lr_ra_fin']):
                    print 'Accepting LR position for this source'
                    s.sd[name]['optRA'] = s.sd[name]['lr_ra_fin']
                    s.sd[name]['optDec'] = s.sd[name]['lr_dec_fin']
                elif 'old_ra' in s.sd[name]:
                    print 'Accepting pre-LGZ LR position for this source'
                    s.sd[name]['optRA'] = s.sd[name]['old_ra']
                    s.sd[name]['optDec'] = s.sd[name]['old_dec']

            elif lines[0] == '## Components':
                print name, ': output file to be processed!'
                s.sd[name]['lr_ra_fin'] = np.nan
                s.sd[name]['lr_dec_fin'] = np.nan
                components = len(s.cd[name]['Children'])
                print 'Component has', components, 'Gaussians:', s.cd[name][
                    'Children']
                # parse the component ID part
                child_ids = []
                gaussian_names = []
                gids = []
                for i in range(components):
                    bits = lines[i + 1].split()
                    gid = int(bits[0])
                    gidi = np.argmax(gt['Gaus_id'] == gid)
                    gname = gt[gidi]['Source_Name']
                    # sanity check
                    if gname not in s.cd[name]['Children']:
                        print s.cd[name]
                        raise RuntimeError('Gaussian %s not in child list' %
                                           gname)
                    gids.append(gidi)
                    gaussian_names.append(gname)
                    child_ids.append(int(bits[1]))
                child_ids = np.array(child_ids)
                gids = np.array(gids)
                optid = {}
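                # assumed blend-file layout: the remaining lines are optical IDs of the form "<source_id> <ra> <dec>"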
                for l in lines[components + 3:]:
                    bits = l.split()
                    id = int(bits[0])
                    ra = float(bits[1])
                    dec = float(bits[2])
                    optid[id] = (ra, dec)
                if np.all(child_ids == 0):
                    print 'No unflagged components!'
                    # probably should never happen
                    s.delete_source(name, 'All components removed')
                elif np.all(child_ids == 1):
                    print 'Components unchanged'
                    # Using the LGZ names
                    if 1 in optid:
                        ra, dec = optid[1]
                        s.sd[name]['optRA'] = ra
                        s.sd[name]['optDec'] = dec
                        s.sd[name]['OptID_Name'] = 'Altered'
                    else:
                        # opt id was removed
                        s.sd[name]['OptID_Name'] = "None"
                        s.sd[name]['optRA'] = np.nan
                        s.sd[name]['optDec'] = np.nan
                        s.sd[name]['noID'] = 11
                else:
                    print "It's complicated"
                    # This means that the component has been split
                    # into more than one set of Gaussians, possibly
                    # each with an optical ID.
                    # we delete the parent source now -- if it needs to be recreated it will be.
                    s.delete_source(name,
                                    'Removed by blend file',
                                    descend=False)

                    ss = set(child_ids)
                    for source_id in ss:
                        gaussians = [
                            n for n, index in zip(gaussian_names, child_ids)
                            if index == source_id
                        ]
                        these_gids = gids[child_ids == source_id]
                        if source_id == 0:
                            # These Gaussians have not been included
                            # in any source. Since the whole source is
                            # not composed of artefacts, we should
                            # just be able to flag the Gaussian(s).
                            for g in gaussians:
                                s.delete_gaussian(
                                    g, 'Gaussian not included in blend')
                        else:
                            if len(gaussians) == 1:
                                print 'Promoting single Gaussian to component and source'
                                # Single Gaussian should be promoted to a source
                                gname = gaussians[0]
                                parent = s.gd[gname]['Parent']
                                if 'Promoted' not in s.cd[parent]['Created']:
                                    print gname, 'has parent', parent, ' -- marking deleted'
                                    s.cd[parent][
                                        'Deleted'] = 'Removed by blend file (single)'
                                else:
                                    print 'Not deleting parent', parent
                                s.cd[gname] = deepcopy(s.gd[gname])
                                s.cd[gname][
                                    'Created'] = 'Promoted from single Gaussian'
                                s.cd[gname]['Children'] = [gname]
                                s.gd[gname]['Parent'] = gname
                                s.promote_component(gname)
                                sname = gname
                            else:
                                print 'Assembling several Gaussians to component and source'
                                # Several Gaussians need to be
                                # assembled into a source, which will
                                # have a new name and other new
                                # properties. Use the Gaussian table for this
                                # ** NEW ** promote all Gaussians to components
                                clist = gt[these_gids]
                                r = assemble_source(clist)
                                sname = r['Source_Name']
                                if len(clist) == 1:
                                    r['Assoc'] = 0
                                else:
                                    r['Assoc'] = len(clist)
                                r['Assoc_Qual'] = 1
                                r['ID_Qual'] = 1
                                r['Blend_prob'] = 0
                                r['Created'] = 'Deblend'
                                r['Blend_file'] = f
                                r['Children'] = gaussians
                                print 'Creating source', sname, 'with children', gaussians
                                print clist['Source_Name']
                                if sname in s.sd:
                                    print 'Source exists! Previous source was created by', s.sd[
                                        sname][
                                            'Created'], 'and has children', s.sd[
                                                sname]['Children']
                                    if s.sd[sname]['Created'] == 'Deblend':
                                        print 'Previous blend file was', s.sd[
                                            sname][
                                                'Blend_file'], ': this file is', f
                                    raise RuntimeError('Duplicate source name',
                                                       sname)
                                s.sd[sname] = r
                                for g in gaussians:
                                    # Logic here and above deals with the case where a
                                    # component created elsewhere in the loop has the same
                                    # name as a parent component of a Gaussian that would
                                    # normally be deleted. As the old parent has already
                                    # been overwritten, the deletion is not necessary.
                                    parent = s.gd[g]['Parent']
                                    if 'Promoted' not in s.cd[parent][
                                            'Created']:
                                        print g, 'has parent', parent, ' -- marking deleted'
                                        s.cd[parent][
                                            'Deleted'] = 'Removed by blend file (multiple)'
                                    else:
                                        print 'Not deleting parent', parent
                                    s.cd[g] = deepcopy(s.gd[g])
                                    s.cd[g][
                                        'Created'] = 'Promoted from Gaussian'
                                    s.cd[g]['Deblended_from'] = name
                                    s.gd[g]['Parent'] = g
                                    s.cd[g]['Children'] = [g]
                                    s.cd[g]['Parent'] = sname

                            s.sd[sname]['lr_ra_fin'] = np.nan
                            s.sd[sname]['lr_dec_fin'] = np.nan
                            if source_id in optid:
                                ra, dec = optid[source_id]
                                s.sd[sname]['optRA'] = ra
                                s.sd[sname]['optDec'] = dec
                                s.sd[sname]['OptID_Name'] = 'Altered'
                                s.sd[sname]['NoID'] = 0
                            else:
                                s.sd[sname]['NoID'] = 11
                                s.sd[sname]['OptID_Name'] = "None"
                                s.sd[sname]['optRA'] = np.nan
                                s.sd[sname]['optDec'] = np.nan

            else:
                raise RuntimeError('Cannot parse input file...' + lines[0])

    s.set_stage('Too zoomed in')

    g = glob.glob(lgz_dir + '/zoom/ILTJ*.txt')
    for f in g:
        print 'Zoomfile', f
        source = f.replace('.txt', '').replace(lgz_dir + '/zoom/', '')
        parsefile(source, s, dir=lgz_dir + '/zoom/')
        s.sd[source]['Created'] = 'Too zoomed in'
        s.sd[source]['Zoomfile'] = f
        s.sd[source]['LGZ_assembly_required'] = True

    # component table won't now change, so generate it so it can be
    # passed to assemble_source
    print 'Building new component table'
    columns = [('Source_Name', None), ('RA', None), ('DEC', None),
               ('E_RA', None), ('E_DEC', None), ('Total_flux', None),
               ('E_Total_flux', None), ('Peak_flux', None),
               ('E_Peak_flux', None), ('S_Code', None), ('Maj', np.nan),
               ('Min', np.nan), ('PA', np.nan), ('E_Maj', np.nan),
               ('E_Min', np.nan), ('E_PA', np.nan), ('DC_Maj', np.nan),
               ('DC_Min', np.nan), ('DC_PA', np.nan), ('Isl_rms', np.nan),
               ('Created', None), ('Parent', None)]
    new_ct = generate_table(s.cd, columns)

    s.set_stage('LGZ post-processing')
    s.zoomneeded = []
    sources = s.sd.keys()  # copy because we rename as we go
    for name in sources:
        if 'Deleted' not in s.sd[name] and 'LGZ_assembly_required' in s.sd[
                name]:
            if len(s.sd[name]
                   ['Children']) == 1 and s.sd[name]['Children'][0] == name:
                del s.sd[name]['LGZ_assembly_required']
                continue  # source is single component
            print 'Need to assemble source', name, 'from children', s.sd[name][
                'Children']
            cids = []
            error = False
            for cname in s.sd[name]['Children']:
                filt = (new_ct['Source_Name'] == cname)
                if not np.any(filt):
                    print 'Source created by', s.sd[name]['Created']
                    error = True
                    print 'Child %s does not exist!' % cname
                else:
                    cids.append(np.argmax(filt))
            if len(cids) == 0:
                s.delete_source(name, 'All components removed')
                continue
            if error:
                warn_or_die(warn, 'Source is partial, needs zoom file fix')
                s.zoomneeded.append(name)
            clist = new_ct[cids]
            r = assemble_source(clist)
            if 'Manual_Size' in s.sd[name]:
                print 'Adding manual size measurement'
                r['LGZ_Size'] = s.sd[name]['Manual_Size']
            sname = r['Source_Name']
            if sname != name:
                print 'Renaming old source', name, 'created by', s.sd[name][
                    'Created'], 'to', sname
                r['Renamed_from'] = name
                s.delete_source(name, 'Renamed', descend=False)

            for key in s.sd[name]:
                if key not in r:
                    r[key] = s.sd[name][key]
            for comp in r['Children']:
                s.cd[comp]['Parent'] = sname

            if name != sname:
                components = list(s.cd)
                for comp in components:
                    if s.cd[comp]['Parent'] == name:
                        s.delete_component(comp, 'Orphaned')

            r['Assoc'] = len(r['Children'])
            s.create_source(sname, r)
            if 'Deleted' in s.sd[sname]:
                del (s.sd[sname]['Deleted'])
            if 'Art_prob' in s.sd[sname] and s.sd[sname]['Art_prob'] > 0.5:
                s.delete_source(sname, 'LGZ artefact')
            del s.sd[sname]['LGZ_assembly_required']

    # finally sort out optical positions
    for source in s.sd:
        if 'Deleted' in s.sd[source]:
            continue
        if 'optRA' in s.sd[source] or 'lr_ra_fin' not in s.sd[source]:
            # RH 'or' because we may have a TZI source with no opt ID
            s.sd[source]['Position_from'] = 'Visual inspection'
        else:
            s.sd[source]['Position_from'] = 'LR'
            s.sd[source]['optRA'] = s.sd[source]['lr_ra_fin']
            s.sd[source]['optDec'] = s.sd[source]['lr_dec_fin']

    if noid_files is not None:
        s.set_stage('NoID')
        for noid_file in noid_files:
            lines = open(noid_file).readlines()
            group = []
            source = []
            for l in lines:
                l = l.rstrip()
                bits = l.split(',')
                group.append(int(bits[0]))
                source.append(bits[1])
            for sname, g in zip(source, group):
                if sname not in s.sd:
                    print 'Source', sname, 'already deleted, skipping'
                    continue
                #if 'optRA' in s.sd[sname] and not np.isnan(s.sd[sname]['optRA']):
                #    print 'Source',sname,'in noid list but has id, skipping'
                #    continue
                s.sd[sname]['NoID'] = g
                if g == 6:
                    s.delete_source(sname, 'Artefact')  # Artefact
                elif g == 8:
                    if 'Zoomfile' not in s.sd[sname]:
                        if 'Renamed_from' in s.sd[sname]:
                            name = s.sd[sname]['Renamed_from']
                        else:
                            name = sname
                        print 'Adding', name, 'to zoom list'
                        s.zoomneeded.append(name)
                    else:
                        s.sd[sname]['NoID'] = 3
                elif g == 7 or g == 9:
                    if 'Zoomfile' not in s.sd[sname]:
                        if 'LGZ' in s.sd[sname]['Created']:
                            if 'Renamed_from' in s.sd[sname]:
                                name = s.sd[sname]['Renamed_from']
                            else:
                                name = sname
                            print 'Adding', name, 'to zoom list'
                            s.zoomneeded.append(name)
                    else:
                        s.sd[sname]['NoID'] = 3

    s.set_stage('Opt ID overlap check')
    names = []
    optras = []
    optdecs = []
    for name in s.sd:
        if 'Deleted' in s.sd[name]:
            continue
        if 'optRA' in s.sd[name]:
            names.append(name)
            optras.append(s.sd[name]['optRA'])
            optdecs.append(s.sd[name]['optDec'])
    t = Table([names, optras, optdecs],
              names=['Source_Name', 'optRA', 'optDec'])
    print 'Made table of length', len(t)
    count = 0
    for r in t:
        if np.isnan(r['optRA']): continue
        dist = separation(r['optRA'], r['optDec'], t['optRA'], t['optDec'])
        filt = dist < 1.5 / 3600.0
        sm = np.sum(filt)
        if sm > 1:
            tf = t[filt]
            for sname in tf['Source_Name']:
                print 'Checking duplicate ID source', sname
                if 'Zoomfile' not in s.sd[sname]:
                    name = sname
                    if 'LGZ' in s.sd[sname]['Created']:
                        if 'Renamed_from' in s.sd[sname]:
                            name = s.sd[sname]['Renamed_from']
                    print 'Adding', name, 'to zoom list'
                    s.zoomneeded.append(name)
                else:
                    print '*** problem -- source', sname, 'with zoom file', s.sd[
                        sname]['Zoomfile'], 'has duplicate ID'
            count += sm
    print 'Total number of problem duplicates is', count

    s.set_stage('Assoc check')
    # At this point Assoc should be correct except for sources where
    # e.g. a zoom file has over-ridden a de-blend. Fix those
    for source in s.sd:
        if 'Deleted' in s.sd[source]:
            continue
        if len(s.sd[source]['Children']) > 1:
            if 'Assoc' not in s.sd[source]:
                print 'Fixing', source, 'created by', s.sd[source][
                    'Created'], 'which has assoc unset but', len(
                        s.sd[source]['Children']), 'children'
            elif s.sd[source]['Assoc'] != len(s.sd[source]['Children']):
                print 'Fixing', source, 'created by', s.sd[source][
                    'Created'], 'which has assoc =', s.sd[source][
                        'Assoc'], 'but', len(
                            s.sd[source]['Children']), 'children'
                s.sd[source]['Assoc'] = len(s.sd[source]['Children'])
        elif len(s.sd[source]['Children']) == 1:
            if 'Assoc' in s.sd[source] and s.sd[source]['Assoc'] != 0:
                print 'Fixing', source, 'created by', s.sd[source][
                    'Created'], 'which has assoc =', s.sd[source][
                        'Assoc'], 'but', len(
                            s.sd[source]['Children']), 'children'
                s.sd[source]['Assoc'] = 0
    return s
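A minimal usage sketch for make_structure, assuming the surrounding module provides Source, assemble_source and the other helpers it calls:

s = make_structure('lockman', warn=True)
print('%d sources, %d flagged for the zoom list' % (len(s.sd), len(s.zoomneeded)))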
Example #16
    ct = Table.read(
        '/beegfs/lofar/deepfields/ELAIS_N1_LR/new_optcat_matches/EN1_ML_RUN_fin_overlap_srl_workflow_th.fits'
    )
else:
    print 'Not in correct working directory'
    sys.exit(1)

g = sorted(glob.glob('final-v*.fits'))
infile = g[-1]
print 'Processing', infile

t = Table.read(infile)
seps = []

for r in t:
    sep = 3600.0 * separation(r['RA'], r['DEC'], r['ALPHA_J2000'],
                              r['DELTA_J2000'])
    seps.append(sep)

t['sep'] = seps
t.write(infile.replace('.fits', '-withsep.fits'), overwrite=True)

filter = t['FLAG_WORKFLOW'] == 1
filter &= t['sep'] > 3

nt = t[filter]

filter = np.array([False] * len(ct))
for r in nt:
    filter |= (ct['Source_Name'] == r['Source_Name'])

ct[filter].write('bigsep.fits', overwrite=True)