Example #1
def do_archive(o, archivelist):

    if o['mslist'] is None:
        die('MS list must be specified')

    with open(o['mslist'], 'r') as f:
        msnames = [l.strip() for l in f.readlines()]

    # find version number in case this has been run already

    prefix = 'old'
    count = 1
    while os.path.isfile(msnames[0] + '/' + prefix + '.killms_p1.sols.npz'):
        count += 1
        prefix = 'old%i' % count

    print 'prefix is', prefix

    for ms in msnames:
        for s in ['p1', 'ap1']:
            if s in archivelist:
                for file in ['npz', 'parset']:
                    rename(
                        ms + '/killMS.killms_' + s + '.sols.' + file,
                        ms + '/' + prefix + '.killms_' + s + '.sols.' + file)

    if o['full_mslist'] is not None:
        with open(o['full_mslist'], 'r') as f:
            msnames = [l.strip() for l in f.readlines()]
        for ms in msnames:
            for s in ['f_ap1', 'f_ap2']:
                if s in archivelist:
                    for file in ['npz', 'parset']:
                        rename(
                            ms + '/killMS.killms_' + s + '.sols.' + file, ms +
                            '/' + prefix + '.killms_' + s + '.sols.' + file)
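
A minimal sketch of the version-prefix probe used above, factored out on its own. It tries 'old', then 'old2', 'old3', and so on until it finds a prefix not yet taken; the solutions filename here is the same one the snippet tests against:

import os

def next_archive_prefix(ms, solsfile='killms_p1.sols.npz'):
    # Try 'old' first, then 'old2', 'old3', ... until the file is absent.
    prefix = 'old'
    count = 1
    while os.path.isfile(ms + '/' + prefix + '.' + solsfile):
        count += 1
        prefix = 'old%i' % count
    return prefix
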
Example #2
def main(msin, config_path):
    o = options(config_path, option_list)
    if o['pbimage'] is None:
        die('pbimage must be specified')

    # fix up the new list-type options
    for i, cat in enumerate(o['list']):
        try:
            o[cat] = o['filenames'][i]
        except:
            pass
        try:
            o[cat + '_matchrad'] = o['radii'][i]
        except:
            pass
        try:
            o[cat + '_fluxfactor'] = o['fluxfactor'][i]
        except:
            pass

    if o['logging'] is not None and not os.path.isdir(o['logging']):
        os.mkdir(o['logging'])

    # pybdsm source finding
    sfind_image(o['catprefix'], o['pbimage'], o['sfind_pixel_fraction'])

    # matching with catalogs
    for cat in o['list']:
        print 'Doing catalogue', cat
        crossmatch_image(o['catprefix'] + '.cat.fits', cat)
        filter_catalog(o['catprefix'] + '.cat.fits',
                       o['catprefix'] + '.cat.fits_' + cat + '_match.fits',
                       o['pbimage'],
                       o['catprefix'] + '.cat.fits_' + cat +
                       '_match_filtered.fits',
                       cat,
                       options=o)

    # Filter catalogs (only keep isolated compact sources within 3deg of pointing centre)

    # Astrometric plots
    if 'FIRST' in o['list']:
        report('Plotting position offsets')
        plot_position_offset(
            '%s.cat.fits_FIRST_match_filtered.fits' % o['catprefix'],
            o['pbimage'],
            '%s.cat.fits_FIRST_match_filtered_positions.png' % o['catprefix'],
            'FIRST',
            options=o)

        t = Table.read(o['catprefix'] + '.cat.fits_FIRST_match_filtered.fits')
        bsra = np.percentile(bootstrap(t['FIRST_dRA'], np.mean, 10000),
                             (16, 84))
        bsdec = np.percentile(bootstrap(t['FIRST_dDEC'], np.mean, 10000),
                              (16, 84))
        mdra = np.mean(t['FIRST_dRA'])
        mddec = np.mean(t['FIRST_dDEC'])
        print 'Mean delta RA is %.3f arcsec (1-sigma %.3f -- %.3f arcsec)' % (
            mdra, bsra[0], bsra[1])
        print 'Mean delta DEC is %.3f arcsec (1-sigma %.3f -- %.3f arcsec)' % (
            mddec, bsdec[0], bsdec[1])

        report('Plotting flux ratios')
        # Flux ratio plots (only compact sources)
        plot_flux_ratios(
            '%s.cat.fits_FIRST_match_filtered.fits' % o['catprefix'],
            o['pbimage'],
            '%s.cat.fits_FIRST_match_filtered_fluxerrors.png' % o['catprefix'],
            options=o)

    report('Plotting flux scale comparison')
    # Flux scale comparison plots
    if 'TGSS' in o['list']:
        plot_flux_errors(
            '%s.cat.fits_TGSS_match_filtered.fits' % o['catprefix'],
            o['pbimage'],
            '%s.cat.fits_TGSS_match_filtered_fluxratio.png' % o['catprefix'],
            'TGSS',
            options=o)
        t = Table.read(o['catprefix'] + '.cat.fits_TGSS_match_filtered.fits')
        ratios = t['Total_flux'] / (t['TGSS_Total_flux'] /
                                    o['TGSS_fluxfactor'])
        bsratio = np.percentile(bootstrap(ratios, np.median, 10000), (16, 84))
        print 'Median LOFAR/TGSS ratio is %.3f (1-sigma %.3f -- %.3f)' % (
            np.median(ratios), bsratio[0], bsratio[1])
    if 'NVSS' in o['list']:
        t = Table.read(o['catprefix'] + '.cat.fits_NVSS_match_filtered.fits')
        t = t[t['Total_flux'] > 10e-3]
        ratios = t['Total_flux'] / t['NVSS_Total_flux']
        bsratio = np.percentile(bootstrap(ratios, np.median, 10000), (16, 84))
        print 'Median LOFAR/NVSS ratio is %.3f (1-sigma %.3f -- %.3f)' % (
            np.median(ratios), bsratio[0], bsratio[1])
    # Noise estimate
    hdu = fits.open(o['pbimage'])

    imagenoise = get_rms(hdu)
    print 'An estimate of the image noise is %.3f muJy/beam' % (imagenoise *
                                                                1E6)
    return 0
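
The bootstrap() helper called above is not defined in this snippet; its call pattern suggests bootstrap(data, statistic, n) returning n resampled values of the statistic. A self-contained numpy stand-in under that assumption:

import numpy as np

def bootstrap(data, statistic, n):
    # Draw n resamples with replacement and evaluate the statistic on each.
    data = np.asarray(data)
    idx = np.random.randint(0, len(data), size=(n, len(data)))
    return np.array([statistic(data[i]) for i in idx])

# 68% interval on the mean offset, as in the astrometry block above:
# bsra = np.percentile(bootstrap(t['FIRST_dRA'], np.mean, 10000), (16, 84))
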
Example #3
        if 'Facet' not in t.columns:
            r.add_facet_labels(t)
        plot_offsets(t,r.clist,'red')
        if savefig is not None:
            plt.savefig(savefig)

if __name__=='__main__':
    # Main loop
    if len(sys.argv)<2:
        warn('quality_pipeline.py must be called with at least one parameter file\nor a command-line option list.\nE.g "pipeline.py example.cfg second_example.cfg --solutions-robust=0.1"\nSee below for a complete list of possible options with their default values.')
        print_options(option_list)
        sys.exit(1)

    o=options(sys.argv[1:],option_list)
    if o['pbimage'] is None:
        die('pbimage must be specified')
    if o['nonpbimage'] is None:
        die('nonpbimage must be specified')
    if o['list'] is not None:
        # fix up the new list-type options
        for i,cat in enumerate(o['list']):
            try:
                o[cat]=o['filenames'][i]
            except:
                pass
            try:
                o[cat+'_matchrad']=o['radii'][i]
            except:
                pass
            try:
                o[cat+'_fluxfactor']=o['fluxfactor'][i]
            except:
                pass
Example #4
name = sys.argv[1]
try:
    qsubfile = sys.argv[2]
except:
    qsubfile = '/home/mjh/git/ddf-pipeline/pipeline.qsub'

try:
    os.mkdir(name)
except OSError:
    warn('Working directory already exists')
    pass
os.chdir(name)
report('Downloading data')
if not download_dataset('https://lofar-webdav.grid.sara.nl',
                        '/SKSP/' + name + '/'):
    die('Download failed to get the right number of files')

report('Unpacking data')
unpack()

report('Deleting tar files')
os.system('rm *.tar.gz')

report('Making ms lists')
if make_list():
    report('Submit job')
    os.system('qsub -N ddfp-' + name + ' -v WD=' + rootdir + '/' + name + ' ' +
              qsubfile)
else:
    die('make_list could not construct the MS list')
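
The submission above builds a shell string for os.system(). A hedged alternative sketch using subprocess with a list of arguments, which sidesteps shell quoting of the job name and paths (the values here are placeholders, not from the original script):

import subprocess

name = 'P123+45'            # placeholder field name
rootdir = '/data'           # placeholder root directory
qsubfile = 'pipeline.qsub'  # placeholder qsub script
subprocess.check_call(['qsub', '-N', 'ddfp-' + name,
                       '-v', 'WD=' + rootdir + '/' + name, qsubfile])
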
Example #5
        if savefig is not None:
            plt.savefig(savefig)


if __name__ == '__main__':
    # Main loop
    if len(sys.argv) < 2:
        warn(
            'quality_pipeline.py must be called with at least one parameter file\nor a command-line option list.\nE.g "pipeline.py example.cfg second_example.cfg --solutions-robust=0.1"\nSee below for a complete list of possible options with their default values.'
        )
        print_options(option_list)
        sys.exit(1)

    o = options(sys.argv[1:], option_list)
    if o['pbimage'] is None:
        die('pbimage must be specified')
    if o['nonpbimage'] is None:
        die('nonpbimage must be specified')
    if o['list'] is not None:
        # fix up the new list-type options
        for i, cat in enumerate(o['list']):
            try:
                o[cat] = o['filenames'][i]
            except:
                pass
            try:
                o[cat + '_matchrad'] = o['radii'][i]
            except:
                pass
            try:
                o[cat + '_fluxfactor'] = o['fluxfactor'][i]
            except:
                pass
Example #6
def do_run_pipeline(name, basedir):

    if name[0] != 'P' and name[0] != 'L':
        die('This code should be used only with field or observation names',
            database=False)

    do_field = (name[0] == 'P')

    try:
        qsubfile = sys.argv[2]
    except:
        qsubfile = '/home/mjh/pipeline-master/ddf-pipeline/torque/pipeline.qsub'

    workdir = basedir + '/' + name
    try:
        os.mkdir(workdir)
    except OSError:
        warn('Working directory already exists')

    report('Downloading data')
    if do_field:
        success = download_field(name, basedir=basedir)
    else:
        success = download_dataset('https://lofar-webdav.grid.sara.nl',
                                   '/SKSP/' + name + '/',
                                   basedir=basedir)

    if not success:
        die('Download failed, see earlier errors', database=False)

    report('Unpacking data')
    try:
        unpack(workdir=workdir)
    except RuntimeError:
        if do_field:
            update_status(name, 'List failed', workdir=workdir)
        raise
    if do_field:
        update_status(name, 'Unpacked', workdir=workdir)

    report('Deleting tar files')
    os.system('rm ' + workdir + '/*.tar.gz')
    os.system('rm ' + workdir + '/*.tar')

    averaged = False
    report('Checking structure')
    g = glob.glob(workdir + '/*.ms')
    msl = MSList(None, mss=g)
    uobsids = set(msl.obsids)
    for thisobs in uobsids:
        # check one MS with each ID
        for m, ch, o in zip(msl.mss, msl.channels, msl.obsids):
            if o == thisobs:
                channels = len(ch)
                print 'MS', m, 'has', channels, 'channels'
                if channels > 20:
                    update_status(name, 'Averaging', workdir=workdir)
                    print 'Averaging needed for', thisobs, '!'
                    averaged = True
                    average(wildcard=workdir + '/*' + thisobs + '*')
                    os.system('rm -r ' + workdir + '/*' + thisobs +
                              '*pre-cal.ms')
                break

    report('Making ms lists')
    success = make_list(workdir=workdir)
    if do_field:
        list_db_update(success, workdir=workdir)
    if not success:
        die('make_list could not construct the MS list', database=False)

    report('Creating custom config file from template')
    make_custom_config(name, workdir, do_field, averaged)

    # now run the job
    do_run_job(name, basedir=basedir, qsubfile=None, do_field=do_field)
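
The structure check above inspects one MS per observation ID and flags the obsid for averaging when that MS has more than 20 channels. The same decision isolated as a function, assuming an MSList-like object with parallel mss, channels and obsids attributes:

def needs_averaging(msl, max_channels=20):
    # Return the obsids whose first matching MS has too many channels.
    flagged = set()
    for thisobs in set(msl.obsids):
        for m, ch, ob in zip(msl.mss, msl.channels, msl.obsids):
            if ob == thisobs:
                if len(ch) > max_channels:
                    flagged.add(thisobs)
                break  # only the first MS of each obsid is checked, as above
    return flagged
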
Example #7
def make_mosaic(args):
    if args.scale is not None:
        if len(args.scale) != len(args.directories):
            die('Scales provided must match directories')

    if args.noise is not None:
        if len(args.noise) != len(args.directories):
            die('Noises provided must match directories')

    if args.rootname:
        rootname=args.rootname+'-'
    else:
        rootname=''

    if args.exact:
        reproj=reproject_exact
    else:
        reproj=reproject_interp_chunk_2d

    if args.do_lowres:
        intname='image_full_low_m.int.restored.fits'
        appname='image_full_low_m.app.restored.fits'
    elif args.use_shifted:
        intname='image_full_ampphase_di_m.NS_shift.int.facetRestored.fits'
        appname='image_full_ampphase_di_m.NS_shift.app.facetRestored.fits'
    else:
        intname='image_full_ampphase_di_m.NS.int.restored.fits'
        appname='image_full_ampphase_di_m.NS.app.restored.fits'

    # astromap blanking if required
    bth=None
    try:
        bth=float(args.astromap_blank)
    except:
        pass

    threshold=float(args.beamcut)
    hdus=[]
    app=[]
    astromaps=[]
    wcs=[]
    print 'Reading files...'
    noise=[]
    name=[]
    for d in args.directories:
        name.append(d.split('/')[-1])
        hdu=fits.open(d+'/'+intname)
        if args.find_noise:
            print 'Estimating noise for', d+'/'+intname
            if args.do_lowres:
                noise.append(get_rms(hdu,boxsize=1500))
            else:
                noise.append(get_rms(hdu))
        hdus.append(flatten(hdu))
        app.append(flatten(fits.open(d+'/'+appname)))
        if bth:
            astromaps.append(flatten(fits.open(d+'/astromap.fits')))

    if args.find_noise:
        args.noise=noise
        print 'Noise values are:'
        for t,n in zip(name,noise):
            print t,n

    print 'Computing noise/beam factors...'
    for i in range(len(app)):
        np.seterr(divide='ignore')
        app[i].data=np.divide(app[i].data,hdus[i].data)
        app[i].data[app[i].data<threshold]=0
        # at this point this is the beam factor: we want 1/sigma**2.0, so divide by central noise and square
        if args.noise is not None:
            if args.scale is not None:
                app[i].data/=args.noise[i]*args.scale[i]
            else:
                app[i].data/=args.noise[i]

        app[i].data=app[i].data**2.0

        if args.scale is not None:
            hdus[i].data*=args.scale[i]

    if args.shift:
        print 'Finding shifts (NOTE THIS CODE IS OBSOLETE)...'
        # shift according to the FIRST delta ra/dec from quality pipeline
        dras=[]
        ddecs=[]
        for d in args.directories:
            t=Table.read(d+'/image_full_ampphase1m.cat.fits_FIRST_match_filtered.fits')
            dras.append(np.mean(t['FIRST_dRA']))
            ddecs.append(np.mean(t['FIRST_dDEC']))
        print 'Applying shifts:',dras,ddecs
        for i in range(len(app)):
            for hdu in [hdus[i],app[i]]:
                ra=hdu.header['CRVAL1']
                dec=hdu.header['CRVAL2']
                hdu.header['CRVAL1']-=dras[i]/(3600.0*np.cos(np.pi*dec/180.0))
                hdu.header['CRVAL2']-=ddecs[i]/3600.0

    for i in range(len(app)):
        wcs.append(WCS(hdus[i].header))

    # astromap blanking
    if bth:
        print 'Blanking using astrometry quality maps with threshold',bth,'arcsec'
        for i in range(len(app)):
            outname=rootname+'astroblank-'+name[i]+'.fits'
            if args.load and os.path.isfile(outname):
                print 'Loading previously blanked image'
                hdu=fits.open(outname)
                hdus[i].data=hdu[0].data
            else:
                print 'Blanking image',i
                dmaxy,dmaxx=hdus[i].data.shape
                count=0
                am=astromaps[i]
                awcs=WCS(am.header)
                maxy,maxx=am.data.shape
                for y in range(maxy):
                    for x in range(maxx):
                        value=am.data[y,x]
                        if np.isnan(value):
                            if y<maxy-1:
                                value=am.data[y+1,x]
                        if value>bth:
                            ra,dec=[float(f) for f in awcs.wcs_pix2world(x,y,0)]
                            rx,ry=[int(p) for p in wcs[i].wcs_world2pix(ra,dec,0)]
                            rxp=rx+21 # astromap pix size, with margin
                            ryp=ry+21
                            if rx<0: rx=0
                            if ry<0: ry=0
                            if rxp>dmaxx: rxp=dmaxx
                            if ryp>dmaxy: ryp=dmaxy
                            hdus[i].data[ry:ryp,rx:rxp]=np.nan
                            count+=1
                print '... blanked',count*900.0/3600,'square arcmin'
                outname=rootname+'astroblank-'+name[i]+'.fits'
                if args.save: hdus[i].writeto(outname,clobber=True)
            app[i].data[np.isnan(hdus[i].data)]=np.nan

    # If the header is directly passed in, use it
    try:
        header=args.header
        xsize=header['NAXIS1']
        ysize=header['NAXIS2']
        print 'Mosaic using header passed from calling program'
    except:
        header=None
    if header is None:
        if args.load_layout:
            with open(rootname+'mosaic-header.pickle') as f:
                header=pickle.load(f)
            xsize=header['NAXIS1']
            ysize=header['NAXIS2']
            print 'Mosaic using loaded header'
        else:
            print 'Creating the mosaic header'
            ras=np.array([w.wcs.crval[0] for w in wcs])
            decs=np.array([w.wcs.crval[1] for w in wcs])

            mra=np.mean(ras)
            mdec=np.mean(decs)
            print 'Will make mosaic at',mra,mdec

            # we make a reference WCS and use it to find the extent in pixels
            # needed for the combined image

            rwcs=WCS(naxis=2)
            rwcs.wcs.ctype=wcs[0].wcs.ctype
            rwcs.wcs.cdelt=wcs[0].wcs.cdelt
            rwcs.wcs.crval=[mra,mdec]
            rwcs.wcs.crpix=[1,1]

            xmin=0
            xmax=0
            ymin=0
            ymax=0
            for a,w in zip(app,wcs):
                ys,xs=np.where(a.data)
                axmin=xs.min()
                aymin=ys.min()
                axmax=xs.max()
                aymax=ys.max()
                del(xs)
                del(ys)
                print 'non-zero',axmin,aymin,axmax,aymax
                for x,y in ((axmin,aymin),(axmax,aymin),(axmin,aymax),(axmax,aymax)):
                    ra,dec=[float(f) for f in w.wcs_pix2world(x,y,0)]
                    #print ra,dec
                    nx,ny=[float(f) for f in rwcs.wcs_world2pix(ra,dec,0)]
                    print nx,ny
                    if nx<xmin: xmin=nx
                    if nx>xmax: xmax=nx
                    if ny<ymin: ymin=ny
                    if ny>ymax: ymax=ny

            print 'co-ord range:', xmin, xmax, ymin, ymax

            xsize=int(xmax-xmin)
            ysize=int(ymax-ymin)

            rwcs.wcs.crpix=[-int(xmin)+1,-int(ymin)+1]
            print 'checking:', rwcs.wcs_world2pix(mra,mdec,0)
            print rwcs

            header=rwcs.to_header()
            header['NAXIS']=2
            header['NAXIS1']=xsize
            header['NAXIS2']=ysize

            with open(rootname+'mosaic-header.pickle','w') as f:
                pickle.dump(header,f)

    isum=np.zeros([ysize,xsize])
    wsum=np.zeros_like(isum)
    mask=np.zeros_like(isum,dtype=np.bool)
    print 'now making the mosaic'
    for i in range(len(hdus)):
        print 'image',i,'(',name[i],')'
        outname=rootname+'reproject-'+name[i]+'.fits'
        if args.load and os.path.exists(outname):
            print 'loading...'
            hdu=fits.open(outname)
            r=hdu[0].data
        else:
            print 'reprojecting...'
            r, footprint = reproj(hdus[i], header, hdu_in=0, parallel=False)
            r[np.isnan(r)]=0
            hdu = fits.PrimaryHDU(header=header,data=r)
            if args.save: hdu.writeto(outname,clobber=True)
        print 'weights',i,'(',name[i],')'
        outname=rootname+'weight-'+name[i]+'.fits'
        if args.load and os.path.exists(outname):
            print 'loading...'
            hdu=fits.open(outname)
            w=hdu[0].data
            mask|=(w>0)
        else:
            print 'reprojecting...'
            w, footprint = reproj(app[i], header, hdu_in=0, parallel=False)
            mask|=~np.isnan(w)
            w[np.isnan(w)]=0
            hdu = fits.PrimaryHDU(header=header,data=w)
            if args.save: hdu.writeto(outname,clobber=True)
        print 'add to mosaic...'
        isum+=r*w
        wsum+=w

    if not(args.no_write):
        isum/=wsum
        # mask now contains True where a non-nan region was present in either map
        isum[~mask]=np.nan
        for ch in ('BMAJ', 'BMIN', 'BPA'):
            header[ch]=hdus[0].header[ch]
        header['ORIGIN']='ddf-pipeline '+version()

        hdu = fits.PrimaryHDU(header=header,data=isum)
        hdu.writeto(rootname+'mosaic.fits',clobber=True)

        hdu = fits.PrimaryHDU(header=header,data=wsum)
        hdu.writeto(rootname+'mosaic-weights.fits',clobber=True)
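
The weight images built above reduce to an inverse-variance weight per pixel: the apparent/intrinsic ratio is the primary-beam factor, pixels below the beam cut are zeroed, and dividing by the central noise and squaring gives 1/sigma**2. A compact sketch of just that computation:

import numpy as np

def weight_map(app_data, int_data, noise, threshold):
    # beam factor -> beam cut -> inverse-variance weight (1/sigma**2 per pixel)
    with np.errstate(divide='ignore', invalid='ignore'):
        beam = np.divide(app_data, int_data)
    beam[beam < threshold] = 0
    return (beam / noise) ** 2
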
Example #8
def make_mosaic(args):
    if args.scale is not None:
        if len(args.scale) != len(args.directories):
            die('Scales provided must match directories')

    if args.noise is not None:
        if len(args.noise) != len(args.directories):
            die('Noises provided must match directories')

    if args.rootname:
        rootname = args.rootname + '-'
    else:
        rootname = ''

    if args.exact:
        reproj = reproject_exact
    else:
        reproj = reproject_interp_chunk_2d

    if args.do_lowres:
        intname = 'image_full_low_m.int.restored.fits'
        appname = 'image_full_low_m.app.restored.fits'
    elif args.use_shifted:
        intname = 'image_full_ampphase_di_m.NS_shift.int.facetRestored.fits'
        appname = 'image_full_ampphase_di_m.NS_shift.app.facetRestored.fits'
    else:
        intname = 'image_full_ampphase_di_m.NS.int.restored.fits'
        appname = 'image_full_ampphase_di_m.NS.app.restored.fits'

    # astromap blanking if required
    bth = None
    try:
        bth = float(args.astromap_blank)
    except:
        pass

    threshold = float(args.beamcut)
    hdus = []
    app = []
    astromaps = []
    wcs = []
    print 'Reading files...'
    noise = []
    name = []
    for d in args.directories:
        name.append(d.split('/')[-1])
        hdu = fits.open(d + '/' + intname)
        if args.find_noise:
            print 'Estimating noise for', d + '/' + intname
            if args.do_lowres:
                noise.append(get_rms(hdu, boxsize=1500))
            else:
                noise.append(get_rms(hdu))
        hdus.append(flatten(hdu))
        app.append(flatten(fits.open(d + '/' + appname)))
        if bth:
            astromaps.append(flatten(fits.open(d + '/astromap.fits')))

    if args.find_noise:
        args.noise = noise
        print 'Noise values are:'
        for t, n in zip(name, noise):
            print t, n

    print 'Computing noise/beam factors...'
    for i in range(len(app)):
        np.seterr(divide='ignore')
        app[i].data = np.divide(app[i].data, hdus[i].data)
        app[i].data[app[i].data < threshold] = 0
        # at this point this is the beam factor: we want 1/sigma**2.0, so divide by central noise and square
        if args.noise is not None:
            if args.scale is not None:
                app[i].data /= args.noise[i] * args.scale[i]
            else:
                app[i].data /= args.noise[i]

        app[i].data = app[i].data**2.0

        if args.scale is not None:
            hdus[i].data *= args.scale[i]

    if args.shift:
        print 'Finding shifts (NOTE THIS CODE IS OBSOLETE)...'
        # shift according to the FIRST delta ra/dec from quality pipeline
        dras = []
        ddecs = []
        for d in args.directories:
            t = Table.read(
                d +
                '/image_full_ampphase1m.cat.fits_FIRST_match_filtered.fits')
            dras.append(np.mean(t['FIRST_dRA']))
            ddecs.append(np.mean(t['FIRST_dDEC']))
        print 'Applying shifts:', dras, ddecs
        for i in range(len(app)):
            for hdu in [hdus[i], app[i]]:
                ra = hdu.header['CRVAL1']
                dec = hdu.header['CRVAL2']
                hdu.header['CRVAL1'] -= dras[i] / (3600.0 *
                                                   np.cos(np.pi * dec / 180.0))
                hdu.header['CRVAL2'] -= ddecs[i] / 3600.0

    for i in range(len(app)):
        wcs.append(WCS(hdus[i].header))

    # astromap blanking
    if bth:
        print 'Blanking using astrometry quality maps with threshold', bth, 'arcsec'
        for i in range(len(app)):
            outname = rootname + 'astroblank-' + name[i] + '.fits'
            if args.load and os.path.isfile(outname):
                print 'Loading previously blanked image'
                hdu = fits.open(outname)
                hdus[i].data = hdu[0].data
            else:
                print 'Blanking image', i
                dmaxy, dmaxx = hdus[i].data.shape
                count = 0
                am = astromaps[i]
                awcs = WCS(am.header)
                maxy, maxx = am.data.shape
                for y in range(maxy):
                    for x in range(maxx):
                        value = am.data[y, x]
                        if np.isnan(value):
                            if y < maxy - 1:
                                value = am.data[y + 1, x]
                        if value > bth:
                            ra, dec = [
                                float(f) for f in awcs.wcs_pix2world(x, y, 0)
                            ]
                            rx, ry = [
                                int(p)
                                for p in wcs[i].wcs_world2pix(ra, dec, 0)
                            ]
                            rxp = rx + 21  # astromap pix size, with margin
                            ryp = ry + 21
                            if rx < 0: rx = 0
                            if ry < 0: ry = 0
                            if rxp > dmaxx: rxp = dmaxx
                            if ryp > dmaxy: ryp = dmaxy
                            hdus[i].data[ry:ryp, rx:rxp] = np.nan
                            count += 1
                print '... blanked', count * 900.0 / 3600, 'square arcmin'
                outname = rootname + 'astroblank-' + name[i] + '.fits'
                if args.save: hdus[i].writeto(outname, clobber=True)
            app[i].data[np.isnan(hdus[i].data)] = np.nan

    # If the header is directly passed in, use it
    try:
        header = args.header
        xsize = header['NAXIS1']
        ysize = header['NAXIS2']
        print 'Mosaic using header passed from calling program'
    except:
        header = None
    if header is None:
        if args.load_layout:
            with open(rootname + 'mosaic-header.pickle') as f:
                header = pickle.load(f)
            xsize = header['NAXIS1']
            ysize = header['NAXIS2']
            print 'Mosaic using loaded header'
        else:
            print 'Creating the mosaic header'
            ras = np.array([w.wcs.crval[0] for w in wcs])
            decs = np.array([w.wcs.crval[1] for w in wcs])

            mra = np.mean(ras)
            mdec = np.mean(decs)
            print 'Will make mosaic at', mra, mdec

            # we make a reference WCS and use it to find the extent in pixels
            # needed for the combined image

            rwcs = WCS(naxis=2)
            rwcs.wcs.ctype = wcs[0].wcs.ctype
            rwcs.wcs.cdelt = wcs[0].wcs.cdelt
            rwcs.wcs.crval = [mra, mdec]
            rwcs.wcs.crpix = [1, 1]

            xmin = 0
            xmax = 0
            ymin = 0
            ymax = 0
            for a, w in zip(app, wcs):
                ys, xs = np.where(a.data)
                axmin = xs.min()
                aymin = ys.min()
                axmax = xs.max()
                aymax = ys.max()
                del (xs)
                del (ys)
                print 'non-zero', axmin, aymin, axmax, aymax
                for x, y in ((axmin, aymin), (axmax, aymin), (axmin, aymax),
                             (axmax, aymax)):
                    ra, dec = [float(f) for f in w.wcs_pix2world(x, y, 0)]
                    #print ra,dec
                    nx, ny = [float(f) for f in rwcs.wcs_world2pix(ra, dec, 0)]
                    print nx, ny
                    if nx < xmin: xmin = nx
                    if nx > xmax: xmax = nx
                    if ny < ymin: ymin = ny
                    if ny > ymax: ymax = ny

            print 'co-ord range:', xmin, xmax, ymin, ymax

            xsize = int(xmax - xmin)
            ysize = int(ymax - ymin)

            rwcs.wcs.crpix = [-int(xmin) + 1, -int(ymin) + 1]
            print 'checking:', rwcs.wcs_world2pix(mra, mdec, 0)
            print rwcs

            header = rwcs.to_header()
            header['NAXIS'] = 2
            header['NAXIS1'] = xsize
            header['NAXIS2'] = ysize

            with open(rootname + 'mosaic-header.pickle', 'w') as f:
                pickle.dump(header, f)

    isum = np.zeros([ysize, xsize])
    wsum = np.zeros_like(isum)
    mask = np.zeros_like(isum, dtype=np.bool)
    print 'now making the mosaic'
    for i in range(len(hdus)):
        print 'image', i, '(', name[i], ')'
        outname = rootname + 'reproject-' + name[i] + '.fits'
        if args.load and os.path.exists(outname):
            print 'loading...'
            hdu = fits.open(outname)
            r = hdu[0].data
        else:
            print 'reprojecting...'
            r, footprint = reproj(hdus[i], header, hdu_in=0, parallel=False)
            r[np.isnan(r)] = 0
            hdu = fits.PrimaryHDU(header=header, data=r)
            if args.save: hdu.writeto(outname, clobber=True)
        print 'weights', i, '(', name[i], ')'
        outname = rootname + 'weight-' + name[i] + '.fits'
        if args.load and os.path.exists(outname):
            print 'loading...'
            hdu = fits.open(outname)
            w = hdu[0].data
            mask |= (w > 0)
        else:
            print 'reprojecting...'
            w, footprint = reproj(app[i], header, hdu_in=0, parallel=False)
            mask |= ~np.isnan(w)
            w[np.isnan(w)] = 0
            hdu = fits.PrimaryHDU(header=header, data=w)
            if args.save: hdu.writeto(outname, clobber=True)
        print 'add to mosaic...'
        isum += r * w
        wsum += w

    if not (args.no_write):
        isum /= wsum
        # mask now contains True where a non-nan region was present in either map
        isum[~mask] = np.nan
        for ch in ('BMAJ', 'BMIN', 'BPA'):
            header[ch] = hdus[0].header[ch]
        header['ORIGIN'] = 'ddf-pipeline ' + version()

        hdu = fits.PrimaryHDU(header=header, data=isum)
        hdu.writeto(rootname + 'mosaic.fits', clobber=True)

        hdu = fits.PrimaryHDU(header=header, data=wsum)
        hdu.writeto(rootname + 'mosaic-weights.fits', clobber=True)
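
The accumulation loop above implements a weighted mean: isum collects w*r, wsum collects w, and their ratio is the mosaic, with pixels that no input ever covered reset to NaN. Isolated below, assuming NaNs in the inputs have already been zeroed as in the snippet:

import numpy as np

def coadd(rs, ws):
    # Inverse-variance weighted mean of reprojected images rs with weights ws.
    isum = np.zeros_like(rs[0])
    wsum = np.zeros_like(rs[0])
    mask = np.zeros(rs[0].shape, dtype=bool)
    for r, w in zip(rs, ws):
        isum += r * w
        wsum += w
        mask |= (w > 0)
    with np.errstate(invalid='ignore', divide='ignore'):
        mosaic = isum / wsum
    mosaic[~mask] = np.nan
    return mosaic, wsum
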
Example #9
        print_options(option_list)
        sys.exit(1)

    o = options(sys.argv[1:], option_list)

    if o['catch_signal']:
        catcher = Catcher()
    else:
        catcher = None

    uvrange = [o['image_uvmin'], o['uvmax']]
    killms_uvrange = [0, 1000]
    if o['solutions_uvmin'] is not None:
        killms_uvrange[0] = o['solutions_uvmin']
    if o['mslist'] is None:
        die('MS list must be specified')

    if o['logging'] is not None and not os.path.isdir(o['logging']):
        os.mkdir(o['logging'])

    # Set column name for first steps
    colname = o['colname']

    # Clear the shared memory
    run('CleanSHM.py', dryrun=o['dryrun'])

    # Check imaging weights -- needed before DDF
    new = check_imaging_weight(o['mslist'])

    if o['clearcache'] or new or o['redofrom']:
        # Clear the cache, we don't know where it's been. If this is a
Example #10
def run_bootstrap(o):

    colname = 'DATA_DI_CORRECTED'

    if o['mslist'] is None:
        die('MS list must be specified')

    if o['logging'] is not None and not os.path.isdir(o['logging']):
        os.mkdir(o['logging'])

    # check the data supplied
    if o['frequencies'] is None or o['catalogues'] is None:
        die('Frequencies and catalogues options must be specified')

    if "DDF_PIPELINE_CATALOGS" not in os.environ.keys():
        warn(
            "You need to define the environment variable DDF_PIPELINE_CATALOGS where your catalogs are located"
        )
        sys.exit(2)

    o["tgss"] = o["tgss"].replace("$$", os.environ["DDF_PIPELINE_CATALOGS"])
    o["catalogues"] = [
        l.replace("$$", os.environ["DDF_PIPELINE_CATALOGS"])
        for l in o["catalogues"]
    ]
    lCat = o["catalogues"] + [o["tgss"]]
    for fCat in lCat:
        if not os.path.isfile(fCat):
            warn("Catalog %s does not exist" % fCat)
            sys.exit(2)

    cl = len(o['catalogues'])
    if o['names'] is None:
        o['names'] = [
            os.path.basename(x).replace('.fits', '') for x in o['catalogues']
        ]
    if o['radii'] is None:
        o['radii'] = [10] * cl
    if o['groups'] is None:
        o['groups'] = range(cl)
    if (len(o['frequencies']) != cl or len(o['radii']) != cl
            or len(o['names']) != cl or len(o['groups']) != cl):
        die('Names, groups, radii and frequencies entries must be the same length as the catalogue list'
            )

    low_uvrange = [o['image_uvmin'], 2.5 * 206.0 / o['low_psf_arcsec']]
    if o['low_imsize'] is not None:
        low_imsize = o['low_imsize']  # allow over-ride
    else:
        low_imsize = o['imsize'] * o['cellsize'] / o['low_cell']

    low_robust = o['low_robust']

    # Clear the shared memory
    run('CleanSHM.py', dryrun=o['dryrun'])

    # We use the individual ms in mslist.
    m = MSList(o['mslist'])
    Uobsid = set(m.obsids)

    for obsid in Uobsid:

        warn('Running bootstrap for obsid %s' % obsid)

        freqs = []
        omslist = []
        for ms, ob, f in zip(m.mss, m.obsids, m.freqs):
            if ob == obsid:
                omslist.append(ms)
                freqs.append(f)

        if len(freqs) < 4:
            die('Not enough frequencies to bootstrap. Check your mslist or MS naming scheme'
                )

        # sort to work in frequency order

        freqs, omslist = (list(x) for x in zip(
            *sorted(zip(freqs, omslist), key=lambda pair: pair[0])))

        for f, ms in zip(freqs, omslist):
            print ms, f

        # generate the sorted input mslist
        with open('temp_mslist.txt', 'w') as f:
            for line in omslist:
                f.write(line + '\n')

        # Clean in cube mode
        # As for the main pipeline, first make a dirty map
        ddf_image('image_bootstrap_' + obsid + '_init',
                  'temp_mslist.txt',
                  cleanmask=None,
                  cleanmode='SSD',
                  ddsols='DDS0',
                  applysols='P',
                  majorcycles=0,
                  robust=low_robust,
                  uvrange=low_uvrange,
                  beamsize=o['low_psf_arcsec'],
                  imsize=low_imsize,
                  cellsize=o['low_cell'],
                  options=o,
                  colname=colname,
                  automask=True,
                  automask_threshold=15,
                  smooth=True,
                  cubemode=True,
                  conditional_clearcache=True)
        external_mask = 'bootstrap_external_mask.fits'
        make_external_mask(external_mask,
                           'image_bootstrap_' + obsid + '_init.dirty.fits',
                           use_tgss=True,
                           clobber=False,
                           cellsize='low_cell',
                           options=o)
        # Deep SSD clean with this external mask and automasking
        ddf_image('image_bootstrap_' + obsid,
                  'temp_mslist.txt',
                  cleanmask=external_mask,
                  reuse_psf=True,
                  reuse_dirty=True,
                  cleanmode='SSD',
                  ddsols='DDS0',
                  applysols='P',
                  majorcycles=5,
                  robust=low_robust,
                  uvrange=low_uvrange,
                  beamsize=o['low_psf_arcsec'],
                  imsize=low_imsize,
                  cellsize=o['low_cell'],
                  options=o,
                  colname=colname,
                  automask=True,
                  automask_threshold=15,
                  smooth=True,
                  cubemode=True,
                  conditional_clearcache=False)

        if os.path.isfile('image_bootstrap_' + obsid +
                          '.cube.int.restored.pybdsm.srl'):
            warn('Source list exists, skipping source extraction')
        else:
            warn('Running PyBDSM, please wait...')
            img = bdsm.process_image('image_bootstrap_' + obsid +
                                     '.cube.int.restored.fits',
                                     thresh_pix=5,
                                     rms_map=True,
                                     atrous_do=True,
                                     atrous_jmax=2,
                                     group_by_isl=True,
                                     rms_box=(80, 20),
                                     adaptive_rms_box=True,
                                     adaptive_thresh=80,
                                     rms_box_bright=(35, 7),
                                     mean_map='zero',
                                     spectralindex_do=True,
                                     specind_maxchan=1,
                                     debug=True,
                                     kappa_clip=3,
                                     flagchan_rms=False,
                                     flagchan_snr=False,
                                     incl_chan=True,
                                     spline_rank=1)
            # Write out in ASCII to work round bug in pybdsm
            img.write_catalog(catalog_type='srl',
                              format='ascii',
                              incl_chan='true')
            img.export_image(img_type='rms', img_format='fits')

        from make_fitting_product import make_catalogue
        import fitting_factors
        import find_outliers

        # generate the fitting product
        if os.path.isfile(obsid + 'crossmatch-1.fits'):
            warn('Crossmatch table exists, skipping crossmatch')
        else:
            t = pt.table(omslist[0] + '/FIELD', readonly=True, ack=False)
            direction = t[0]['PHASE_DIR']
            ra, dec = direction[0]

            if (ra < 0):
                ra += 2 * np.pi
            ra *= 180.0 / np.pi
            dec *= 180.0 / np.pi

            cats = zip(o['catalogues'], o['names'], o['groups'], o['radii'])
            make_catalogue('image_bootstrap_' + obsid +
                           '.cube.int.restored.pybdsm.srl',
                           ra,
                           dec,
                           2.5,
                           cats,
                           outnameprefix=obsid)

        freqlist = open(obsid + 'frequencies.txt', 'w')
        for n, f in zip(o['names'], o['frequencies']):
            freqlist.write('%f %s_Total_flux %s_E_Total_flux False\n' %
                           (f, n, n))
        for i, f in enumerate(freqs):
            freqlist.write('%f Total_flux_ch%i E_Total_flux_ch%i True\n' %
                           (f, i + 1, i + 1))
        freqlist.close()

        # Now call the fitting code

        if os.path.isfile(obsid + 'crossmatch-results-1.npy'):
            warn('Results 1 exists, skipping first fit')
        else:
            fitting_factors.run_all(1, name=obsid)

        nreject = -1  # avoid error if we fail somewhere
        if os.path.isfile(obsid + 'crossmatch-2.fits'):
            warn('Second crossmatch exists, skipping outlier rejection')
        else:
            nreject = find_outliers.run_all(1, name=obsid)

        if os.path.isfile(obsid + 'crossmatch-results-2.npy'):
            warn('Results 2 exists, skipping second fit')
        else:
            if nreject == 0:
                shutil.copyfile(obsid + 'crossmatch-results-1.npy',
                                obsid + 'crossmatch-results-2.npy')
        if os.path.isfile(obsid + 'crossmatch-results-2.npy'):
            warn('Results 2 exists, skipping second fit')
        else:
            fitting_factors.run_all(2, name=obsid)

        # Now apply corrections

        if o['full_mslist'] is None:
            die('Need big mslist to apply corrections')
        if not (o['dryrun']):
            warn('Applying corrections to MS list')
            scale = np.load(obsid + 'crossmatch-results-2.npy')[:, 0]
            # InterpolatedUS gives us linear interpolation between points
            # and extrapolation outside it
            spl = InterpolatedUnivariateSpline(freqs, scale, k=1)

            bigmslist = [s.strip() for s in open(o['full_mslist']).readlines()]
            obigmslist = [ms for ms in bigmslist if obsid in ms]

            for ms in obigmslist:
                t = pt.table(ms)
                try:
                    dummy = t.getcoldesc('SCALED_DATA')
                except RuntimeError:
                    dummy = None
                t.close()
                if dummy is not None:
                    warn('Table ' + ms +
                         ' has already been corrected, skipping')
                else:
                    # in this version we need to scale both the original data and the data in colname
                    t = pt.table(ms + '/SPECTRAL_WINDOW',
                                 readonly=True,
                                 ack=False)
                    frq = t[0]['REF_FREQUENCY']
                    factor = spl(frq)
                    print frq, factor
                    t = pt.table(ms, readonly=False)
                    desc = t.getcoldesc(o['colname'])
                    desc['name'] = 'SCALED_DATA'
                    t.addcols(desc)
                    d = t.getcol(o['colname'])
                    d *= factor
                    t.putcol('SCALED_DATA', d)
                    try:
                        dummy = t.getcoldesc(colname)
                    except RuntimeError:
                        dummy = None
                    if dummy is not None:
                        desc = t.getcoldesc(colname)
                        newname = colname + '_SCALED'
                        desc['name'] = newname
                        t.addcols(desc)
                        d = t.getcol(colname)
                        d *= factor
                        t.putcol(newname, d)

                    t.close()
    if os.path.isfile('image_bootstrap.app.mean.fits'):
        warn('Mean bootstrap image exists, not creating it')
    else:
        warn('Creating mean bootstrap image')
        hdus = []
        for obsid in Uobsid:
            hdus.append(
                fits.open('image_bootstrap_' + obsid + '.app.restored.fits'))
        for i in range(1, len(Uobsid)):
            hdus[0][0].data += hdus[i][0].data
        hdus[0][0].data /= len(Uobsid)
        hdus[0].writeto('image_bootstrap.app.mean.fits')
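
The k=1 InterpolatedUnivariateSpline above is just linear interpolation between the fitted per-band scale factors, with linear extrapolation outside the sampled range. A toy example with placeholder numbers:

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

freqs = [120e6, 130e6, 140e6, 150e6]  # placeholder band frequencies, Hz
scale = [1.10, 1.05, 1.02, 1.00]      # placeholder bootstrap scale factors
spl = InterpolatedUnivariateSpline(freqs, scale, k=1)
print(spl(135e6))  # factor for an MS whose REF_FREQUENCY is 135 MHz
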
Example #11
def run_bootstrap(o):
    
    colname='DATA_DI_CORRECTED'
    
    if o['mslist'] is None:
        die('MS list must be specified')

    if o['logging'] is not None and not os.path.isdir(o['logging']):
        os.mkdir(o['logging'])

    # check the data supplied
    if o['frequencies'] is None or o['catalogues'] is None:
        die('Frequencies and catalogues options must be specified')

    if "DDF_PIPELINE_CATALOGS" not in os.environ.keys():
        warn("You need to define the environment variable DDF_PIPELINE_CATALOGS where your catalogs are located")
        sys.exit(2)

    o["tgss"]=o["tgss"].replace("$$",os.environ["DDF_PIPELINE_CATALOGS"])
    o["catalogues"]=[l.replace("$$",os.environ["DDF_PIPELINE_CATALOGS"]) for l in o["catalogues"]]
    lCat=o["catalogues"]+[o["tgss"]]
    for fCat in lCat:
        if not os.path.isfile(fCat):
            warn("Catalog %s does not exist"%fCat)
            sys.exit(2)

    cl=len(o['catalogues'])
    if o['names'] is None:
        o['names']=[os.path.basename(x).replace('.fits','') for x in o['catalogues']]
    if o['radii'] is None:
        o['radii']=[10]*cl
    if o['groups'] is None:
        o['groups']=range(cl)
    if (len(o['frequencies'])!=cl or len(o['radii'])!=cl or
        len(o['names'])!=cl or len(o['groups'])!=cl):
        die('Names, groups, radii and frequencies entries must be the same length as the catalogue list')

    low_uvrange=[o['image_uvmin'],2.5*206.0/o['low_psf_arcsec']]
    if o['low_imsize'] is not None:
        low_imsize=o['low_imsize'] # allow over-ride
    else:
        low_imsize=o['imsize']*o['cellsize']/o['low_cell']

    low_robust=o['low_robust']

    # Clear the shared memory
    run('CleanSHM.py',dryrun=o['dryrun'])

    # We use the individual ms in mslist.
    m=MSList(o['mslist'])
    Uobsid = set(m.obsids)
    
    for obsid in Uobsid:
        
        warn('Running bootstrap for obsid %s' % obsid)

        freqs=[]
        omslist=[]
        for ms,ob,f in zip(m.mss,m.obsids,m.freqs):
            if ob==obsid:
                omslist.append(ms)
                freqs.append(f)

        if len(freqs)<4:
            die('Not enough frequencies to bootstrap. Check your mslist or MS naming scheme')

        # sort to work in frequency order

        freqs,omslist = (list(x) for x in zip(*sorted(zip(freqs, omslist), key=lambda pair: pair[0])))

        for f,ms in zip(freqs,omslist):
            print ms,f

        # generate the sorted input mslist
        with open('temp_mslist.txt','w') as f:
            for line in omslist:
                f.write(line+'\n')

        # Clean in cube mode
        # As for the main pipeline, first make a dirty map
        ddf_image('image_bootstrap_'+obsid+'_init','temp_mslist.txt',
                  cleanmask=None,cleanmode='SSD',ddsols='DDS0',
                  applysols='P',majorcycles=0,robust=low_robust,
                  uvrange=low_uvrange,beamsize=o['low_psf_arcsec'],
                  imsize=low_imsize,cellsize=o['low_cell'],
                  options=o,colname=colname,automask=True,
                  automask_threshold=15,smooth=True,cubemode=True,
                  conditional_clearcache=True)
        external_mask='bootstrap_external_mask.fits'
        make_external_mask(external_mask,'image_bootstrap_'+obsid+'_init.dirty.fits',use_tgss=True,clobber=False,cellsize='low_cell',options=o)
        # Deep SSD clean with this external mask and automasking
        ddf_image('image_bootstrap_'+obsid,'temp_mslist.txt',
                  cleanmask=external_mask,reuse_psf=True,reuse_dirty=True,
                  cleanmode='SSD',ddsols='DDS0',applysols='P',
                  majorcycles=5,robust=low_robust,uvrange=low_uvrange,
                  beamsize=o['low_psf_arcsec'],imsize=low_imsize,
                  cellsize=o['low_cell'],options=o,
                  colname=colname,automask=True,
                  automask_threshold=15,smooth=True,cubemode=True,
                  conditional_clearcache=False)

        if os.path.isfile('image_bootstrap_'+obsid+'.cube.int.restored.pybdsm.srl'):
            warn('Source list exists, skipping source extraction')
        else:
            warn('Running PyBDSM, please wait...')
            img=bdsm.process_image('image_bootstrap_'+obsid+'.cube.int.restored.fits',thresh_pix=5,rms_map=True,atrous_do=True,atrous_jmax=2,group_by_isl=True,rms_box=(80,20), adaptive_rms_box=True, adaptive_thresh=80, rms_box_bright=(35,7),mean_map='zero',spectralindex_do=True,specind_maxchan=1,debug=True,kappa_clip=3,flagchan_rms=False,flagchan_snr=False,incl_chan=True,spline_rank=1)
            # Write out in ASCII to work round bug in pybdsm
            img.write_catalog(catalog_type='srl',format='ascii',incl_chan='true')
            img.export_image(img_type='rms',img_format='fits')

        from make_fitting_product import make_catalogue
        import fitting_factors
        import find_outliers

        # generate the fitting product
        if os.path.isfile(obsid+'crossmatch-1.fits'):
            warn('Crossmatch table exists, skipping crossmatch')
        else:
            t = pt.table(omslist[0]+ '/FIELD', readonly=True, ack=False)
            direction = t[0]['PHASE_DIR']
            ra, dec = direction[0]

            if (ra<0):
                ra+=2*np.pi
            ra*=180.0/np.pi
            dec*=180.0/np.pi

            cats=zip(o['catalogues'],o['names'],o['groups'],o['radii'])
            make_catalogue('image_bootstrap_'+obsid+'.cube.int.restored.pybdsm.srl',ra,dec,2.5,cats,outnameprefix=obsid)
    
        freqlist=open(obsid+'frequencies.txt','w')
        for n,f in zip(o['names'],o['frequencies']):
            freqlist.write('%f %s_Total_flux %s_E_Total_flux False\n' % (f,n,n))
        for i,f in enumerate(freqs):
            freqlist.write('%f Total_flux_ch%i E_Total_flux_ch%i True\n' % (f,i+1,i+1))
        freqlist.close()

        # Now call the fitting code

        if os.path.isfile(obsid+'crossmatch-results-1.npy'):
            warn('Results 1 exists, skipping first fit')
        else:
            fitting_factors.run_all(1, name=obsid)

        nreject=-1 # avoid error if we fail somewhere
        if os.path.isfile(obsid+'crossmatch-2.fits'):
            warn('Second crossmatch exists, skipping outlier rejection')
        else:
            nreject=find_outliers.run_all(1, name=obsid)
    
        if os.path.isfile(obsid+'crossmatch-results-2.npy'):
            warn('Results 2 exists, skipping second fit')
        else:
            if nreject==0:
                shutil.copyfile(obsid+'crossmatch-results-1.npy',obsid+'crossmatch-results-2.npy')
        if os.path.isfile(obsid+'crossmatch-results-2.npy'):
            warn('Results 2 exists, skipping second fit')
        else:
            fitting_factors.run_all(2, name=obsid)

        # Now apply corrections

        if o['full_mslist'] is None:
            die('Need big mslist to apply corrections')
        if not(o['dryrun']):
            warn('Applying corrections to MS list')
            scale=np.load(obsid+'crossmatch-results-2.npy')[:,0]
            # InterpolatedUS gives us linear interpolation between points
            # and extrapolation outside it
            spl = InterpolatedUnivariateSpline(freqs, scale, k=1)
            
            bigmslist=[s.strip() for s in open(o['full_mslist']).readlines()]
            obigmslist = [ms for ms in bigmslist if obsid in ms]
            
            for ms in obigmslist:
                t = pt.table(ms)
                try:
                    dummy=t.getcoldesc('SCALED_DATA')
                except RuntimeError:
                    dummy=None
                t.close()
                if dummy is not None:
                    warn('Table '+ms+' has already been corrected, skipping')
                else:
                    # in this version we need to scale both the original data and the data in colname
                    t = pt.table(ms+'/SPECTRAL_WINDOW', readonly=True, ack=False)
                    frq=t[0]['REF_FREQUENCY']
                    factor=spl(frq)
                    print frq,factor
                    t=pt.table(ms,readonly=False)
                    desc=t.getcoldesc(o['colname'])
                    desc['name']='SCALED_DATA'
                    t.addcols(desc)
                    d=t.getcol(o['colname'])
                    d*=factor
                    t.putcol('SCALED_DATA',d)
                    try:
                        dummy=t.getcoldesc(colname)
                    except RuntimeError:
                        dummy=None
                    if dummy is not None:
                        desc=t.getcoldesc(colname)
                        newname=colname+'_SCALED'
                        desc['name']=newname
                        t.addcols(desc)
                        d=t.getcol(colname)
                        d*=factor
                        t.putcol(newname,d)

                    t.close()
    if os.path.isfile('image_bootstrap.app.mean.fits'):
        warn('Mean bootstrap image exists, not creating it')
    else:
        warn('Creating mean bootstrap image')
        hdus=[]
        for obsid in Uobsid:
            hdus.append(fits.open('image_bootstrap_'+obsid+'.app.restored.fits'))
        for i in range(1,len(Uobsid)):
            hdus[0][0].data+=hdus[i][0].data
        hdus[0][0].data/=len(Uobsid)
        hdus[0].writeto('image_bootstrap.app.mean.fits')
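
The final mean-image block above sums the per-obsid apparent-flux images in place and divides by the number of observations. The same step as a short standalone sketch (filenames here are placeholders):

import numpy as np
from astropy.io import fits

names = ['image_bootstrap_L123456.app.restored.fits',
         'image_bootstrap_L654321.app.restored.fits']  # placeholder obsids
hdus = [fits.open(n) for n in names]
mean = np.mean([h[0].data for h in hdus], axis=0)
fits.PrimaryHDU(header=hdus[0][0].header, data=mean).writeto(
    'image_bootstrap.app.mean.fits')
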
Example #12
from auxcodes import report, warn, die
from surveys_db import use_database, update_status
from download import download_dataset
from download_field import download_field
from unpack import unpack, unpack_db_update
from make_mslists import make_list, list_db_update
import sys
import os

rootdir = '/disks/paradata/shimwell/LoTSS-DR2/ongoing-leiden-runs'
os.chdir(rootdir)

name = sys.argv[1]

if name[0] != 'P' and name[0] != 'L':
    die('This code should be used only with field or observation names',
        database=False)

do_field = (name[0] == 'P')

try:
    os.mkdir(name)
except OSError:
    warn('Working directory already exists')
    pass
os.chdir(name)
report('Downloading data')
if do_field:
    success = download_field(name)
else:
    success = download_dataset('https://lofar-webdav.grid.sara.nl',
                               '/SKSP/' + name + '/')
Example #13
        prefix = 'old%i' % count

    print 'prefix is', prefix

    for ms in msnames:
        for s in ['p1', 'ap1']:
            if s in archivelist:
                for file in ['npz', 'parset']:
                    rename(
                        ms + '/killMS.killms_' + s + '.sols.' + file,
                        ms + '/' + prefix + '.killms_' + s + '.sols.' + file)

    if o['full_mslist'] is not None:
        with open(o['full_mslist'], 'r') as f:
            msnames = [l.strip() for l in f.readlines()]
        for ms in msnames:
            for s in ['f_ap1', 'f_ap2']:
                if s in archivelist:
                    for file in ['npz', 'parset']:
                        rename(
                            ms + '/killMS.killms_' + s + '.sols.' + file, ms +
                            '/' + prefix + '.killms_' + s + '.sols.' + file)


if __name__ == '__main__':
    if len(sys.argv) < 2:
        die('This script takes one argument, the name of the config file')

    o = options(sys.argv[1])
    do_archive(o, ['p1', 'ap1', 'f_ap1', 'f_ap2'])
Example #14
from auxcodes import report,warn,die
from surveys_db import use_database,update_status
from download import download_dataset
from download_field import download_field
from unpack import unpack,unpack_db_update
from make_mslists import make_list,list_db_update
import sys
import os

rootdir='/disks/paradata/shimwell/LoTSS-DR2/ongoing-leiden-runs'
os.chdir(rootdir)

name=sys.argv[1]

if name[0]!='P' and name[0]!='L':
    die('This code should be used only with field or observation names',database=False)

do_field=(name[0]=='P')

try:
    os.mkdir(name)
except OSError:
    warn('Working directory already exists')
    pass
os.chdir(name)
report('Downloading data')
if do_field:
    success=download_field(name)
else:
    success=download_dataset('https://lofar-webdav.grid.sara.nl','/SKSP/'+name+'/')
Example #15
def run_bootstrap(o):
    
    if o['mslist'] is None:
        die('MS list must be specified')

    if o['logging'] is not None and not os.path.isdir(o['logging']):
        os.mkdir(o['logging'])

    # check the data supplied
    if o['frequencies'] is None or o['catalogues'] is None:
        die('Frequencies and catalogues options must be specified')

    cl=len(o['catalogues'])
    if o['names'] is None:
        o['names']=[os.path.basename(x).replace('.fits','') for x in o['catalogues']]
    if o['radii'] is None:
        o['radii']=[10]*cl
    if o['groups'] is None:
        o['groups']=range(cl)
    if (len(o['frequencies'])!=cl or len(o['radii'])!=cl or
        len(o['names'])!=cl or len(o['groups'])!=cl):
        die('Names, groups, radii and frequencies entries must be the same length as the catalogue list')

    low_robust=-0.25
    low_uvrange=[0.1,25.0]

    # Clear the shared memory
    run('CleanSHM.py',dryrun=o['dryrun'])


    # We use the individual ms in mslist.
    mslist=[s.strip() for s in open(o['mslist']).readlines()]
    
    obsids = [os.path.basename(ms).split('_')[0] for ms in mslist]
    Uobsid = set(obsids)
    
    for obsid in Uobsid:
        
        warn('Running bootstrap for obsid %s' % obsid)

        # Get the frequencies -- need to take this from the MSs
        
        omslist = [ms for ms in mslist if obsid in ms]

        freqs=[]
        for ms in omslist:
            t = pt.table(ms+'/SPECTRAL_WINDOW', readonly=True, ack=False)
            freqs.append(t[0]['REF_FREQUENCY'])


        # sort to work in frequency order

        freqs,omslist = (list(x) for x in zip(*sorted(zip(freqs, omslist), key=lambda pair: pair[0])))

        for f,m in zip(freqs,omslist):
            print m,f


        # Clean in cube mode
        with open('temp_mslist.txt','w') as f:
            for line in omslist:
                f.write(line+'\n')
        ddf_image('image_bootstrap_'+obsid,'temp_mslist.txt',cleanmode='SSD',ddsols='killms_p1',applysols='P',majorcycles=4,robust=low_robust,uvrange=low_uvrange,beamsize=20,imsize=o['bsimsize'],cellsize=o['bscell'],options=o,colname=o['colname'],automask=True,automask_threshold=15,smooth=True,cubemode=True)

        if os.path.isfile('image_bootstrap_'+obsid+'.cube.int.restored.pybdsm.srl'):
            warn('Source list exists, skipping source extraction')
        else:
            warn('Running PyBDSM, please wait...')
            img = bdsm.process_image('image_bootstrap_'+obsid+'.cube.int.restored.fits',
                                     thresh_pix=5, rms_map=True, atrous_do=True,
                                     atrous_jmax=2, group_by_isl=True, rms_box=(80,20),
                                     adaptive_rms_box=True, adaptive_thresh=80,
                                     rms_box_bright=(35,7), mean_map='zero',
                                     spectralindex_do=True, specind_maxchan=1, debug=True,
                                     kappa_clip=3, flagchan_rms=False, flagchan_snr=False,
                                     incl_chan=True, spline_rank=1)
            # Write out in ASCII to work round bug in pybdsm
            img.write_catalog(catalog_type='srl',format='ascii',incl_chan='true')
            img.export_image(img_type='rms',img_format='fits')

        from make_fitting_product import make_catalogue
        import fitting_factors
        import find_outliers

        # generate the fitting product
        if os.path.isfile(obsid+'crossmatch-1.fits'):
            warn('Crossmatch table exists, skipping crossmatch')
        else:
            t = pt.table(omslist[0]+'/FIELD', readonly=True, ack=False)
            direction = t[0]['PHASE_DIR']
            t.close()
            ra, dec = direction[0]

            if (ra<0):
                ra+=2*np.pi
            ra*=180.0/np.pi
            dec*=180.0/np.pi

            cats=list(zip(o['catalogues'],o['names'],o['groups'],o['radii']))
            make_catalogue('image_bootstrap_'+obsid+'.cube.int.restored.pybdsm.srl',ra,dec,2.5,cats,outnameprefix=obsid)
    
        # Write the fitting input: one row per flux measurement, giving
        # frequency, flux column, error column and a flag that is True
        # for in-band channel fluxes and False for external catalogues
        with open(obsid+'frequencies.txt','w') as freqlist:
            for n,f in zip(o['names'],o['frequencies']):
                freqlist.write('%f %s_Total_flux %s_E_Total_flux False\n' % (f,n,n))
            for i,f in enumerate(freqs):
                freqlist.write('%f Total_flux_ch%i E_Total_flux_ch%i True\n' % (f,i+1,i+1))

        # Now call the fitting code

        if os.path.isfile(obsid+'crossmatch-results-1.npy'):
            warn('Results 1 exists, skipping first fit')
        else:
            fitting_factors.run_all(1, name=obsid)

        nreject=-1 # avoid error if we fail somewhere
        if os.path.isfile(obsid+'crossmatch-2.fits'):
            warn('Second crossmatch exists, skipping outlier rejection')
        else:
            nreject=find_outliers.run_all(1, name=obsid)
    
        if os.path.isfile(obsid+'crossmatch-results-2.npy'):
            warn('Results 2 exists, skipping second fit')
        else:
            if nreject==0:
                # no outliers were rejected, so the second fit would just
                # reproduce the first: copy the results across
                shutil.copyfile(obsid+'crossmatch-results-1.npy',obsid+'crossmatch-results-2.npy')
        if os.path.isfile(obsid+'crossmatch-results-2.npy'):
            warn('Results 2 exists, skipping second fit')
        else:
            fitting_factors.run_all(2, name=obsid)

        # Now apply corrections

        if o['full_mslist'] is None:
            die('Need big mslist to apply corrections')
        if not(o['dryrun']):
            warn('Applying corrections to MS list')
            scale=np.load(obsid+'crossmatch-results-2.npy')[:,0]
            # InterpolatedUS gives us linear interpolation between points
            # and extrapolation outside it
            spl = InterpolatedUnivariateSpline(freqs, scale, k=1)
            
            bigmslist=[s.strip() for s in open(o['full_mslist']).readlines()]
            obigmslist = [ms for ms in bigmslist if obsid in ms]
            
            for ms in obigmslist:
                t = pt.table(ms)
                try:
                    dummy=t.getcoldesc('SCALED_DATA')
                except RuntimeError:
                    dummy=None
                t.close()
                if dummy is not None:
                    warn('Table '+ms+' has already been corrected, skipping')
                else:
                    t = pt.table(ms+'/SPECTRAL_WINDOW', readonly=True, ack=False)
                    frq=t[0]['REF_FREQUENCY']
                    t.close()
                    factor=spl(frq)
                    print(frq, factor)
                    t=pt.table(ms,readonly=False)
                    desc=t.getcoldesc(o['colname'])
                    desc['name']='SCALED_DATA'
                    t.addcols(desc)
                    d=t.getcol(o['colname'])
                    d*=factor
                    t.putcol('SCALED_DATA',d)
                    t.close()
    if os.path.isfile('image_bootstrap.app.mean.fits'):
        warn('Mean bootstrap image exists, not creating it')
    else:
        warn('Creating mean bootstrap image')
        hdus=[]
        for obsid in Uobsid:
            hdus.append(fits.open('image_bootstrap_'+obsid+'.app.restored.fits'))
        for i in range(1,len(Uobsid)):
            hdus[0][0].data+=hdus[i][0].data
        hdus[0][0].data/=len(Uobsid)
        hdus[0].writeto('image_bootstrap.app.mean.fits')
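
The k=1 InterpolatedUnivariateSpline used above amounts to piecewise-linear interpolation between the per-band scale factors, with linear extrapolation beyond the sampled frequencies. A minimal standalone sketch, with made-up frequencies and scale factors:

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

# hypothetical bootstrap frequencies (Hz) and fitted flux scale factors
freqs = [120e6, 136e6, 152e6, 168e6]
scale = [1.10, 1.05, 1.02, 0.99]

spl = InterpolatedUnivariateSpline(freqs, scale, k=1)
print(spl(144e6))  # interpolates linearly between the 136 and 152 MHz points
print(spl(180e6))  # extrapolates linearly beyond the last sampled frequency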
Example #16
# Imports assumed from the surrounding module (the snippet starts at the
# function definition): standard library plus numpy, with report, warn,
# die and the download/unpack/database helpers imported as in Example #14;
# MSList, average, make_custom_config and do_run_job are taken to come
# from the pipeline's own modules.
import os
import glob
import numpy as np

def do_run_pipeline(name,basedir,qsubfile=None,do_field=True):
    '''
    Download, unpack and check a field, then submit the pipeline job.
    Set do_field=False for the now obsolete behaviour of downloading
    and imaging a particular observation.
    '''
    if qsubfile is None:
        qsubfile='/home/mjh/pipeline-master/ddf-pipeline/torque/pipeline.qsub'

    workdir=basedir+'/'+name
    try:
        os.mkdir(workdir)
    except OSError:
        warn('Working directory already exists')

    report('Downloading data')
    if do_field:
        success=download_field(name,basedir=basedir)
    else:
        success=download_dataset('https://lofar-webdav.grid.sara.nl','/SKSP/'+name+'/',basedir=basedir)

    if not success:
        die('Download failed, see earlier errors',database=False)

    report('Unpacking data')
    try:
        unpack(workdir=workdir)
    except RuntimeError:
        if do_field:
            update_status(name,'Unpack failed',workdir=workdir)
        raise
    if do_field:
        update_status(name,'Unpacked',workdir=workdir)

    report('Deleting tar files')
    os.system('rm '+workdir+'/*.tar.gz')
    os.system('rm '+workdir+'/*.tar')

    averaged=False
    report('Checking structure')
    g=glob.glob(workdir+'/*.ms')
    msl=MSList(None,mss=g)
    dysco=np.any(msl.dysco)
    uobsids=set(msl.obsids)
    for thisobs in uobsids:
        # check one MS with each ID
        for m,ch,o,hc in zip(msl.mss,msl.channels,msl.obsids,msl.hascorrected):
            if o==thisobs:
                if not hc:
                    print('MS',m,'has no CORRECTED_DATA column, forcing use of DATA')
                    averaged=True
                channels=len(ch)
                print('MS',m,'has',channels,'channels')
                if channels>20:
                    update_status(name,'Averaging',workdir=workdir)
                    print('Averaging needed for',thisobs,'!')
                    averaged=True
                    average(wildcard=workdir+'/*'+thisobs+'*')
                    os.system('rm -r '+workdir+'/*'+thisobs+'*pre-cal.ms')
                break
    
    report('Making ms lists')
    success=make_list(workdir=workdir)
    if do_field:
        list_db_update(success,workdir=workdir)
    if not success:
        die('make_list could not construct the MS list',database=False)
        
    report('Creating custom config file from template')
    make_custom_config(name,workdir,do_field,averaged)
    
    # now run the job
    do_run_job(name,basedir=basedir,qsubfile=None,do_field=do_field,dysco=dysco)
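
A hypothetical invocation, with a made-up field name and base directory:

# Queue the full pipeline for one LoTSS field (arguments are examples only)
do_run_pipeline('P123+45', '/data/lotss-runs', do_field=True)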