Example #1
def make_mask(imagename,
              thresh,
              verbose=False,
              options=None,
              external_mask=None,
              catcher=None):
    if catcher: catcher.check()

    # mask_use specifies a mask file to use
    if options is None:
        options = o  # attempt to get global

    fname = imagename + '.mask.fits'
    runcommand = "MakeMask.py --RestoredIm=%s --Th=%s --Box=50,2" % (imagename,
                                                                     thresh)
    if options['restart'] and os.path.isfile(fname):
        warn('File ' + fname + ' already exists, skipping MakeMask step')
        if verbose:
            print('Would have run', runcommand)
    else:
        run(runcommand,
            dryrun=options['dryrun'],
            log=logfilename('MM-' + imagename + '.log', options=options),
            quiet=options['quiet'])
        if external_mask is not None:
            merge_mask(fname, external_mask, fname)
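
# Usage sketch (illustrative, not from the pipeline source): imagename is the
# DDF.py output prefix and thresh the sigma threshold handed to MakeMask.py;
# the options dict and helpers (run, logfilename, warn) come from this module.
#
#   make_mask('image_ampphase1', thresh=10.0, verbose=True, options=o)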
Example #2
def make_list(workdir='.',force=False):
    g=sorted(glob.glob(workdir+'/*.ms'))
    full_mslist=[]
    start_times=[]
    for ms in g:
        ff=check_flagged(ms)
        t0,t1=get_timerange(ms)
        print(ms,ff)
        if ff<0.8:
            full_mslist.append(os.path.basename(ms))
            start_times.append(t0)
    full_mslist = np.array(full_mslist)
    start_times = np.array(start_times)  # array needed for boolean masking below

    # check for multiple observations
    Ustart_times = np.unique(start_times)

    if len(full_mslist)<18:
        warn('Too few MS found for normal running: only %i' % len(full_mslist))
        if not force: return False

    if len(full_mslist)<24:
        warn('Warning -- only %i ms found' % len(full_mslist))
        
    # ensure lists contain all ms from all observations and same subset for each observation
    write_full_mslist = np.array(())
    write_mslist = np.array(())
    for start_time in Ustart_times:        
        write_full_mslist = np.hstack((write_full_mslist,full_mslist[start_times==start_time]))
        write_mslist = np.hstack((write_mslist,full_mslist[start_times==start_time][2::4]))

    with open(workdir+'/big-mslist.txt','w') as f:
        f.writelines(ms+'\n' for ms in write_full_mslist)
    with open(workdir+'/mslist.txt','w') as f:
        f.writelines(ms+'\n' for ms in write_mslist)
    return True
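
# Usage sketch (illustrative): writes big-mslist.txt and mslist.txt in workdir,
# returning False when fewer than 18 sufficiently unflagged MSs are found
# unless force=True; die() here is assumed to be the module's error helper.
#
#   if not make_list(workdir='/data/myfield', force=False):
#       die('Could not construct MS list')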
def filter_catalog(singlecat,matchedcat,fitsimage,outname,auxcatname,options=None):
    if options is None:
        options = o

    if options['restart'] and os.path.isfile(outname):
        warn('File ' + outname +' already exists, skipping source filtering step')
    else:

        matchedcat = Table.read(matchedcat)
        singlecat = Table.read(singlecat)

        fitsimage = fits.open(fitsimage)

        fieldra = fitsimage[0].header['CRVAL1']
        fielddec = fitsimage[0].header['CRVAL2']
        fitsimage.close()

        print('Originally',len(matchedcat),'sources')
        matchedcat=filter_catalogue(matchedcat,fieldra,fielddec,3.0)

        print('%i sources after filtering for 3.0 deg from centre' % len(matchedcat))

        matchedcat=matchedcat[matchedcat['DC_Maj']<10.0] # ERROR!

        print('%i sources after filtering for sources over 10arcsec in LOFAR' % len(matchedcat))

        # not implemented yet!
        #tooextendedsources_aux = np.array(np.where(matchedcat[1].data[options['%s_match_majkey2'%auxcatname]] > options['%s_filtersize'%auxcatname])).flatten()
        #print '%s out of %s sources filtered out as over %sarcsec in %s'%(np.size(tooextendedsources_aux),len(allsources),options['%s_filtersize'%auxcatname],auxcatname)

        matchedcat=select_isolated_sources(matchedcat,30.0)
        print('%i sources after filtering for isolated sources in LOFAR' % len(matchedcat))

        matchedcat.write(outname)
Example #5
def make_list(workdir='.'):
    g=sorted(glob.glob(workdir+'/*.ms'))
    full_mslist=[]
    start_times=[]
    for ms in g:
        ff=check_flagged(ms)
        t0,t1=get_timerange(ms)
        print(ms, ff)
        if ff<0.8:
            full_mslist.append(os.path.basename(ms))
            start_times.append(t0)
    full_mslist = np.array(full_mslist)
    start_times = np.array(start_times)  # array needed for boolean masking below

    # check for multiple observations
    Ustart_times = np.unique(start_times)

    if len(full_mslist)<18:
        warn('Too few MS found for normal running: only %i' % len(full_mslist))
        return False

    if len(full_mslist)<24:
        warn('Warning -- only %i ms found' % len(full_mslist))
        
    # ensure lists contain all ms from all observations and same subset for each observation
    write_full_mslist = np.array(())
    write_mslist = np.array(())
    for start_time in Ustart_times:        
        write_full_mslist = np.hstack((write_full_mslist,full_mslist[start_times==start_time]))
        write_mslist = np.hstack((write_mslist,full_mslist[start_times==start_time][2::4]))

    with open(workdir+'/big-mslist.txt','w') as f:
        f.writelines(ms+'\n' for ms in write_full_mslist)
    with open(workdir+'/mslist.txt','w') as f:
        f.writelines(ms+'\n' for ms in write_mslist)
    return True
Example #6
def make_external_mask(fname,
                       templatename,
                       use_tgss=True,
                       options=None,
                       extended_use=None,
                       clobber=False):
    if options is None:
        options = o  # attempt to get global
    if options['restart'] and os.path.isfile(fname) and not clobber:
        warn('External mask already exists, not creating it')
    else:
        report('Make blank external mask')
        hdus = fits.open(templatename)
        hdus[0].data = np.zeros_like(hdus[0].data, dtype=np.int32)
        hdus.writeto(fname, overwrite=True)  # clobber= was removed in recent astropy
        hdus.close()
        if use_tgss and options['tgss'] is not None:
            report('Merging the mask with TGSS catalogue')
            # TGSS path is provided, this means we want to add the positions of bright TGSS sources to the mask
            modify_mask(fname,
                        fname,
                        options['tgss'],
                        options['tgss_radius'],
                        options['tgss_flux'],
                        do_extended=options['tgss_extended'],
                        cellsize=options['cellsize'],
                        pointsize=options['tgss_pointlike'])

        if options['region'] is not None:
            report('Merging with mask with user-specified region')
            add_manual_mask(fname, options['region'], fname)

        if options['extended_size'] is not None and extended_use is not None:
            report('Merging with automatic extended mask')
            merge_mask(fname, extended_use, fname)
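
# Usage sketch (illustrative): builds a blank mask on the geometry of the
# template image and then merges in TGSS sources, a user region and an
# extended-emission mask as configured; the option keys are the ones read above.
#
#   make_external_mask('external_mask.fits', 'image_dirin.dirty.fits',
#                      use_tgss=True, clobber=False, options=o)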
Example #7
def killms_data(imagename,
                mslist,
                outsols,
                clusterfile=None,
                colname='CORRECTED_DATA',
                niterkf=6,
                dicomodel=None,
                uvrange=None,
                wtuv=None,
                robust=None,
                catcher=None,
                options=None):

    if options is None:
        options = o  # attempt to get global if it exists

    cache_dir = find_cache_dir(options)

    # run killms individually on each MS -- allows restart if it failed in the middle
    filenames = [l.strip() for l in open(mslist, 'r').readlines()]
    for f in filenames:
        if catcher: catcher.check()
        checkname = f + '/killMS.' + outsols + '.sols.npz'
        if options['restart'] and os.path.isfile(checkname):
            warn('Solutions file ' + checkname +
                 ' already exists, not running killMS step')
        else:
            runcommand = "killMS.py --MSName %s --SolverType KAFCA --PolMode Scalar --BaseImageName %s --dt %f --BeamMode LOFAR --LOFARBeamMode=A --NIterKF %i --CovQ 0.1 --LambdaKF=%f --NCPU %i --OutSolsName %s --NChanSols %i --PowerSmooth=%f --InCol %s --DDFCacheDir=%s" % (
                f, imagename, options['dt'], niterkf, options['LambdaKF'],
                options['NCPU_killms'], outsols, options['NChanSols'],
                options['PowerSmooth'], colname, cache_dir)
            if robust is None:
                runcommand += ' --Weighting Natural'
            else:
                runcommand += ' --Weighting Briggs --Robust=%f' % robust
            if uvrange is not None:
                if wtuv is not None:
                    runcommand += ' --WTUV=%f --WeightUVMinMax=%f,%f' % (
                        wtuv, uvrange[0], uvrange[1])
                else:
                    runcommand += ' --UVMinMax=%f,%f' % (uvrange[0],
                                                         uvrange[1])
            if clusterfile is not None:
                runcommand += ' --NodesFile ' + clusterfile
            if dicomodel is not None:
                runcommand += ' --DicoModel ' + dicomodel
            if options['nobar']:
                runcommand += ' --DoBar=0'

            rootfilename = outsols.split('/')[-1]
            f = f.replace("/", "_")
            run(runcommand,
                dryrun=options['dryrun'],
                log=logfilename('KillMS-' + f + '_' + rootfilename + '.log',
                                options=options),
                quiet=options['quiet'])
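
# Usage sketch (illustrative names): runs killMS.py once per MS in the list so
# a crashed run can restart where it stopped; solutions land in
# <ms>/killMS.<outsols>.sols.npz.
#
#   killms_data('image_ampphase1', 'mslist.txt', 'DIS1',
#               colname='CORRECTED_DATA', dicomodel='image_ampphase1.DicoModel',
#               uvrange=[0, 1000], options=o)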
Example #8
def update_frequencies(rootname, freq):
    g = glob.glob(rootname + '*.fits')
    for f in g:
        with fits.open(f) as hdu:
            if 'CRVAL4' in hdu[0].header:
                warn('Updating FITS header for %s to freq of %f' % (f, freq))
                hdu[0].header['CRVAL4'] = freq
                hdu[0].header['RESTFRQ'] = freq
                hdu.writeto(f, overwrite=True)
            del hdu[0].data
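
# Usage sketch (illustrative): rewrites CRVAL4/RESTFRQ in every FITS file whose
# name starts with the given root, e.g. after imaging at a nominal frequency.
#
#   update_frequencies('image_full_low', 143.65e6)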
Example #9
def do_plot_facet_offsets(t, regfile, savefig=None):
    ''' convenience function to plot offsets '''
    if savefig is not None and os.path.isfile(savefig):
        warn('Figure file %s exists, not re-making it' % savefig)
    else:
        cra, cdec = get_centpos()
        r = RegPoly(regfile, cra, cdec)
        if isinstance(t, str):
            t = Table.read(t)
        if 'Facet' not in t.columns:
            r.add_facet_labels(t)
        plot_offsets(t, r.clist, 'red')
        if savefig is not None:
            plt.savefig(savefig)
Example #11
def crossmatch_image(lofarcat, auxcatname, options=None):
    if options is None:
        options = o
    auxcat = options[auxcatname]
    if options['restart'] and os.path.isfile(lofarcat + '_' + auxcatname +
                                             '_match.fits'):
        warn('File ' + lofarcat + '_' + auxcatname +
             '_match.fits already exists, skipping source matching step')
    else:
        t = Table.read(lofarcat)
        tab = Table.read(auxcat)
        match_catalogues(t, tab, options[auxcatname + '_matchrad'], auxcatname)
        t = t[~np.isnan(t[auxcatname + '_separation'])]
        t.write(lofarcat + '_' + auxcatname + '_match.fits')
Example #12
def mask_dicomodel(indico, maskname, outdico, catcher=None):
    if catcher: catcher.check()

    if o['restart'] and os.path.isfile(outdico):
        warn('File ' + outdico +
             ' already exists, skipping MaskDicoModel step')
        return False
    else:
        runcommand = "MaskDicoModel.py --MaskName=%s --InDicoModel=%s --OutDicoModel=%s" % (
            maskname, indico, outdico)
        run(runcommand,
            dryrun=o['dryrun'],
            log=logfilename('MaskDicoModel-' + maskname + '.log'),
            quiet=o['quiet'])
        return True
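
# Usage sketch (illustrative names): applies a FITS mask to a DicoModel so only
# components inside the mask survive; returns True if the step actually ran.
#
#   mask_dicomodel('image_ampphase1.DicoModel', 'image_ampphase1.mask.fits',
#                  'image_ampphase1.masked.DicoModel')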
Example #13
def make_model(maskname, imagename, catcher=None):
    # returns True if the step was run, False if skipped
    if catcher: catcher.check()

    fname = imagename + '.npy'
    if o['restart'] and os.path.isfile(fname):
        warn('File ' + fname + ' already exists, skipping MakeModel step')
        return False
    else:
        runcommand = "MakeModel.py --MaskName=%s --BaseImageName=%s --NCluster=%i --DoPlot=0" % (
            maskname, imagename, o['ndir'])
        run(runcommand,
            dryrun=o['dryrun'],
            log=logfilename('MakeModel-' + maskname + '.log'),
            quiet=o['quiet'])
        return True
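
# Usage sketch (illustrative names): clusters the masked components into
# o['ndir'] directions, producing <imagename>.npy for use as a killMS NodesFile.
#
#   if make_model('image_ampphase1.mask.fits', 'image_ampphase1'):
#       print('MakeModel step was run')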
Example #14
def smooth_solutions(mslist, ddsols, interval, catcher=None):
    outsols = ddsols + '.Smooth'
    filenames = [l.strip() for l in open(mslist, 'r').readlines()]
    for f in filenames:
        if catcher: catcher.check()
        checkname = f + '/killMS.' + outsols + '.sols.npz'
        if o['restart'] and os.path.isfile(checkname):
            warn('Solutions file ' + checkname +
                 ' already exists, not running smoothing step')
        else:
            smoothsols.main(options=dotdict({
                'MSName': f,
                'Order': 2,
                'Plot': False,
                'SolsFile': ddsols,
                'WSize': interval
            }))
    return outsols
def crossmatch_image(lofarcat,auxcatname,options=None,catdir='.'):

    if options is None:
        options = o
    auxcat = options[auxcatname]
    crossmatchname=lofarcat + '_' + auxcatname + '_match.fits'
    if options['restart'] and os.path.isfile(crossmatchname):
        warn('File ' + crossmatchname+ ' already exists, skipping source matching step')
        t=Table.read(crossmatchname)
        matches=len(t)
        del(t)
    else:
        t=Table.read(lofarcat)
        tab=Table.read(catdir+'/'+auxcat)
        matches=match_catalogues(t,tab,options[auxcatname+'_matchrad'],auxcatname)
        t=t[~np.isnan(t[auxcatname+'_separation'])]
        t.write(lofarcat+'_'+auxcatname+'_match.fits')
    return matches
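
# Usage sketch (illustrative): auxcatname must be an option key holding the
# auxiliary catalogue filename, with a matching '<auxcatname>_matchrad' entry.
#
#   n = crossmatch_image('lofar.cat.fits', 'FIRST', catdir='/data/catalogues')
#   print(n, 'matches found')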
Example #17
def check_imaging_weight(mslist_name):

    # returns a boolean that says whether it did something
    result = False
    report('Checking for IMAGING_WEIGHT in input MSS')
    mslist = [s.strip() for s in open(mslist_name).readlines()]
    for ms in mslist:
        t = pt.table(ms)
        try:
            dummy = t.getcoldesc('IMAGING_WEIGHT')
        except RuntimeError:
            dummy = None
        t.close()
        if dummy is not None:
            warn('Table ' + ms + ' already has imaging weights')
        else:
            pt.addImagingColumns(ms)
            result = True
    return result
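
# Usage sketch (illustrative): adds the casacore IMAGING_WEIGHT column where it
# is missing; the return value says whether any MS was modified.
#
#   if check_imaging_weight('mslist.txt'):
#       print('imaging weight columns were added')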
Example #18
def ddf_shift(imagename, shiftfile, catcher=None, options=None, verbose=False):
    if catcher: catcher.check()
    if options is None:
        options = o  # attempt to get global if it exists

    cache_dir = find_cache_dir(options)

    runcommand = 'DDF.py ' + imagename + '.parset --Output-Name=' + imagename + '_shift --Image-Mode=RestoreAndShift --Output-ShiftFacetsFile=' + shiftfile + ' --Predict-InitDicoModel ' + imagename + '.DicoModel --Cache-SmoothBeam=force --Cache-Dir=' + cache_dir

    fname = imagename + '_shift.app.facetRestored.fits'
    if options['restart'] and os.path.isfile(fname):
        warn('File ' + fname + ' already exists, skipping DDF-shift step')
        if verbose:
            print('would have run', runcommand)
    else:
        run(runcommand,
            dryrun=options['dryrun'],
            log=logfilename('DDF-' + imagename + '_shift.log',
                            options=options),
            quiet=options['quiet'])
def sfind_image(catprefix,pbimage,nonpbimage,sfind_pixel_fraction,options=None):

    if options is None:
        options = o
    f = fits.open(nonpbimage)
    imsizex = f[0].header['NAXIS1']
    imsizey = f[0].header['NAXIS2']
    f.close()
    kwargs={}
    if options['sfind_pixel_fraction']<1.0:
        lowerx,upperx = int(((1.0-sfind_pixel_fraction)/2.0)*imsizex),int(((1.0-sfind_pixel_fraction)/2.0)*imsizex + sfind_pixel_fraction*imsizex)
        lowery,uppery = int(((1.0-sfind_pixel_fraction)/2.0)*imsizey),int(((1.0-sfind_pixel_fraction)/2.0)*imsizey + sfind_pixel_fraction*imsizey)
        kwargs['trim_box']=(lowerx,upperx,lowery,uppery)

    if options['restart'] and os.path.isfile(catprefix +'.cat.fits'):
        warn('File ' + catprefix +'.cat.fits already exists, skipping source finding step')
    else:
        img = bdsm.process_image(pbimage, detection_image=nonpbimage, thresh_isl=4.0, thresh_pix=5.0, rms_box=(160,50), rms_map=True, mean_map='zero', ini_method='intensity', adaptive_rms_box=True, adaptive_thresh=150, rms_box_bright=(60,15), group_by_isl=False, group_tol=10.0,output_opts=True, output_all=True, atrous_do=True,atrous_jmax=4, flagging_opts=True, flag_maxsize_fwhm=0.5,advanced_opts=True, blank_limit=None,**kwargs)
        img.write_catalog(outfile=catprefix +'.cat.fits',catalog_type='srl',format='fits',correct_proj='True')
        img.export_image(outfile=catprefix +'.rms.fits',img_type='rms',img_format='fits',clobber=True)
        img.export_image(outfile=catprefix +'.resid.fits',img_type='gaus_resid',img_format='fits',clobber=True)
        img.export_image(outfile=catprefix +'.pybdsmmask.fits',img_type='island_mask',img_format='fits',clobber=True)
        img.write_catalog(outfile=catprefix +'.cat.reg',catalog_type='srl',format='ds9',correct_proj='True')
def sfind_image(catprefix,pbimage,nonpbimage,sfind_pixel_fraction,options=None):

    if options is None:
        options = o
    f = fits.open(nonpbimage)
    imsizex = f[0].header['NAXIS1']
    imsizey = f[0].header['NAXIS2']
    f.close()
    kwargs={}
    if options['sfind_pixel_fraction']<1.0:
        lowerx,upperx = int(((1.0-sfind_pixel_fraction)/2.0)*imsizex),int(((1.0-sfind_pixel_fraction)/2.0)*imsizex + sfind_pixel_fraction*imsizex)
        lowery,uppery = int(((1.0-sfind_pixel_fraction)/2.0)*imsizey),int(((1.0-sfind_pixel_fraction)/2.0)*imsizey + sfind_pixel_fraction*imsizey)
        kwargs['trim_box']=(lowerx,upperx,lowery,uppery)

    if options['restart'] and os.path.isfile(catprefix +'.cat.fits'):
        warn('File ' + catprefix +'.cat.fits already exists, skipping source finding step')
    else:
        img = bdsm.process_image(pbimage, detection_image=nonpbimage, thresh_isl=4.0, thresh_pix=5.0, rms_box=(160,50), rms_map=True, mean_map='zero', ini_method='intensity', adaptive_rms_box=True, adaptive_thresh=150, rms_box_bright=(60,15), group_by_isl=False, group_tol=10.0,output_opts=True, output_all=True, atrous_do=True,atrous_jmax=4, flagging_opts=True, flag_maxsize_fwhm=0.5,advanced_opts=True, ncores=options['NCPU'], blank_limit=None,**kwargs)
        img.write_catalog(outfile=catprefix +'.cat.fits',catalog_type='srl',format='fits',correct_proj='True')
        img.export_image(outfile=catprefix +'.rms.fits',img_type='rms',img_format='fits',clobber=True)
        img.export_image(outfile=catprefix +'.resid.fits',img_type='gaus_resid',img_format='fits',clobber=True)
        img.export_image(outfile=catprefix +'.pybdsmmask.fits',img_type='island_mask',img_format='fits',clobber=True)
        img.write_catalog(outfile=catprefix +'.cat.reg',catalog_type='srl',format='ds9',correct_proj='True')
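
# Usage sketch (illustrative names): runs PyBDSF on the primary-beam-corrected
# image using the uncorrected image for detection, trimming to the inner
# fraction of the map when sfind_pixel_fraction < 1.
#
#   sfind_image('mycat', 'image.int.restored.fits', 'image.app.restored.fits',
#               sfind_pixel_fraction=0.5, options=o)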
def run_reprocess(wd=None):
    # by default assume we're in the working directory at this point
    if wd is not None:
        os.chdir(wd)
    update_status(None,'Running')
    solsfile = glob.glob('DDS3_full*smoothed.npz')
    if len(solsfile) < 1:
        die('Cannot find the correct solution file -- exiting')
    solsfile = str(solsfile[0])
    o = options('reprocess-vlow.cfg',option_list)
    cubefiles=['image_full_vlow_QU.cube.dirty.fits','image_full_vlow_QU.cube.dirty.corr.fits']
    cthreads=[]
    flist=[]
    ddf_kw = {}
    do_polcubes('DATA','[DDS3_full_smoothed,DDS3_full_slow]',[o['image_uvmin'],1.600000],'image_full_vlow',ddf_kw,beamsize=o['vlow_psf_arcsec'],imsize=o['vlow_imsize'],cellsize=o['vlow_cell'],robust=o['vlow_robust'],options=o,catcher=None)
    if o['compress_polcubes']:
        for cubefile in cubefiles:
            if o['restart'] and os.path.isfile(cubefile+'.fz'):
                warn('Compressed cube file '+cubefile+'.fz already exists, not starting compression thread')
            else:
                report('Starting compression thread for '+cubefile)
                thread = threading.Thread(target=compress_fits, args=(cubefile,o['fpack_q']))
                thread.start()
                cthreads.append(thread)
                flist.append(cubefile)
    if o['compress_polcubes']:
        # cthreads and flist exist
        for thread in cthreads:
            if thread.is_alive():  # isAlive() was removed in Python 3.9
                warn('Waiting for a compression thread to finish')
                thread.join()
        if o['delete_compressed']:
            for f in flist:
                warn('Deleting compressed file %s' % f)
                os.remove(f)
    update_status(None,'Complete')
Example #22
def do_run_pipeline(name, basedir):

    if name[0] != 'P' and name[0] != 'L':
        die('This code should be used only with field or observation names',
            database=False)

    do_field = (name[0] == 'P')

    try:
        qsubfile = sys.argv[2]
    except IndexError:
        qsubfile = '/home/mjh/pipeline-master/ddf-pipeline/torque/pipeline.qsub'

    workdir = basedir + '/' + name
    try:
        os.mkdir(workdir)
    except OSError:
        warn('Working directory already exists')

    report('Downloading data')
    if do_field:
        success = download_field(name, basedir=basedir)
    else:
        success = download_dataset('https://lofar-webdav.grid.sara.nl',
                                   '/SKSP/' + name + '/',
                                   basedir=basedir)

    if not success:
        die('Download failed, see earlier errors', database=False)

    report('Unpacking data')
    try:
        unpack(workdir=workdir)
    except RuntimeError:
        if do_field:
            update_status(name, 'List failed', workdir=workdir)
        raise
    if do_field:
        update_status(name, 'Unpacked', workdir=workdir)

    report('Deleting tar files')
    os.system('rm ' + workdir + '/*.tar.gz')
    os.system('rm ' + workdir + '/*.tar')

    averaged = False
    report('Checking structure')
    g = glob.glob(workdir + '/*.ms')
    msl = MSList(None, mss=g)
    uobsids = set(msl.obsids)
    for thisobs in uobsids:
        # check one MS with each ID
        for m, ch, o in zip(msl.mss, msl.channels, msl.obsids):
            if o == thisobs:
                channels = len(ch)
                print('MS', m, 'has', channels, 'channels')
                if channels > 20:
                    update_status(name, 'Averaging', workdir=workdir)
                    print('Averaging needed for', thisobs, '!')
                    averaged = True
                    average(wildcard=workdir + '/*' + thisobs + '*')
                    os.system('rm -r ' + workdir + '/*' + thisobs +
                              '*pre-cal.ms')
                break

    report('Making ms lists')
    success = make_list(workdir=workdir)
    if do_field:
        list_db_update(success, workdir=workdir)
    if not success:
        die('make_list could not construct the MS list', database=False)

    report('Creating custom config file from template')
    make_custom_config(name, workdir, do_field, averaged)

    # now run the job
    do_run_job(name, basedir=basedir, qsubfile=None, do_field=do_field)
Example #23


if __name__ == '__main__':
    # Main loop
    if len(sys.argv) < 2:
        warn(
            'quality_pipeline.py must be called with at least one parameter file\nor a command-line option list.\nE.g "pipeline.py example.cfg second_example.cfg --solutions-robust=0.1"\nSee below for a complete list of possible options with their default values.'
        )
        print_options(option_list)
        sys.exit(1)

    o = options(sys.argv[1:], option_list)
    if o['pbimage'] is None:
        die('pbimage must be specified')
    if o['nonpbimage'] is None:
        die('nonpbimage must be specified')
    if o['list'] is not None:
        # fix up the new list-type options
        for i, cat in enumerate(o['list']):
            try:
                o[cat] = o['filenames'][i]
            except:
                pass
def plot_position_offset(catalog,fitsimage,outname,auxcatname,options=None):
    if options is None:
        options = o
    if os.path.isfile(outname):
        warn('Plot file %s exists, not making it' % outname)
    else:
        scat = Table.read(catalog)

        fitsimage = fits.open(fitsimage)
        fieldra = fitsimage[0].header['CRVAL1']
        fielddec = fitsimage[0].header['CRVAL2']
        fitsimage.close()

        x = scat[auxcatname+'_dRA']
        y = scat[auxcatname+'_dDEC']

        nullfmt = NullFormatter()         # no labels

        # definitions for the axes
        left, width = 0.1, 0.65
        bottom, height = 0.1, 0.65
        bottom_h = left_h = left + width + 0.02

        rect_scatter = [left, bottom, width, height]
        rect_histx = [left, bottom_h, width, 0.2]
        rect_histy = [left_h, bottom, 0.2, height]

        # start with a rectangular Figure
        plt.figure(1, figsize=(8, 8))

        axScatter = plt.axes(rect_scatter)
        plt.xlabel('$RA_{\\rm LOFAR} - RA_{\\rm %s}$ (arcsec)' % auxcatname)
        plt.ylabel('$DEC_{\\rm LOFAR} - DEC_{\\rm %s}$ (arcsec)' % auxcatname)
        plt.xticks(rotation=270)
        axScatter.plot(np.zeros(40),np.arange(-20,20),'k--')
        axScatter.plot(np.arange(-20,20),np.zeros(40),'k--')
        axHistx = plt.axes(rect_histx)
        axHisty = plt.axes(rect_histy)
        plt.xticks(rotation=270)

        # no labels
        axHistx.xaxis.set_major_formatter(nullfmt)
        axHisty.yaxis.set_major_formatter(nullfmt)

        # the scatter plot:
        axScatter.scatter(x, y,marker='+',s=2,alpha=0.3)

        binwidth = 0.25
        xymax = np.max([np.max(np.fabs(x)), np.max(np.fabs(y))])
        lim = (int(xymax/binwidth) + 1) * binwidth

        axScatter.set_xlim((-lim, lim))
        axScatter.set_ylim((-lim, lim))

        bins = np.arange(-lim, lim + binwidth, binwidth)
        axHistx.hist(x, bins=bins)
        axHisty.hist(y, bins=bins, orientation='horizontal')

        axHistx.set_xlim(axScatter.get_xlim())
        axHisty.set_ylim(axScatter.get_ylim())

        axScatter.add_artist(Ellipse((np.median(x),np.median(y)),np.std(x),np.std(y),angle=0,linewidth=2,fill=False,color='k',zorder=10000))

        plt.savefig(outname)
        plt.close('all')
        plt.cla()
        plt.clf()
def plot_flux_errors(catalog,fitsimage,outname,auxcatname,options=None):
    if options is None:
        options = o
    if os.path.isfile(outname.replace('.png','_integrated.png')):
        warn('Plot file %s exists, not making it' % outname)
    else:

        scat = Table.read(catalog)

        fitsimage = fits.open(fitsimage)
        fieldra = fitsimage[0].header['CRVAL1']
        fielddec = fitsimage[0].header['CRVAL2']
        fitsimage.close()
        radialseps = sepn(scat['RA']*deg2rad,scat['DEC']*deg2rad,fieldra*deg2rad,fielddec*deg2rad)*rad2deg

        plt.plot(np.sort(scat['Total_flux']),np.sort(scat['Total_flux'])*np.median(np.array(scat[auxcatname+'_Total_flux']/options[auxcatname+'_fluxfactor'])/np.array(scat['Total_flux'])),'r--')
        plt.plot(scat['Total_flux'],scat[auxcatname+'_Total_flux']/options[auxcatname+'_fluxfactor'],'ko')
        plt.xlabel('Integrated LOFAR flux (Jy)')
        plt.ylabel('Integrated '+auxcatname+' flux (Jy)')
        plt.xlim(0.01, 0.5)  # xmin=/xmax= kwargs were removed in matplotlib 3
        plt.ylim(0.01, 0.5)
        plt.semilogx()
        plt.semilogy()
        equality = np.arange(0,10,0.01)
        plt.plot(equality,equality,'k-')
        plt.savefig(outname.replace('.png','_integrated.png'))
        plt.close('all')
        plt.cla()
        plt.clf()

        plt.plot(np.sort(scat['Peak_flux']),np.sort(scat['Peak_flux'])*np.median(np.array(scat[auxcatname+'_Peak_flux']/options['%s_fluxfactor'%auxcatname])/np.array(scat['Peak_flux'])),'r--')
        plt.plot(scat['Peak_flux'],scat[auxcatname+'_Peak_flux']/options[auxcatname+'_fluxfactor'],'ko')
        plt.xlabel('Peak LOFAR flux (Jy)')
        plt.ylabel('Peak '+auxcatname+' flux (Jy)')
        plt.xlim(0.01, 0.5)
        plt.ylim(0.01, 0.5)
        plt.semilogx()
        plt.semilogy()
        equality = np.arange(0,10,0.01)
        plt.plot(equality,equality,'k-')
        plt.savefig(outname.replace('.png','_peak.png'))
        plt.close('all')
        plt.cla()
        plt.clf()

        plt.plot(radialseps,scat['Total_flux']/(scat[auxcatname+'_Total_flux']/options[auxcatname+'_fluxfactor']),'bo',alpha=0.4,markersize=3)
        equality = np.arange(-0.01,10,0.01)
        fractionrange = np.arange(0.99,1.01,0.002)
        for i in fractionrange:
            plt.plot(equality,1.0*i+0*equality,'k-')
        plt.plot(equality,1.0+0*equality,'k-')
        plt.xlabel('Distance from pointing centre (deg)')
        plt.ylabel('Integrated LOFAR flux / Integrated '+auxcatname+' flux')

        plt.xlim(0.0, 2)
        plt.ylim(0.5, 2.0)

        distancerange = np.arange(0,np.max(radialseps)+0.15,0.15)

        for i in range(0,len(distancerange)-1):
            distancemin = distancerange[i]
            distancemax = distancerange[i+1]
            binvals = np.array([])
            for j in range(0,len(radialseps)):
                if distancemin < radialseps[j] < distancemax:
                    binvals = np.append(binvals,scat['Total_flux'][j]/(scat[auxcatname+'_Total_flux'][j]/options[auxcatname+'_fluxfactor']))
            midpoint = (distancemin+distancemax)/2.0
            if len(binvals) > 0:
                booterrl,booterrh = bootstrap(binvals, 100000, np.mean, 0.05)
                booterrl,booterrh = bootstrap(binvals, 100000, np.median, 0.05)
    #            print booterrl,booterrh, binvals
                plt.errorbar(midpoint,np.median(binvals),xerr=(distancemin-distancemax)/2.0,yerr=[[np.median(binvals)-booterrl],[booterrh-np.median(binvals)]],fmt='--o',ecolor='b',color='b',zorder=999999)
        plt.savefig(outname.replace('.png','_total_radial.png'))
        plt.close('all')
        plt.cla()
        plt.clf()
Example #26
def full_clearcache(o):
    clearcache(o['mslist'], o)
    clearcache('temp_mslist.txt', o)
    if o['full_mslist'] is not None:
        clearcache(o['full_mslist'], o)


if __name__ == '__main__':
    # Main loop
    report('Welcome to ddf-pipeline, version ' + __version__)
    if len(sys.argv) < 2:
        warn(
            'pipeline.py must be called with at least one parameter file or a command-line\noption list.\nE.g "pipeline.py example.cfg second_example.cfg --solutions-robust=0.1"\nSee below for a complete list of possible options with their default values.'
        )
        print_options(option_list)
        sys.exit(1)

    o = options(sys.argv[1:], option_list)

    if o['catch_signal']:
        catcher = Catcher()
    else:
        catcher = None

    uvrange = [o['image_uvmin'], o['uvmax']]
    killms_uvrange = [0, 1000]
    if o['solutions_uvmin'] is not None:
        killms_uvrange[0] = o['solutions_uvmin']
def plot_flux_ratios(catalog,fitsimage,outname,options=None):
    if options is None:
        options = o

    if os.path.isfile(outname):
        warn('Plot file %s exists, not making it' % outname)
    else:
        scat = Table.read(catalog)
        fluxratios = scat['Total_flux']/scat['Peak_flux']

        fitsimage = fits.open(fitsimage)
        fieldra = fitsimage[0].header['CRVAL1']
        fielddec = fitsimage[0].header['CRVAL2']
        fitsimage.close()
        radialseps = sepn(scat['RA']*deg2rad,scat['DEC']*deg2rad,fieldra*deg2rad,fielddec*deg2rad)*rad2deg

        nullfmt = NullFormatter()         # no labels

        # definitions for the axes
        left, width = 0.1, 0.65
        bottom, height = 0.1, 0.8
        bottom_h = left_h = left + width + 0.02

        rect_scatter = [left, bottom, width, height]
        rect_histx = [left, bottom_h, width, 0.2]
        rect_histy = [left_h, bottom, 0.2, height]

        # start with a rectangular Figure
        plt.figure(1, figsize=(8, 8))
        axScatter = plt.axes(rect_scatter)
        plt.xlabel('Distance from pointing centre (deg)')
        plt.ylabel('Integrated flux / Peak flux')
        plt.xticks(rotation=270)
        axScatter.plot([0,3.0],[1.0,1.0],'k-')

        smids = []
        svals = []
        distancerange = np.arange(0,max(radialseps),0.3)
        for i in range(0,len(distancerange)-1):
            distancemin = distancerange[i]
            distancemax = distancerange[i+1]
            binvals = np.array([])
            for j in range(0,len(radialseps)):
                if distancemin < radialseps[j] < distancemax:
                    binvals = np.append(binvals,fluxratios[j])
            midpoint = (distancemin+distancemax)/2.0
            print(i, binvals)
            #bsmear = bandwidth_smearing2(6*arcsec2deg,150E6,midpoint,4*48.8E3)
            #tsmear = time_smearing2(16.0,midpoint,6*arcsec2deg)
            #smids.append(midpoint)
            #svals.append(1.0/(bsmear*tsmear))
            #booterrl,booterrh = bootstrap(binvals, 100000, np.mean, 0.05)
            if len(binvals)>0:
                booterrl,booterrh = bootstrap(binvals, 100000, np.median, 0.05)
                axScatter.errorbar(midpoint,np.median(binvals),xerr=(distancemin-distancemax)/2.0,yerr=[[np.median(binvals)-booterrl],[booterrh-np.median(binvals)]],fmt='--o',ecolor='b',color='b',zorder=999999)

        #axScatter.plot(smids,svals,'k--')

        axHisty = plt.axes(rect_histy)
        plt.xticks(rotation=270)

        # no labels
        axHisty.yaxis.set_major_formatter(nullfmt)

        # the scatter plot:

        axScatter.scatter(radialseps,fluxratios,color='blue',marker='+',s=10,alpha=0.5)

        binwidth = 0.05

        axScatter.set_xlim((0, 3.0))
        axScatter.set_ylim((0.8, 2.5))

        bins = np.arange(0.8, 3, binwidth)
        axHisty.hist(fluxratios, bins=bins, orientation='horizontal',color='blue',alpha=0.5)
        axHisty.set_ylim(axScatter.get_ylim())
        plt.savefig(outname)
        plt.close('all')
        plt.cla()
        plt.clf()
def do_run_subtract(name,basedir,inarchivedir,outarchivedir,force=False):
    startdir = os.getcwd()
    sdb=SurveysDB()
    extractdict = sdb.get_reprocessing(name)
    sdb.close()
    fields = extractdict['fields'].split(',')
    extract_status = extractdict['extract_status'].split(',')

    print('Working on', name, 'in fields', fields, 'which have status', extract_status)
    
    for i in range(0,len(fields)):
        os.chdir(startdir)
        if not(extract_status[i] == 'EREADY' or (force and extract_status[i] == 'STARTED')):
            continue
        field = fields[i]

        workdir=basedir+'/'+name
        try:
            os.mkdir(workdir)
        except OSError:
            warn('Working directory already exists')
        print('In directory', os.getcwd())
        os.chdir(workdir)
        # Update status to running here
        extract_status[i] = 'STARTED'
        sdb=SurveysDB()
        extractdict = sdb.get_reprocessing(name)
        extractdict['extract_status'] = ','.join(extract_status)
        sdb.db_set('reprocessing',extractdict)
        sdb.close()
        print('Updated status to STARTED for', field, name)
        time.sleep(2.0)
        report('Copying data from %s'%inarchivedir)
        
        # WANT TO MAKE THIS INTO A RSYNC SO THAT IT CAN BE DONE OUTSIDE LEIDEN
        #os.system('cp -r %s/%s %s'%(inarchivedir,field,workdir))
        do_rsync_download(field,inarchivedir,workdir)


        # Update status to copied here
        extract_status[i] = 'COPIED'
        sdb=SurveysDB()
        extractdict = sdb.get_reprocessing(name)
        extractdict['extract_status'] = ','.join(extract_status)
        sdb.db_set('reprocessing',extractdict)
        sdb.close()
        print('Updated status to COPIED for', field, name)


        # Create boxfile
        create_ds9_region('%s.ds9.reg'%name,extractdict['ra'],extractdict['decl'],extractdict['size'])


        # Run subtract code
        print(os.getcwd(), 'working here')
        os.chdir(field)
        print ('sub-sources-outside-region.py -b %s/%s.ds9.reg -p %s'%(workdir,name,name))
        result=os.system('sub-sources-outside-region.py -b %s/%s.ds9.reg -p %s'%(workdir,name,name))
        if result!=0:
            raise RuntimeError('sub-sources-outside-region.py failed with error code %i' % result)
        
        # Archive the results need an rsync code this is just the *archive file that needs to be archived.
        #os.system('mkdir %s/%s'%(outarchivedir,name))
        #os.system('mkdir %s/%s/%s'%(outarchivedir,name,field))
        os.chdir(workdir)
        f = glob.glob('%s/*.archive*'%(field))
        do_rsync_upload(name,field,f)

        #print  ('cp -r %s_%s.dysco.sub.shift.avg.weights.ms.archive %s/%s/%s'%(field,name,outarchivedir,name,field))
        #os.system('cp -r %s_%s.dysco.sub.shift.avg.weights.ms.archive %s/%s/%s'%(field,name,outarchivedir,name,field))


        # update the database to give success
        extract_status[i] = 'EDONE'
        sdb=SurveysDB()
        extractdict = sdb.get_reprocessing(name)
        extractdict['extract_status'] = ','.join(extract_status)
        sdb.db_set('reprocessing',extractdict)
        sdb.close()
        print('Updated status to EDONE for', field, name)

    # update the database to give selfcal status as SREADY
    selfcal_status = 'SREADY'
    sdb=SurveysDB()
    extractdict = sdb.get_reprocessing(name)
    extractdict['selfcal_status'] = selfcal_status
    sdb.db_set('reprocessing',extractdict)
    sdb.close()
    print('Updated status to SREADY for', name)
Example #30
def run_bootstrap(o):
    
    colname='DATA_DI_CORRECTED'
    
    if o['mslist'] is None:
        die('MS list must be specified')

    if o['logging'] is not None and not os.path.isdir(o['logging']):
        os.mkdir(o['logging'])

    # check the data supplied
    if o['frequencies'] is None or o['catalogues'] is None:
        die('Frequencies and catalogues options must be specified')

    if "DDF_PIPELINE_CATALOGS" not in os.environ.keys():
        warn("You need to define the environment variable DDF_PIPELINE_CATALOGS where your catalogs are located")
        sys.exit(2)

    o["tgss"]=o["tgss"].replace("$$",os.environ["DDF_PIPELINE_CATALOGS"])
    o["catalogues"]=[l.replace("$$",os.environ["DDF_PIPELINE_CATALOGS"]) for l in o["catalogues"]]
    lCat=o["catalogues"]+[o["tgss"]]
    for fCat in lCat:
        if not os.path.isfile(fCat):
            warn("Catalog %s does not exist"%fCat)
            sys.exit(2)

    cl=len(o['catalogues'])
    if o['names'] is None:
        o['names']=[os.path.basename(x).replace('.fits','') for x in o['catalogues']]
    if o['radii'] is None:
        o['radii']=[10]*cl
    if o['groups'] is None:
        o['groups']=range(cl)
    if (len(o['frequencies'])!=cl or len(o['radii'])!=cl or
        len(o['names'])!=cl or len(o['groups'])!=cl):
        die('Names, groups, radii and frequencies entries must be the same length as the catalogue list')

    low_uvrange=[o['image_uvmin'],2.5*206.0/o['low_psf_arcsec']]
    if o['low_imsize'] is not None:
        low_imsize=o['low_imsize'] # allow over-ride
    else:
        low_imsize=o['imsize']*o['cellsize']/o['low_cell']

    low_robust=o['low_robust']

    # Clear the shared memory
    run('CleanSHM.py',dryrun=o['dryrun'])

    # We use the individual ms in mslist.
    m=MSList(o['mslist'])
    Uobsid = set(m.obsids)
    
    for obsid in Uobsid:
        
        warn('Running bootstrap for obsid %s' % obsid)

        freqs=[]
        omslist=[]
        for ms,ob,f in zip(m.mss,m.obsids,m.freqs):
            if ob==obsid:
                omslist.append(ms)
                freqs.append(f)

        if len(freqs)<4:
            die('Not enough frequencies to bootstrap. Check your mslist or MS naming scheme')

        # sort to work in frequency order

        freqs,omslist = (list(x) for x in zip(*sorted(zip(freqs, omslist), key=lambda pair: pair[0])))

        for f,ms in zip(freqs,omslist):
            print(ms, f)

        # generate the sorted input mslist
        with open('temp_mslist.txt','w') as f:
            for line in omslist:
                f.write(line+'\n')

        # Clean in cube mode
        # As for the main pipeline, first make a dirty map
        ddf_image('image_bootstrap_'+obsid+'_init','temp_mslist.txt',
                  cleanmask=None,cleanmode='SSD',ddsols='DDS0',
                  applysols='P',majorcycles=0,robust=low_robust,
                  uvrange=low_uvrange,beamsize=o['low_psf_arcsec'],
                  imsize=low_imsize,cellsize=o['low_cell'],
                  options=o,colname=colname,automask=True,
                  automask_threshold=15,smooth=True,cubemode=True,
                  conditional_clearcache=True)
        external_mask='bootstrap_external_mask.fits'
        make_external_mask(external_mask,'image_bootstrap_'+obsid+'_init.dirty.fits',use_tgss=True,clobber=False,cellsize='low_cell',options=o)
        # Deep SSD clean with this external mask and automasking
        ddf_image('image_bootstrap_'+obsid,'temp_mslist.txt',
                  cleanmask=external_mask,reuse_psf=True,reuse_dirty=True,
                  cleanmode='SSD',ddsols='DDS0',applysols='P',
                  majorcycles=5,robust=low_robust,uvrange=low_uvrange,
                  beamsize=o['low_psf_arcsec'],imsize=low_imsize,
                  cellsize=o['low_cell'],options=o,
                  colname=colname,automask=True,
                  automask_threshold=15,smooth=True,cubemode=True,
                  conditional_clearcache=False)

        if os.path.isfile('image_bootstrap_'+obsid+'.cube.int.restored.pybdsm.srl'):
            warn('Source list exists, skipping source extraction')
        else:
            warn('Running PyBDSM, please wait...')
            img=bdsm.process_image('image_bootstrap_'+obsid+'.cube.int.restored.fits',thresh_pix=5,rms_map=True,atrous_do=True,atrous_jmax=2,group_by_isl=True,rms_box=(80,20), adaptive_rms_box=True, adaptive_thresh=80, rms_box_bright=(35,7),mean_map='zero',spectralindex_do=True,specind_maxchan=1,debug=True,kappa_clip=3,flagchan_rms=False,flagchan_snr=False,incl_chan=True,spline_rank=1)
            # Write out in ASCII to work round bug in pybdsm
            img.write_catalog(catalog_type='srl',format='ascii',incl_chan='true')
            img.export_image(img_type='rms',img_format='fits')

        from make_fitting_product import make_catalogue
        import fitting_factors
        import find_outliers

        # generate the fitting product
        if os.path.isfile(obsid+'crossmatch-1.fits'):
            warn('Crossmatch table exists, skipping crossmatch')
        else:
            t = pt.table(omslist[0]+ '/FIELD', readonly=True, ack=False)
            direction = t[0]['PHASE_DIR']
            ra, dec = direction[0]

            if (ra<0):
                ra+=2*np.pi
            ra*=180.0/np.pi
            dec*=180.0/np.pi

            cats=list(zip(o['catalogues'],o['names'],o['groups'],o['radii']))  # list() so the pairing survives reuse under Python 3
            make_catalogue('image_bootstrap_'+obsid+'.cube.int.restored.pybdsm.srl',ra,dec,2.5,cats,outnameprefix=obsid)
    
        freqlist=open(obsid+'frequencies.txt','w')
        for n,f in zip(o['names'],o['frequencies']):
            freqlist.write('%f %s_Total_flux %s_E_Total_flux False\n' % (f,n,n))
        for i,f in enumerate(freqs):
            freqlist.write('%f Total_flux_ch%i E_Total_flux_ch%i True\n' % (f,i+1,i+1))
        freqlist.close()

        # Now call the fitting code

        if os.path.isfile(obsid+'crossmatch-results-1.npy'):
            warn('Results 1 exists, skipping first fit')
        else:
            fitting_factors.run_all(1, name=obsid)

        nreject=-1 # avoid error if we fail somewhere
        if os.path.isfile(obsid+'crossmatch-2.fits'):
            warn('Second crossmatch exists, skipping outlier rejection')
        else:
            nreject=find_outliers.run_all(1, name=obsid)
    
        if os.path.isfile(obsid+'crossmatch-results-2.npy'):
            warn('Results 2 exists, skipping second fit')
        else:
            if nreject==0:
                shutil.copyfile(obsid+'crossmatch-results-1.npy',obsid+'crossmatch-results-2.npy')
        if os.path.isfile(obsid+'crossmatch-results-2.npy'):
            warn('Results 2 exists, skipping second fit')
        else:
            fitting_factors.run_all(2, name=obsid)

        # Now apply corrections

        if o['full_mslist'] is None:
            die('Need big mslist to apply corrections')
        if not(o['dryrun']):
            warn('Applying corrections to MS list')
            scale=np.load(obsid+'crossmatch-results-2.npy')[:,0]
            # InterpolatedUS gives us linear interpolation between points
            # and extrapolation outside it
            spl = InterpolatedUnivariateSpline(freqs, scale, k=1)
            
            bigmslist=[s.strip() for s in open(o['full_mslist']).readlines()]
            obigmslist = [ms for ms in bigmslist if obsid in ms]
            
            for ms in obigmslist:
                t = pt.table(ms)
                try:
                    dummy=t.getcoldesc('SCALED_DATA')
                except RuntimeError:
                    dummy=None
                t.close()
                if dummy is not None:
                    warn('Table '+ms+' has already been corrected, skipping')
                else:
                    # in this version we need to scale both the original data and the data in colname
                    t = pt.table(ms+'/SPECTRAL_WINDOW', readonly=True, ack=False)
                    frq=t[0]['REF_FREQUENCY']
                    factor=spl(frq)
                    print(frq, factor)
                    t=pt.table(ms,readonly=False)
                    desc=t.getcoldesc(o['colname'])
                    desc['name']='SCALED_DATA'
                    t.addcols(desc)
                    d=t.getcol(o['colname'])
                    d*=factor
                    t.putcol('SCALED_DATA',d)
                    try:
                        dummy=t.getcoldesc(colname)
                    except RuntimeError:
                        dummy=None
                    if dummy is not None:
                        desc=t.getcoldesc(colname)
                        newname=colname+'_SCALED'
                        desc['name']=newname
                        t.addcols(desc)
                        d=t.getcol(colname)
                        d*=factor
                        t.putcol(newname,d)

                    t.close()
    if os.path.isfile('image_bootstrap.app.mean.fits'):
        warn('Mean bootstrap image exists, not creating it')
    else:
        warn('Creating mean bootstrap image')
        hdus=[]
        for obsid in Uobsid:
            hdus.append(fits.open('image_bootstrap_'+obsid+'.app.restored.fits'))
        for i in range(1,len(Uobsid)):
            hdus[0][0].data+=hdus[i][0].data
        hdus[0][0].data/=len(Uobsid)
        hdus[0].writeto('image_bootstrap.app.mean.fits')

if __name__=='__main__':
    # Main loop
    if len(sys.argv)<2:
        warn('quality_pipeline.py must be called with at least one parameter file\nor a command-line option list.\nE.g "pipeline.py example.cfg second_example.cfg --solutions-robust=0.1"\nSee below for a complete list of possible options with their default values.')
        print_options(option_list)
        sys.exit(1)

    o=options(sys.argv[1:],option_list)
    if o['pbimage'] is None:
        die('pbimage must be specified')
    if o['nonpbimage'] is None:
        die('nonpbimage must be specified')
    if o['list'] is not None:
        # fix up the new list-type options
        for i,cat in enumerate(o['list']):
            try:
                o[cat]=o['filenames'][i]
            except:
                pass
Example #32
def do_offsets(o):
    # o is the options file

    if o['mode']!='normal' and  o['mode']!='test':
        raise NotImplementedError('Offsets called with mode '+o['mode'])

    image_root='image_full_ampphase_di_m.NS'

    method=o['method']

    report('Determining astrometric offsets with method '+method+' in mode '+o['mode'])
    report('Merging downloaded catalogues')
    if os.path.isfile(method+'.fits'):
        warn('Merged file exists, reading from disk instead')
        data=Table.read(method+'.fits')
    else:
        if method=='pslocal':
            data=Table.read(method+'/'+method+'.txt',format='ascii')
            data['RA'].name='ra'
            data['DEC'].name='dec'
            data.write(method+'.fits')
        else:    
            kwargs={}
            if 'panstarrs' in method:
                kwargs['rastr']='ramean'
                kwargs['decstr']='decmean'
            data=merge_cat(method,**kwargs)

    if o['mode']=='test':
        image_root+='_shift'
        method+='-test'

    report('Running PyBDSM on LOFAR image, please wait...')
    catfile=image_root+'.offset_cat.fits'
    gaulfile=catfile.replace('cat','gaul')
    if os.path.isfile(catfile):
        warn('Catalogue already exists, skipping pybdsf run')
    else:
        if o['mode']=='test':
            suffix='facetRestored'
        else:
            suffix='restored'
        pbimage=image_root+'.int.'+suffix+'.fits'
        nonpbimage=image_root+'.app.'+suffix+'.fits'
        img = bdsm.process_image(pbimage, detection_image=nonpbimage, thresh_isl=4.0, thresh_pix=5.0, rms_box=(150,15), rms_map=True, mean_map='zero', ini_method='intensity', adaptive_rms_box=True, adaptive_thresh=150, rms_box_bright=(60,15), group_by_isl=False, group_tol=10.0,output_opts=True, output_all=True, atrous_do=False, flagging_opts=True, flag_maxsize_fwhm=0.5,advanced_opts=True, blank_limit=None)
        img.write_catalog(outfile=catfile,catalog_type='srl',format='fits',correct_proj='True')
        img.write_catalog(outfile=gaulfile,catalog_type='gaul',format='fits',correct_proj='True')

    lofar=Table.read(catfile)
    print(len(lofar), 'LOFAR sources before filtering')
    filt=(lofar['E_RA']*3600.0)<2.0
    filt&=(lofar['E_DEC']*3600.0)<2.0
    filt&=(lofar['Maj']*3600.0)<10
    lofar=lofar[filt]
    print(len(lofar), 'LOFAR sources after filtering')
    regfile=image_root+'.tessel.reg'
    cra,cdec=get_centpos()
    report('Set up structure')

    NDir=np.load("image_dirin_SSD_m.npy.ClusterCat.npy").shape[0]
    oo=Offsets(method,n=NDir,imroot=image_root,cellsize=o['cellsize'],fitmethod=o['fit'])
    report('Label table')
    lofar_l=oo.r.add_facet_labels(lofar)
    report('Finding offsets')
    oo.find_offsets(lofar_l,data)
    report('Fitting offsets')
    oo.fit_offsets()
    report('Making plots and saving output')
    #oo.plot_fits(method+'-fits.pdf')
    oo.save_fits()
    oo.plot_offsets()
    if 'test' not in o['mode']:
        oo.save(method+'-fit_state.pickle')
        report('Making astrometry error map, please wait')
        oo.make_astrometry_map('astromap.fits',20)
        oo.offsets_to_facetshift('facet-offset.txt')
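A hedged sketch of driving do_offsets directly; the dictionary lists the option keys the function reads, with placeholder values rather than pipeline defaults, and the call assumes the pipeline products (restored images, the ClusterCat .npy file) are already in the working directory:

opts = {'mode': 'normal',     # 'test' switches to the shifted images
        'method': 'pslocal',  # 'panstarrs*' methods use ramean/decmean columns
        'cellsize': 1.5,      # passed through to Offsets()
        'fit': 'mcmc'}        # placeholder fit-method name
do_offsets(opts)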
Example #33
def do_run_selfcal(name,basedir,inarchivedir,outarchivedir):
    startdir = os.getcwd()
    sdb=SurveysDB()
    extractdict = sdb.get_reprocessing(name)
    sdb.close()
    fields = extractdict['fields'].split(',')
    selfcal_status = extractdict['selfcal_status']
    extract_status = extractdict['extract_status'].split(',')
    

    print('Working on', name, 'in fields', fields, 'current selfcal status', selfcal_status)
    
  
    workdir=basedir+'/'+name
    try:
        os.mkdir(workdir)
    except OSError:
        warn('Working directory already exists')
    os.chdir(workdir)
    print('In directory', os.getcwd())
    # Update status to running here
    selfcal_status = 'STARTED'
    sdb=SurveysDB()
    extractdict = sdb.get_reprocessing(name)
    extractdict['selfcal_status'] = selfcal_status
    sdb.db_set('reprocessing',extractdict)
    sdb.close()
    print('Updated status to STARTED for', name)
    time.sleep(2.0)
    
    print('Starting rsync')
    fieldstring = ''

    for fieldid, field in enumerate(fields):
        print(field, fields)
    
        sdb=SurveysDB()
        extractdict = sdb.get_reprocessing(name)
        sdb.close()
        extract_status = extractdict['extract_status'].split(',')
        
        if extract_status[fieldid] == 'EDONE':
          cmd = '%s/%s/%s/%s_%s*archive*'%(inarchivedir,name,field,field,name)
          observations = check_output('ssh [email protected] ls -d ' + cmd, shell=True).decode()
          print('ssh [email protected] ls -d ' + cmd)
          observations = observations.split('\n')[:-1] # remove last empty item in this list
        else:
          observations = []

        print('DATA LOCATIONS', observations)
        print('FIELDS', fields)
        print('EXTRACT STATUS', extract_status)

        for observation in observations:
            print(observation)
            report('Copying data from %s'%observation)
        
            #'+ inarchivedir +'/' + name + '/' + field +'/' + field +'_'+name +'.dysco.sub.shift.avg.weights.ms.archive')
            do_rsync_download(observation.split('/')[-1],inarchivedir +'/'+name + '/'+field +'/',workdir)

            fieldstring += observation.split('/')[-1] + ' '
            #'%s_%s.dysco.sub.shift.avg.weights.ms.archive '%(field,name)
    fieldstring = fieldstring[:-1]

    # Update status to copied here
    report('Updating %s status to copied'%name)
    selfcal_status = 'COPIED'
    sdb=SurveysDB()
    extractdict = sdb.get_reprocessing(name)
    extractdict['selfcal_status'] = selfcal_status
    uvminstr =  str(extractdict['uvmin'])
    uvmin =  extractdict['uvmin']
    sdb.db_set('reprocessing',extractdict)
    sdb.close()
    
    
    # Create boxfile
    report('Create ds9 region file for extraction')
    create_ds9_region('%s.ds9.reg'%name,extractdict['ra'],extractdict['decl'],extractdict['size'])
    

    # Run subtract code
    print(os.getcwd(), 'working here')
    
    
    if uvmin > 0.0:
        cmd = 'runwsclean.py --uvmin=%s -b %s.ds9.reg -i %s %s'%(uvminstr,name,name+"_image",fieldstring)
    else:
        cmd = 'runwsclean.py -b %s.ds9.reg -i %s %s'%(name,name+"_image",fieldstring)
    print(cmd)
    os.system(cmd)

    report('Archiving the results to %s'%outarchivedir)
    os.chdir(workdir)
    f = glob.glob('%s.ds9.tar.gz'%(name))
    do_rsync_upload(name,outarchivedir,f)
    

    # update the database to give success
    selfcal_status = 'SDONE'
    sdb=SurveysDB()
    extractdict = sdb.get_reprocessing(name)
    extractdict['selfcal_status'] = selfcal_status
    sdb.db_set('reprocessing',extractdict)
    sdb.close()
    print('Updated status to SDONE for', name)
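The open/get/set/close sequence around SurveysDB recurs several times in the function above; it could be collapsed into a helper. A minimal sketch, assuming only the SurveysDB calls already used above:

def set_reprocessing_field(name, key, value):
    # Mirrors the repeated pattern above: fetch the record, update one
    # field, write it back, close the connection.
    sdb = SurveysDB()
    extractdict = sdb.get_reprocessing(name)
    extractdict[key] = value
    sdb.db_set('reprocessing', extractdict)
    sdb.close()
    return extractdict

# e.g. set_reprocessing_field(name, 'selfcal_status', 'COPIED')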
Example #34
def run_bootstrap(o):

    colname = 'DATA_DI_CORRECTED'

    if o['mslist'] is None:
        die('MS list must be specified')

    if o['logging'] is not None and not os.path.isdir(o['logging']):
        os.mkdir(o['logging'])

    # check the data supplied
    if o['frequencies'] is None or o['catalogues'] is None:
        die('Frequencies and catalogues options must be specified')

    if "DDF_PIPELINE_CATALOGS" not in os.environ.keys():
        warn(
            "You need to define the environment variable DDF_PIPELINE_CATALOGS where your catalogs are located"
        )
        sys.exit(2)

    o["tgss"] = o["tgss"].replace("$$", os.environ["DDF_PIPELINE_CATALOGS"])
    o["catalogues"] = [
        l.replace("$$", os.environ["DDF_PIPELINE_CATALOGS"])
        for l in o["catalogues"]
    ]
    lCat = o["catalogues"] + [o["tgss"]]
    for fCat in lCat:
        if not os.path.isfile(fCat):
            warn("Catalog %s does not exist" % fCat)
            sys.exit(2)

    cl = len(o['catalogues'])
    if o['names'] is None:
        o['names'] = [
            os.path.basename(x).replace('.fits', '') for x in o['catalogues']
        ]
    if o['radii'] is None:
        o['radii'] = [10] * cl
    if o['groups'] is None:
        o['groups'] = range(cl)
    if (len(o['frequencies']) != cl or len(o['radii']) != cl
            or len(o['names']) != cl or len(o['groups']) != cl):
        die('Names, groups, radii and frequencies entries must be the same length as the catalogue list'
            )

    low_uvrange = [o['image_uvmin'], 2.5 * 206.0 / o['low_psf_arcsec']]
    if o['low_imsize'] is not None:
        low_imsize = o['low_imsize']  # allow over-ride
    else:
        low_imsize = o['imsize'] * o['cellsize'] / o['low_cell']

    low_robust = o['low_robust']

    # Clear the shared memory
    run('CleanSHM.py', dryrun=o['dryrun'])

    # We use the individual ms in mslist.
    m = MSList(o['mslist'])
    Uobsid = set(m.obsids)

    for obsid in Uobsid:

        warn('Running bootstrap for obsid %s' % obsid)

        freqs = []
        omslist = []
        for ms, ob, f in zip(m.mss, m.obsids, m.freqs):
            if ob == obsid:
                omslist.append(ms)
                freqs.append(f)

        if len(freqs) < 4:
            die('Not enough frequencies to bootstrap. Check your mslist or MS naming scheme'
                )

        # sort to work in frequency order

        freqs, omslist = (list(x) for x in zip(
            *sorted(zip(freqs, omslist), key=lambda pair: pair[0])))

        for f, ms in zip(freqs, omslist):
            print(ms, f)

        # generate the sorted input mslist
        with open('temp_mslist.txt', 'w') as f:
            for line in omslist:
                f.write(line + '\n')

        # Clean in cube mode
        # As for the main pipeline, first make a dirty map
        ddf_image('image_bootstrap_' + obsid + '_init',
                  'temp_mslist.txt',
                  cleanmask=None,
                  cleanmode='SSD',
                  ddsols='DDS0',
                  applysols='P',
                  majorcycles=0,
                  robust=low_robust,
                  uvrange=low_uvrange,
                  beamsize=o['low_psf_arcsec'],
                  imsize=low_imsize,
                  cellsize=o['low_cell'],
                  options=o,
                  colname=colname,
                  automask=True,
                  automask_threshold=15,
                  smooth=True,
                  cubemode=True,
                  conditional_clearcache=True)
        external_mask = 'bootstrap_external_mask.fits'
        make_external_mask(external_mask,
                           'image_bootstrap_' + obsid + '_init.dirty.fits',
                           use_tgss=True,
                           clobber=False,
                           cellsize='low_cell',
                           options=o)
        # Deep SSD clean with this external mask and automasking
        ddf_image('image_bootstrap_' + obsid,
                  'temp_mslist.txt',
                  cleanmask=external_mask,
                  reuse_psf=True,
                  reuse_dirty=True,
                  cleanmode='SSD',
                  ddsols='DDS0',
                  applysols='P',
                  majorcycles=5,
                  robust=low_robust,
                  uvrange=low_uvrange,
                  beamsize=o['low_psf_arcsec'],
                  imsize=low_imsize,
                  cellsize=o['low_cell'],
                  options=o,
                  colname=colname,
                  automask=True,
                  automask_threshold=15,
                  smooth=True,
                  cubemode=True,
                  conditional_clearcache=False)

        if os.path.isfile('image_bootstrap_' + obsid +
                          '.cube.int.restored.pybdsm.srl'):
            warn('Source list exists, skipping source extraction')
        else:
            warn('Running PyBDSM, please wait...')
            img = bdsm.process_image('image_bootstrap_' + obsid +
                                     '.cube.int.restored.fits',
                                     thresh_pix=5,
                                     rms_map=True,
                                     atrous_do=True,
                                     atrous_jmax=2,
                                     group_by_isl=True,
                                     rms_box=(80, 20),
                                     adaptive_rms_box=True,
                                     adaptive_thresh=80,
                                     rms_box_bright=(35, 7),
                                     mean_map='zero',
                                     spectralindex_do=True,
                                     specind_maxchan=1,
                                     debug=True,
                                     kappa_clip=3,
                                     flagchan_rms=False,
                                     flagchan_snr=False,
                                     incl_chan=True,
                                     spline_rank=1)
            # Write out in ASCII to work round bug in pybdsm
            img.write_catalog(catalog_type='srl',
                              format='ascii',
                              incl_chan='true')
            img.export_image(img_type='rms', img_format='fits')

        from make_fitting_product import make_catalogue
        import fitting_factors
        import find_outliers

        # generate the fitting product
        if os.path.isfile(obsid + 'crossmatch-1.fits'):
            warn('Crossmatch table exists, skipping crossmatch')
        else:
            t = pt.table(omslist[0] + '/FIELD', readonly=True, ack=False)
            direction = t[0]['PHASE_DIR']
            ra, dec = direction[0]

            if (ra < 0):
                ra += 2 * np.pi
            ra *= 180.0 / np.pi
            dec *= 180.0 / np.pi

            cats = list(zip(o['catalogues'], o['names'], o['groups'], o['radii']))  # list() so it survives repeated iteration under Python 3
            make_catalogue('image_bootstrap_' + obsid +
                           '.cube.int.restored.pybdsm.srl',
                           ra,
                           dec,
                           2.5,
                           cats,
                           outnameprefix=obsid)

        freqlist = open(obsid + 'frequencies.txt', 'w')
        for n, f in zip(o['names'], o['frequencies']):
            freqlist.write('%f %s_Total_flux %s_E_Total_flux False\n' %
                           (f, n, n))
        for i, f in enumerate(freqs):
            freqlist.write('%f Total_flux_ch%i E_Total_flux_ch%i True\n' %
                           (f, i + 1, i + 1))
        freqlist.close()

        # Now call the fitting code

        if os.path.isfile(obsid + 'crossmatch-results-1.npy'):
            warn('Results 1 exists, skipping first fit')
        else:
            fitting_factors.run_all(1, name=obsid)

        nreject = -1  # avoid error if we fail somewhere
        if os.path.isfile(obsid + 'crossmatch-2.fits'):
            warn('Second crossmatch exists, skipping outlier rejection')
        else:
            nreject = find_outliers.run_all(1, name=obsid)

        if os.path.isfile(obsid + 'crossmatch-results-2.npy'):
            warn('Results 2 exists, skipping second fit')
        else:
            if nreject == 0:
                shutil.copyfile(obsid + 'crossmatch-results-1.npy',
                                obsid + 'crossmatch-results-2.npy')
        if os.path.isfile(obsid + 'crossmatch-results-2.npy'):
            warn('Results 2 exists, skipping second fit')
        else:
            fitting_factors.run_all(2, name=obsid)

        # Now apply corrections

        if o['full_mslist'] is None:
            die('Need big mslist to apply corrections')
        if not (o['dryrun']):
            warn('Applying corrections to MS list')
            scale = np.load(obsid + 'crossmatch-results-2.npy')[:, 0]
            # InterpolatedUS gives us linear interpolation between points
            # and extrapolation outside it
            spl = InterpolatedUnivariateSpline(freqs, scale, k=1)

            bigmslist = [s.strip() for s in open(o['full_mslist']).readlines()]
            obigmslist = [ms for ms in bigmslist if obsid in ms]

            for ms in obigmslist:
                t = pt.table(ms)
                try:
                    dummy = t.getcoldesc('SCALED_DATA')
                except RuntimeError:
                    dummy = None
                t.close()
                if dummy is not None:
                    warn('Table ' + ms +
                         ' has already been corrected, skipping')
                else:
                    # in this version we need to scale both the original data and the data in colname
                    t = pt.table(ms + '/SPECTRAL_WINDOW',
                                 readonly=True,
                                 ack=False)
                    frq = t[0]['REF_FREQUENCY']
                    factor = spl(frq)
                    print(frq, factor)
                    t = pt.table(ms, readonly=False)
                    desc = t.getcoldesc(o['colname'])
                    desc['name'] = 'SCALED_DATA'
                    t.addcols(desc)
                    d = t.getcol(o['colname'])
                    d *= factor
                    t.putcol('SCALED_DATA', d)
                    try:
                        dummy = t.getcoldesc(colname)
                    except RuntimeError:
                        dummy = None
                    if dummy is not None:
                        desc = t.getcoldesc(colname)
                        newname = colname + '_SCALED'
                        desc['name'] = newname
                        t.addcols(desc)
                        d = t.getcol(colname)
                        d *= factor
                        t.putcol(newname, d)

                    t.close()
    if os.path.isfile('image_bootstrap.app.mean.fits'):
        warn('Mean bootstrap image exists, not creating it')
    else:
        warn('Creating mean bootstrap image')
        hdus = []
        for obsid in Uobsid:
            hdus.append(
                fits.open('image_bootstrap_' + obsid + '.app.restored.fits'))
        for i in range(1, len(Uobsid)):
            hdus[0][0].data += hdus[i][0].data
        hdus[0][0].data /= len(Uobsid)
        hdus[0].writeto('image_bootstrap.app.mean.fits')
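The per-frequency correction applied above boils down to linear interpolation of the fitted scale factors; a standalone sketch with illustrative numbers (the frequencies and factors below are made up for demonstration):

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

# Illustrative channel frequencies (Hz) and fitted flux scale factors.
freqs = np.array([120e6, 135e6, 150e6, 165e6])
scale = np.array([1.10, 1.05, 1.00, 0.97])

# k=1 gives piecewise-linear interpolation, with linear extrapolation
# outside the fitted range, as used on each MS's REF_FREQUENCY above.
spl = InterpolatedUnivariateSpline(freqs, scale, k=1)
print(spl(144e6))   # factor applied to the visibilities at 144 MHz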
Example #35
def plot_flux_errors(catalog,fitsimage,outname,auxcatname,options=None):
    if options is None:
        options = o
    if os.path.isfile(outname.replace('.png','_integrated.png')):
        warn('Plot file %s exists, not making it' % outname.replace('.png','_integrated.png'))
    else:

        scat = Table.read(catalog)

        fitsimage = fits.open(fitsimage)
        fieldra = fitsimage[0].header['CRVAL1']
        fielddec = fitsimage[0].header['CRVAL2']
        fitsimage.close()
        radialseps = sepn(scat['RA']*deg2rad,scat['DEC']*deg2rad,fieldra*deg2rad,fielddec*deg2rad)*rad2deg

        plt.plot(np.sort(scat['Total_flux']),np.sort(scat['Total_flux'])*np.median(old_div(np.array(old_div(scat[auxcatname+'_Total_flux'],options[auxcatname+'_fluxfactor'])),np.array(scat['Total_flux']))),'r--')
        plt.plot(scat['Total_flux'],old_div(scat[auxcatname+'_Total_flux'],options[auxcatname+'_fluxfactor']),'ko')
        plt.xlabel('Integrated LOFAR flux (Jy)')
        plt.ylabel('Integrated '+auxcatname+' flux (Jy)')
        plt.xlim(xmin=0.01,xmax=0.5)
        plt.ylim(ymin=0.01,ymax=0.5)
        plt.semilogx()
        plt.semilogy()
        equality = np.arange(0,10,0.01)
        plt.plot(equality,equality,'k-')
        plt.savefig(outname.replace('.png','_integrated.png'))
        plt.close('all')
        plt.cla()
        plt.clf()

        plt.plot(np.sort(scat['Peak_flux']),np.sort(scat['Peak_flux'])*np.median(old_div(np.array(old_div(scat[auxcatname+'_Peak_flux'],options['%s_fluxfactor'%auxcatname])),np.array(scat['Peak_flux']))),'r--')
        plt.plot(scat['Peak_flux'],old_div(scat[auxcatname+'_Peak_flux'],options[auxcatname+'_fluxfactor']),'ko')
        plt.xlabel('Peak LOFAR flux (Jy)')
        plt.ylabel('Peak '+auxcatname+' flux (Jy)')
        plt.xlim(xmin=0.01,xmax=0.5)
        plt.ylim(ymin=0.01,ymax=0.5)
        plt.semilogx()
        plt.semilogy()
        equality = np.arange(0,10,0.01)
        plt.plot(equality,equality,'k-')
        plt.savefig(outname.replace('.png','_peak.png'))
        plt.close('all')
        plt.cla()
        plt.clf()

        plt.plot(radialseps,old_div(scat['Total_flux'],(old_div(scat[auxcatname+'_Total_flux'],options[auxcatname+'_fluxfactor']))),'bo',alpha=0.4,markersize=3)
        equality = np.arange(-0.01,10,0.01)
        fractionrange = np.arange(0.99,1.01,0.002)
        for i in fractionrange:
            plt.plot(equality,1.0*i+0*equality,'k-')
        plt.plot(equality,1.0+0*equality,'k-')
        plt.xlabel('Distance from pointing centre (deg)')
        plt.ylabel('Integrated LOFAR flux / Integrated '+auxcatname+' flux')

        plt.xlim(xmin=0.0,xmax=2)
        plt.ylim(ymin=0.5,ymax=2.0)    

        distancerange = np.arange(0,np.max(radialseps)+0.15,0.15)

        for i in range(0,len(distancerange)-1):
            distancemin = distancerange[i]
            distancemax = distancerange[i+1]
            binvals = np.array([])
            for j in range(0,len(radialseps)):
                if distancemin < radialseps[j] < distancemax:
                    binvals = np.append(binvals,old_div(scat['Total_flux'][j],(old_div(scat[auxcatname+'_Total_flux'][j],options[auxcatname+'_fluxfactor']))))
            midpoint = (distancemin+distancemax)/2.0
            if len(binvals) > 0:
                # bootstrap confidence interval on the median flux ratio in this bin
                booterrl,booterrh = bootstrap(binvals, 100000, np.median, 0.05)
    #            print booterrl,booterrh, binvals
                plt.errorbar(midpoint,np.median(binvals),xerr=(distancemin-distancemax)/2.0,yerr=[[np.median(binvals)-booterrl],[booterrh-np.median(binvals)]],fmt='--o',ecolor='b',color='b',zorder=999999)
        plt.savefig(outname.replace('.png','_total_radial.png'))
        plt.close('all')
        plt.cla()
        plt.clf()
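The bootstrap call above returns lower and upper confidence bounds for the binned statistic; its implementation is not shown in this section, but a percentile bootstrap with the same call signature might look like this (a sketch, not the pipeline's own routine):

import numpy as np

def bootstrap(data, nsamples, statistic, alpha):
    # Resample with replacement, evaluate the statistic on each resample,
    # and return the alpha/2 and 1-alpha/2 percentiles of its distribution.
    data = np.asarray(data)
    idx = np.random.randint(0, len(data), (nsamples, len(data)))
    stats = np.sort(statistic(data[idx], axis=1))
    return (stats[int(alpha / 2.0 * nsamples)],
            stats[int((1.0 - alpha / 2.0) * nsamples)])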
def do_run_subtract(name, basedir, inarchivedir, outarchivedir, force=False):
    startdir = os.getcwd()
    sdb = SurveysDB()
    extractdict = sdb.get_reprocessing(name)
    sdb.close()
    fields = extractdict['fields'].split(',')
    extract_status = extractdict['extract_status'].split(',')
    try:
        bad_pointings = extractdict['bad_pointings'].split(',')
    except AttributeError:
        bad_pointings = ['']
    print('Working on ', name, 'in fields', fields, 'which have status',
          extract_status)

    for i in range(0, len(fields)):
        os.chdir(startdir)
        if not (extract_status[i] == 'EREADY' or
                (force and extract_status[i] == 'STARTED')):
            continue
        field = fields[i]
        if field in bad_pointings:
            print('Field', field,
                  'in bad pointings -- skipping and setting to BADP')
            sdb = SurveysDB()
            extractdict = sdb.get_reprocessing(name)
            extract_status[i] = 'BADP'
            extractdict['extract_status'] = ','.join(extract_status)
            sdb.db_set('reprocessing', extractdict)
            sdb.close()
            continue
        workdir = basedir + '/' + name
        try:
            os.mkdir(workdir)
        except OSError:
            warn('Working directory already exists')
        os.chdir(workdir)
        print('In directory', os.getcwd())
        # Update status to running here
        extract_status[i] = 'STARTED'
        sdb = SurveysDB()
        extractdict = sdb.get_reprocessing(name)
        extractdict['extract_status'] = ','.join(extract_status)
        sdb.db_set('reprocessing', extractdict)
        sdb.close()
        print('Updated status to STARTED for', field, name)
        time.sleep(2.0)
        report('Copying data from %s' % inarchivedir)

        # WANT TO MAKE THIS INTO A RSYNC SO THAT IT CAN BE DONE OUTSIDE LEIDEN
        #os.system('cp -r %s/%s %s'%(inarchivedir,field,workdir))
        do_rsync_download(field, inarchivedir, workdir)

        # Update status to copied here
        extract_status[i] = 'COPIED'
        sdb = SurveysDB()
        extractdict = sdb.get_reprocessing(name)
        extractdict['extract_status'] = ','.join(extract_status)
        sdb.db_set('reprocessing', extractdict)
        sdb.close()
        print('Updated status to COPIED for', field, name)

        # Create boxfile
        create_ds9_region('%s.ds9.reg' % name, extractdict['ra'],
                          extractdict['decl'], extractdict['size'])

        # Run subtract code
        print(os.getcwd(), 'working here')
        os.chdir(field)
        print('sub-sources-outside-region.py -b %s/%s.ds9.reg -p %s' %
              (workdir, name, name))
        result = os.system(
            'sub-sources-outside-region.py -b %s/%s.ds9.reg -p %s' %
            (workdir, name, name))
        if result != 0:
            raise RuntimeError(
                'sub-sources-outside-region.py failed with error code %i' %
                result)

        # Archive the results need an rsync code this is just the *archive file that needs to be archived.
        #os.system('mkdir %s/%s'%(outarchivedir,name))
        #os.system('mkdir %s/%s/%s'%(outarchivedir,name,field))
        os.chdir(workdir)
        f = glob.glob('%s/*.archive*' % (field))
        do_rsync_upload(name, field, f)

        #print  ('cp -r %s_%s.dysco.sub.shift.avg.weights.ms.archive %s/%s/%s'%(field,name,outarchivedir,name,field))
        #os.system('cp -r %s_%s.dysco.sub.shift.avg.weights.ms.archive %s/%s/%s'%(field,name,outarchivedir,name,field))

        # update the database to give success
        extract_status[i] = 'EDONE'
        sdb = SurveysDB()
        extractdict = sdb.get_reprocessing(name)
        extractdict['extract_status'] = ','.join(extract_status)
        sdb.db_set('reprocessing', extractdict)
        sdb.close()
        print('Updated status to EDONE for', field, name)

    # update the database to give selfcal status as SREADY
    selfcal_status = 'SREADY'
    sdb = SurveysDB()
    extractdict = sdb.get_reprocessing(name)
    extractdict['selfcal_status'] = selfcal_status
    sdb.db_set('reprocessing', extractdict)
    sdb.close()
    print('Updated status to SREADY for', name)
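Taken together, do_run_subtract and do_run_selfcal drive a simple status state machine; the state names below are exactly those set in the code above:

# Per-field extract_status, driven by do_run_subtract:
#   EREADY -> STARTED -> COPIED -> EDONE    (or BADP for bad pointings)
# Per-target selfcal_status: do_run_subtract finishes by setting SREADY,
# then do_run_selfcal moves it STARTED -> COPIED -> SDONE.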
Example #37
import sys
import os
import glob   # used for the release-file globbing below

# report, warn, die, download_dataset, unpack, make_list and SurveysDB
# come from the pipeline's own utility modules (not shown in this excerpt).

rootdir = '/data/lofar/mjh'
os.chdir(rootdir)

name = sys.argv[1]
try:
    qsubfile = sys.argv[2]
except IndexError:
    qsubfile = '/home/mjh/git/ddf-pipeline/pipeline.qsub'

try:
    os.mkdir(name)
except OSError:
    warn('Working directory already exists')
    pass
os.chdir(name)
report('Downloading data')
if not download_dataset('https://lofar-webdav.grid.sara.nl',
                        '/SKSP/' + name + '/'):
    die('Download failed to get the right number of files')

report('Unpacking data')
unpack()

report('Deleting tar files')
os.system('rm *.tar.gz')

report('Making ms lists')
if make_list():
    with SurveysDB() as sdb:
        sdb.cur.execute('select * from fields where status="Archived" or status="Complete" order by ra')
        result=sdb.cur.fetchall()

    print('There are', len(result), 'complete datasets')

    if not skip_construct:
        separator('Preparing release directory')
        releasefiles=['image_full_low_stokesV.dirty.fits',
                      'image_full_vlow_QU.cube.dirty.corr.fits.fz',
                      'image_full_low_QU.cube.dirty.corr.fits.fz',
                      'image_full_vlow_QU.cube.dirty.fits.fz',
                      'image_full_low_QU.cube.dirty.fits.fz',
                      'image_full_low_m.int.restored.fits',
                      'image_full_low_m.app.restored.fits',
                      'image_full_ampphase_di_m.NS.tessel.reg',
                      'image_full_ampphase_di_m.NS_shift.int.facetRestored.fits',
                      'image_full_ampphase_di_m.NS_shift.app.facetRestored.fits',
                      'image_full_ampphase_di_m.NS_Band0_shift.int.facetRestored.fits',
                      'image_full_ampphase_di_m.NS_Band1_shift.int.facetRestored.fits',
                      'image_full_ampphase_di_m.NS_Band2_shift.int.facetRestored.fits',
                      'astromap.fits',
                      'DynSpec*.tgz']

        os.chdir(workdir+'/fields')
        for r in result:
            id=r['id']
            if not os.path.isdir(id):
                warn('Directory %s does not exist, making it' % id)
                os.mkdir(id)
            tdir=workdir+'/fields/'+id
            if r['clustername']=='Herts':
                location=r['location']
                resolved_release=[]
                for f in releasefiles:
                    if '*' in f:
                        resolved_release+=[os.path.basename(g) for g in glob.glob(location+'/'+f)]
                    else:
                        resolved_release.append(f)                       
                                                 
                if location:
                    for f in resolved_release:
                        source=location+'/'+f
                        if not os.path.isfile(tdir+'/'+f) or (os.path.isfile(source)  and os.path.getmtime(source)>os.path.getmtime(tdir+'/'+f)):
Example #39
def plot_position_offset(catalog,fitsimage,outname,auxcatname,options=None):
    if options is None:
        options = o
    if os.path.isfile(outname):
        warn('Plot file %s exists, not making it' % outname)
    else:
        scat = Table.read(catalog)

        fitsimage = fits.open(fitsimage)
        fieldra = fitsimage[0].header['CRVAL1']
        fielddec = fitsimage[0].header['CRVAL2']
        fitsimage.close()

        x = scat[auxcatname+'_dRA']
        y = scat[auxcatname+'_dDEC']

        nullfmt = NullFormatter()         # no labels

        # definitions for the axes
        left, width = 0.1, 0.65
        bottom, height = 0.1, 0.65
        bottom_h = left_h = left + width + 0.02

        rect_scatter = [left, bottom, width, height]
        rect_histx = [left, bottom_h, width, 0.2]
        rect_histy = [left_h, bottom, 0.2, height]

        # start with a rectangular Figure
        plt.figure(1, figsize=(8, 8))

        axScatter = plt.axes(rect_scatter)
        plt.xlabel('$RA_{\\rm LOFAR} - RA_{\\rm %s}$ (arcsec)' % auxcatname)
        plt.ylabel('$DEC_{\\rm LOFAR} - DEC_{\\rm %s}$ (arcsec)' % auxcatname)
        plt.xticks(rotation=270)
        axScatter.plot(np.zeros(40),np.arange(-20,20),'k--')
        axScatter.plot(np.arange(-20,20),np.zeros(40),'k--')
        axHistx = plt.axes(rect_histx)
        axHisty = plt.axes(rect_histy)
        plt.xticks(rotation=270)

        # no labels
        axHistx.xaxis.set_major_formatter(nullfmt)
        axHisty.yaxis.set_major_formatter(nullfmt)

        # the scatter plot:
        axScatter.scatter(x, y,marker='+',s=2,alpha=0.3)

        binwidth = 0.25
        xymax = np.max([np.max(np.fabs(x)), np.max(np.fabs(y))])
        lim = (int(old_div(xymax,binwidth)) + 1) * binwidth

        axScatter.set_xlim((-lim, lim))
        axScatter.set_ylim((-lim, lim))

        bins = np.arange(-lim, lim + binwidth, binwidth)
        axHistx.hist(x, bins=bins)
        axHisty.hist(y, bins=bins, orientation='horizontal')

        axHistx.set_xlim(axScatter.get_xlim())
        axHisty.set_ylim(axScatter.get_ylim())

        axScatter.add_artist(Ellipse((np.median(x),np.median(y)),np.std(x),np.std(y),angle=0,linewidth=2,fill=False,color='k',zorder=10000))

        plt.savefig(outname)
        plt.close('all')
        plt.cla()
        plt.clf()
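The ellipse drawn above summarises the offset distribution with medians and standard deviations; the same numbers can be inspected without plotting. A sketch, with placeholder catalogue filename and auxiliary-catalogue name:

import numpy as np
from astropy.table import Table

scat = Table.read('matched-cat.fits')   # placeholder filename
x = scat['FIRST_dRA']                   # placeholder auxcatname 'FIRST'
y = scat['FIRST_dDEC']
print('median offset %.2f, %.2f arcsec' % (np.median(x), np.median(y)))
print('scatter %.2f, %.2f arcsec' % (np.std(x), np.std(y)))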
import os
import numpy as np
from astropy.io import fits
import bdsf   # pybdsf, used for source finding below
# warn() comes from the pipeline's utility module (not shown in this excerpt)

hdus = []
for i in range(3):
    h = fits.open('band%i-mosaic.fits' % i)
    hdus.append(h)
    y, x = h[0].data.shape   # all three band mosaics are assumed to share one shape

newdata = np.zeros((3, y, x), dtype=np.float32)
for i, h in enumerate(hdus):
    newdata[i, :, :] = h[0].data

ohdu = hdus[0]

outfile = 'band-cube.fits'
if os.path.isfile(outfile):
    warn('Output file exists, skipping cube generation')
else:
    ohdu[0].data = newdata
    ohdu[0].header['NAXIS3'] = 3
    ohdu[0].header['CTYPE3'] = 'FREQ'
    ohdu[0].header['CUNIT3'] = 'Hz'
    ohdu[0].header['CRPIX3'] = 1
    ohdu[0].header['CRVAL3'] = 128e6
    ohdu[0].header['CDELT3'] = 16.0e6
    ohdu[0].header['WCSAXES'] = 3
    ohdu.writeto(outfile, overwrite=True)
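
# Hypothetical sanity check on the cube written above: confirm the three
# frequency planes and the frequency-axis keywords just set.
with fits.open(outfile) as check:
    assert check[0].data.shape[0] == 3
    print(check[0].header['CTYPE3'], check[0].header['CRVAL3'],
          check[0].header['CDELT3'])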

os.system('chmod u+w .')
os.system('chmod u+w mosaic-blanked_pybdsm')
catprefix = 'bandcat'
img = bdsf.process_image(outfile,
import sys
import os

rootdir='/disks/paradata/shimwell/LoTSS-DR2/ongoing-leiden-runs'
os.chdir(rootdir)

name=sys.argv[1]

if name[0]!='P' and name[0]!='L':
    die('This code should be used only with field or observation names',database=False)

do_field=(name[0]=='P')

try:
    os.mkdir(name)
except OSError:
    warn('Working directory already exists')
    pass
os.chdir(name)
report('Downloading data')
if do_field:
    success=download_field(name)
else:
    success=download_dataset('https://lofar-webdav.grid.sara.nl','/SKSP/'+name+'/')

if not success:
    die('Download failed, see earlier errors',database=False)

    
report('Unpacking data')
unpack()
if do_field:
Example #43
                'image_full_ampphase_di_m.NS_shift.int.facetRestored.fits',
                'image_full_ampphase_di_m.NS_shift.app.facetRestored.fits',
                'image_full_ampphase_di_m.NS_Band0_shift.int.facetRestored.fits',
                'image_full_ampphase_di_m.NS_Band1_shift.int.facetRestored.fits',
                'image_full_ampphase_di_m.NS_Band0_shift.app.facetRestored.fits',
                'image_full_ampphase_di_m.NS_Band1_shift.app.facetRestored.fits',
                'image_full_ampphase_di_m.NS_Band2_shift.app.facetRestored.fits',
                'astromap.fits', 'DynSpec*.tgz'
            ]

            os.chdir(workdir + '/fields')
            for r in result:
                id = r['id']
                print('Doing', id)
                if not os.path.isdir(id):
                    warn('Directory %s does not exist, making it' % id)
                    os.mkdir(id)
                tdir = workdir + '/fields/' + id
                if r['clustername'] == 'Herts' and r['location'] != "":
                    location = r['location']
                    resolved_release = []
                    for f in releasefiles:
                        if '*' in f:
                            resolved_release += [
                                os.path.basename(g)
                                for g in glob.glob(location + '/' + f)
                            ]
                        else:
                            resolved_release.append(f)

                    if location:
Example #44
def ddf_image(imagename,
              mslist,
              cleanmask=None,
              cleanmode='HMP',
              ddsols=None,
              applysols=None,
              threshold=None,
              majorcycles=3,
              use_dicomodel=False,
              robust=0,
              beamsize=None,
              beamsize_minor=None,
              beamsize_pa=None,
              reuse_psf=False,
              reuse_dirty=False,
              verbose=False,
              saveimages=None,
              imsize=None,
              cellsize=None,
              uvrange=None,
              colname='CORRECTED_DATA',
              peakfactor=0.1,
              dicomodel_base=None,
              options=None,
              do_decorr=None,
              normalization=None,
              dirty_from_resid=False,
              clusterfile=None,
              HMPsize=None,
              automask=True,
              automask_threshold=10.0,
              smooth=False,
              noweights=False,
              cubemode=False,
              apply_weights=True,
              catcher=None,
              rms_factor=3.0):

    if catcher: catcher.check()

    # saveimages lists _additional_ images to save
    if saveimages is None:
        saveimages = ''
    saveimages += 'onNeds'
    if options is None:
        options = o  # attempt to get global if it exists

    if HMPsize is None:
        HMPsize = options['HMPsize']
    if do_decorr is None:
        do_decorr = options['do_decorr']
    if beamsize is None:
        beamsize = options['psf_arcsec']
    if imsize is None:
        imsize = options['imsize']
    if cellsize is None:
        cellsize = options['cellsize']

    cache_dir = find_cache_dir(options)

    if majorcycles > 0:
        fname = imagename + '.app.restored.fits'
    else:
        fname = imagename + '.dirty.fits'

    runcommand = "DDF.py --Output-Name=%s --Data-MS=%s --Deconv-PeakFactor %f --Data-ColName %s --Parallel-NCPU=%i --Image-Mode=Clean --Beam-CenterNorm=1 --Deconv-CycleFactor=0 --Deconv-MaxMinorIter=1000000 --Deconv-MaxMajorIter=%s --Deconv-Mode %s --Beam-Model=LOFAR --Beam-LOFARBeamMode=A --Weight-Robust %f --Image-NPix=%i --CF-wmax 50000 --CF-Nw 100 --Output-Also %s --Image-Cell %f --Facets-NFacets=11 --SSDClean-NEnlargeData 0 --Freq-NDegridBand 1 --Beam-NBand 1 --Facets-DiamMax 1.5 --Facets-DiamMin 0.1 --Deconv-RMSFactor=%f --Data-Sort 1 --Cache-Dir=%s" % (
        imagename, mslist, peakfactor, colname, options['NCPU_DDF'],
        majorcycles, cleanmode, robust, imsize, saveimages, float(cellsize),
        rms_factor, cache_dir)

    if beamsize_minor is not None:
        runcommand += ' --Output-RestoringBeam %f,%f,%f' % (
            beamsize, beamsize_minor, beamsize_pa)
    elif beamsize is not None:
        runcommand += ' --Output-RestoringBeam %f' % (beamsize)

    if apply_weights:
        runcommand += ' --Weight-ColName="IMAGING_WEIGHT"'
    else:
        runcommand += ' --Weight-ColName="None"'

    if cubemode:
        with open(mslist) as mslist_file:
            channels = len(mslist_file.readlines())
        runcommand += ' --Output-Cubes I --Freq-NBand=%i' % channels
    else:
        runcommand += ' --Freq-NBand=2'

    if do_decorr:
        runcommand += ' --RIME-DecorrMode=FT'

    if cleanmode == 'SSD':
        runcommand += ' --SSDClean-SSDSolvePars [S,Alpha] --SSDClean-BICFactor 0'
    if clusterfile is not None:
        runcommand += ' --Facets-CatNodes=%s' % clusterfile
    if automask:
        runcommand += ' --Mask-Auto=1 --Mask-SigTh=%.2f' % automask_threshold
    if cleanmask is not None:
        runcommand += ' --Mask-External=%s' % cleanmask
    if applysols is not None:
        if normalization is not None:
            if normalization[:3] == 'Abs':
                normalization = 'Mean' + normalization  # backward compat. hack
            runcommand += ' --DDESolutions-GlobalNorm=' + normalization
        runcommand += ' --DDESolutions-DDModeGrid=%s --DDESolutions-DDModeDeGrid=%s --DDESolutions-DDSols=%s' % (
            applysols, applysols, ddsols)
    if use_dicomodel:
        if dicomodel_base is not None:
            runcommand += ' --Predict-InitDicoModel=%s.DicoModel' % dicomodel_base
        else:
            raise RuntimeError(
                'use_dicomodel is set but no dicomodel supplied')
    if threshold is not None:
        runcommand += ' --Deconv-FluxThreshold=%f' % threshold
    if uvrange is not None:
        runcommand += ' --Selection-UVRangeKm=[%f,%f]' % (uvrange[0],
                                                          uvrange[1])
    if dirty_from_resid and reuse_dirty:
        raise RuntimeError('Cannot combine reuse_dirty and dirty_from_resid')
    if dirty_from_resid:
        # possible that crashes could destroy the cache, so need to check
        if os.path.exists(cache_dir + '/' + mslist + '.ddfcache/LastResidual'):
            runcommand += ' --Cache-Dirty forceresidual'
    if reuse_dirty:
        if os.path.exists(cache_dir + '/' + mslist + '.ddfcache/Dirty'):
            runcommand += ' --Cache-Dirty forcedirty'
    if reuse_psf:
        if os.path.exists(cache_dir + '/' + mslist + '.ddfcache/PSF'):
            runcommand += ' --Cache-PSF force'

    if HMPsize is not None:
        runcommand += ' --SSDClean-MinSizeInitHMP=%i' % HMPsize

    if options['nobar']:
        runcommand += ' --Log-Boring=1'

    if smooth:
        runcommand += ' --Beam-Smooth=1'

    if options['restart'] and os.path.isfile(fname):
        warn('File ' + fname + ' already exists, skipping DDF step')
        if verbose:
            print('Would have run', runcommand)
    else:
        run(runcommand,
            dryrun=options['dryrun'],
            log=logfilename('DDF-' + imagename + '.log', options=options),
            quiet=options['quiet'])
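A hedged example of driving ddf_image for a quick dirty map; the dictionary below holds placeholder values for the option keys read by the function and its helpers (find_cache_dir, logfilename), and is not a complete pipeline configuration:

opts = {'HMPsize': 10, 'do_decorr': True, 'psf_arcsec': 12.0,
        'imsize': 20000, 'cellsize': 1.5, 'NCPU_DDF': 8,
        'nobar': True, 'restart': False, 'quiet': False,
        'logging': None, 'dryrun': True}   # dryrun: build the command, don't run DDF.py

ddf_image('image_test_init', 'mslist.txt',
          cleanmode='SSD',   # adds the SSD-specific solver options
          majorcycles=0,     # dirty image only
          colname='DATA', options=opts)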