Example #1
def optimize_uvmin(rootname, mslist, colname, uvmin_limit=None):
    uvminfile = rootname + '_uvmin.txt'
    report('Optimizing uvmin for self-cal')
    if os.path.isfile(uvminfile):
        result = float(open(uvminfile).readlines()[0].rstrip())
    else:
        level = sumdico(rootname)
        result = find_uvmin(mslist, level, colname=colname) * 1.1
        print 'Will use shortest baseline of', result, 'km'
        with open(uvminfile, 'w') as f:
            f.write('%f\n' % result)
    if uvmin_limit is not None and result < uvmin_limit:
        result = uvmin_limit
    return result
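
The pattern above is worth isolating: an expensive estimate is computed once, padded by a safety margin, and cached in a plain text file so that restarted runs reuse it. Below is a minimal sketch of that cache-to-disk idiom on its own; cached_float and the file name are hypothetical, not part of the pipeline.

import os

def cached_float(cachefile, compute_fn):
    # Reuse the value from a previous run if the cache file exists
    if os.path.isfile(cachefile):
        with open(cachefile) as f:
            return float(f.readline().rstrip())
    value = compute_fn()
    with open(cachefile, 'w') as f:
        f.write('%f\n' % value)
    return value

print(cached_float('field_uvmin.txt', lambda: 0.1 * 1.1))
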
Example #2
def do_run_job(name, basedir, qsubfile=None, do_field=True, prefix='ddfp'):
    config = ''
    workdir = basedir + '/' + name
    g = glob.glob(workdir + '/tier1*.cfg')
    if len(g) > 0:
        print 'Local config file exists, using that'
        config = ',CONFIG=' + g[0]
    if qsubfile is None:
        qsubfile = '/home/mjh/pipeline-master/ddf-pipeline/torque/pipeline.qsub'
    report('Submit job')
    os.system('qsub -N ' + prefix + '-' + name + ' -v WD=' + workdir + config +
              ' ' + qsubfile)
    if do_field:
        update_status(name, 'Queued', workdir=workdir)
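
do_run_job shells out to qsub, using -N to set the job name and -v to pass the working directory (and optionally a local config file) into the job's environment. Below is a hedged equivalent of that submission built with subprocess instead of os.system; the field name and working directory are placeholder values.

import subprocess

name = 'P123+45'                                    # hypothetical field name
workdir = '/data/lofar/mjh/' + name                 # hypothetical working directory
qsubfile = '/home/mjh/pipeline-master/ddf-pipeline/torque/pipeline.qsub'
# Same command as the os.system() call above, passed as an argument list
subprocess.call(['qsub', '-N', 'ddfp-' + name, '-v', 'WD=' + workdir, qsubfile])
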
Example #3
def make_external_mask(fname,
                       templatename,
                       use_tgss=True,
                       options=None,
                       extended_use=None,
                       clobber=False):
    if options is None:
        options = o  # attempt to get global
    if options['restart'] and os.path.isfile(fname) and not clobber:
        warn('External mask already exists, not creating it')
    else:
        report('Make blank external mask')
        hdus = fits.open(templatename)
        hdus[0].data = np.zeros_like(hdus[0].data, dtype=np.int32)
        hdus.writeto(fname, clobber=True)
        hdus.close()
        if use_tgss and options['tgss'] is not None:
            report('Merging the mask with TGSS catalogue')
            # A TGSS path is provided, so add the positions of bright TGSS sources to the mask
            modify_mask(fname,
                        fname,
                        options['tgss'],
                        options['tgss_radius'],
                        options['tgss_flux'],
                        do_extended=options['tgss_extended'],
                        cellsize=options['cellsize'],
                        pointsize=options['tgss_pointlike'])

        if options['region'] is not None:
            report('Merging the mask with user-specified region')
            add_manual_mask(fname, options['region'], fname)

        if options['extended_size'] is not None and extended_use is not None:
            report('Merging with automatic extended mask')
            merge_mask(fname, extended_use, fname)
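
The blank-mask step can be reproduced with astropy alone: open the template image, zero its data as integers, and write the result out. A minimal sketch with placeholder file names; note that current astropy spells the keyword overwrite=, while the clobber= form used above belongs to older releases.

import numpy as np
from astropy.io import fits

# 'template.fits' and 'mask.fits' are placeholder file names
with fits.open('template.fits') as hdus:
    hdus[0].data = np.zeros_like(hdus[0].data, dtype=np.int32)
    hdus.writeto('mask.fits', overwrite=True)       # clobber= in older astropy
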
Example #4
def do_run_job(name,basedir,qsubfile=None,do_field=True,prefix='ddfp',dysco=False):
    config=''
    workdir=basedir+'/'+name
    g=glob.glob(workdir+'/tier1*.cfg')
    if len(g)>0:
        print 'Local config file exists, using that'
        config=',CONFIG='+g[0]
    if qsubfile is None:
        qsubfile='/home/mjh/pipeline-master/ddf-pipeline/torque/pipeline.qsub'
    if dysco:
        qsubfile=qsubfile.replace('.qsub','-fdr14.qsub')
    report('Submit job')
    os.system('qsub -N '+prefix+'-'+name+' -v WD='+workdir+config+' '+qsubfile)
    if do_field:
        update_status(name,'Queued',workdir=workdir)
Example #5
def clearcache(mslist, options):
    cachedir = find_cache_dir(options)

    report('Clearing cache for ' + mslist)
    if os.path.isfile(mslist):
        filenames = [l.strip() for l in open(mslist, 'r').readlines()]
    else:
        filenames = []

    try:
        rmtglob(cachedir + '/' + mslist + '*.ddfcache')
        rmtglob(mslist + '*.ddfcache')
    except OSError:
        pass
    for f in filenames:
        try:
            rmtglob(cachedir + '/' + f + '*.ddfcache')
        except OSError:
            pass
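
rmtglob is a pipeline helper; from the way it is called and guarded with except OSError, it plausibly expands a glob pattern and removes the matching cache directories. A hedged stand-in written under that assumption, using only glob and shutil, with a placeholder cache location:

import glob
import shutil

def rmtglob(pattern):
    # Delete every directory matching the glob pattern
    matches = glob.glob(pattern)
    if not matches:
        raise OSError('nothing matches ' + pattern)
    for d in matches:
        shutil.rmtree(d)

try:
    rmtglob('/tmp/ddf-caches/*.ddfcache')           # placeholder cache location
except OSError:
    pass
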
Example #6
def check_imaging_weight(mslist_name):

    # returns a boolean that says whether it did something
    result = False
    report('Checking for IMAGING_WEIGHT in input MSS')
    mslist = [s.strip() for s in open(mslist_name).readlines()]
    for ms in mslist:
        t = pt.table(ms)
        try:
            dummy = t.getcoldesc('IMAGING_WEIGHT')
        except RuntimeError:
            dummy = None
        t.close()
        if dummy is not None:
            warn('Table ' + ms + ' already has imaging weights')
        else:
            pt.addImagingColumns(ms)
            result = True
    return result
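
The column check relies on python-casacore: getcoldesc raises RuntimeError when the column is absent, and addImagingColumns adds the imaging columns (including IMAGING_WEIGHT) when they are missing. A short sketch of that probe for a single, placeholder MeasurementSet; the pt alias matches the import style used above.

import casacore.tables as pt                        # pyrap.tables in older installs

ms = 'example.ms'                                   # placeholder MS name
t = pt.table(ms, readonly=True)
try:
    t.getcoldesc('IMAGING_WEIGHT')
    has_weights = True
except RuntimeError:
    has_weights = False
t.close()
if not has_weights:
    pt.addImagingColumns(ms)
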
def run_reprocess(wd=None):
    # by default assume we're in the working directory at this point
    if wd is not None:
        os.chdir(wd)
    update_status(None,'Running')
    solsfile = glob.glob('DDS3_full*smoothed.npz')
    if len(solsfile) < 1:
        die('Cannot find the correct solution file -- exiting')
    solsfile = str(solsfile[0])
    o = options('reprocess-vlow.cfg',option_list)
    cubefiles=['image_full_vlow_QU.cube.dirty.fits','image_full_vlow_QU.cube.dirty.corr.fits']
    cthreads=[]
    flist=[]
    ddf_kw = {}
    do_polcubes('DATA','[DDS3_full_smoothed,DDS3_full_slow]',[o['image_uvmin'],1.600000],'image_full_vlow',ddf_kw,beamsize=o['vlow_psf_arcsec'],imsize=o['vlow_imsize'],cellsize=o['vlow_cell'],robust=o['vlow_robust'],options=o,catcher=None)
    if o['compress_polcubes']:
        for cubefile in cubefiles:
            if o['restart'] and os.path.isfile(cubefile+'.fz'):
                warn('Compressed cube file '+cubefile+'.fz already exists, not starting compression thread')
            else:
                report('Starting compression thread for '+cubefile)
                thread = threading.Thread(target=compress_fits, args=(cubefile,o['fpack_q']))
                thread.start()
                cthreads.append(thread)
                flist.append(cubefile)
    if o['compress_polcubes']:
        # cthreads and flist exist
        for thread in cthreads:
            if thread.isAlive():
                warn('Waiting for a compression thread to finish')
                thread.join()
        if o['delete_compressed']:
            for f in flist:
                warn('Deleting compressed file %s' % f)
                os.remove(f)
    update_status(None,'Complete')
    
    # matching with catalogs
    removelist=[]
    for cat in o['list']:
        print 'Doing catalogue',cat
        if crossmatch_image(o['catprefix'] + '.cat.fits',cat,catdir=o['catdir'])>10:
            filter_catalog(o['catprefix'] + '.cat.fits',o['catprefix']+'.cat.fits_'+cat+'_match.fits',o['pbimage'],o['catprefix']+'.cat.fits_'+cat+'_match_filtered.fits',cat,options=o)
        else:
            print 'Insufficient matches, abandoning catalogue'
            removelist.append(cat)
    for cat in removelist:
        o['list'].remove(cat)

    # Astrometric plots
    if 'FIRST' in o['list']:
        report('Plotting position offsets')
        plot_position_offset('%s.cat.fits_FIRST_match_filtered.fits'%o['catprefix'],o['pbimage'],'%s.cat.fits_FIRST_match_filtered_positions.png'%o['catprefix'],'FIRST',options=o)

        t=Table.read(o['catprefix']+'.cat.fits_FIRST_match_filtered.fits')
        bsra=np.percentile(bootstrap(t['FIRST_dRA'],np.mean,10000),(16,84))
        bsdec=np.percentile(bootstrap(t['FIRST_dDEC'],np.mean,10000),(16,84))
        mdra=np.mean(t['FIRST_dRA'])
        mddec=np.mean(t['FIRST_dDEC'])
        print 'Mean delta RA is %.3f arcsec (1-sigma %.3f -- %.3f arcsec)' % (mdra,bsra[0],bsra[1])
        print 'Mean delta DEC is %.3f arcsec (1-sigma %.3f -- %.3f arcsec)' % (mddec,bsdec[0],bsdec[1])
        first_ra=mdra
        first_dec=mddec
        
        report('Plotting per-facet position offsets')
        do_plot_facet_offsets(t,tesselfile,o['catprefix']+'.cat.fits_FIRST_match_filtered_offsets.png')
        t['FIRST_dRA']-=mdra
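
The compression section above follows a simple fan-out/fan-in threading pattern: start one thread per cube file, remember both the threads and the file names, then join everything before deleting the uncompressed originals. A self-contained sketch of that pattern with a stand-in for compress_fits; the file names and the q value are placeholders, and is_alive() is the Python 3 spelling of isAlive().

import threading

def compress(path, q):
    print('compressing', path, 'with fpack q =', q)    # stand-in for compress_fits

cthreads, flist = [], []
for cubefile in ['cube_QU_1.fits', 'cube_QU_2.fits']:   # placeholder file names
    thread = threading.Thread(target=compress, args=(cubefile, 2))
    thread.start()
    cthreads.append(thread)
    flist.append(cubefile)
for thread in cthreads:
    if thread.is_alive():
        thread.join()
print('all compression threads finished for', flist)
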
Example #9
def main(msin, config_path):
    o = options(config_path, option_list)
    if o['pbimage'] is None:
        die('pbimage must be specified')

    # fix up the new list-type options
    for i, cat in enumerate(o['list']):
        try:
            o[cat] = o['filenames'][i]
        except:
            pass
        try:
            o[cat + '_matchrad'] = o['radii'][i]
        except:
            pass
        try:
            o[cat + '_fluxfactor'] = o['fluxfactor'][i]
        except:
            pass

    if o['logging'] is not None and not os.path.isdir(o['logging']):
        os.mkdir(o['logging'])

    # pybdsm source finding
    sfind_image(o['catprefix'], o['pbimage'], o['sfind_pixel_fraction'])

    # matching with catalogs
    for cat in o['list']:
        print 'Doing catalogue', cat
        crossmatch_image(o['catprefix'] + '.cat.fits', cat)
        filter_catalog(o['catprefix'] + '.cat.fits',
                       o['catprefix'] + '.cat.fits_' + cat + '_match.fits',
                       o['pbimage'],
                       o['catprefix'] + '.cat.fits_' + cat +
                       '_match_filtered.fits',
                       cat,
                       options=o)

    # Filter catalogs (only keep isolated compact sources within 3deg of pointing centre)

    # Astrometric plots
    if 'FIRST' in o['list']:
        report('Plotting position offsets')
        plot_position_offset(
            '%s.cat.fits_FIRST_match_filtered.fits' % o['catprefix'],
            o['pbimage'],
            '%s.cat.fits_FIRST_match_filtered_positions.png' % o['catprefix'],
            'FIRST',
            options=o)

        t = Table.read(o['catprefix'] + '.cat.fits_FIRST_match_filtered.fits')
        bsra = np.percentile(bootstrap(t['FIRST_dRA'], np.mean, 10000),
                             (16, 84))
        bsdec = np.percentile(bootstrap(t['FIRST_dDEC'], np.mean, 10000),
                              (16, 84))
        mdra = np.mean(t['FIRST_dRA'])
        mddec = np.mean(t['FIRST_dDEC'])
        print 'Mean delta RA is %.3f arcsec (1-sigma %.3f -- %.3f arcsec)' % (
            mdra, bsra[0], bsra[1])
        print 'Mean delta DEC is %.3f arcsec (1-sigma %.3f -- %.3f arcsec)' % (
            mddec, bsdec[0], bsdec[1])

        report('Plotting flux ratios')
        # Flux ratio plots (only compact sources)
        plot_flux_ratios(
            '%s.cat.fits_FIRST_match_filtered.fits' % o['catprefix'],
            o['pbimage'],
            '%s.cat.fits_FIRST_match_filtered_fluxerrors.png' % o['catprefix'],
            options=o)

    report('Plotting flux scale comparison')
    # Flux scale comparison plots
    if 'TGSS' in o['list']:
        plot_flux_errors(
            '%s.cat.fits_TGSS_match_filtered.fits' % o['catprefix'],
            o['pbimage'],
            '%s.cat.fits_TGSS_match_filtered_fluxratio.png' % o['catprefix'],
            'TGSS',
            options=o)
        t = Table.read(o['catprefix'] + '.cat.fits_TGSS_match_filtered.fits')
        ratios = t['Total_flux'] / (t['TGSS_Total_flux'] /
                                    o['TGSS_fluxfactor'])
        bsratio = np.percentile(bootstrap(ratios, np.median, 10000), (16, 84))
        print 'Median LOFAR/TGSS ratio is %.3f (1-sigma %.3f -- %.3f)' % (
            np.median(ratios), bsratio[0], bsratio[1])
    if 'NVSS' in o['list']:
        t = Table.read(o['catprefix'] + '.cat.fits_NVSS_match_filtered.fits')
        t = t[t['Total_flux'] > 10e-3]
        ratios = t['Total_flux'] / t['NVSS_Total_flux']
        bsratio = np.percentile(bootstrap(ratios, np.median, 10000), (16, 84))
        print 'Median LOFAR/NVSS ratio is %.3f (1-sigma %.3f -- %.3f)' % (
            np.median(ratios), bsratio[0], bsratio[1])
    # Noise estimate
    hdu = fits.open(o['pbimage'])

    imagenoise = get_rms(hdu)
    print 'An estimate of the image noise is %.3f muJy/beam' % (imagenoise *
                                                                1E6)
    return 0
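
The astrometry and flux checks all lean on a bootstrap() helper to turn a column of offsets or ratios into 16th/84th percentile error bars. Its implementation is not shown here, so the version below is an assumption about its behaviour (resample with replacement, apply the statistic, return the resampled statistics), wired into the same percentile call used above with fake data.

import numpy as np

def bootstrap(data, statistic, n_boot):
    # Resample with replacement n_boot times and apply the statistic each time
    data = np.asarray(data)
    idx = np.random.randint(0, len(data), size=(n_boot, len(data)))
    return np.array([statistic(data[i]) for i in idx])

offsets = np.random.normal(0.0, 0.3, 200)            # fake dRA offsets in arcsec
bs = np.percentile(bootstrap(offsets, np.mean, 1000), (16, 84))
print('Mean delta RA is %.3f arcsec (1-sigma %.3f -- %.3f arcsec)'
      % (np.mean(offsets), bs[0], bs[1]))
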
            filter_catalog(o['catprefix'] + '.cat.fits',
                           o['catprefix'] + '.cat.fits_' + cat + '_match.fits',
                           o['pbimage'],
                           o['catprefix'] + '.cat.fits_' + cat +
                           '_match_filtered.fits',
                           cat,
                           options=o)
        else:
            print 'No matches, abandoning catalogue'
            removelist.append(cat)
    for cat in removelist:
        o['list'].remove(cat)

    # Astrometric plots
    if 'FIRST' in o['list']:
        report('Plotting position offsets')
        plot_position_offset(
            '%s.cat.fits_FIRST_match_filtered.fits' % o['catprefix'],
            o['pbimage'],
            '%s.cat.fits_FIRST_match_filtered_positions.png' % o['catprefix'],
            'FIRST',
            options=o)

        t = Table.read(o['catprefix'] + '.cat.fits_FIRST_match_filtered.fits')
        bsra = np.percentile(bootstrap(t['FIRST_dRA'], np.mean, 10000),
                             (16, 84))
        bsdec = np.percentile(bootstrap(t['FIRST_dDEC'], np.mean, 10000),
                              (16, 84))
        mdra = np.mean(t['FIRST_dRA'])
        mddec = np.mean(t['FIRST_dDEC'])
        print 'Mean delta RA is %.3f arcsec (1-sigma %.3f -- %.3f arcsec)' % (
            mdra, bsra[0], bsra[1])
rootdir = '/data/lofar/mjh'
os.chdir(rootdir)

name = sys.argv[1]
try:
    qsubfile = sys.argv[2]
except:
    qsubfile = '/home/mjh/git/ddf-pipeline/pipeline.qsub'

try:
    os.mkdir(name)
except OSError:
    warn('Working directory already exists')
    pass
os.chdir(name)
report('Downloading data')
if not download_dataset('https://lofar-webdav.grid.sara.nl',
                        '/SKSP/' + name + '/'):
    die('Download failed to get the right number of files')

report('Unpacking data')
unpack()

report('Deleting tar files')
os.system('rm *.tar.gz')

report('Making ms lists')
if make_list():
    report('Submit job')
    os.system('qsub -N ddfp-' + name + ' -v WD=' + rootdir + '/' + name + ' ' +
              qsubfile)
os.chdir(rootdir)

name=sys.argv[1]

if name[0]!='P' and name[0]!='L':
    die('This code should be used only with field or observation names',database=False)

do_field=(name[0]=='P')

try:
    os.mkdir(name)
except OSError:
    warn('Working directory already exists')
    pass
os.chdir(name)
report('Downloading data')
if do_field:
    success=download_field(name)
else:
    success=download_dataset('https://lofar-webdav.grid.sara.nl','/SKSP/'+name+'/')

if not success:
    die('Download failed, see earlier errors',database=False)

    
report('Unpacking data')
unpack()
if do_field:
    unpack_db_update()
    
report('Deleting tar files')
Example #13
def do_run_pipeline(name, basedir):

    if name[0] != 'P' and name[0] != 'L':
        die('This code should be used only with field or observation names',
            database=False)

    do_field = (name[0] == 'P')

    try:
        qsubfile = sys.argv[2]
    except:
        qsubfile = '/home/mjh/pipeline-master/ddf-pipeline/torque/pipeline.qsub'

    workdir = basedir + '/' + name
    try:
        os.mkdir(workdir)
    except OSError:
        warn('Working directory already exists')

    report('Downloading data')
    if do_field:
        success = download_field(name, basedir=basedir)
    else:
        success = download_dataset('https://lofar-webdav.grid.sara.nl',
                                   '/SKSP/' + name + '/',
                                   basedir=basedir)

    if not success:
        die('Download failed, see earlier errors', database=False)

    report('Unpacking data')
    try:
        unpack(workdir=workdir)
    except RuntimeError:
        if do_field:
            update_status(name, 'List failed', workdir=workdir)
        raise
    if do_field:
        update_status(name, 'Unpacked', workdir=workdir)

    report('Deleting tar files')
    os.system('rm ' + workdir + '/*.tar.gz')
    os.system('rm ' + workdir + '/*.tar')

    averaged = False
    report('Checking structure')
    g = glob.glob(workdir + '/*.ms')
    msl = MSList(None, mss=g)
    uobsids = set(msl.obsids)
    for thisobs in uobsids:
        # check one MS with each ID
        for m, ch, o in zip(msl.mss, msl.channels, msl.obsids):
            if o == thisobs:
                channels = len(ch)
                print 'MS', m, 'has', channels, 'channels'
                if channels > 20:
                    update_status(name, 'Averaging', workdir=workdir)
                    print 'Averaging needed for', thisobs, '!'
                    averaged = True
                    average(wildcard=workdir + '/*' + thisobs + '*')
                    os.system('rm -r ' + workdir + '/*' + thisobs +
                              '*pre-cal.ms')
                break

    report('Making ms lists')
    success = make_list(workdir=workdir)
    if do_field:
        list_db_update(success, workdir=workdir)
    if not success:
        die('make_list could not construct the MS list', database=False)

    report('Creating custom config file from template')
    make_custom_config(name, workdir, do_field, averaged)

    # now run the job
    do_run_job(name, basedir=basedir, qsubfile=None, do_field=do_field)
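
The structure check walks the measurement sets grouped by observation ID, inspects one MS per ID, and triggers averaging when it sees more than 20 channels. The pipeline's MSList object does the bookkeeping; the sketch below reproduces just the grouping-and-break logic with plain lists and invented file names.

mss      = ['L123_SB000.ms', 'L123_SB001.ms', 'L456_SB000.ms']   # placeholder names
channels = [list(range(16)), list(range(24)), list(range(16))]    # per-MS channel lists
obsids   = ['L123', 'L123', 'L456']

for thisobs in set(obsids):
    # check one MS with each ID, exactly as the loop above does
    for m, ch, o in zip(mss, channels, obsids):
        if o == thisobs:
            print('MS', m, 'has', len(ch), 'channels')
            if len(ch) > 20:
                print('Averaging needed for', thisobs, '!')
            break
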
Example #14
def do_offsets(o):
    # o is the options file

    if o['mode'] != 'normal' and o['mode'] != 'test':
        raise NotImplementedError('Offsets called with mode ' + o['mode'])

    image_root = 'image_full_ampphase_di_m.NS'

    method = o['method']

    report('Determining astrometric offsets with method ' + method +
           ' in mode ' + o['mode'])
    report('Merging downloaded catalogues')
    if os.path.isfile(method + '.fits'):
        warn('Merged file exists, reading from disk instead')
        data = Table.read(method + '.fits')
    else:
        if method == 'pslocal':
            data = Table.read(method + '/' + method + '.txt', format='ascii')
            data['RA'].name = 'ra'
            data['DEC'].name = 'dec'
            data.write(method + '.fits')
        else:
            kwargs = {}
            if 'panstarrs' in method:
                kwargs['rastr'] = 'ramean'
                kwargs['decstr'] = 'decmean'
            data = merge_cat(method, **kwargs)

    if o['mode'] == 'test':
        image_root += '_shift'
        method += '-test'

    report('Running PyBDSM on LOFAR image, please wait...')
    catfile = image_root + '.offset_cat.fits'
    gaulfile = catfile.replace('cat', 'gaul')
    if os.path.isfile(catfile):
        warn('Catalogue already exists, skipping pybdsf run')
    else:
        if o['mode'] == 'test':
            suffix = 'facetRestored'
        else:
            suffix = 'restored'
        pbimage = image_root + '.int.' + suffix + '.fits'
        nonpbimage = image_root + '.app.' + suffix + '.fits'
        img = bdsm.process_image(pbimage,
                                 detection_image=nonpbimage,
                                 thresh_isl=4.0,
                                 thresh_pix=5.0,
                                 rms_box=(150, 15),
                                 rms_map=True,
                                 mean_map='zero',
                                 ini_method='intensity',
                                 adaptive_rms_box=True,
                                 adaptive_thresh=150,
                                 rms_box_bright=(60, 15),
                                 group_by_isl=False,
                                 group_tol=10.0,
                                 output_opts=True,
                                 output_all=True,
                                 atrous_do=False,
                                 flagging_opts=True,
                                 flag_maxsize_fwhm=0.5,
                                 advanced_opts=True,
                                 blank_limit=None)
        img.write_catalog(outfile=catfile,
                          catalog_type='srl',
                          format='fits',
                          correct_proj='True')
        img.write_catalog(outfile=gaulfile,
                          catalog_type='gaul',
                          format='fits',
                          correct_proj='True')

    lofar = Table.read(catfile)
    print len(lofar), 'LOFAR sources before filtering'
    filter = (lofar['E_RA'] * 3600.0) < 2.0
    filter &= (lofar['E_DEC'] * 3600.0) < 2.0
    filter &= (lofar['Maj'] * 3600.0) < 10
    lofar = lofar[filter]
    print len(lofar), 'LOFAR sources after filtering'
    regfile = image_root + '.tessel.reg'
    cra, cdec = get_centpos()
    report('Set up structure')

    NDir = np.load("image_dirin_SSD_m.npy.ClusterCat.npy").shape[0]
    oo = Offsets(method,
                 n=NDir,
                 imroot=image_root,
                 cellsize=o['cellsize'],
                 fitmethod=o['fit'])
    report('Label table')
    lofar_l = oo.r.add_facet_labels(lofar)
    report('Finding offsets')
    oo.find_offsets(lofar_l, data)
    report('Fitting offsets')
    oo.fit_offsets()
    report('Making plots and saving output')
    #oo.plot_fits(method+'-fits.pdf')
    oo.save_fits()
    oo.plot_offsets()
    if 'test' not in o['mode']:
        oo.save(method + '-fit_state.pickle')
        report('Making astrometry error map, please wait')
        oo.make_astrometry_map('astromap.fits', 20)
        oo.offsets_to_facetshift('facet-offset.txt')
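
After PyBDSF has run, the LOFAR catalogue is trimmed to well-measured compact sources with a boolean mask on the source-list columns (positional errors under 2 arcsec, major axis under 10 arcsec). The same filtering applied to a two-row toy astropy Table, with made-up values in degrees:

import numpy as np
from astropy.table import Table

# Two fake sources; column names follow the PyBDSF srl catalogue used above
lofar = Table({'E_RA':  np.array([1e-4, 1e-3]),
               'E_DEC': np.array([2e-4, 4e-4]),
               'Maj':   np.array([2e-3, 5e-3])})
mask  = (lofar['E_RA'] * 3600.0) < 2.0
mask &= (lofar['E_DEC'] * 3600.0) < 2.0
mask &= (lofar['Maj'] * 3600.0) < 10
lofar = lofar[mask]
print(len(lofar), 'LOFAR sources after filtering')
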
Example #15
def do_offsets(o):
    # o is the options file

    if o['mode']!='normal' and  o['mode']!='test':
        raise NotImplementedError('Offsets called with mode '+o['mode'])

    image_root='image_full_ampphase_di_m.NS'

    method=o['method']

    report('Determining astrometric offsets with method '+method+' in mode '+o['mode'])
    report('Merging downloaded catalogues')
    if os.path.isfile(method+'.fits'):
        warn('Merged file exists, reading from disk instead')
        data=Table.read(method+'.fits')
    else:
        if method=='pslocal':
            data=Table.read(method+'/'+method+'.txt',format='ascii')
            data['RA'].name='ra'
            data['DEC'].name='dec'
            data.write(method+'.fits')
        else:    
            kwargs={}
            if 'panstarrs' in method:
                kwargs['rastr']='ramean'
                kwargs['decstr']='decmean'
            data=merge_cat(method,**kwargs)

    if o['mode']=='test':
        image_root+='_shift'
        method+='-test'

    report('Running PyBDSM on LOFAR image, please wait...')
    catfile=image_root+'.offset_cat.fits'
    gaulfile=catfile.replace('cat','gaul')
    if os.path.isfile(catfile):
        warn('Catalogue already exists, skipping pybdsf run')
    else:
        if o['mode']=='test':
            suffix='facetRestored'
        else:
            suffix='restored'
        pbimage=image_root+'.int.'+suffix+'.fits'
        nonpbimage=image_root+'.app.'+suffix+'.fits'
        img = bdsm.process_image(pbimage, detection_image=nonpbimage, thresh_isl=4.0, thresh_pix=5.0, rms_box=(150,15), rms_map=True, mean_map='zero', ini_method='intensity', adaptive_rms_box=True, adaptive_thresh=150, rms_box_bright=(60,15), group_by_isl=False, group_tol=10.0,output_opts=True, output_all=True, atrous_do=False, flagging_opts=True, flag_maxsize_fwhm=0.5,advanced_opts=True, blank_limit=None)
        img.write_catalog(outfile=catfile,catalog_type='srl',format='fits',correct_proj='True')
        img.write_catalog(outfile=gaulfile,catalog_type='gaul',format='fits',correct_proj='True')

    lofar=Table.read(catfile)
    print len(lofar),'LOFAR sources before filtering'
    filter=(lofar['E_RA']*3600.0)<2.0
    filter&=(lofar['E_DEC']*3600.0)<2.0
    filter&=(lofar['Maj']*3600.0)<10
    lofar=lofar[filter]
    print len(lofar),'LOFAR sources after filtering'
    regfile=image_root+'.tessel.reg'
    cra,cdec=get_centpos()
    report('Set up structure')

    NDir=np.load("image_dirin_SSD_m.npy.ClusterCat.npy").shape[0]
    oo=Offsets(method,n=NDir,imroot=image_root,cellsize=o['cellsize'],fitmethod=o['fit'])
    report('Label table')
    lofar_l=oo.r.add_facet_labels(lofar)
    report('Finding offsets')
    oo.find_offsets(lofar_l,data)
    report('Fitting offsets')
    oo.fit_offsets()
    report('Making plots and saving output')
    #oo.plot_fits(method+'-fits.pdf')
    oo.save_fits()
    oo.plot_offsets()
    if 'test' not in o['mode']:
        oo.save(method+'-fit_state.pickle')
        report('Making astrometry error map, please wait')
        oo.make_astrometry_map('astromap.fits',20)
        oo.offsets_to_facetshift('facet-offset.txt')
Example #16
def do_run_selfcal(name,basedir,inarchivedir,outarchivedir):
    startdir = os.getcwd()
    sdb=SurveysDB()
    extractdict = sdb.get_reprocessing(name)
    sdb.close()
    fields = extractdict['fields'].split(',')
    selfcal_status = extractdict['selfcal_status']
    extract_status = extractdict['extract_status'].split(',')
    

    print 'Working on ',name, 'in fields', fields,'current selfcal status',selfcal_status
    
  
    workdir=basedir+'/'+name
    try:
        os.mkdir(workdir)
    except OSError:
        warn('Working directory already exists')
    print 'In directory', os.getcwd()
    os.chdir(workdir)
    # Update status to running here
    selfcal_status = 'STARTED'
    sdb=SurveysDB()
    extractdict = sdb.get_reprocessing(name)
    extractdict['selfcal_status'] = selfcal_status
    sdb.db_set('reprocessing',extractdict)
    sdb.close()
    print 'Updated status to STARTED for',name
    time.sleep(2.0)
    
    print 'Starting rsync'
    fieldstring = ''

    for fieldid, field in enumerate(fields):
        print field, fields
    
        sdb=SurveysDB()
        extractdict = sdb.get_reprocessing(name)
        sdb.close()
        extract_status = extractdict['extract_status'].split(',')
        
        if extract_status[fieldid] == 'EDONE':
          cmd = '%s/%s/%s/%s_%s*archive*'%(inarchivedir,name,field,field,name)
          observations = check_output('ssh [email protected] ls -d ' + cmd, shell=True)
          print 'ssh [email protected] ls -d ' + cmd
          observations = observations.split('\n')[:-1] # remove last empty item in this list
        else:
          observations = []

        print 'DATA LOCATIONS', observations
        print 'FIELDS', fields
        print 'EXTRACT STATUS', extract_status

        for observation in observations:
            print observation
            report('Copying data from %s'%observation)
        
            #'+ inarchivedir +'/' + name + '/' + field +'/' + field +'_'+name +'.dysco.sub.shift.avg.weights.ms.archive')
            do_rsync_download(observation.split('/')[-1],inarchivedir +'/'+name + '/'+field +'/',workdir)

            fieldstring += observation.split('/')[-1] + ' '
            #'%s_%s.dysco.sub.shift.avg.weights.ms.archive '%(field,name)
    fieldstring = fieldstring[:-1]

    # Update status to copied here
    report('Updating %s status to copied'%name)
    selfcal_status = 'COPIED'
    sdb=SurveysDB()
    extractdict = sdb.get_reprocessing(name)
    extractdict['selfcal_status'] = selfcal_status
    uvminstr =  str(extractdict['uvmin'])
    uvmin =  extractdict['uvmin']
    sdb.db_set('reprocessing',extractdict)
    sdb.close()
    
    
    # Create boxfile
    report('Create ds9 region file for extraction')
    create_ds9_region('%s.ds9.reg'%name,extractdict['ra'],extractdict['decl'],extractdict['size'])
    

    # Run subtract code
    print os.getcwd(), 'working here'
    
    
    if uvmin > 0.0:
       print ('runwsclean.py --uvmin=%s -b  %s.ds9.reg -i %s %s'%(uvminstr,name,name+"_image",fieldstring))
       os.system('runwsclean.py --uvmin=%s -b  %s.ds9.reg -i %s %s'%(uvminstr,name,name+"_image",fieldstring))
    else:    
       print ('runwsclean.py -b  %s.ds9.reg -i %s %s'%(name,name+"_image",fieldstring))
       os.system('runwsclean.py -b  %s.ds9.reg -i %s %s'%(name,name+"_image",fieldstring))

    report('Archiving the results to %s'%outarchivedir)
    os.chdir(workdir)
    f = glob.glob('%s.ds9.tar.gz'%(name))
    do_rsync_upload(name,outarchivedir,f)
    

    # update the database to give success
    selfcal_status = 'SDONE'
    sdb=SurveysDB()
    extractdict = sdb.get_reprocessing(name)
    extractdict['selfcal_status'] = selfcal_status
    sdb.db_set('reprocessing',extractdict)
    sdb.close()
    print 'Updated status to SDONE for',name
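
The status bookkeeping above repeats the same read-modify-write cycle against the reprocessing table several times: open SurveysDB, fetch the row, change selfcal_status, write it back, close. A small helper like the one below (hypothetical, not part of the pipeline; the import path is an assumption) would keep that cycle in one place.

from surveys_db import SurveysDB        # assumed import path for the pipeline DB

def set_selfcal_status(name, status):
    # One read-modify-write cycle against the reprocessing table
    sdb = SurveysDB()
    extractdict = sdb.get_reprocessing(name)
    extractdict['selfcal_status'] = status
    sdb.db_set('reprocessing', extractdict)
    sdb.close()
    print('Updated status to', status, 'for', name)

set_selfcal_status('ExtractTarget1', 'STARTED')   # hypothetical target name
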
Example #17
def do_run_pipeline(name,basedir,qsubfile=None,do_field=True):
    '''
    set do_field False for the now obsolete behaviour of downloading
    and imaging a particular observation

    '''
    if qsubfile is None:
        qsubfile='/home/mjh/pipeline-master/ddf-pipeline/torque/pipeline.qsub'

    workdir=basedir+'/'+name
    try:
        os.mkdir(workdir)
    except OSError:
        warn('Working directory already exists')

    report('Downloading data')
    if do_field:
        success=download_field(name,basedir=basedir)
    else:
        success=download_dataset('https://lofar-webdav.grid.sara.nl','/SKSP/'+name+'/',basedir=basedir)

    if not success:
        die('Download failed, see earlier errors',database=False)

    report('Unpacking data')
    try:
        unpack(workdir=workdir)
    except RuntimeError:
        if do_field:
            update_status(name,'Unpack failed',workdir=workdir)
        raise
    if do_field:
        update_status(name,'Unpacked',workdir=workdir)

    report('Deleting tar files')
    os.system('rm '+workdir+'/*.tar.gz')
    os.system('rm '+workdir+'/*.tar')

    averaged=False
    report('Checking structure')
    g=glob.glob(workdir+'/*.ms')
    msl=MSList(None,mss=g)
    dysco=np.any(msl.dysco)
    uobsids=set(msl.obsids)
    for thisobs in uobsids:
        # check one MS with each ID
        for m,ch,o,hc in zip(msl.mss,msl.channels,msl.obsids,msl.hascorrected):
            if o==thisobs:
                if not(hc):
                    print('MS',m,'has no corrected_data column, force use of DATA')
                    averaged=True
                channels=len(ch)
                print('MS',m,'has',channels,'channels')
                if channels>20:
                    update_status(name,'Averaging',workdir=workdir)
                    print('Averaging needed for',thisobs,'!')
                    averaged=True
                    average(wildcard=workdir+'/*'+thisobs+'*')
                    os.system('rm -r '+workdir+'/*'+thisobs+'*pre-cal.ms')
                break
    
    report('Making ms lists')
    success=make_list(workdir=workdir)
    if do_field:
        list_db_update(success,workdir=workdir)
    if not success:
        die('make_list could not construct the MS list',database=False)
        
    report('Creating custom config file from template')
    make_custom_config(name,workdir,do_field,averaged)
    
    # now run the job
    do_run_job(name,basedir=basedir,qsubfile=None,do_field=do_field,dysco=dysco)
def do_run_subtract(name,basedir,inarchivedir,outarchivedir,force=False):
    startdir = os.getcwd()
    sdb=SurveysDB()
    extractdict = sdb.get_reprocessing(name)
    sdb.close()
    fields = extractdict['fields'].split(',')
    extract_status = extractdict['extract_status'].split(',')

    print 'Working on ',name, 'in fields', fields,'which have status',extract_status
    
    for i in range(0,len(fields)):
        os.chdir(startdir)
        if not(extract_status[i] == 'EREADY' or (force and extract_status[i] == 'STARTED')):
            continue
        field = fields[i]

        workdir=basedir+'/'+name
        try:
            os.mkdir(workdir)
        except OSError:
            warn('Working directory already exists')
        print 'In directory', os.getcwd()
        os.chdir(workdir)
        # Update status to running here
        extract_status[i] = 'STARTED'
        sdb=SurveysDB()
        extractdict = sdb.get_reprocessing(name)
        extractdict['extract_status'] = ','.join(extract_status)
        sdb.db_set('reprocessing',extractdict)
        sdb.close()
        print 'Updated status to STARTED for',field,name
        time.sleep(2.0)
        report('Copying data from %s'%inarchivedir)
        
        # WANT TO MAKE THIS INTO A RSYNC SO THAT IT CAN BE DONE OUTSIDE LEIDEN
        #os.system('cp -r %s/%s %s'%(inarchivedir,field,workdir))
        do_rsync_download(field,inarchivedir,workdir)


        # Update status to copied here
        extract_status[i] = 'COPIED'
        sdb=SurveysDB()
        extractdict = sdb.get_reprocessing(name)
        extractdict['extract_status'] = ','.join(extract_status)
        sdb.db_set('reprocessing',extractdict)
        sdb.close()
        print 'Updated status to COPIED for',field,name


        # Create boxfile
        create_ds9_region('%s.ds9.reg'%name,extractdict['ra'],extractdict['decl'],extractdict['size'])


        # Run subtract code
        print os.getcwd(), 'working here'
        os.chdir(field)
        print ('sub-sources-outside-region.py -b %s/%s.ds9.reg -p %s'%(workdir,name,name))
        result=os.system('sub-sources-outside-region.py -b %s/%s.ds9.reg -p %s'%(workdir,name,name))
        if result!=0:
            raise RuntimeError('sub-sources-outside-region.py failed with error code %i' % result)
        
        # Archive the results need an rsync code this is just the *archive file that needs to be archived.
        #os.system('mkdir %s/%s'%(outarchivedir,name))
        #os.system('mkdir %s/%s/%s'%(outarchivedir,name,field))
        os.chdir(workdir)
        f = glob.glob('%s/*.archive*'%(field))
        do_rsync_upload(name,field,f)

        #print  ('cp -r %s_%s.dysco.sub.shift.avg.weights.ms.archive %s/%s/%s'%(field,name,outarchivedir,name,field))
        #os.system('cp -r %s_%s.dysco.sub.shift.avg.weights.ms.archive %s/%s/%s'%(field,name,outarchivedir,name,field))


        # update the database to give success
        extract_status[i] = 'EDONE'
        sdb=SurveysDB()
        extractdict = sdb.get_reprocessing(name)
        extractdict['extract_status'] = ','.join(extract_status)
        sdb.db_set('reprocessing',extractdict)
        sdb.close()
        print 'Updated status to EDONE for',field,name

    # update the database to give selfcal status as SREADY
    selfcal_status = 'SREADY'
    sdb=SurveysDB()
    extractdict = sdb.get_reprocessing(name)
    extractdict['selfcal_status'] = selfcal_status
    sdb.db_set('reprocessing',extractdict)
    sdb.close()
    print 'Updated status to SREADY for',name
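
Each subtraction run above is launched with os.system and the integer return code is checked by hand. A hedged alternative using subprocess.check_call, which raises on a non-zero exit so the same RuntimeError can carry the code; the working directory and target name are placeholders.

import subprocess

workdir, name = '/data/extract/Target1', 'Target1'   # placeholder values
cmd = 'sub-sources-outside-region.py -b %s/%s.ds9.reg -p %s' % (workdir, name, name)
try:
    subprocess.check_call(cmd, shell=True)
except subprocess.CalledProcessError as e:
    raise RuntimeError('sub-sources-outside-region.py failed with error code %i'
                       % e.returncode)
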
def do_run_subtract(name, basedir, inarchivedir, outarchivedir, force=False):
    startdir = os.getcwd()
    sdb = SurveysDB()
    extractdict = sdb.get_reprocessing(name)
    sdb.close()
    fields = extractdict['fields'].split(',')
    extract_status = extractdict['extract_status'].split(',')
    try:
        bad_pointings = extractdict['bad_pointings'].split(',')
    except AttributeError:
        bad_pointings = ['']
    print('Working on ', name, 'in fields', fields, 'which have status',
          extract_status)

    for i in range(0, len(fields)):
        os.chdir(startdir)
        if not (extract_status[i] == 'EREADY' or
                (force and extract_status[i] == 'STARTED')):
            continue
        field = fields[i]
        if field in bad_pointings:
            print('Field', field,
                  'in bad pointings -- skipping and setting to BADP')
            sdb = SurveysDB()
            extractdict = sdb.get_reprocessing(name)
            extract_status[i] = 'BADP'
            extractdict['extract_status'] = ','.join(extract_status)
            sdb.db_set('reprocessing', extractdict)
            sdb.close()
            continue
        workdir = basedir + '/' + name
        try:
            os.mkdir(workdir)
        except OSError:
            warn('Working directory already exists')
        print('In directory', os.getcwd())
        os.chdir(workdir)
        # Update status to running here
        extract_status[i] = 'STARTED'
        sdb = SurveysDB()
        extractdict = sdb.get_reprocessing(name)
        extractdict['extract_status'] = ','.join(extract_status)
        sdb.db_set('reprocessing', extractdict)
        sdb.close()
        print('Updated status to STARTED for', field, name)
        time.sleep(2.0)
        report('Copying data from %s' % inarchivedir)

        # WANT TO MAKE THIS INTO A RSYNC SO THAT IT CAN BE DONE OUTSIDE LEIDEN
        #os.system('cp -r %s/%s %s'%(inarchivedir,field,workdir))
        do_rsync_download(field, inarchivedir, workdir)

        # Update status to copied here
        extract_status[i] = 'COPIED'
        sdb = SurveysDB()
        extractdict = sdb.get_reprocessing(name)
        extractdict['extract_status'] = ','.join(extract_status)
        sdb.db_set('reprocessing', extractdict)
        sdb.close()
        print('Updated status to COPIED for', field, name)

        # Create boxfile
        create_ds9_region('%s.ds9.reg' % name, extractdict['ra'],
                          extractdict['decl'], extractdict['size'])

        # Run subtract code
        print(os.getcwd(), 'working here')
        os.chdir(field)
        print('sub-sources-outside-region.py -b %s/%s.ds9.reg -p %s' %
              (workdir, name, name))
        result = os.system(
            'sub-sources-outside-region.py -b %s/%s.ds9.reg -p %s' %
            (workdir, name, name))
        if result != 0:
            raise RuntimeError(
                'sub-sources-outside-region.py failed with error code %i' %
                result)

        # Archive the results need an rsync code this is just the *archive file that needs to be archived.
        #os.system('mkdir %s/%s'%(outarchivedir,name))
        #os.system('mkdir %s/%s/%s'%(outarchivedir,name,field))
        os.chdir(workdir)
        f = glob.glob('%s/*.archive*' % (field))
        do_rsync_upload(name, field, f)

        #print  ('cp -r %s_%s.dysco.sub.shift.avg.weights.ms.archive %s/%s/%s'%(field,name,outarchivedir,name,field))
        #os.system('cp -r %s_%s.dysco.sub.shift.avg.weights.ms.archive %s/%s/%s'%(field,name,outarchivedir,name,field))

        # update the database to give success
        extract_status[i] = 'EDONE'
        sdb = SurveysDB()
        extractdict = sdb.get_reprocessing(name)
        extractdict['extract_status'] = ','.join(extract_status)
        sdb.db_set('reprocessing', extractdict)
        sdb.close()
        print('Updated status to EDONE for', field, name)

    # update the database to give selfcal status as SREADY
    selfcal_status = 'SREADY'
    sdb = SurveysDB()
    extractdict = sdb.get_reprocessing(name)
    extractdict['selfcal_status'] = selfcal_status
    sdb.db_set('reprocessing', extractdict)
    sdb.close()
    print('Updated status to SREADY for', name)
Example #20
                'SolsFile': ddsols,
                'WSize': interval
            }))
    return outsols


def full_clearcache(o):
    clearcache(o['mslist'], o)
    clearcache('temp_mslist.txt', o)
    if o['full_mslist'] is not None:
        clearcache(o['full_mslist'], o)


if __name__ == '__main__':
    # Main loop
    report('Welcome to ddf-pipeline, version ' + __version__)
    if len(sys.argv) < 2:
        warn(
            'pipeline.py must be called with at least one parameter file or a command-line\noption list.\nE.g "pipeline.py example.cfg second_example.cfg --solutions-robust=0.1"\nSee below for a complete list of possible options with their default values.'
        )
        print_options(option_list)
        sys.exit(1)

    o = options(sys.argv[1:], option_list)

    if o['catch_signal']:
        catcher = Catcher()
    else:
        catcher = None

    uvrange = [o['image_uvmin'], o['uvmax']]