Code example #1
def make_mask(imagename, thresh):

    fname = imagename + '.mask.fits'
    runcommand = "MakeMask.py --RestoredIm=%s --Th=%s --Box=50,2" % (imagename,
                                                                     thresh)
    run(runcommand)
    return fname
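
A minimal stand-alone driver for the snippet above, as a sketch only: the pipeline's run() helper is assumed to execute a shell command, so it is stubbed here with subprocess, and the image name is borrowed from code example #7 below. MakeMask.py must be on the PATH for the call to actually work.

import subprocess

def run(cmd):
    # Stand-in for the pipeline's run() wrapper, assumed to shell out.
    subprocess.check_call(cmd, shell=True)

maskname = make_mask('image_full_vlow_nocut.app.restored.fits', 3.0)
print('Mask written to', maskname)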
Code example #2
File: pipeline.py  Project: alexmatze/ddf-pipeline
def make_mask(imagename,
              thresh,
              verbose=False,
              options=None,
              external_mask=None,
              catcher=None):
    if catcher: catcher.check()

    # external_mask, if given, is merged into the newly made mask below
    if options is None:
        options = o  # attempt to get global

    fname = imagename + '.mask.fits'
    runcommand = "MakeMask.py --RestoredIm=%s --Th=%s --Box=50,2" % (imagename,
                                                                     thresh)
    if options['restart'] and os.path.isfile(fname):
        warn('File ' + fname + ' already exists, skipping MakeMask step')
        if verbose:
            print('Would have run', runcommand)
    else:
        run(runcommand,
            dryrun=options['dryrun'],
            log=logfilename('MM-' + imagename + '.log', options=options),
            quiet=options['quiet'])
        if external_mask is not None:
            merge_mask(fname, external_mask, fname)
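
The restart check above is the pipeline's recurring idempotency pattern: when options['restart'] is set and the step's output file already exists, the command is skipped. A hypothetical helper distilling that pattern (skip_if_done and its warn_fn argument are not part of ddf-pipeline):

import os

def skip_if_done(fname, options, warn_fn=print):
    # Return True when restart mode is on and the output file exists,
    # i.e. when the step that produces fname can safely be skipped.
    if options.get('restart') and os.path.isfile(fname):
        warn_fn('File ' + fname + ' already exists, skipping step')
        return True
    return False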
Code example #3
File: pipeline.py  Project: alexmatze/ddf-pipeline
def killms_data(imagename,
                mslist,
                outsols,
                clusterfile=None,
                colname='CORRECTED_DATA',
                niterkf=6,
                dicomodel=None,
                uvrange=None,
                wtuv=None,
                robust=None,
                catcher=None,
                options=None):

    if options is None:
        options = o  # attempt to get global if it exists

    cache_dir = find_cache_dir(options)

    # run killms individually on each MS -- allows restart if it failed in the middle
    filenames = [l.strip() for l in open(mslist, 'r').readlines()]
    for f in filenames:
        if catcher: catcher.check()
        checkname = f + '/killMS.' + outsols + '.sols.npz'
        if options['restart'] and os.path.isfile(checkname):
            warn('Solutions file ' + checkname +
                 ' already exists, not running killMS step')
        else:
            runcommand = "killMS.py --MSName %s --SolverType KAFCA --PolMode Scalar --BaseImageName %s --dt %f --BeamMode LOFAR --LOFARBeamMode=A --NIterKF %i --CovQ 0.1 --LambdaKF=%f --NCPU %i --OutSolsName %s --NChanSols %i --PowerSmooth=%f --InCol %s --DDFCacheDir=%s" % (
                f, imagename, options['dt'], niterkf, options['LambdaKF'],
                options['NCPU_killms'], outsols, options['NChanSols'],
                options['PowerSmooth'], colname, cache_dir)
            if robust is None:
                runcommand += ' --Weighting Natural'
            else:
                runcommand += ' --Weighting Briggs --Robust=%f' % robust
            if uvrange is not None:
                if wtuv is not None:
                    runcommand += ' --WTUV=%f --WeightUVMinMax=%f,%f' % (
                        wtuv, uvrange[0], uvrange[1])
                else:
                    runcommand += ' --UVMinMax=%f,%f' % (uvrange[0],
                                                         uvrange[1])
            if clusterfile is not None:
                runcommand += ' --NodesFile ' + clusterfile
            if dicomodel is not None:
                runcommand += ' --DicoModel ' + dicomodel
            if options['nobar']:
                runcommand += ' --DoBar=0'

            rootfilename = outsols.split('/')[-1]
            f = f.replace("/", "_")
            run(runcommand,
                dryrun=options['dryrun'],
                log=logfilename('KillMS-' + f + '_' + rootfilename + '.log',
                                options=options),
                quiet=options['quiet'])
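
killms_data assembles one long shell string by %-formatting plus conditional concatenation. For comparison, the same incremental construction can be done with an argument list, which sidesteps shell quoting; this is an illustrative alternative reproducing only a subset of the flags above, not how ddf-pipeline itself does it:

import subprocess

def build_killms_args(msname, imagename, outsols, niterkf=6, robust=None):
    # List-based equivalent of the string concatenation above.
    args = ['killMS.py', '--MSName', msname,
            '--SolverType', 'KAFCA', '--PolMode', 'Scalar',
            '--BaseImageName', imagename,
            '--NIterKF', str(niterkf), '--OutSolsName', outsols]
    if robust is None:
        args += ['--Weighting', 'Natural']
    else:
        args += ['--Weighting', 'Briggs', '--Robust=%f' % robust]
    return args

# Hypothetical names:
# subprocess.check_call(build_killms_args('L123456.ms', 'image_dirin', 'DIS0'))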
Code example #4
File: pipeline.py  Project: alexmatze/ddf-pipeline
def mask_dicomodel(indico, maskname, outdico, catcher=None):
    if catcher: catcher.check()

    if o['restart'] and os.path.isfile(outdico):
        warn('File ' + outdico +
             ' already exists, skipping MaskDicoModel step')
        return False
    else:
        runcommand = "MaskDicoModel.py --MaskName=%s --InDicoModel=%s --OutDicoModel=%s" % (
            maskname, indico, outdico)
        run(runcommand,
            dryrun=o['dryrun'],
            log=logfilename('MaskDicoModel-' + maskname + '.log'),
            quiet=o['quiet'])
        return True
Code example #5
File: pipeline.py  Project: alexmatze/ddf-pipeline
def make_model(maskname, imagename, catcher=None):
    # returns True if the step was run, False if skipped
    if catcher: catcher.check()

    fname = imagename + '.npy'
    if o['restart'] and os.path.isfile(fname):
        warn('File ' + fname + ' already exists, skipping MakeModel step')
        return False
    else:
        runcommand = "MakeModel.py --MaskName=%s --BaseImageName=%s --NCluster=%i --DoPlot=0" % (
            maskname, imagename, o['ndir'])
        run(runcommand,
            dryrun=o['dryrun'],
            log=logfilename('MakeModel-' + maskname + '.log'),
            quiet=o['quiet'])
        return True
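
Because make_model returns True when the step ran and False when it was skipped, callers can make downstream work conditional on it. A minimal usage sketch, assuming the pipeline globals (o, run, logfilename) are in scope; the file names are placeholders:

if make_model('external_mask.fits', 'image_dirin_SSD'):
    print('MakeModel ran; the clustering file image_dirin_SSD.npy is fresh')
else:
    print('MakeModel skipped; reusing the existing .npy clustering file')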
Code example #6
File: pipeline.py  Project: alexmatze/ddf-pipeline
def ddf_shift(imagename, shiftfile, catcher=None, options=None, verbose=False):
    if catcher: catcher.check()
    if options is None:
        options = o  # attempt to get global if it exists

    cache_dir = find_cache_dir(options)

    runcommand = 'DDF.py ' + imagename + '.parset --Output-Name=' + imagename + '_shift --Image-Mode=RestoreAndShift --Output-ShiftFacetsFile=' + shiftfile + ' --Predict-InitDicoModel ' + imagename + '.DicoModel --Cache-SmoothBeam=force --Cache-Dir=' + cache_dir

    fname = imagename + '_shift.app.facetRestored.fits'
    if options['restart'] and os.path.isfile(fname):
        warn('File ' + fname + ' already exists, skipping DDF-shift step')
        if verbose:
            print('would have run', runcommand)
    else:
        run(runcommand,
            dryrun=options['dryrun'],
            log=logfilename('DDF-' + imagename + '_shift.log',
                            options=options),
            quiet=options['quiet'])
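
A hypothetical invocation of ddf_shift (all names are placeholders): it expects <imagename>.parset and <imagename>.DicoModel to have been left on disk by an earlier DDF run, plus a facet shift file, and writes <imagename>_shift.app.facetRestored.fits.

# Placeholder names; a prior DDF.py run must have produced
# image_full.parset and image_full.DicoModel for RestoreAndShift to work.
ddf_shift('image_full', 'facet-offsets.txt', options=o)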
Code example #7
def image_vlow(wd=None):
    if wd is not None:
        os.chdir(wd)
    update_status(None, 'Running')
    run('CleanSHM.py')
    run('DDF.py --Output-Name=image_full_vlow_nocut --Data-MS=big-mslist.txt --Deconv-PeakFactor 0.001000 --Data-ColName DATA --Parallel-NCPU=%i --Beam-CenterNorm=1 --Deconv-CycleFactor=0 --Deconv-MaxMinorIter=1000000 --Deconv-MaxMajorIter=2 --Deconv-Mode SSD --Beam-Model=LOFAR --Beam-LOFARBeamMode=A --Weight-Robust -0.20000 --Image-NPix=2000 --CF-wmax 50000 --CF-Nw 100 --Output-Also onNeds --Image-Cell 15.00000 --Facets-NFacets=11 --SSDClean-NEnlargeData 0 --Freq-NDegridBand 1 --Beam-NBand 1 --Facets-DiamMax 1.5 --Facets-DiamMin 0.1 --Deconv-RMSFactor=3.000000 --SSDClean-ConvFFTSwitch 10000 --Data-Sort 1 --Cache-Dir=. --Log-Memory 1 --GAClean-RMSFactorInitHMP 1.000000 --GAClean-MaxMinorIterInitHMP 10000.000000 --GAClean-AllowNegativeInitHMP True --DDESolutions-SolsDir=SOLSDIR --Cache-Weight=reset --Output-Mode=Clean --Output-RestoringBeam 60.000000 --Weight-ColName="IMAGING_WEIGHT" --Freq-NBand=2 --RIME-DecorrMode=FT --SSDClean-SSDSolvePars [S,Alpha] --SSDClean-BICFactor 0 --Mask-Auto=1 --Mask-SigTh=4.00 --DDESolutions-GlobalNorm=None --DDESolutions-DDModeGrid=AP --DDESolutions-DDModeDeGrid=AP --DDESolutions-DDSols=[DDS3_full_smoothed,DDS3_full_slow] --Selection-UVRangeKm=[0.000000,7.0] --GAClean-MinSizeInit=10 --Beam-Smooth=1 --Debug-Pdb=never'
        % getcpus())
    vlowmask = make_mask('image_full_vlow_nocut.app.restored.fits', 3.0)
    run('DDF.py --Output-Name=image_full_vlow_nocut_m --Data-MS=big-mslist.txt --Deconv-PeakFactor 0.001000 --Data-ColName DATA --Parallel-NCPU=%i --Beam-CenterNorm=1 --Deconv-CycleFactor=0 --Deconv-MaxMinorIter=1000000 --Deconv-MaxMajorIter=2 --Deconv-Mode SSD --Beam-Model=LOFAR --Beam-LOFARBeamMode=A --Weight-Robust -0.20000 --Image-NPix=2000 --CF-wmax 50000 --CF-Nw 100 --Output-Also onNeds --Image-Cell 15.00000 --Facets-NFacets=11 --SSDClean-NEnlargeData 0 --Freq-NDegridBand 1 --Beam-NBand 1 --Facets-DiamMax 1.5 --Facets-DiamMin 0.1 --Deconv-RMSFactor=3.000000 --SSDClean-ConvFFTSwitch 10000 --Data-Sort 1 --Cache-Dir=. --Log-Memory 1 --GAClean-RMSFactorInitHMP 1.000000 --GAClean-MaxMinorIterInitHMP 10000.000000 --GAClean-AllowNegativeInitHMP True --DDESolutions-SolsDir=SOLSDIR --Cache-Weight=reset --Output-Mode=Clean --Output-RestoringBeam 60.000000 --Weight-ColName="IMAGING_WEIGHT" --Freq-NBand=2 --RIME-DecorrMode=FT --SSDClean-SSDSolvePars [S,Alpha] --SSDClean-BICFactor 0 --Mask-Auto=1 --Mask-SigTh=3.00 --DDESolutions-GlobalNorm=None --DDESolutions-DDModeGrid=AP --DDESolutions-DDModeDeGrid=AP --DDESolutions-DDSols=[DDS3_full_smoothed,DDS3_full_slow] --Selection-UVRangeKm=[0.000000,7.0] --GAClean-MinSizeInit=10 --Beam-Smooth=1 --Debug-Pdb=never --Predict-InitDicoModel=image_full_vlow_nocut.DicoModel --Mask-External=%s'
        % (getcpus(), vlowmask))
    update_status(None, 'Complete')
Code example #8
def run_wsclean(useIDG=False,
                stringMultiscaleScales="0,4,8,16,32,64,128,256",
                stringMultiscaleScalesIDG="0,4,8,16,32,64",
                sizePixels=2000,
                taperGaussian=60.0,
                beamsize=60.0,
                scale=15.0,
                numberOfSubbands=6,
                uvDistanceMinLambda=0,
                IDGMode="cpu",
                name='WSCLEAN_low'):

    update_status(None, 'WSCLEAN')
    g = glob.glob('*.archive0')
    stringMSs = ' '.join(g)
    print("Imaging with multiscale CLEAN (at low-resolution)...")
    if (useIDG):
        # Generate IDG configuration file.
        print("Generating IDG configuration file...")
        with open("aconfig.txt", "w") as configurationFile:
            configurationFile.write(
                "aterms=[beam]\nbeam.differential = true\nbeam.update_interval = 600\nbeam.usechannelfreq = true"
            )

        command = "wsclean -no-update-model-required -size " + str(
            sizePixels
        ) + " " + str(
            sizePixels
        ) + " -reorder -weight briggs -0.5 -weighting-rank-filter 3 -clean-border 1 -mgain 0.8 -no-fit-beam -data-column DATA -join-channels -channels-out " + str(
            numberOfSubbands
        ) + " -padding 1.2 -multiscale -multiscale-scales " + stringMultiscaleScalesIDG + " -auto-mask 3.0 -auto-threshold 2.5 -taper-gaussian " + str(
            taperGaussian
        ) + "arcsec -circular-beam -beam-size " + str(
            beamsize
        ) + "arcsec -pol i -name " + name + " -scale " + str(
            scale
        ) + "arcsec -niter 100000 -minuv-l " + str(
            uvDistanceMinLambda
        ) + " -use-idg -idg-mode " + IDGMode + " -aterm-kernel-size 16 -aterm-config aconfig.txt " + stringMSs
    else:
        command = "wsclean -no-update-model-required -apply-primary-beam -use-differential-lofar-beam -size " + str(
            sizePixels
        ) + " " + str(
            sizePixels
        ) + " -reorder -weight briggs -0.5 -weighting-rank-filter 3 -clean-border 1 -mgain 0.8 -no-fit-beam -data-column DATA -join-channels -channels-out " + str(
            numberOfSubbands
        ) + " -padding 1.2 -multiscale -multiscale-scales " + stringMultiscaleScales + " -auto-mask 3.0 -auto-threshold 2.5 -taper-gaussian " + str(
            taperGaussian) + "arcsec -circular-beam -beam-size " + str(
                beamsize) + "arcsec -pol i -name " + name + " -scale " + str(
                    scale) + "arcsec -niter 100000 -minuv-l " + str(
                        uvDistanceMinLambda
                    ) + " -baseline-averaging 10.0 " + stringMSs

    print(command)
    run(command)
    print('Now correcting for beam...')

    f = 'image_full_vlow_nocut_m.app.restored.fits'
    dapp = fits.open(f)
    dint = fits.open(f.replace('app', 'int'))
    beam = dint[0].data / dapp[0].data
    wsuc = fits.open('WSCLEAN_low-MFS-image.fits')
    wsuc[0].data *= beam[0, 0, 12:-13, 12:-13]
    wsuc.writeto('WSCLEAN_low-MFS-image-int.fits')

    update_status(None, 'WSCLEAN complete')
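
The closing lines recover the primary-beam factor as the ratio of the integrated ('int') to apparent ('app') DDF restored images, then multiply the apparent WSClean image by it; the [12:-13] slices presumably crop the slightly larger DDF map onto the 2000-pixel WSClean grid. A self-contained numpy sketch of the same arithmetic on synthetic arrays (the 2025-pixel DDF size is an assumption chosen so that the crop yields 2000):

import numpy as np

ny = nx = 2025                         # assumed DDF map size
app = np.full((1, 1, ny, nx), 0.5)     # apparent-flux restored image
intimg = np.full((1, 1, ny, nx), 1.0)  # true-flux restored image
beam = intimg / app                    # per-pixel primary-beam factor

ws_app = np.ones((2000, 2000))         # apparent WSClean MFS image
ws_int = ws_app * beam[0, 0, 12:-13, 12:-13]  # beam-corrected image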
Code example #9
def subtract_sources():
    update_status(None, 'Subtracting')
    run('sub-sources-outside-region.py -b fullfield -p SUB -c DATA --uselowres -t 8 -f 5'
        )
    update_status(None, 'Subtracted')
Code example #10
    #       os.system(cmd)

    # Apparently it can be dangerous to remove a column for tiled storagemanagers, comment out!
    #for ms in msfiles:
    #  removecolumn(ms, 'PREDICT_SUB')
    #  removecolumn(ms, 'DATA_SUB')

    os.system('rm -f ' + outdico)  # clean up
    os.system('rm -f ' + outmask)

    if boxfile != 'fullfield':
        mask_region(fullmask, boxfile, outmask)
    else:
        outmask = fullmask

    run("MaskDicoModel.py --MaskName=%s --InDicoModel=%s --OutDicoModel=%s" %
        (outmask, indico, outdico))

    #if uselowres == False:
    ##imagenpix = 20000
    #robust=-0.5
    #imagecell = 1.5
    #else:
    ##imagenpix = 6000
    #robust = -0.25
    #imagecell = 4.5
    if holesfixed:
        print('Starting DDF for prediction')
        if args['h5sols'] is not None:
            run("DDF.py --Output-Name=image_dd_SUB --Data-ChunkHours=" +
                str(args['chunkhours']) + " --Data-MS=" + args['mslist'] +
                " --Deconv-PeakFactor 0.001000 --Data-ColName " +
Code example #11
File: pipeline.py  Project: alexmatze/ddf-pipeline
def ddf_image(imagename,
              mslist,
              cleanmask=None,
              cleanmode='HMP',
              ddsols=None,
              applysols=None,
              threshold=None,
              majorcycles=3,
              use_dicomodel=False,
              robust=0,
              beamsize=None,
              beamsize_minor=None,
              beamsize_pa=None,
              reuse_psf=False,
              reuse_dirty=False,
              verbose=False,
              saveimages=None,
              imsize=None,
              cellsize=None,
              uvrange=None,
              colname='CORRECTED_DATA',
              peakfactor=0.1,
              dicomodel_base=None,
              options=None,
              do_decorr=None,
              normalization=None,
              dirty_from_resid=False,
              clusterfile=None,
              HMPsize=None,
              automask=True,
              automask_threshold=10.0,
              smooth=False,
              noweights=False,
              cubemode=False,
              apply_weights=True,
              catcher=None,
              rms_factor=3.0):

    if catcher: catcher.check()

    # saveimages lists _additional_ images to save
    if saveimages is None:
        saveimages = ''
    saveimages += 'onNeds'
    if options is None:
        options = o  # attempt to get global if it exists

    if HMPsize is None:
        HMPsize = options['HMPsize']
    if do_decorr is None:
        do_decorr = options['do_decorr']
    if beamsize is None:
        beamsize = options['psf_arcsec']
    if imsize is None:
        imsize = options['imsize']
    if cellsize is None:
        cellsize = options['cellsize']

    cache_dir = find_cache_dir(options)

    if majorcycles > 0:
        fname = imagename + '.app.restored.fits'
    else:
        fname = imagename + '.dirty.fits'

    runcommand = "DDF.py --Output-Name=%s --Data-MS=%s --Deconv-PeakFactor %f --Data-ColName %s --Parallel-NCPU=%i --Image-Mode=Clean --Beam-CenterNorm=1 --Deconv-CycleFactor=0 --Deconv-MaxMinorIter=1000000 --Deconv-MaxMajorIter=%s --Deconv-Mode %s --Beam-Model=LOFAR --Beam-LOFARBeamMode=A --Weight-Robust %f --Image-NPix=%i --CF-wmax 50000 --CF-Nw 100 --Output-Also %s --Image-Cell %f --Facets-NFacets=11 --SSDClean-NEnlargeData 0 --Freq-NDegridBand 1 --Beam-NBand 1 --Facets-DiamMax 1.5 --Facets-DiamMin 0.1 --Deconv-RMSFactor=%f --Data-Sort 1 --Cache-Dir=%s" % (
        imagename, mslist, peakfactor, colname, options['NCPU_DDF'],
        majorcycles, cleanmode, robust, imsize, saveimages, float(cellsize),
        rms_factor, cache_dir)

    if beamsize_minor is not None:
        runcommand += ' --Output-RestoringBeam %f,%f,%f' % (
            beamsize, beamsize_minor, beamsize_pa)
    elif beamsize is not None:
        runcommand += ' --Output-RestoringBeam %f' % (beamsize)

    if apply_weights:
        runcommand += ' --Weight-ColName="IMAGING_WEIGHT"'
    else:
        runcommand += ' --Weight-ColName="None"'

    if cubemode:
        channels = len(open(mslist).readlines())
        runcommand += ' --Output-Cubes I --Freq-NBand=%i' % channels
    else:
        runcommand += ' --Freq-NBand=2'

    if do_decorr:
        runcommand += ' --RIME-DecorrMode=FT'

    if cleanmode == 'SSD':
        runcommand += ' --SSDClean-SSDSolvePars [S,Alpha] --SSDClean-BICFactor 0'
    if clusterfile is not None:
        runcommand += ' --Facets-CatNodes=%s' % clusterfile
    if automask:
        runcommand += ' --Mask-Auto=1 --Mask-SigTh=%.2f' % automask_threshold
    if cleanmask is not None:
        runcommand += ' --Mask-External=%s' % cleanmask
    if applysols is not None:
        if normalization is not None:
            if normalization[:3] == 'Abs':
                normalization = 'Mean' + normalization  # backward compat. hack
            runcommand += ' --DDESolutions-GlobalNorm=' + normalization
        runcommand += ' --DDESolutions-DDModeGrid=%s --DDESolutions-DDModeDeGrid=%s --DDESolutions-DDSols=%s' % (
            applysols, applysols, ddsols)
    if use_dicomodel:
        if dicomodel_base is not None:
            runcommand += ' --Predict-InitDicoModel=%s.DicoModel' % dicomodel_base
        else:
            raise RuntimeError(
                'use_dicomodel is set but no dicomodel supplied')
    if threshold is not None:
        runcommand += ' --Deconv-FluxThreshold=%f' % threshold
    if uvrange is not None:
        runcommand += ' --Selection-UVRangeKm=[%f,%f]' % (uvrange[0],
                                                          uvrange[1])
    if dirty_from_resid and reuse_dirty:
        raise RuntimeError('Cannot combine reuse_dirty and dirty_from_resid')
    if dirty_from_resid:
        # possible that crashes could destroy the cache, so need to check
        if os.path.exists(cache_dir + '/' + mslist + '.ddfcache/LastResidual'):
            runcommand += ' --Cache-Dirty forceresidual'
    if reuse_dirty:
        if os.path.exists(cache_dir + '/' + mslist + '.ddfcache/Dirty'):
            runcommand += ' --Cache-Dirty forcedirty'
    if reuse_psf:
        if os.path.exists(cache_dir + '/' + mslist + '.ddfcache/PSF'):
            runcommand += ' --Cache-PSF force'

    if HMPsize is not None:
        runcommand += ' --SSDClean-MinSizeInitHMP=%i' % HMPsize

    if options['nobar']:
        runcommand += ' --Log-Boring=1'

    if smooth:
        runcommand += ' --Beam-Smooth=1'

    if options['restart'] and os.path.isfile(fname):
        warn('File ' + fname + ' already exists, skipping DDF step')
        if verbose:
            print('would have run', runcommand)
    else:
        run(runcommand,
            dryrun=options['dryrun'],
            log=logfilename('DDF-' + imagename + '.log', options=options),
            quiet=options['quiet'])
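
A representative call, modelled on the bootstrap code in example #13 below (the obsid in the image name is a placeholder; the robust and uvrange values follow example #15): with majorcycles=0 only a dirty map is made, so the restart check looks for <imagename>.dirty.fits rather than <imagename>.app.restored.fits.

ddf_image('image_bootstrap_L123456_init', 'temp_mslist.txt',
          cleanmode='SSD', ddsols='DDS0', applysols='P',
          majorcycles=0, robust=-0.25, uvrange=[0.1, 25.0],
          options=o, colname='DATA_DI_CORRECTED',
          automask=True, automask_threshold=15,
          smooth=True, cubemode=True)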
Code example #12
File: pipeline.py  Project: alexmatze/ddf-pipeline
    uvrange = [o['image_uvmin'], o['uvmax']]
    killms_uvrange = [0, 1000]
    if o['solutions_uvmin'] is not None:
        killms_uvrange[0] = o['solutions_uvmin']
    if o['mslist'] is None:
        die('MS list must be specified')

    if o['logging'] is not None and not os.path.isdir(o['logging']):
        os.mkdir(o['logging'])

    # Set column name for first steps
    colname = o['colname']

    # Clear the shared memory
    run('CleanSHM.py', dryrun=o['dryrun'])

    # Check imaging weights -- needed before DDF
    new = check_imaging_weight(o['mslist'])

    if o['clearcache'] or new or o['redofrom']:
        # Clear the cache, we don't know where it's been. If this is a
        # completely new dataset it is always safe (and required) to
        # clear the cache -- solves problems where the cache is not
        # stored per dataset. If we are redoing, cache needs to be removed
        full_clearcache(o)

    if o['redofrom']:

        if not os.path.isdir(o['archive_dir']):
            os.mkdir(o['archive_dir'])
Code example #13
def run_bootstrap(o):

    colname = 'DATA_DI_CORRECTED'

    if o['mslist'] is None:
        die('MS list must be specified')

    if o['logging'] is not None and not os.path.isdir(o['logging']):
        os.mkdir(o['logging'])

    # check the data supplied
    if o['frequencies'] is None or o['catalogues'] is None:
        die('Frequencies and catalogues options must be specified')

    if "DDF_PIPELINE_CATALOGS" not in os.environ.keys():
        warn(
            "You need to define the environment variable DDF_PIPELINE_CATALOGS where your catalogs are located"
        )
        sys.exit(2)

    o["tgss"] = o["tgss"].replace("$$", os.environ["DDF_PIPELINE_CATALOGS"])
    o["catalogues"] = [
        l.replace("$$", os.environ["DDF_PIPELINE_CATALOGS"])
        for l in o["catalogues"]
    ]
    lCat = o["catalogues"] + [o["tgss"]]
    for fCat in lCat:
        if not os.path.isfile(fCat):
            warn("Catalog %s does not exist" % fCat)
            sys.exit(2)

    cl = len(o['catalogues'])
    if o['names'] is None:
        o['names'] = [
            os.path.basename(x).replace('.fits', '') for x in o['catalogues']
        ]
    if o['radii'] is None:
        o['radii'] = [10] * cl
    if o['groups'] is None:
        o['groups'] = range(cl)
    if (len(o['frequencies']) != cl or len(o['radii']) != cl
            or len(o['names']) != cl or len(o['groups']) != cl):
        die('Names, groups, radii and frequencies entries must be the same length as the catalogue list'
            )

    low_uvrange = [o['image_uvmin'], 2.5 * 206.0 / o['low_psf_arcsec']]
    if o['low_imsize'] is not None:
        low_imsize = o['low_imsize']  # allow over-ride
    else:
        low_imsize = o['imsize'] * o['cellsize'] / o['low_cell']

    low_robust = o['low_robust']

    # Clear the shared memory
    run('CleanSHM.py', dryrun=o['dryrun'])

    # We use the individual ms in mslist.
    m = MSList(o['mslist'])
    Uobsid = set(m.obsids)

    for obsid in Uobsid:

        warn('Running bootstrap for obsid %s' % obsid)

        freqs = []
        omslist = []
        for ms, ob, f in zip(m.mss, m.obsids, m.freqs):
            if ob == obsid:
                omslist.append(ms)
                freqs.append(f)

        if len(freqs) < 4:
            die('Not enough frequencies to bootstrap. Check your mslist or MS naming scheme'
                )

        # sort to work in frequency order

        freqs, omslist = (list(x) for x in zip(
            *sorted(zip(freqs, omslist), key=lambda pair: pair[0])))

        for f, ms in zip(freqs, omslist):
            print(ms, f)

        # generate the sorted input mslist
        with open('temp_mslist.txt', 'w') as f:
            for line in omslist:
                f.write(line + '\n')

        # Clean in cube mode
        # As for the main pipeline, first make a dirty map
        ddf_image('image_bootstrap_' + obsid + '_init',
                  'temp_mslist.txt',
                  cleanmask=None,
                  cleanmode='SSD',
                  ddsols='DDS0',
                  applysols='P',
                  majorcycles=0,
                  robust=low_robust,
                  uvrange=low_uvrange,
                  beamsize=o['low_psf_arcsec'],
                  imsize=low_imsize,
                  cellsize=o['low_cell'],
                  options=o,
                  colname=colname,
                  automask=True,
                  automask_threshold=15,
                  smooth=True,
                  cubemode=True,
                  conditional_clearcache=True)
        external_mask = 'bootstrap_external_mask.fits'
        make_external_mask(external_mask,
                           'image_bootstrap_' + obsid + '_init.dirty.fits',
                           use_tgss=True,
                           clobber=False,
                           cellsize='low_cell',
                           options=o)
        # Deep SSD clean with this external mask and automasking
        ddf_image('image_bootstrap_' + obsid,
                  'temp_mslist.txt',
                  cleanmask=external_mask,
                  reuse_psf=True,
                  reuse_dirty=True,
                  cleanmode='SSD',
                  ddsols='DDS0',
                  applysols='P',
                  majorcycles=5,
                  robust=low_robust,
                  uvrange=low_uvrange,
                  beamsize=o['low_psf_arcsec'],
                  imsize=low_imsize,
                  cellsize=o['low_cell'],
                  options=o,
                  colname=colname,
                  automask=True,
                  automask_threshold=15,
                  smooth=True,
                  cubemode=True,
                  conditional_clearcache=False)

        if os.path.isfile('image_bootstrap_' + obsid +
                          '.cube.int.restored.pybdsm.srl'):
            warn('Source list exists, skipping source extraction')
        else:
            warn('Running PyBDSM, please wait...')
            img = bdsm.process_image('image_bootstrap_' + obsid +
                                     '.cube.int.restored.fits',
                                     thresh_pix=5,
                                     rms_map=True,
                                     atrous_do=True,
                                     atrous_jmax=2,
                                     group_by_isl=True,
                                     rms_box=(80, 20),
                                     adaptive_rms_box=True,
                                     adaptive_thresh=80,
                                     rms_box_bright=(35, 7),
                                     mean_map='zero',
                                     spectralindex_do=True,
                                     specind_maxchan=1,
                                     debug=True,
                                     kappa_clip=3,
                                     flagchan_rms=False,
                                     flagchan_snr=False,
                                     incl_chan=True,
                                     spline_rank=1)
            # Write out in ASCII to work round bug in pybdsm
            img.write_catalog(catalog_type='srl',
                              format='ascii',
                              incl_chan='true')
            img.export_image(img_type='rms', img_format='fits')

        from make_fitting_product import make_catalogue
        import fitting_factors
        import find_outliers

        # generate the fitting product
        if os.path.isfile(obsid + 'crossmatch-1.fits'):
            warn('Crossmatch table exists, skipping crossmatch')
        else:
            t = pt.table(omslist[0] + '/FIELD', readonly=True, ack=False)
            direction = t[0]['PHASE_DIR']
            ra, dec = direction[0]

            if (ra < 0):
                ra += 2 * np.pi
            ra *= 180.0 / np.pi
            dec *= 180.0 / np.pi

            cats = zip(o['catalogues'], o['names'], o['groups'], o['radii'])
            make_catalogue('image_bootstrap_' + obsid +
                           '.cube.int.restored.pybdsm.srl',
                           ra,
                           dec,
                           2.5,
                           cats,
                           outnameprefix=obsid)

        freqlist = open(obsid + 'frequencies.txt', 'w')
        for n, f in zip(o['names'], o['frequencies']):
            freqlist.write('%f %s_Total_flux %s_E_Total_flux False\n' %
                           (f, n, n))
        for i, f in enumerate(freqs):
            freqlist.write('%f Total_flux_ch%i E_Total_flux_ch%i True\n' %
                           (f, i + 1, i + 1))
        freqlist.close()

        # Now call the fitting code

        if os.path.isfile(obsid + 'crossmatch-results-1.npy'):
            warn('Results 1 exists, skipping first fit')
        else:
            fitting_factors.run_all(1, name=obsid)

        nreject = -1  # avoid error if we fail somewhere
        if os.path.isfile(obsid + 'crossmatch-2.fits'):
            warn('Second crossmatch exists, skipping outlier rejection')
        else:
            nreject = find_outliers.run_all(1, name=obsid)

        if os.path.isfile(obsid + 'crossmatch-results-2.npy'):
            warn('Results 2 exists, skipping second fit')
        else:
            if nreject == 0:
                shutil.copyfile(obsid + 'crossmatch-results-1.npy',
                                obsid + 'crossmatch-results-2.npy')
        if os.path.isfile(obsid + 'crossmatch-results-2.npy'):
            warn('Results 2 exists, skipping second fit')
        else:
            fitting_factors.run_all(2, name=obsid)

        # Now apply corrections

        if o['full_mslist'] is None:
            die('Need big mslist to apply corrections')
        if not (o['dryrun']):
            warn('Applying corrections to MS list')
            scale = np.load(obsid + 'crossmatch-results-2.npy')[:, 0]
            # InterpolatedUS gives us linear interpolation between points
            # and extrapolation outside it
            spl = InterpolatedUnivariateSpline(freqs, scale, k=1)

            bigmslist = [s.strip() for s in open(o['full_mslist']).readlines()]
            obigmslist = [ms for ms in bigmslist if obsid in ms]

            for ms in obigmslist:
                t = pt.table(ms)
                try:
                    dummy = t.getcoldesc('SCALED_DATA')
                except RuntimeError:
                    dummy = None
                t.close()
                if dummy is not None:
                    warn('Table ' + ms +
                         ' has already been corrected, skipping')
                else:
                    # in this version we need to scale both the original data and the data in colname
                    t = pt.table(ms + '/SPECTRAL_WINDOW',
                                 readonly=True,
                                 ack=False)
                    frq = t[0]['REF_FREQUENCY']
                    factor = spl(frq)
                    print(frq, factor)
                    t = pt.table(ms, readonly=False)
                    desc = t.getcoldesc(o['colname'])
                    desc['name'] = 'SCALED_DATA'
                    t.addcols(desc)
                    d = t.getcol(o['colname'])
                    d *= factor
                    t.putcol('SCALED_DATA', d)
                    try:
                        dummy = t.getcoldesc(colname)
                    except RuntimeError:
                        dummy = None
                    if dummy is not None:
                        desc = t.getcoldesc(colname)
                        newname = colname + '_SCALED'
                        desc['name'] = newname
                        t.addcols(desc)
                        d = t.getcol(colname)
                        d *= factor
                        t.putcol(newname, d)

                    t.close()
    if os.path.isfile('image_bootstrap.app.mean.fits'):
        warn('Mean bootstrap image exists, not creating it')
    else:
        warn('Creating mean bootstrap image')
        hdus = []
        for obsid in Uobsid:
            hdus.append(
                fits.open('image_bootstrap_' + obsid + '.app.restored.fits'))
        for i in range(1, len(Uobsid)):
            hdus[0][0].data += hdus[i][0].data
        hdus[0][0].data /= len(Uobsid)
        hdus[0].writeto('image_bootstrap.app.mean.fits')
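
The flux-scale application above interpolates the per-band bootstrap factors with InterpolatedUnivariateSpline at k=1, i.e. piecewise-linear interpolation with linear extrapolation beyond the sampled band, evaluated at each MS's REF_FREQUENCY. A self-contained illustration with made-up numbers:

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline

freqs = np.array([120e6, 130e6, 140e6, 150e6])  # made-up band centres (Hz)
scale = np.array([1.10, 1.05, 1.02, 1.00])      # made-up bootstrap factors

spl = InterpolatedUnivariateSpline(freqs, scale, k=1)
print(spl(135e6))  # interpolated inside the sampled band
print(spl(155e6))  # linearly extrapolated beyond the last point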
Code example #14
File: bootstrap.py  Project: mhardcastle/ddf-pipeline
def run_bootstrap(o):
    
    colname='DATA_DI_CORRECTED'
    
    if o['mslist'] is None:
        die('MS list must be specified')

    if o['logging'] is not None and not os.path.isdir(o['logging']):
        os.mkdir(o['logging'])

    # check the data supplied
    if o['frequencies'] is None or o['catalogues'] is None:
        die('Frequencies and catalogues options must be specified')

    if "DDF_PIPELINE_CATALOGS" not in os.environ.keys():
        warn("You need to define the environment variable DDF_PIPELINE_CATALOGS where your catalogs are located")
        sys.exit(2)

    o["tgss"]=o["tgss"].replace("$$",os.environ["DDF_PIPELINE_CATALOGS"])
    o["catalogues"]=[l.replace("$$",os.environ["DDF_PIPELINE_CATALOGS"]) for l in o["catalogues"]]
    lCat=o["catalogues"]+[o["tgss"]]
    for fCat in lCat:
        if not os.path.isfile(fCat):
            warn("Catalog %s does not exist"%fCat)
            sys.exit(2)

    cl=len(o['catalogues'])
    if o['names'] is None:
        o['names']=[os.path.basename(x).replace('.fits','') for x in o['catalogues']]
    if o['radii'] is None:
        o['radii']=[10]*cl
    if o['groups'] is None:
        o['groups']=range(cl)
    if (len(o['frequencies'])!=cl or len(o['radii'])!=cl or
        len(o['names'])!=cl or len(o['groups'])!=cl):
        die('Names, groups, radii and frequencies entries must be the same length as the catalogue list')

    low_uvrange=[o['image_uvmin'],2.5*206.0/o['low_psf_arcsec']]
    if o['low_imsize'] is not None:
        low_imsize=o['low_imsize'] # allow over-ride
    else:
        low_imsize=o['imsize']*o['cellsize']/o['low_cell']

    low_robust=o['low_robust']

    # Clear the shared memory
    run('CleanSHM.py',dryrun=o['dryrun'])

    # We use the individual ms in mslist.
    m=MSList(o['mslist'])
    Uobsid = set(m.obsids)
    
    for obsid in Uobsid:
        
        warn('Running bootstrap for obsid %s' % obsid)

        freqs=[]
        omslist=[]
        for ms,ob,f in zip(m.mss,m.obsids,m.freqs):
            if ob==obsid:
                omslist.append(ms)
                freqs.append(f)

        if len(freqs)<4:
            die('Not enough frequencies to bootstrap. Check your mslist or MS naming scheme')

        # sort to work in frequency order

        freqs,omslist = (list(x) for x in zip(*sorted(zip(freqs, omslist), key=lambda pair: pair[0])))

        for f,ms in zip(freqs,omslist):
            print(ms,f)

        # generate the sorted input mslist
        with open('temp_mslist.txt','w') as f:
            for line in omslist:
                f.write(line+'\n')

        # Clean in cube mode
        # As for the main pipeline, first make a dirty map
        ddf_image('image_bootstrap_'+obsid+'_init','temp_mslist.txt',
                  cleanmask=None,cleanmode='SSD',ddsols='DDS0',
                  applysols='P',majorcycles=0,robust=low_robust,
                  uvrange=low_uvrange,beamsize=o['low_psf_arcsec'],
                  imsize=low_imsize,cellsize=o['low_cell'],
                  options=o,colname=colname,automask=True,
                  automask_threshold=15,smooth=True,cubemode=True,
                  conditional_clearcache=True)
        external_mask='bootstrap_external_mask.fits'
        make_external_mask(external_mask,'image_bootstrap_'+obsid+'_init.dirty.fits',use_tgss=True,clobber=False,cellsize='low_cell',options=o)
        # Deep SSD clean with this external mask and automasking
        ddf_image('image_bootstrap_'+obsid,'temp_mslist.txt',
                  cleanmask=external_mask,reuse_psf=True,reuse_dirty=True,
                  cleanmode='SSD',ddsols='DDS0',applysols='P',
                  majorcycles=5,robust=low_robust,uvrange=low_uvrange,
                  beamsize=o['low_psf_arcsec'],imsize=low_imsize,
                  cellsize=o['low_cell'],options=o,
                  colname=colname,automask=True,
                  automask_threshold=15,smooth=True,cubemode=True,
                  conditional_clearcache=False)

        if os.path.isfile('image_bootstrap_'+obsid+'.cube.int.restored.pybdsm.srl'):
            warn('Source list exists, skipping source extraction')
        else:
            warn('Running PyBDSM, please wait...')
            img=bdsm.process_image('image_bootstrap_'+obsid+'.cube.int.restored.fits',thresh_pix=5,rms_map=True,atrous_do=True,atrous_jmax=2,group_by_isl=True,rms_box=(80,20), adaptive_rms_box=True, adaptive_thresh=80, rms_box_bright=(35,7),mean_map='zero',spectralindex_do=True,specind_maxchan=1,debug=True,kappa_clip=3,flagchan_rms=False,flagchan_snr=False,incl_chan=True,spline_rank=1)
            # Write out in ASCII to work round bug in pybdsm
            img.write_catalog(catalog_type='srl',format='ascii',incl_chan='true')
            img.export_image(img_type='rms',img_format='fits')

        from make_fitting_product import make_catalogue
        import fitting_factors
        import find_outliers

        # generate the fitting product
        if os.path.isfile(obsid+'crossmatch-1.fits'):
            warn('Crossmatch table exists, skipping crossmatch')
        else:
            t = pt.table(omslist[0]+ '/FIELD', readonly=True, ack=False)
            direction = t[0]['PHASE_DIR']
            ra, dec = direction[0]

            if (ra<0):
                ra+=2*np.pi
            ra*=180.0/np.pi
            dec*=180.0/np.pi

            cats=zip(o['catalogues'],o['names'],o['groups'],o['radii'])
            make_catalogue('image_bootstrap_'+obsid+'.cube.int.restored.pybdsm.srl',ra,dec,2.5,cats,outnameprefix=obsid)
    
        freqlist=open(obsid+'frequencies.txt','w')
        for n,f in zip(o['names'],o['frequencies']):
            freqlist.write('%f %s_Total_flux %s_E_Total_flux False\n' % (f,n,n))
        for i,f in enumerate(freqs):
            freqlist.write('%f Total_flux_ch%i E_Total_flux_ch%i True\n' % (f,i+1,i+1))
        freqlist.close()

        # Now call the fitting code

        if os.path.isfile(obsid+'crossmatch-results-1.npy'):
            warn('Results 1 exists, skipping first fit')
        else:
            fitting_factors.run_all(1, name=obsid)

        nreject=-1 # avoid error if we fail somewhere
        if os.path.isfile(obsid+'crossmatch-2.fits'):
            warn('Second crossmatch exists, skipping outlier rejection')
        else:
            nreject=find_outliers.run_all(1, name=obsid)
    
        if os.path.isfile(obsid+'crossmatch-results-2.npy'):
            warn('Results 2 exists, skipping second fit')
        else:
            if nreject==0:
                shutil.copyfile(obsid+'crossmatch-results-1.npy',obsid+'crossmatch-results-2.npy')
        if os.path.isfile(obsid+'crossmatch-results-2.npy'):
            warn('Results 2 exists, skipping second fit')
        else:
            fitting_factors.run_all(2, name=obsid)

        # Now apply corrections

        if o['full_mslist'] is None:
            die('Need big mslist to apply corrections')
        if not(o['dryrun']):
            warn('Applying corrections to MS list')
            scale=np.load(obsid+'crossmatch-results-2.npy')[:,0]
            # InterpolatedUS gives us linear interpolation between points
            # and extrapolation outside it
            spl = InterpolatedUnivariateSpline(freqs, scale, k=1)
            
            bigmslist=[s.strip() for s in open(o['full_mslist']).readlines()]
            obigmslist = [ms for ms in bigmslist if obsid in ms]
            
            for ms in obigmslist:
                t = pt.table(ms)
                try:
                    dummy=t.getcoldesc('SCALED_DATA')
                except RuntimeError:
                    dummy=None
                t.close()
                if dummy is not None:
                    warn('Table '+ms+' has already been corrected, skipping')
                else:
                    # in this version we need to scale both the original data and the data in colname
                    t = pt.table(ms+'/SPECTRAL_WINDOW', readonly=True, ack=False)
                    frq=t[0]['REF_FREQUENCY']
                    factor=spl(frq)
                    print(frq,factor)
                    t=pt.table(ms,readonly=False)
                    desc=t.getcoldesc(o['colname'])
                    desc['name']='SCALED_DATA'
                    t.addcols(desc)
                    d=t.getcol(o['colname'])
                    d*=factor
                    t.putcol('SCALED_DATA',d)
                    try:
                        dummy=t.getcoldesc(colname)
                    except RuntimeError:
                        dummy=None
                    if dummy is not None:
                        desc=t.getcoldesc(colname)
                        newname=colname+'_SCALED'
                        desc['name']=newname
                        t.addcols(desc)
                        d=t.getcol(colname)
                        d*=factor
                        t.putcol(newname,d)

                    t.close()
    if os.path.isfile('image_bootstrap.app.mean.fits'):
        warn('Mean bootstrap image exists, not creating it')
    else:
        warn('Creating mean bootstrap image')
        hdus=[]
        for obsid in Uobsid:
            hdus.append(fits.open('image_bootstrap_'+obsid+'.app.restored.fits'))
        for i in range(1,len(Uobsid)):
            hdus[0][0].data+=hdus[i][0].data
        hdus[0][0].data/=len(Uobsid)
        hdus[0].writeto('image_bootstrap.app.mean.fits')
Code example #15
File: bootstrap.py  Project: alexmatze/ddf-pipeline
def run_bootstrap(o):
    
    if o['mslist'] is None:
        die('MS list must be specified')

    if o['logging'] is not None and not os.path.isdir(o['logging']):
        os.mkdir(o['logging'])

    # check the data supplied
    if o['frequencies'] is None or o['catalogues'] is None:
        die('Frequencies and catalogues options must be specified')

    cl=len(o['catalogues'])
    if o['names'] is None:
        o['names']=[os.path.basename(x).replace('.fits','') for x in o['catalogues']]
    if o['radii'] is None:
        o['radii']=[10]*cl
    if o['groups'] is None:
        o['groups']=range(cl)
    if (len(o['frequencies'])!=cl or len(o['radii'])!=cl or
        len(o['names'])!=cl or len(o['groups'])!=cl):
        die('Names, groups, radii and frequencies entries must be the same length as the catalogue list')

    low_robust=-0.25
    low_uvrange=[0.1,25.0]

    # Clear the shared memory
    run('CleanSHM.py',dryrun=o['dryrun'])


    # We use the individual ms in mslist.
    mslist=[s.strip() for s in open(o['mslist']).readlines()]
    
    obsids = [os.path.basename(ms).split('_')[0] for ms in mslist]
    Uobsid = set(obsids)
    
    for obsid in Uobsid:
        
        warn('Running bootstrap for obsid %s' % obsid)

        # Get the frequencies -- need to take this from the MSs
        
        omslist = [ms for ms in mslist if obsid in ms]

        freqs=[]
        for ms in omslist:
            t = pt.table(ms+'/SPECTRAL_WINDOW', readonly=True, ack=False)
            freqs.append(t[0]['REF_FREQUENCY'])


        # sort to work in frequency order

        freqs,omslist = (list(x) for x in zip(*sorted(zip(freqs, omslist), key=lambda pair: pair[0])))

        for f,m in zip(freqs,omslist):
            print(m,f)


        # Clean in cube mode
        with open('temp_mslist.txt','w') as f:
            for line in omslist:
                f.write(line+'\n')
        ddf_image('image_bootstrap_'+obsid,'temp_mslist.txt',cleanmode='SSD',ddsols='killms_p1',applysols='P',majorcycles=4,robust=low_robust,uvrange=low_uvrange,beamsize=20,imsize=o['bsimsize'],cellsize=o['bscell'],options=o,colname=o['colname'],automask=True,automask_threshold=15,smooth=True,cubemode=True)

        if os.path.isfile('image_bootstrap_'+obsid+'.cube.int.restored.pybdsm.srl'):
            warn('Source list exists, skipping source extraction')
        else:
            warn('Running PyBDSM, please wait...')
            img=bdsm.process_image('image_bootstrap_'+obsid+'.cube.int.restored.fits',thresh_pix=5,rms_map=True,atrous_do=True,atrous_jmax=2,group_by_isl=True,rms_box=(80,20), adaptive_rms_box=True, adaptive_thresh=80, rms_box_bright=(35,7),mean_map='zero',spectralindex_do=True,specind_maxchan=1,debug=True,kappa_clip=3,flagchan_rms=False,flagchan_snr=False,incl_chan=True,spline_rank=1)
            # Write out in ASCII to work round bug in pybdsm
            img.write_catalog(catalog_type='srl',format='ascii',incl_chan='true')
            img.export_image(img_type='rms',img_format='fits')

        from make_fitting_product import make_catalogue
        import fitting_factors
        import find_outliers

        # generate the fitting product
        if os.path.isfile(obsid+'crossmatch-1.fits'):
            warn('Crossmatch table exists, skipping crossmatch')
        else:
            t = pt.table(omslist[0]+ '/FIELD', readonly=True, ack=False)
            direction = t[0]['PHASE_DIR']
            ra, dec = direction[0]

            if (ra<0):
                ra+=2*np.pi
            ra*=180.0/np.pi
            dec*=180.0/np.pi

            cats=zip(o['catalogues'],o['names'],o['groups'],o['radii'])
            make_catalogue('image_bootstrap_'+obsid+'.cube.int.restored.pybdsm.srl',ra,dec,2.5,cats,outnameprefix=obsid)
    
        freqlist=open(obsid+'frequencies.txt','w')
        for n,f in zip(o['names'],o['frequencies']):
            freqlist.write('%f %s_Total_flux %s_E_Total_flux False\n' % (f,n,n))
        for i,f in enumerate(freqs):
            freqlist.write('%f Total_flux_ch%i E_Total_flux_ch%i True\n' % (f,i+1,i+1))
        freqlist.close()

        # Now call the fitting code

        if os.path.isfile(obsid+'crossmatch-results-1.npy'):
            warn('Results 1 exists, skipping first fit')
        else:
            fitting_factors.run_all(1, name=obsid)

        nreject=-1 # avoid error if we fail somewhere
        if os.path.isfile(obsid+'crossmatch-2.fits'):
            warn('Second crossmatch exists, skipping outlier rejection')
        else:
            nreject=find_outliers.run_all(1, name=obsid)
    
        if os.path.isfile(obsid+'crossmatch-results-2.npy'):
            warn('Results 2 exists, skipping second fit')
        else:
            if nreject==0:
                shutil.copyfile(obsid+'crossmatch-results-1.npy',obsid+'crossmatch-results-2.npy')
        if os.path.isfile(obsid+'crossmatch-results-2.npy'):
            warn('Results 2 exists, skipping second fit')
        else:
            fitting_factors.run_all(2, name=obsid)

        # Now apply corrections

        if o['full_mslist'] is None:
            die('Need big mslist to apply corrections')
        if not(o['dryrun']):
            warn('Applying corrections to MS list')
            scale=np.load(obsid+'crossmatch-results-2.npy')[:,0]
            # InterpolatedUS gives us linear interpolation between points
            # and extrapolation outside it
            spl = InterpolatedUnivariateSpline(freqs, scale, k=1)
            
            bigmslist=[s.strip() for s in open(o['full_mslist']).readlines()]
            obigmslist = [ms for ms in bigmslist if obsid in ms]
            
            for ms in obigmslist:
                t = pt.table(ms)
                try:
                    dummy=t.getcoldesc('SCALED_DATA')
                except RuntimeError:
                    dummy=None
                t.close()
                if dummy is not None:
                    warn('Table '+ms+' has already been corrected, skipping')
                else:
                    t = pt.table(ms+'/SPECTRAL_WINDOW', readonly=True, ack=False)
                    frq=t[0]['REF_FREQUENCY']
                    factor=spl(frq)
                    print(frq,factor)
                    t=pt.table(ms,readonly=False)
                    desc=t.getcoldesc(o['colname'])
                    desc['name']='SCALED_DATA'
                    t.addcols(desc)
                    d=t.getcol(o['colname'])
                    d*=factor
                    t.putcol('SCALED_DATA',d)
                    t.close()
    if os.path.isfile('image_bootstrap.app.mean.fits'):
        warn('Mean bootstrap image exists, not creating it')
    else:
        warn('Creating mean bootstrap image')
        hdus=[]
        for obsid in Uobsid:
            hdus.append(fits.open('image_bootstrap_'+obsid+'.app.restored.fits'))
        for i in range(1,len(Uobsid)):
            hdus[0][0].data+=hdus[i][0].data
        hdus[0][0].data/=len(Uobsid)
        hdus[0].writeto('image_bootstrap.app.mean.fits')
Code example #16
def compress_fits(filename,q):
    command='fpack -q %i %s' % (q,filename)
    run(command)
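
A stand-alone usage sketch: fpack's -q option sets the quantization level used when compressing floating-point FITS images, trading file size against photometric fidelity. run() is stubbed here, and the file name and q value are placeholders.

import subprocess

def run(command):
    # Stand-in for the pipeline's run() wrapper.
    subprocess.check_call(command, shell=True)

compress_fits('image_full_vlow_nocut_m.int.restored.fits', q=2)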