Example #1
    def testSingularity(self):
        global INPUT, MSDIR, OUTPUT, MS, MS_SIM

        # Start stimela Recipe instance
        pipeline = stimela.Recipe(
            "Singularity Test",  # Recipe name
            ms_dir=MSDIR,
            singularity_image_dir=os.environ["SINGULARITY_PULLFOLDER"],
        )

        pipeline.add(
            "cab/simms",  # Executor image to start container from
            "simms_example",  # Container name
            {  # Parameters to parse to executor container
                "msname": MS_SIM,
                "telescope": "kat-7",  # Telescope name
                "direction":
                "J2000,0deg,-30deg",  # Phase tracking centre of observation
                "synthesis": 0.128,  # Synthesis time of observation
                "dtime": 10,  # Integration time in seconds
                "freq0": "750MHz",  # Start frequency of observation
                "dfreq": "1MHz",  # Channel width
                "nchan": 1  # Number of channels
            },
            input=INPUT,  # Input folder
            output=OUTPUT,  # Output folder
            label="Creating MS",  # Process label
            cpus=2.5,
            memory_limit="2gb",
            time_out=300)

        # Run recipe. The 'steps' added above will be executed in the sequence
        # that they were added.
        pipeline.run()
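Steps run strictly in the order they are added, as the closing comment notes. A minimal sketch of how a follow-up step could be appended before pipeline.run() (illustrative only, not part of the original test; cab/casa_listobs is used the same way in the later examples on this page):

pipeline.add("cab/casa_listobs",   # inspect the simulated MS
             "listobs_example",
             {"vis": MS_SIM},
             input=INPUT,
             output=OUTPUT,
             label="Observation summary",  # illustrative label
             time_out=100)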
Example #2
    def test_udocker(self):
        import sys
        global MSDIR
        global INPUT
        global OUTPUT
        global UDOCKER
        if UDOCKER is False:
            return

        stimela.register_globals()
        rrr = stimela.Recipe("singularitypaths",
                             ms_dir=MSDIR,
                             JOB_TYPE="udocker",
                             cabpath="cab/",
                             log_dir="logs")
        assert os.path.exists(MSDIR)
        rrr.add("cab/custom", "test1", {
            "bla1": "a", # only accepts a, b or c
            "bla5": ["testinput2.txt:input",
                     "testinput3.txt:msfile",
                     spf("{}hello\{reim\}.fits,{}to.fits,{}world.fits", "input", "msfile", "output")],
        }, input=INPUT, output=OUTPUT)
        rrr.run() #validate and run
        assert rrr.jobs[0].job._cab.parameters[4].value[0] == os.path.join(rrr.jobs[0].job.IODEST["input"], 
                    "testinput2.txt")
        assert rrr.jobs[0].job._cab.parameters[4].value[1] == os.path.join(rrr.jobs[0].job.IODEST["msfile"],
                    "testinput3.txt")
        assert rrr.jobs[0].job._cab.parameters[4].value[2] == \
                "{}/hello{{reim}}.fits,{}/to.fits,{}/world.fits".format(
                    rrr.jobs[0].job.IODEST["input"],
                    rrr.jobs[0].job.IODEST["msfile"],
                    rrr.jobs[0].job.IODEST["output"]
                )
Example #3
 def test_iopathlist(self):
     global MSDIR
     global INPUT
     global OUTPUT
     global CABPATH
     stimela.register_globals()
     rrr = stimela.Recipe("pathlist", ms_dir=MSDIR)
     rrr.add("cab/custom", "test1", {
         "bla1": "a", # only accepts a, b or c
         "bla5": ["testinput2.txt:input",
                  "testinput3.txt:msfile",
                  spf("{}hello\{reim\}.fits,{}to.fits,{}world.fits", "input", "msfile", "output")],
     },
     cabpath=CABPATH,
     input=INPUT, output=OUTPUT)
     rrr.run() #validate and run
     assert rrr.jobs[0].job._cab.parameters[4].value[0] == os.path.join(rrr.jobs[0].job.IODEST["input"], 
             "testinput2.txt")
     assert rrr.jobs[0].job._cab.parameters[4].value[1] == os.path.join(rrr.jobs[0].job.IODEST["msfile"],
             "testinput3.txt")
     assert rrr.jobs[0].job._cab.parameters[4].value[2] == \
             "{}/hello{{reim}}.fits,{}/to.fits,{}/world.fits".format(rrr.jobs[0].job.IODEST["input"],
                 rrr.jobs[0].job.IODEST["msfile"],
                 rrr.jobs[0].job.IODEST["output"]
                 )
Example #4
    def test_singularity(self):
        global MSDIR
        global INPUT
        global OUTPUT
        global SINGULARITY
        if SINGULARITY is False:
            return

        stimela.register_globals()
        rrr = stimela.Recipe("singularitypaths",
                             ms_dir=MSDIR,
                             JOB_TYPE="singularity",
                             cabpath="cab/",
                             singularity_image_dir=os.environ["STIMELA_PULLFOLDER"],
                             log_dir="logs")
        rrr.add("cab/custom", "test1", {
            "bla1": "a", # only accepts a, b or c
            "bla5": ["testinput2.txt:input",
                     "testinput3.txt:msfile",
                     spf("{}hello\{reim\}.fits,{}to.fits,{}world.fits", "input", "msfile", "output")],
        }, input=INPUT, output=OUTPUT)
        rrr.run() #validate and run

        assert rrr.jobs[0].job._cab.parameters[4].value[0] == os.path.join(rrr.jobs[0].job.IODEST["input"], 
                    "testinput2.txt")
        assert rrr.jobs[0].job._cab.parameters[4].value[1] == os.path.join(rrr.jobs[0].job.IODEST["msfile"],
                    "testinput3.txt")
        assert rrr.jobs[0].job._cab.parameters[4].value[2] == \
                "{}/hello{{reim}}.fits,{}/to.fits,{}/world.fits".format(
                    rrr.jobs[0].job.IODEST["input"],
                    rrr.jobs[0].job.IODEST["msfile"],
                    rrr.jobs[0].job.IODEST["output"]
                )
Example #5
 def test_iopathlist(self):
     global MSDIR
     global INPUT
     global OUTPUT
     stimela.register_globals()
     rrr = stimela.Recipe("pathlist", ms_dir=MSDIR)
     assert os.path.exists(MSDIR)
     rrr.add(
         "cab/custom",
         "test1",
         {
             "bla1":
             "a",  # only accepts a, b or c
             "bla5": [
                 "testinput2.txt:input", "testinput3.txt:msfile",
                 spf("{}hello\{reim\}.fits,{}to.fits,{}world.fits", "input",
                     "msfile", "output")
             ],
         },
         input=INPUT,
         output=OUTPUT)
     rrr.run()  #validate and run
     assert rrr.jobs[0].job._cab.parameters[4].value[
         0] == "/input/testinput2.txt"
     assert rrr.jobs[0].job._cab.parameters[4].value[1] == os.path.join(
         "/", "home", os.environ["USER"], "msdir", "testinput3.txt")
     assert rrr.jobs[0].job._cab.parameters[4].value[2] == \
             "{}hello{{reim}}.fits,{}to.fits,{}world.fits".format(
                 "/input/",
                 os.path.join("/", "home", os.environ["USER"], "msdir/"),
                 os.path.join("/", "home", os.environ["USER"], "output/"),
             )
Example #6
def get_fields(pipeline,
               recipe,
               indir,
               caltable,
               cab_name="get_field_info",
               label=""):

    _recipe = stimela.Recipe(
        "get field info",
        ms_dir=recipe.ms_dir,
        JOB_TYPE=recipe.JOB_TYPE,
        singularity_image_dir=recipe.singularity_image_dir,
        log_dir=pipeline.logs)

    tfile = tempfile.NamedTemporaryFile(suffix=".json", dir=pipeline.output)
    tfile.flush()
    _recipe.add("cab/pycasacore",
                cab_name, {
                    "msname":
                    caltable + ":input",
                    "script":
                    """
from casacore.tables import table
import json
import os
import numpy
import codecs

INDIR = os.environ["INPUT"]
OUTPUT = os.environ["OUTPUT"]
tabname = os.path.join(INDIR, "{caltab:s}")

tab = table(tabname)

uf = numpy.unique(tab.getcol("FIELD_ID"))
fields = dict(field_id=list(map(int, uf)))

with codecs.open(OUTPUT+'/{fname:s}', 'w', 'utf8') as stdw:
        a = json.dumps(fields, ensure_ascii=False)
        stdw.write(a)

tab.close()
""".format(caltab=caltable, fname=os.path.basename(tfile.name)),
                },
                input=indir,
                output=pipeline.output)

    _recipe.run()

    with codecs.open(tfile.name, "r", "utf8") as stdr:
        fields = json.load(stdr)
    tfile.close()

    return fields
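For reference, a hypothetical call to the helper above; the pipeline and recipe objects, the caltables directory attribute, and the table name are illustrative stand-ins for whatever the surrounding pipeline provides:

field_info = get_fields(pipeline, recipe,
                        indir=pipeline.caltables,   # hypothetical attribute
                        caltable="meerkat_1gc.G0",  # illustrative table name
                        label="get_field_info")
# get_fields returns the dict written out by the pycasacore step, e.g.
# {"field_id": [0, 1, 2]}
print(field_info["field_id"])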
Example #7
 def test_required(self):
     global MSDIR
     global INPUT
     global OUTPUT
     stimela.register_globals()
     rrr = stimela.Recipe("testrequired", ms_dir=MSDIR)
     assert os.path.exists(MSDIR)
     rrr.add("cab/custom", "test1", {
         "bla3": 4.0,
     }, input=INPUT, output=OUTPUT)
     with self.assertRaises(PipelineException):
         rrr.run() #validate and run
Example #8
 def test_invalid_choice(self):
     global MSDIR
     global INPUT
     global OUTPUT
     stimela.register_globals()
     rrr = stimela.Recipe("invchoice", ms_dir=MSDIR)
     assert os.path.exists(MSDIR)
     rrr.add("cab/custom", "test1", {
     "bla1": "d" # only accepts a, b or c
     }, input=INPUT, output=OUTPUT)
     with self.assertRaises(PipelineException):
         rrr.run() #validate and run
Example #9
 def test_iopathval(self):
     global MSDIR
     global INPUT
     global OUTPUT
     stimela.register_globals()
     rrr = stimela.Recipe("ioval", ms_dir=MSDIR)
     assert os.path.exists(MSDIR)
     rrr.add("cab/custom", "test1", {
         "bla1": "a", # only accepts a, b or c
         "bla2": "testinput2.txt:input",
     }, input=INPUT, output=OUTPUT)
     with self.assertRaises(PipelineException): # not exist during validation
         rrr.run() #validate and run
Example #10
 def test_floattypesuccess(self):
     global MSDIR
     global INPUT
     global OUTPUT
     stimela.register_globals()
     rrr = stimela.Recipe("testfloattypesuccess", ms_dir=MSDIR)
     assert os.path.exists(MSDIR)
     rrr.add("cab/custom", "test1", {
         "bla1": "a", # only accepts a, b or c
         "bla3": 4.0,
     }, input=INPUT, output=OUTPUT)
     rrr.run() #validate and run
     assert rrr.jobs[0].job._cab.parameters[2].value == [4.0]
Example #11
 def test_floattypefail(self):
     global MSDIR
     global INPUT
     global OUTPUT
     stimela.register_globals()
     rrr = stimela.Recipe("testfloattypefail", ms_dir=MSDIR)
     assert os.path.exists(MSDIR)
     rrr.add("cab/custom", "test1", {
         "bla1": "a", # only accepts a, b or c
         "bla3": "1.0a",
     }, input=INPUT, output=OUTPUT)
     with self.assertRaises(PipelineException):
         rrr.run() #validate and run
Example #12
 def test_define_cab(self):
     global MSDIR
     global INPUT
     global OUTPUT
     stimela.register_globals()
     rrr = stimela.Recipe("customcab", ms_dir=MSDIR)
     assert os.path.exists(MSDIR)
     rrr.add("cab/custom", "test1", {
         "bla1": "a"
     }, input=INPUT, output=OUTPUT)
     assert len(rrr.jobs) == 1
     rrr.run() #validate and run
     assert rrr.jobs[0].job._cab.parameters[0].value == "a"
     assert len(rrr.completed) == 1
     assert len(rrr.remaining) == 0
Example #13
 def test_dismissable(self):
     global MSDIR
     global INPUT
     global OUTPUT
     stimela.register_globals()
     rrr = stimela.Recipe("testdismissable", ms_dir=MSDIR)
     assert os.path.exists(MSDIR)
     rrr.add("cab/custom", "test1", {
         "bla1": "a", # only accepts a, b or c
         "bla4": sdm("abc"),
         "bla3": sdm(None)
     }, input=INPUT, output=OUTPUT)
     rrr.run() #validate and run
     assert rrr.jobs[0].job._cab.parameters[0].value == "a"
     assert rrr.jobs[0].job._cab.parameters[1].value is None
     assert rrr.jobs[0].job._cab.parameters[2].value is None
     assert rrr.jobs[0].job._cab.parameters[3].value == ["abc"]
Example #14
 def test_iooverride(self):
     global MSDIR
     global INPUT
     with open(os.path.join(INPUT, "testinput.txt"), "w+") as f:
         pass
     global OUTPUT
     stimela.register_globals()
     rrr = stimela.Recipe("testiooverrides", ms_dir=MSDIR)
     assert os.path.exists(MSDIR)
     rrr.add("cab/custom", "test1", {
         "bla1": "a", # only accepts a, b or c
         "bla2": "testinput.txt:input",
     }, input=INPUT, output=OUTPUT)
     rrr.run() #validate and run
     assert rrr.jobs[0].job._cab.parameters[0].value == "a"
     assert rrr.jobs[0].job._cab.parameters[1].value == os.path.join(rrr.jobs[0].job.IODEST["input"], 
             "testinput.txt")
Example #15
    def test_udocker(self):
        import sys
        if sys.version_info[0] > 2:
            return

        global MSDIR
        global INPUT
        global OUTPUT
        stimela.register_globals()
        rrr = stimela.Recipe("singularitypaths",
                             ms_dir=MSDIR,
                             JOB_TYPE="udocker",
                             cabpath="cab/")
        assert os.path.exists(MSDIR)
        rrr.add(
            "cab/custom",
            "test1",
            {
                "bla1":
                "a",  # only accepts a, b or c
                "bla5": [
                    "testinput2.txt:input", "testinput3.txt:msfile",
                    spf("{}hello\{reim\}.fits,{}to.fits,{}world.fits", "input",
                        "msfile", "output")
                ],
            },
            input=INPUT,
            output=OUTPUT)
        rrr.run()  #validate and run
        assert rrr.jobs[0].job._cab.parameters[4].value[
            0] == "/scratch/input/testinput2.txt"
        assert rrr.jobs[0].job._cab.parameters[4].value[
            1] == "/scratch/msdir/testinput3.txt"
        assert rrr.jobs[0].job._cab.parameters[4].value[2] == \
                "{}hello{{reim}}.fits,{}to.fits,{}world.fits".format(
                    "/scratch/input/",
                    "/scratch/msdir/",
                    "/scratch/output/"
                )
Example #16
    def test_singularity(self):
        global MSDIR
        global INPUT
        global OUTPUT
        stimela.register_globals()
        rrr = stimela.Recipe(
            "singularitypaths",
            ms_dir=MSDIR,
            JOB_TYPE="singularity",
            cabpath="cab/",
            singularity_image_dir=os.environ["SINGULARITY_PULLFOLDER"])
        assert os.path.exists(MSDIR)
        rrr.add(
            "cab/custom",
            "test1",
            {
                "bla1":
                "a",  # only accepts a, b or c
                "bla5": [
                    "testinput2.txt:input", "testinput3.txt:msfile",
                    spf("{}hello\{reim\}.fits,{}to.fits,{}world.fits", "input",
                        "msfile", "output")
                ],
            },
            input=INPUT,
            output=OUTPUT)
        rrr.run()  #validate and run

        assert rrr.jobs[0].job._cab.parameters[4].value[
            0] == "/scratch/input/testinput2.txt"
        assert rrr.jobs[0].job._cab.parameters[4].value[
            1] == "/scratch/msdir/testinput3.txt"
        assert rrr.jobs[0].job._cab.parameters[4].value[2] == \
                "{}hello{{reim}}.fits,{}to.fits,{}world.fits".format(
                    "/scratch/input/",
                    "/scratch/msdir/",
                    "/scratch/output/"
                )
Example #17
import stimela

INPUT = "input"
OUTPUT = "output"
MSDIR = "msdir"

recipe = stimela.Recipe("wsclean_psf_only", ms_dir=MSDIR)
recipe.add("cab/wsclean",
           "wsclean_psf_only", {
               "msname": ['fa1_conc.ms'],
               "name": 'tmp',
               "column": 'DATA',
               "weight": 'natural',
               "npix": 10000,
               "trim": 8192,
               "scale": 1.3,
               "make-psf-only": True,
               "niter": 1,
               "pol": 'xx',
               "channelsout": 1,
               "joinchannels": False,
           },
           input=INPUT,
           output=OUTPUT,
           shared_memory="250gb",
           label="PSF ONLY")

recipe.run()
Example #18
    print('Warning: the number of sources is not divisible by the bins')
    print('         images will have %d/%d sources' %(total_sources, NUM_SOURCES))

#
# create stimela recipe
#

import stimela

INPUT = 'input'
OUTPUT = 'output'
MSDIR = 'msdir'
MS = 'meerkat.ms'

recipe = stimela.Recipe(name='Make noise image populated with sources',
                        ms_dir=MSDIR,
                        JOB_TYPE='udocker')


for img in range(0, NUM_IMAGES):

    # determine field centre
    centre_ra = np.random.uniform(RA_MIN, RA_MAX)
    centre_dec = np.random.uniform(DEC_MIN, DEC_MAX)

    # create sky model
    with open('input/%d-skymodel.txt' %(img), 'w') as f:
        f.write('#format: ra_d dec_d i\n')
        for flux in bins:
            for i in range(0, sources_per_bin):
                ra = np.random.uniform(centre_ra-OFFSET_RA, centre_ra+OFFSET_RA)
Example #19
import stimela
import os

INPUT = "/home/maccagni/FornaxA/data/MeerKAT/fa1/input"
OUTPUT = "/home/maccagni/FornaxA/data/MeerKAT/fa1/output"
MSDIR = "/home/maccagni/FornaxA/data/MeerKAT/fa1/msdir"

calmodel = 'fa1_fast_modelcorr.fits'
msname = 'fa1_conc-corr.ms'
recipe = stimela.Recipe("Calibrate with meqtrees", ms_dir=MSDIR)

calmodel = '{0:s}_{1:d}-nullmodel.txt'
with open(os.path.join(INPUT, calmodel), 'w') as stdw:
    stdw.write('#format: ra_d dec_d i\n')
    stdw.write('0.0 -30.0 1e-99')

# step = 'add_bitflag_column'
# recipe.add('cab/msutils', step,
# {
#   "msname"  : msname,
#   "command" : 'copycol' ,
#   "fromcol" : 'FLAG',
#   "tocol"   : 'BITFLAG',
# },
# input=INPUT,
# output=OUTPUT,
# label='Add BITFLAG column ')

step = 'calibrate_cubical'
recipe.add('cab/cubical',
           step, {
Example #20
    return 0


# def chdr(filename):

#     with fits.open(filename, 'update') as mask:

#       mask[0].header['NAXIS'] = '2'
#       mask[0].header['NAXIS3']
#       mask[0].header['NAXIS4']

#       mask.flush()

# step= '0'
#
recipe = stimela.Recipe("Test clean mask making", ms_dir=MSDIR)
# recipe.add("cab/cleanmask", step,
# {
# "image"		: image+":output",
# "output"	: fa1_cleanmask+":output",
# "dilate"	: False,
# "sigma"		: 15,
# "no-negative"   : True,
# },
# input=INPUT,
# output=OUTPUT,
# label="Make a mask out of clean image clean image")

step = '1'
recipe.add('cab/fitstool',
           step, {
Example #21
import stimela

PREFIX = "selfcal"

recipe = stimela.Recipe("selfcal_simulation",
                        indir="input",
                        outdir="output",
                        cachedir="cachedir")

recipe.add(
    "simms",
    "makems",
    {
        "msname": "meerkat_SourceRecovery.ms",
        "telescope": "meerkat",
        "direction": "J2000,0deg,-30deg",
        "synthesis": 0.5,  # in hours
        "dtime": 5,  # in seconds
        "freq0": 1.42e9,  # in hertz
        "dfreq": 1e6,  # in hertz
        "nchan": 4,
    },
    doc="Create Empty MS")

recipe.add(
    "simulator",
    "simsky",
    {
        "msname": recipe.makems.outputs["msname_out"],
        "config": "tdlconf.profiles",
        "use_smearing": False,
Example #22
vermeerkat.log.info("The following fields are available:")
for f in FDB:
    vermeerkat.log.info("\t '{0:s}' index {1:s}{2:s}".format(
        f, FDB[f],
        " selected as 'BP'" if f == BPCALIBRATOR else " selected as 'GC'"
        if f in GCALIBRATOR else " selected as 'ALTCAL'" if f in ALTCAL else
        " selected as 'TARGET'" if f in TARGET else " not selected"))

if not vermeerkat.prompt(dont_prompt=args.dont_prompt):
    vermeerkat.log.info("Aborted per user request")
    sys.exit(1)

stimela.register_globals()
recipe = stimela.Recipe('MEERKAT: basic transfer calibration',
                        ms_dir=MSDIR,
                        singularity_image_dir=os.environ.get(
                            "SINGULARITY_PULLFOLDER", ""),
                        JOB_TYPE=args.containerization)


def addmanualflags(recipe,
                   reason,
                   antenna="",
                   spw="",
                   scan="",
                   uvrange="",
                   field=""):
    """ Read CASA flagdata docs before using """
    recipe.add("cab/casa_flagdata",
               "handflags", {
                   "vis": ZEROGEN_DATA,
Example #23
import stimela

INPUT = "/Users/maccagni/Projects/FornaX/FornaxA/data/MeerKAT/fa1/input"
OUTPUT = "/Users/maccagni/Projects/FornaX/FornaxA/data/MeerKAT/fa1/output"
MSDIR = "/Users/maccagni/Projects/FornaX/FornaxA/data/MeerKAT/fa1/msdir"

ms = MSDIR + '/fa1_tosub.ms'
toutname = MSDIR + '/fa1_conc_tmp-corr.ms'
recipe = stimela.Recipe("Subtract model of continuum", ms_dir=MSDIR)

recipe.add(
    "cab/msutils",
    "sub_model",
    {
        "command": 'sumcols',
        "msname": 'fa1_conc_csub-corr.ms',
        #"subtract"    : True,
        "col1": 'CORRECTED_DATA',
        "col2": 'MODEL_DATA',
        "tocol": 'CORRECTED_DATA'
    },
    input=INPUT,
    output=OUTPUT,
    label="Subtract model_data column")

recipe.run()

#import pyrap.tables as tables

#print ''
#print '--- Working on file {0:s} ---'.format(ms)
Example #24
         timegain_solint)

log.info("Fields being used for solving:")
for fi, f in enumerate([field_list[fk] for fk in field_list.keys()]):
    label = " (BP)" if f in args.bandpass_field else " (POL)" if f in args.polcal_field else ""
    log.info("\t %d: %s%s" % (fi, f, label))

if not vermeerkat.prompt(dont_prompt=args.dont_prompt):
    vermeerkat.log.info("Aborted per user request")
    sys.exit(1)

stimela.register_globals()

recipe = stimela.Recipe(
    'MEERKAT FleetingPol: Interferometric boresight polarization calibration',
    ms_dir=MSDIR,
    singularity_image_dir=os.environ.get("SINGULARITY_PULLFOLDER", ""),
    JOB_TYPE=args.containerization)

recipe.add("cab/casa_split",
           "split_avg_data", {
               "vis": COMB_MS,
               "outputvis": BP_CAL_MS,
               "datacolumn": "corrected",
               "field": ",".join(args.bandpass_field + args.polcal_field),
               "timebin": pol_mstimeavg,
               "width": pol_solchanavg,
           },
           input=INPUT,
           output=OUTPUT,
           label="split_avg_data")
Example #25
    def testEndToEndReduction(self):
        global INPUT, OUTPUT, MSDIR, MS, LABEL
        recipe = stimela.Recipe('Test reduction script',
                                ms_dir=MSDIR, JOB_TYPE="docker", log_dir="logs")

        imname1 = "deep2.1gc"
        imname2 = "deep2.2gc"
        recipe.add("cab/ddfacet", "ddfacet_test1",
                   {
                       "Data-MS": [MS],
                       "Output-Name": imname1,
                       "Image-NPix": 2048,
                       "Image-Cell": 2,
                       "Cache-Reset": True,
                       "Freq-NBand": 2,
                       "Freq-NDegridBand": 4,
                       "Weight-ColName": "WEIGHT",
                       "Data-ChunkHours": 0.1,
                       "Data-Sort": True,
                       "Log-Boring": True,
                       "Deconv-MaxMajorIter": 2,
                       "Deconv-MaxMinorIter": 1500,
                       "Predict-ColName": "MODEL_DATA"
                   },
                   input=INPUT, output=OUTPUT, shared_memory="8gb",
                   label="image1",
                   time_out=1800)

        recipe.add('cab/tricolour', "flag_data",
                   {
                       "ms": MS,
                       "data-column": "DATA",
                       "window-backend": 'numpy',
                       "flagging-strategy": "total_power",
                       "subtract-model-column": "MODEL_DATA",
                   },
                   input=INPUT, output=OUTPUT, label="flag_data",
                   time_out=1800)

        maskname0 = "MASK.fits"
        recipe.add('cab/cleanmask', 'mask0', {
            "image": '%s.app.restored.fits:output' % (imname1),
            "output": '%s:output' % (maskname0),
            "dilate": False,
            "sigma": 25,
        },
            input=INPUT,
            output=OUTPUT,
            label='mask0:: Make mask',
            time_out=1800)

        recipe.add("cab/ddfacet", "ddfacet_test2",
                   {
                       "Data-MS": [MS],
                       "Output-Name": imname1,
                       "Image-NPix": 2048,
                       "Image-Cell": 2.0,
                       "Cache-Reset": True,
                       "Freq-NBand": 2,
                       "Freq-NDegridBand": 4,
                       "Mask-External": '%s:output' % (maskname0),
                       "Weight-ColName": "WEIGHT",
                       "Data-ChunkHours": 0.1,
                       "Data-Sort": True,
                       "Log-Boring": True,
                       "Deconv-MaxMajorIter": 2,
                       "Deconv-MaxMinorIter": 1500,
                   },
                   input=INPUT, output=OUTPUT, shared_memory="24gb",
                   label="image2",
                   time_out=1800)

        recipe.add("cab/shadems", "shadems_test",
                   {
                       'ms': MS,
                       'xaxis': 'DATA:imag',
                       'yaxis': 'real',
                       'col': 'DATA',
                       'png': '%s_shadems_test_real_imag' % (PREFIX)
                   },
                   input=INPUT, output=OUTPUT,
                   label="shadems_test",
                   time_out=1800)

        # # First selfcal round

        recipe.add("cab/cubical_ddf", "cubical_cal",
                   {
                       'data-ms': MS,
                       'data-column': "DATA",
                       'dist-nworker': 4,
                       'dist-nthread': 1,
                       'dist-max-chunks': 20,
                       'data-freq-chunk': 0,
                       'data-time-chunk': 1,
                       'model-list': spf("MODEL_DATA"),
                       'weight-column': "WEIGHT",
                       'flags-apply': "FLAG",
                       'flags-auto-init': "legacy",
                       'madmax-enable': False,
                       'madmax-threshold': [0, 0, 10],
                       'madmax-global-threshold': [0, 0],
                       'sol-jones': 'g',
                       'sol-stall-quorum': 0.95,
                       'out-name': "cubicaltest",
                       'out-column': "CORRECTED_DATA",
                       'log-verbose': "solver=2",
                       'g-type': "complex-2x2",
                       'g-freq-int': 0,
                       'g-time-int': 20,
                       'g-max-iter': 10,
                       'sol-term-iters': 10,
                       'g-update-type': "complex-2x2",

                   }, input=INPUT, output=OUTPUT,
                   label="cubical",
                   shared_memory="24gb",
                   time_out=1800)

        recipe.add("cab/cubical", "cubical_cal2",
                   {
                       'data-ms': MS,
                       'data-column': "DATA",
                       'dist-nworker': 4,
                       'dist-nthread': 1,
                       'dist-max-chunks': 20,
                       'data-freq-chunk': 0,
                       'data-time-chunk': 1,
                       'model-list': spf("MODEL_DATA"),
                       'weight-column': "WEIGHT",
                       'flags-apply': "FLAG",
                       'flags-auto-init': "legacy",
                       'madmax-enable': False,
                       'madmax-threshold': [0, 0, 10],
                       'madmax-global-threshold': [0, 0],
                       'sol-jones': 'g',
                       'sol-stall-quorum': 0.95,
                       'out-name': "cubicaltest",
                       'out-column': "CORRECTED_DATA",
                       'log-verbose': "solver=2",
                       'g-type': "complex-2x2",
                       'g-freq-int': 0,
                       'g-time-int': 20,
                       'g-max-iter': 10,
                       'sol-term-iters': 10,
                       'out-overwrite' : True,
                       'g-update-type': "complex-2x2",

                   }, input=INPUT, output=OUTPUT,
                   label="cubical",
                   shared_memory="24gb",
                   time_out=1800)


        recipe.add("cab/ragavi_vis", "ragavi_vis_test",
                   {
                       'ms': MS,
                       'xaxis': 'imaginary',
                       'yaxis': 'real',
                       'data-column': 'CORRECTED_DATA',
                       'htmlname': "%s_ragavi_vis_real_imag" % (PREFIX)
                   },
                   input=INPUT, output=OUTPUT,
                   label="ragavi_vis_test",
                   time_out=1800)
        recipe.run()
Example #26
    def testEndToEndReduction(self):
        global INPUT, OUTPUT, MSDIR, MS, LABEL
        global GAINCAL_TABLE2, FLUXSCALE_TABLE, GAINCAL_TABLE, DELAYCAL_TABLE, BPCAL_TABLE, ANTPOS_TABLE
        global REFANT, BPCAL, TARGET, GCAL, PREFIX
        global MSCONTSUB, SPW, LSM0, SELFCAL_TABLE1, corr_ms, lsm0
        global IMAGE1, IMAGE2, MASK1, nchans, chans, imname0, maskname0, maskname01, imname1

        recipe = stimela.Recipe('Test reduction script',
                                ms_dir=MSDIR,
                                JOB_TYPE="docker")

        recipe.add('cab/casa_listobs',
                   'listobs', {"vis": MS},
                   input=INPUT,
                   output=OUTPUT,
                   label='listobs:: some stats',
                   time_out=300)

        # It is common for the array to require a small amount of time to settle down at the start of a scan. Consequently, it has
        # become standard practice to flag the initial samples from the start of each scan. This is known as 'quack' flagging
        recipe.add('cab/casa_flagdata',
                   'quack_flagging', {
                       "vis": MS,
                       "mode": 'quack',
                       "quackinterval": 10.0,
                       "quackmode": 'beg',
                   },
                   input=INPUT,
                   output=OUTPUT,
                   label='quack_flagging:: Quack flagging',
                   time_out=300)

        # Flag the autocorrelations

        recipe.add('cab/casa_flagdata',
                   'autocorr_flagging', {
                       "vis": MS,
                       "mode": 'manual',
                       "autocorr": True,
                   },
                   input=INPUT,
                   output=OUTPUT,
                   label='autocorr_flagging:: Autocorrelations flagging',
                   time_out=300)

        # Flag bad channels
        recipe.add(
            'cab/casa_flagdata',
            'badchan_flagging', {
                "vis":
                MS,
                "mode":
                'manual',
                "spw":
                "0:113~113,0:313~313,0:369~369,0:601~607,0:204~204,0:212~212,0:594~600",
            },
            input=INPUT,
            output=OUTPUT,
            label='badchan_flagging:: Bad Channel flagging',
            time_out=300)

        recipe.add('cab/casa_clearcal',
                   'clearcal', {
                       "vis": MS,
                       "addmodel": True
                   },
                   input=INPUT,
                   output=OUTPUT,
                   label='clearcal:: casa clearcal',
                   time_out=300)

        recipe.add(
            'cab/casa_setjy',
            'set_flux_scaling', {
                "vis": MS,
                "field": BPCAL,
                "standard": 'Perley-Butler 2010',
                "usescratch": True,
                "scalebychan": True,
            },
            input=INPUT,
            output=OUTPUT,
            label=
            'set_flux_scaling:: Set flux density value for the amplitude calibrator',
            time_out=300)

        recipe.add(
            'cab/casa_bandpass',
            'bandpass_cal',
            {
                "vis": MS,
                "caltable": BPCAL_TABLE,
                "field": BPCAL,
                "refant": REFANT,
                "spw": SPW,
                "solint": 'inf',
                "bandtype": 'B',
                #                        "opacity"   : 0.0,
                #                        "gaincurve" : False,
            },
            input=INPUT,
            output=OUTPUT,
            label='bandpass_cal:: Bandpass calibration',
            time_out=300)

        # display the bandpass solutions. Note that in the plotcal inputs below, the amplitudes are being displayed as a function of
        # frequency channel. The parameter subplot=221 is used to display multiple plots per page (2 plots per page in the y
        # direction and 2 in the x direction). The first two commands below show the amplitude solutions (one per each polarization)
        # and the last two show the phase solutions (one per each polarization). Parameter iteration='antenna' is used to step
        # through separate plots for each antenna.
        recipe.add('cab/casa_plotcal',
                   'plot_bandpass_amp_R', {
                       "caltable": BPCAL_TABLE,
                       "poln": 'R',
                       "xaxis": 'chan',
                       "yaxis": 'amp',
                       "field": BPCAL,
                       "spw": SPW,
                       "subplot": 221,
                       "figfile": PREFIX + '-B0-R-amp.png',
                   },
                   input=INPUT,
                   output=OUTPUT,
                   label='plot_bandpass_amp_R:: Plot bandpass table. AMP, R',
                   time_out=1200)

        # Gain calibration - amplitude and phase - first for BPCAL.
        recipe.add('cab/casa_gaincal',
                   'gaincal_bp', {
                       "vis": MS,
                       "caltable": GAINCAL_TABLE,
                       "field": "{0:s},{1:s}".format(BPCAL, GCAL),
                       "solint": 'inf',
                       "refant": '',
                       "gaintype": 'G',
                       "calmode": 'ap',
                       "spw": SPW,
                       "solnorm": False,
                       "gaintable": [BPCAL_TABLE],
                       "interp": ['nearest'],
                   },
                   input=INPUT,
                   output=OUTPUT,
                   label="gaincal:: Gain calibration",
                   time_out=300)

        # Set fluxscale
        recipe.add('cab/casa_fluxscale',
                   'fluxscale', {
                       "vis": MS,
                       "caltable": GAINCAL_TABLE,
                       "fluxtable": FLUXSCALE_TABLE,
                       "reference": [BPCAL],
                       "transfer": [GCAL],
                       "incremental": False,
                   },
                   input=INPUT,
                   output=OUTPUT,
                   label='fluxscale:: Set fluxscale',
                   time_out=300)

        # Apply calibration to BPCAL
        recipe.add(
            'cab/casa_applycal',
            'applycal_bp', {
                "vis": MS,
                "field": BPCAL,
                "gaintable": [BPCAL_TABLE, FLUXSCALE_TABLE],
                "gainfield": ['', '', BPCAL],
                "interp": ['', '', 'nearest'],
                "calwt": [False],
                "parang": False,
                "applymode": "calflag",
            },
            input=INPUT,
            output=OUTPUT,
            label='applycal_bp:: Apply calibration to Bandpass Calibrator',
            time_out=1800)

        recipe.run()

        recipe = stimela.Recipe('KAT reduction script 2',
                                ms_dir=MSDIR,
                                JOB_TYPE="docker")
        # Copy CORRECTED_DATA to DATA, so we can start uv_contsub
        recipe.add("cab/msutils",
                   "move_corrdata_to_data", {
                       "command": "copycol",
                       "msname": MS,
                       "fromcol": "CORRECTED_DATA",
                       "tocol": "DATA",
                   },
                   input=INPUT,
                   output=OUTPUT,
                   label="move_corrdata_to_data::msutils",
                   time_out=1800)

        os.system("rm -rf {}/{}-corr.ms".format(MSDIR, MS[:-3]))
        recipe.add('cab/casa_split',
                   'split_corr_data', {
                       "vis": MS,
                       "outputvis": MS[:-3] + '-corr.ms',
                       "field": str(BPCAL),
                       "spw": SPW,
                       "datacolumn": 'data',
                   },
                   input=INPUT,
                   output=OUTPUT,
                   label='split_corr_data:: Split corrected data from MS',
                   time_out=1800)

        MS = MS[:-3] + '-corr.ms'

        recipe.add(
            'cab/casa_clearcal',
            'prep_split_data', {
                "vis": MS,
                "addmodel": True
            },
            input=INPUT,
            output=OUTPUT,
            label='prep_split_data:: Prep split data with casa clearcal',
            time_out=1800)

        # Clean-Mask-Clean
        imname0 = PREFIX + 'image0'
        maskname0 = PREFIX + 'mask0.fits'
        maskname01 = PREFIX + 'mask01.fits'
        imname1 = PREFIX + 'image1'

        recipe.add(
            'cab/casa_tclean',
            'image_target_field_r1',
            {
                "vis": MS,
                "datacolumn": "corrected",
                "field": "0",
                "start": 21,  # Other channels don't have any data
                "nchan": 235 - 21,
                "width": 1,
                # Use Briggs weighting to weigh visibilities for imaging
                "weighting": "briggs",
                "robust": 0,
                "imsize": 256,  # Image size in pixels
                "cellsize": "30arcsec",  # Size of each square pixel
                "niter": 100,
                "stokes": "I",
                "prefix": '%s:output' % (imname1),
            },
            input=INPUT,
            output=OUTPUT,
            label="image_target_field_r1:: Image target field second round",
            time_out=90)

        recipe.add('cab/cleanmask',
                   'mask0', {
                       "image": '%s.image.fits:output' % (imname1),
                       "output": '%s:output' % (maskname0),
                       "dilate": False,
                       "sigma": 20,
                   },
                   input=INPUT,
                   output=OUTPUT,
                   label='mask0:: Make mask',
                   time_out=1800)

        lsm0 = PREFIX + '-LSM0'
        # Source finding for initial model
        recipe.add(
            "cab/pybdsm",
            "extract_init_model", {
                "image": '%s.image.fits:output' % (imname1),
                "outfile": '%s:output' % (lsm0),
                "thresh_pix": 25,
                "thresh_isl": 15,
                "port2tigger": True,
            },
            input=INPUT,
            output=OUTPUT,
            label=
            "extract_init_model:: Make initial model from preselfcal image",
            time_out=1800)

        # Add bitflag column. To keep track of flagsets.
        recipe.add("cab/msutils",
                   "msutils", {
                       'command': 'prep',
                       'msname': MS,
                   },
                   input=INPUT,
                   output=OUTPUT,
                   label="prepms::Adds flagsets",
                   time_out=1800)

        # Not used currently.
        recipe.add("cab/flagms",
                   "backup_initial_flags", {
                       "msname": MS,
                       "create": True,
                       "nan": True,
                       "flagged-any": "+L",
                       "flag": "legacy",
                   },
                   input=INPUT,
                   output=OUTPUT,
                   label="backup_initial_flags:: Backup selfcal flags",
                   time_out=1800)

        # First selfcal round
        recipe.add(
            "cab/calibrator",
            "calibrator_Gjones_subtract_lsm0",
            {
                "skymodel": "%s.lsm.html:output" % (lsm0),
                "msname": MS,
                "threads": 16,
                "column": "DATA",
                "output-data": "CORR_RES",
                "Gjones": True,
                # Ad-hoc right now, subject to change
                "Gjones-solution-intervals": [20, 0],
                "Gjones-matrix-type": "GainDiagPhase",
                "tile-size": 512,
                "field-id": 0,
            },
            input=INPUT,
            output=OUTPUT,
            label=
            "calibrator_Gjones_subtract_lsm0:: Calibrate and subtract LSM0",
            time_out=1800)

        # Diversity is a good thing... lets add some DDFacet to this soup bowl
        imname = PREFIX + 'ddfacet'

        recipe.add(
            "cab/ddfacet",
            "ddfacet_test", {
                "Data-MS": [MS],
                "Output-Name": imname,
                "Image-NPix": 256,
                "Image-Cell": 30,
                "Cache-Reset": True,
                "Freq-NBand": 2,
                "Weight-ColName": "WEIGHT",
                "Data-ChunkHours": 10,
                "Beam-FITSFeed": "rl",
                "Data-Sort": True,
                "Log-Boring": True,
                "Deconv-MaxMajorIter": 1,
                "Deconv-MaxMinorIter": 20,
            },
            input=INPUT,
            output=OUTPUT,
            shared_memory="200gb",
            label=
            "image_target_field_r0ddfacet:: Make a test image using ddfacet",
            time_out=120)

        lsm1 = PREFIX + '-LSM0'
        # Source finding for initial model
        recipe.add(
            "cab/pybdsm",
            "extract_init_model", {
                "image": '%s.app.restored.fits:output' % (imname),
                "outfile": '%s:output' % (lsm1),
                "thresh_pix": 25,
                "thresh_isl": 15,
                "port2tigger": True,
            },
            input=INPUT,
            output=OUTPUT,
            label=
            "extract_init_model:: Make initial model from preselfcal image",
            time_out=1800)

        # Stitch LSMs together
        lsm2 = PREFIX + '-LSM2'
        recipe.add("cab/tigger_convert",
                   "stitch_lsms1", {
                       "input-skymodel": "%s.lsm.html:output" % lsm0,
                       "output-skymodel": "%s.lsm.html:output" % lsm2,
                       "rename": True,
                       "force": True,
                       "append": "%s.lsm.html:output" % lsm1,
                   },
                   input=INPUT,
                   output=OUTPUT,
                   label="stitch_lsms1::Create master lsm file",
                   time_out=300)

        recipe.add('cab/casa_uvcontsub',
                   'uvcontsub', {
                       "msname": MS,
                       "field": "0",
                       "fitorder": 1,
                   },
                   input=INPUT,
                   output=OUTPUT,
                   label='uvcontsub:: Subtract continuum in the UV plane',
                   time_out=1800)

        # Image HI
        recipe.add('cab/casa_clean',
                   'casa_dirty_cube', {
                       "msname": MS + ".contsub",
                       "prefix": PREFIX,
                       "mode": 'channel',
                       "nchan": nchans,
                       "niter": 0,
                       "npix": 256,
                       "cellsize": 30,
                       "weight": 'natural',
                       "port2fits": True,
                   },
                   input=INPUT,
                   output=OUTPUT,
                   label='casa_dirty_cube:: Make a dirty cube with CASA CLEAN',
                   time_out=1800)

        recipe.add(
            'cab/sofia',
            'sofia',
            {
                #    USE THIS FOR THE WSCLEAN DIRTY CUBE
                #    "import.inFile"     :   '{:s}-cube.dirty.fits:output'.format(combprefix),
                #    USE THIS FOR THE CASA CLEAN CUBE
                # CASA CLEAN cube
                "import.inFile": '{:s}.image.fits:output'.format(PREFIX),
                "steps.doFlag": False,
                "steps.doScaleNoise": True,
                "steps.doSCfind": True,
                "steps.doMerge": True,
                "steps.doReliability": False,
                "steps.doParameterise": False,
                "steps.doWriteMask": True,
                "steps.doMom0": False,
                "steps.doMom1": False,
                "steps.doWriteCat": True,
                "flag.regions": [],
                "scaleNoise.statistic": 'mad',
                "SCfind.threshold": 4,
                "SCfind.rmsMode": 'mad',
                "merge.radiusX": 2,
                "merge.radiusY": 2,
                "merge.radiusZ": 2,
                "merge.minSizeX": 2,
                "merge.minSizeY": 2,
                "port2tigger": False,
                "merge.minSizeZ": 2,
            },
            input=INPUT,
            output=OUTPUT,
            label='sofia:: Make SoFiA mask and images',
            time_out=1800)

        recipe.run()
Example #27
import stimela
import time
import json
import re

INPUT = "input"
OUTPUT = "output"
MSDIR = "msdir"

MANUAL = False
CONFIG = ''

if MANUAL:
    recipe = stimela.Recipe('Make Pipeline', ms_dir=MSDIR)
    stimela.register_globals()


def save_execution_time(data_dict, filename='recipes.json', root='output'):
    """Save execution time"""
    filename = '{}/{}_time-it.json'.format(root, filename.split('.')[0])
    try:
        # Extract data from the json data file
        with open(filename) as data_file:
            data_existing = json.load(data_file)
            data_existing.update(data_dict)
            data = data_existing
    except IOError:
        data = data_dict
    if data:
        with open(filename, 'w') as f:
            json.dump(data, f)
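A minimal usage sketch, assuming a recipe built as in the MANUAL branch above; the key name is illustrative:

if MANUAL:
    start_time = time.time()
    recipe.run()
    # record the wall-clock time of this run under an illustrative key
    save_execution_time({'make_pipeline': time.time() - start_time},
                        filename='recipes.json', root=OUTPUT)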
Example #28
PREFIX = "stimela-example"  # Prefix for output images
try:
    SINGULARTITY_IMAGE_DIR = os.environ["STIMELA_SINGULARTITY_IMAGE_DIR"]
except KeyError:
    SINGULARTITY_IMAGE_DIR = None

# MS name
MS = "meerkat_simulation_example.ms"

# Use the NVSS skymodel. This is natively available
LSM = "nvss1deg.lsm.html"

# Start stimela Recipe instance
pipeline = stimela.Recipe(
    "Simulation Example",  # Recipe name
    ms_dir=MSDIR,
    singularity_image_dir=SINGULARTITY_IMAGE_DIR,
    log_dir=os.path.join(OUTPUT, "logs"),
)

pipeline.JOB_TYPE = "podman"

# 1: Make empty MS
pipeline.add(
    "cab/simms",  # Executor image to start container from
    "simms_example",  # Container name
    {  # Parameters to parse to executor container
        "msname": MS,
        "telescope": "meerkat",  # Telescope name
        "direction":
        "J2000,0deg,-30deg",  # Phase tracking centre of observation
        "synthesis": 0.128,  # Synthesis time of observation
Example #29
    def testBasicSim(self):
        global INPUT, MSDIR, OUTPUT, MS, PREFIX, LSM

        # Start stimela Recipe instance
        pipeline = stimela.Recipe(
            "Singularity Test",  # Recipe name
            ms_dir=MSDIR,
            singularity_image_dir=os.environ["SINGULARITY_PULLFOLDER"],
        )

        pipeline.add(
            "cab/simms",  # Executor image to start container from 
            "simms_example",  # Container name
            {  # Parameters to parse to executor container
                "msname": MS,
                "telescope": "kat-7",  # Telescope name
                "direction":
                "J2000,0deg,-30deg",  # Phase tracking centre of observation
                "synthesis": 0.128,  # Synthesis time of observation
                "dtime": 10,  # Integration time in seconds
                "freq0": "750MHz",  # Start frequency of observation
                "dfreq": "1MHz",  # Channel width
                "nchan": 1  # Number of channels
            },
            input=INPUT,  # Input folder
            output=OUTPUT,  # Output folder
            label="Creating MS",  # Process label
            cpus=2.5,
            memory_limit="2gb",
            time_out=300)

        pipeline.add("cab/casa_listobs",
                     "listobs_example", {"vis": MS},
                     input=INPUT,
                     output=OUTPUT,
                     label="Some obs details",
                     time_out=100)

        pipeline.add(
            "cab/simulator",
            "simulator_example",
            {
                "msname": MS,
                "skymodel": LSM,  # Sky model to simulate into MS
                "addnoise": True,  # Add thermal noise to visibilities
                "column":
                "CORRECTED_DATA",  # Simulated data will be saved in this column
                "sefd": 831,  # Compute noise from this SEFD
                "recenter":
                True,  # Recentre sky model to phase tracking centre of MS
                "tile-size": 64,
                "threads": 4,
            },
            input=INPUT,
            output=OUTPUT,
            label="Simulating visibilities",
            time_out=600)

        #           pipeline.add('cab/casa_plotms',
        #                       'plot_vis',
        #                       {
        #                           "vis"           :   MS,
        #                           "xaxis"         :   'uvdist',
        #                           "yaxis"         :   'amp',
        #                           "xdatacolumn"   :   'corrected',
        #                           "ydatacolumn"   :   'corrected',
        #                           "plotfile"      :   PREFIX+'-amp_uvdist.png',
        #                           "overwrite"     :   True,
        #                       },
        #                       input=INPUT,
        #                       output=OUTPUT,
        #                       label='plot_amp_uvdist:: Plot amplitude vs uv-distance',
        #                       time_out=600)

        ## Image
        # Make things a bit interesting by imaging with different weights
        # Briggs robust values to use for each image
        briggs_robust = [2]

        for i, robust in enumerate(briggs_robust):

            pipeline.add(
                "cab/wsclean",
                "imager_example_robust_{:d}".format(i),
                {
                    "msname": MS,
                    "weight": "briggs {:d}".format(i),
                    "prefix": "{:s}_robust-{:d}".format(PREFIX, robust),
                    "npix": 2048,  # Image size in pixels
                    "cellsize": 2,  # Size of each square pixel
                    "clean_iterations":
                    1000,  # Perform 1000 iterations of clean (Deconvolution)
                },
                input=INPUT,
                output=OUTPUT,
                label="Imaging MS, robust={:d}".format(robust),
                cpus=2,
                memory_limit="2gb",
                time_out=600)

        pipeline.add("cab/casa_rmtables",
                     "delete_ms", {
                         "tablenames": MS + ":msfile",
                     },
                     input=INPUT,
                     output=OUTPUT,
                     label="Remove MS",
                     time_out=300)

        # Run recipe. The 'steps' added above will be executed in the sequence
        # that they were added.
        pipeline.run()
Example #30
    def run_workers(self):
        """ Runs the  workers """
        report_updated = False

        for _name, _worker, i in self.workers:
            try:
                worker = __import__(_worker)
            except ImportError:
                traceback.print_exc()
                raise ImportError('Worker "{0:s}" could not be found at {1:s}'.format(
                    _worker, self.workers_directory))

        if self.config["general"]["cabs"]:
            log.info("Configuring cab specification overrides")
            cabspecs_general = self.parse_cabspec_dict(self.config["general"]["cabs"])
        else:
            cabspecs_general = {}

        active_workers = []
        # first, check that workers import, and check their configs
        for _name, _worker, i in self.workers:
            config = self.config[_name]
            if 'enable' in config and not config['enable']:
                self.skip.append(_worker)
                continue
            log.info("Configuring worker {}".format(_name))
            try:
                worker = __import__(_worker)
            except ImportError:
                log.error('Error importing worker "{0:s}" from {1:s}'.format(_worker, self.workers_directory))
                raise
            if hasattr(worker, 'check_config'):
                worker.check_config(config)
            # check for cab specs
            cabspecs = cabspecs_general
            if config["cabs"]:
                cabspecs = cabspecs.copy()
                cabspecs.update(self.parse_cabspec_dict(config["cabs"]))
            active_workers.append((_name, worker, config, cabspecs))

        # now run the actual pipeline
        #for _name, _worker, i in self.workers:
        for _name, worker, config, cabspecs in active_workers:
            # Define stimela recipe instance for worker
            # Also change logger name to avoid duplication of logging info
            label = getattr(worker, 'LABEL', None)
            if label is None:
                # if label is not set, take filename, and split off _worker.py
                label =  os.path.basename(worker.__file__).rsplit("_", 1)[0]

            recipe = stimela.Recipe(label,
                                    ms_dir=self.msdir,
                                    singularity_image_dir=self.singularity_image_dir,
                                    log_dir=self.logs,
                                    cabspecs=cabspecs,
                                    logfile=False, # no logfiles for recipes
                                    logfile_task=f'{self.logs}/log-{label}-{{task}}-{self.timeNow}.txt')

            recipe.JOB_TYPE = self.container_tech
            self.CURRENT_WORKER = _name
            # Don't allow pipeline-wide resume
            # functionality
            os.system('rm -f {}'.format(recipe.resume_file))
            # Get recipe steps
            # 1st get correct section of config file
            log.info("{0:s}: initializing".format(label), extra=dict(color="GREEN"))
            worker.worker(self, recipe, config)
            log.info("{0:s}: running".format(label))
            recipe.run()
            log.info("{0:s}: finished".format(label))

            # this should be in the cab cleanup code, no?

            casa_last = glob.glob(self.output + '/*.last')
            for file_ in casa_last:
                os.remove(file_)

            # update report at end of worker if so configured
            if self.generate_reports and config["report"]:
                self.regenerate_reports()
                report_updated = True
            else:
                report_updated = False

        # generate final report
        if self.config["general"]["final_report"] and self.generate_reports and not report_updated:
            self.regenerate_reports()

        log.info("pipeline run complete")