Example #1
def img_wsclean(mslist, field):
     key='image_wsclean'
     outdir = field+"_ddcal"
     imweight = config[key]['img_ws_weight']
     pref = "DD_wsclean"
     mspref = os.path.splitext(mslist[0])[0].replace('-','_')
     step = 'img_wsclean-{0:s}-{1:s}'.format(mspref,field)
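     # Queue a WSClean imaging run on the DD-calibrated data; weighting,
     # deconvolution and multi-frequency options come from the 'image_wsclean'
     # section of the config.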
     recipe.add('cab/wsclean', step, {
         "msname": mslist,
         "column": config[key]['img_ws_col'],
         "weight": imweight if not imweight == 'briggs' else 'briggs {}'.format(config[key]['img_ws_robust']),
         "nmiter": sdm.dismissable(config[key]['img_ws_nmiter']),
         "npix": config[key]['img_ws_npix'],
         "padding": config[key]['img_ws_padding'],
         "scale": config[key]['img_ws_cell', cell],
         "prefix": '{0:s}_{1:s}'.format(pref, field),
         "niter": config[key]['img_ws_niter'],
         "mgain": config[key]['img_ws_mgain'],
         "pol": config[key]['img_ws_stokes'],
         "taper-gaussian": sdm.dismissable(config[key]['img_ws_uvtaper']),
         "channelsout": config[key]['img_ws_nchans'],
         "joinchannels": config[key]['img_ws_joinchans'],
         "local-rms": config[key]['img_ws_local_rms'],
         "fit-spectral-pol": config[key]['img_ws_specfit_nrcoeff'],
         "auto-threshold": config[key]['img_ws_auto_thr'],
         "auto-mask": config[key]['img_ws_auto_mask'],
         "multiscale": config[key]['img_ws_multi_scale'],
         "multiscale-scales": sdm.dismissable(config[key]['img_ws_multi_scale_scales']),
         "savesourcelist": True if config[key]['img_ws_niter']>0 else False,
     },
     input=INPUT,
     output=OUTPUT+"/"+outdir,
     version='2.6' if config[key]['img_ws_multi_scale'] else None,
     label='img_wsclean-{0:s}-{1:s}:: Image DD-calibrated data with WSClean'.format(mspref,field),shared_memory=shared_mem)
Example #2
def run_crystalball(mslist, field):
     key='transfer_model_dd'
     outdir = field+"_ddcal"
     pref = "DD_wsclean"
     crystalball_model = '{0:s}_{1:s}-sources.txt'.format(pref, field)
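     # Predict the WSClean source list back into each MS with Crystalball,
     # adding one recipe step per MS.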
     for ms in mslist:
        mspref = os.path.splitext(ms)[0].replace('-', '_')
        step = 'crystalball-{0:s}-{1:s}'.format(mspref,field)
        recipe.add('cab/crystalball', step, {
            "ms": ms,
            "sky-model": crystalball_model+':output',
            "spectra": config[key]['dd_spectra'],
            "row-chunks": config[key]['dd_row_chunks'],
            "model-chunks": config[key]['dd_model_chunks'],
            "exp-sign-convention": config[key]['dd_exp_sign_convention'],
            "within": sdm.dismissable(config[key]['dd_within'] or None),
            "points-only": config[key]['dd_points_only'],
            "num-sources": sdm.dismissable(config[key]['dd_num_sources']),
            "num-workers": sdm.dismissable(config[key]['dd_num_workers']),
            "memory-fraction": config[key]['dd_mem_frac'],
          },
            input=INPUT,
            output=OUTPUT+"/"+outdir,shared_memory=shared_mem,
            label='crystalball-{0:s}-{1:s}:: Run Crystalball'.format(mspref,field))
Example #3
def image_calibrator(recipe, label="prelim"):
    imfields = [FDB[a] for a in ALTCAL] + \
               ([FDB[t] for t in GCALIBRATOR] if (DO_USE_GAINCALIBRATOR and
                                                  DO_USE_GAINCALIBRATOR_DELAY) else [])
    steps = []
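    # Two-pass imaging per calibrator field: a first blind WSClean run, a clean
    # mask derived from its MFS image, then a second run constrained by that mask.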
    for f in imfields:
        imopts = {
            "msname": ZEROGEN_DATA,
            "join-channels": True,
            "channels-out": 9,
            "size": 4096,
            "scale": "1.6asec",
            "mgain": 0.8,
            "gain": 0.1,
            "niter": 3000,
            "name": "calfield-{}-{}".format(f, label),
            "field": f,
            "fits-mask": sdm.dismissable(None),
            ###"save-source-list": True,
            "fit-spectral-pol": 3,
        }
        maskname = "MASK-{}-{}.fits".format(f, label)

        recipe.add("cab/wsclean",
                   "image_{}_field{}".format(label, f),
                   imopts,
                   input=INPUT,
                   output=OUTPUT,
                   label="image_calfield_{}_{}".format(f, label))

        recipe.add(
            "cab/cleanmask",
            "mask_{}_{}".format(label, f), {
                'image': "calfield-{}-{}-MFS-image.fits:output".format(
                    f, label),
                'output': maskname,
                'sigma': 35,
                'boxes': 9,
                'iters': 20,
                'overlap': 0.3,
                'no-negative': True,
                'tolerance': 0.75,
            },
            input=INPUT,
            output=OUTPUT,
            label='mask_{}_{}'.format(label, f))
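        # Second round: same imaging options, but cleaning within the derived
        # mask and down to a 5-sigma threshold with a local RMS estimate.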
        imopts2 = dict(imopts)

        imopts2["fits-mask"] = maskname + ":output"
        imopts2["local-rms"] = True
        imopts2["auto-threshold"] = 5

        recipe.add("cab/wsclean",
                   "image_{}_field{}_rnd2".format(label, f),
                   imopts2,
                   input=INPUT,
                   output=OUTPUT,
                   label="image_calfield_{}_{}_rnd2".format(f, label))

        steps += [
            "image_calfield_{}_{}".format(f, label),
            'mask_{}_{}'.format(label, f),
            "image_calfield_{}_{}_rnd2".format(f, label)
        ]
    return steps
Example #4
def rfiflag_data(
        do_flag_targets=False,
        steplabel="flagpass1",
        exec_strategy="mk_rfi_flagging_calibrator_fields_firstpass.yaml",
        on_corr_residuals=False,
        dc="DATA"):
    recipe.add(
        'cab/tricolour',
        steplabel, {
            "ms":
            ZEROGEN_DATA,
            "data-column":
            dc,
            "window-backend":
            'numpy',
            "field-names": [FDB[BPCALIBRATOR]],
            "flagging-strategy":
            "total_power" if not do_flag_targets else "polarisation",
            "config":
            exec_strategy,
            "subtract-model-column":
            sdm.dismissable("MODEL_DATA" if on_corr_residuals else None),
            "dilate-masks":
            sdm.dismissable(None),
            "ignore-flags":
            sdm.dismissable(None),
            "scan-numbers":
            sdm.dismissable(None),
        },
        input=INPUT,
        output=OUTPUT,
        label=steplabel)
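    # Second Tricolour pass: the targets when do_flag_targets is set, otherwise
    # the gain and alternate calibrators.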
    recipe.add(
        'cab/tricolour',
        steplabel + "_gc", {
            "ms":
            ZEROGEN_DATA,
            "data-column":
            dc,
            "window-backend":
            'numpy',
            "field-names": [FDB[t] for t in TARGET]
            if do_flag_targets else [FDB[t] for t in GCALIBRATOR + ALTCAL],
            "flagging-strategy":
            "total_power" if not do_flag_targets else "polarisation",
            "subtract-model-column":
            sdm.dismissable("MODEL_DATA" if on_corr_residuals else None),
            "config":
            exec_strategy,
            "dilate-masks":
            sdm.dismissable(None),
            "ignore-flags":
            sdm.dismissable(None),
            "scan-numbers":
            sdm.dismissable(None),
        },
        input=INPUT,
        output=OUTPUT,
        label=steplabel + ".gc" if not do_flag_targets else steplabel +
        ".targets")

    recipe.add("cab/casa_flagdata",
               "flag_summary_{}".format(steplabel), {
                   "vis": ZEROGEN_DATA,
                   "mode": "summary"
               },
               input=INPUT,
               output=OUTPUT,
               label="flagging_summary_{}".format(steplabel))

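    # Return the labels of the queued steps so the caller can schedule them.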
    return (([steplabel, steplabel + ".gc"] if len(ALTCAL) > 0 or DO_USE_GAINCALIBRATOR else [steplabel])
            if not do_flag_targets else [steplabel + ".targets"]) + \
    [
      "flagging_summary_{}".format(steplabel)
    ]
Example #5
def worker(pipeline, recipe, config):
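    """Flagging worker: for each MS, manage flag versions and queue the enabled
    flagging tasks (unflag, autopowerspec, autocorr, quack, elevation, shadow,
    spw, time, scan, antennas, mask, manual, rfi), an optional RFInder
    inspection, and a flagging summary."""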
    label = config['label_in']
    wname = pipeline.CURRENT_WORKER
    flags_before_worker = '{0:s}_{1:s}_before'.format(pipeline.prefix, wname)
    flags_after_worker = '{0:s}_{1:s}_after'.format(pipeline.prefix, wname)

    nobs = pipeline.nobs
    msiter=0
    for i in range(nobs):
        prefix_msbase = pipeline.prefix_msbases[i]
        mslist  = pipeline.get_mslist(i, label, target=(config['field'] == "target"))
        target_ls = pipeline.target[i] if config['field'] == "target" else []

        for j, msname in enumerate(mslist):
            msdict = pipeline.get_msinfo(msname)
            prefix = os.path.splitext(msname)[0]

            if not os.path.exists(os.path.join(pipeline.msdir, msname)):
                raise IOError("MS file {0:s} does not exist. Please check that is where it should be.".format(msname))

            # Write/rewind flag versions
            available_flagversions = manflags.get_flags(pipeline, msname)
            if config['rewind_flags']['enable']:
                if config['rewind_flags']['mode'] == 'reset_worker':
                    version = flags_before_worker
                    stop_if_missing = False
                elif config['rewind_flags']['mode'] == 'rewind_to_version':
                    version = config['rewind_flags']['version']
                    if version == 'auto':
                        version = flags_before_worker
                    stop_if_missing = True
                if version in available_flagversions:
                    if flags_before_worker in available_flagversions and available_flagversions.index(flags_before_worker) < available_flagversions.index(version) and not config['overwrite_flagvers']:
                        manflags.conflict('rewind_too_little', pipeline, wname, msname, config, flags_before_worker, flags_after_worker)
                    substep = 'version-{0:s}-ms{1:d}'.format(version, msiter)
                    manflags.restore_cflags(pipeline, recipe, version, msname, cab_name=substep)
                    if version != available_flagversions[-1]:
                        substep = 'delete-flag_versions-after-{0:s}-ms{1:d}'.format(version, msiter)
                        manflags.delete_cflags(pipeline, recipe,
                            available_flagversions[available_flagversions.index(version)+1],
                            msname, cab_name=substep)
                    if version != flags_before_worker:
                        substep = 'save-{0:s}-ms{1:d}'.format(flags_before_worker, msiter)
                        manflags.add_cflags(pipeline, recipe, flags_before_worker,
                            msname, cab_name=substep, overwrite=config['overwrite_flagvers'])
                elif stop_if_missing:
                    manflags.conflict('rewind_to_non_existing', pipeline, wname, msname, config, flags_before_worker, flags_after_worker)
                else:
                    substep = 'save-{0:s}-ms{1:d}'.format(flags_before_worker, msiter)
                    manflags.add_cflags(pipeline, recipe, flags_before_worker,
                        msname, cab_name=substep, overwrite=config['overwrite_flagvers'])
            else:
                if flags_before_worker in available_flagversions and not config['overwrite_flagvers']:
                    manflags.conflict('would_overwrite_bw', pipeline, wname, msname, config, flags_before_worker, flags_after_worker)
                else:
                    substep = 'save-{0:s}-ms{1:d}'.format(flags_before_worker, msiter)
                    manflags.add_cflags(pipeline, recipe, flags_before_worker,
                        msname, cab_name=substep, overwrite=config['overwrite_flagvers'])

            # Define fields and field_ids to be used to only flag the fields selected with
            # flagging:field (either 'target' or 'calibrators') and with
            # flagging:calfields (for further selection among the calibrators)
            if config['field'] == 'target':
                fields = [target_ls[j]]
            else:
                fields = []
                fld_string = config['calfields']
                if fld_string == "auto":
                    iter_fields = "gcal bpcal xcal fcal".split()
                else:
                    iter_fields = fld_string.split(",")
                for item in iter_fields:
                    if hasattr(pipeline, item):
                        tfld = getattr(pipeline, item)[i]
                    else:
                        raise ValueError("Field given is invalid. Options are 'xcal bpcal gcal fcal'.")
                    if tfld:
                        fields += tfld
                fields = list(set(fields))
            field_ids = utils.get_field_id(msdict, fields)
            fields = ",".join(fields)

            if pipeline.enable_task(config, 'unflag'):
                step = '{0:s}-unflag-ms{1:d}'.format(wname, msiter)
                recipe.add('cab/casa_flagdata', step,
                           {
                               "vis": msname,
                               "mode": 'unflag',
                               "field": fields,
                               "flagbackup": False,
                           },
                           input=pipeline.input,
                           output=pipeline.output,
                           label='{0:s}:: Unflag ms={1:s}'.format(step, msname))

            # flag antennas automatically based on drifts in the scan average of the
            # auto correlation spectra per field. This doesn't strictly require any calibration. It is also
            # not field structure dependent, since it is just based on the DC of the field
            # Compares scan to median power of scans per field per channel
            # Also compares antenna to median of the array per scan per field per channel
            # This should catch any antenna with severe temperature problems
            if pipeline.enable_task(config, 'flag_autopowerspec'):
                step = '{0:s}-autopowerspec-ms{1:d}'.format(wname, msiter)
                recipe.add("cab/politsiyakat_autocorr_amp", step,
                           {
                               "msname": msname,
                               "field": ",".join([str(id) for id in field_ids]),
                               "cal_field": ",".join([str(id) for id in field_ids]),
                               "scan_to_scan_threshold": config["flag_autopowerspec"]["scan_thr"],
                               "antenna_to_group_threshold": config["flag_autopowerspec"]["ant_group_thr"],
                               "dpi": 300,
                               "plot_size": 6,
                               "nproc_threads": config['flag_autopowerspec']['threads'],
                               "data_column": config['flag_autopowerspec']['col']
                           },
                           input=pipeline.input, output=pipeline.output,
                           label="{0:s}:: Flag out antennas with drifts in autocorrelation powerspectra ms={1:s}".format(step,msname))

            if pipeline.enable_task(config, 'flag_autocorr'):
                step = '{0:s}-autocorr-ms{1:d}'.format(wname, msiter)
                recipe.add('cab/casa_flagdata', step,
                           {
                               "vis": msname,
                               "mode": 'manual',
                               "autocorr": True,
                               "field": fields,
                               "flagbackup": False,
                           },
                           input=pipeline.input,
                           output=pipeline.output,
                           label='{0:s}:: Flag auto-correlations ms={1:s}'.format(step, msname))

            if pipeline.enable_task(config, 'flag_quack'):
                step = '{0:s}-quack-ms{1:d}'.format(wname, msiter)
                recipe.add('cab/casa_flagdata', step,
                           {
                               "vis": msname,
                               "mode": 'quack',
                               "quackinterval": config['flag_quack']['interval'],
                               "quackmode": config['flag_quack']['mode'],
                               "field": fields,
                               "flagbackup": False,
                           },
                           input=pipeline.input,
                           output=pipeline.output,
                           label='{0:s}:: Quack flagging ms={1:s}'.format(step, msname))

            if pipeline.enable_task(config, 'flag_elevation'):
                step = '{0:s}-elevation-ms{1:d}'.format(wname, msiter)
                recipe.add('cab/casa_flagdata', step,
                           {
                               "vis": msname,
                               "mode": 'elevation',
                               "lowerlimit": config['flag_elevation']['low'],
                               "upperlimit": config['flag_elevation']['high'],
                               "field": fields,
                               "flagbackup": False,
                           },
                           input=pipeline.input,
                           output=pipeline.output,
                           label='{0:s}:: Flag elevation ms={1:s}'.format(step, msname))

            if pipeline.enable_task(config, 'flag_shadow'):
                if config['flag_shadow']['full_mk64']:
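                    # Build an 'addantenna' file listing the MeerKAT antennas absent from
                    # this subarray, so that shadowing by those dishes is also flagged.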
                    addantennafile = '{0:s}/mk64.txt'.format(pipeline.input)
                    subarray = msdict['ANT']['NAME']
                    idleants = open(addantennafile, 'r').readlines()
                    for aa in subarray:
                        for kk in range(len(idleants)):
                            if aa in idleants[kk]:
                                del(idleants[kk:kk+3])
                                break
                    addantennafile = 'idleants.txt'
                    with open('{0:s}/{1:s}'.format(pipeline.input, addantennafile), 'w') as ia:
                        for aa in idleants:
                            ia.write(aa)
                    addantennafile += ':input'
                else:
                    addantennafile = None
                step = '{0:s}-shadow-ms{1:d}'.format(wname, msiter)
                recipe.add('cab/casa_flagdata', step,
                           {
                               "vis": msname,
                               "mode": 'shadow',
                               "tolerance": config['flag_shadow']['tol'],
                               "addantenna": addantennafile,
                               "flagbackup": False,
                               "field": fields,
                           },
                           input=pipeline.input,
                           output=pipeline.output,
                           label='{0:s}:: Flag shadowed antennas ms={1:s}'.format(step, msname))

            if pipeline.enable_task(config, 'flag_spw'):
                step = '{0:s}-spw-ms{1:d}'.format(wname, msiter)
                flagspwselection = config['flag_spw']['chans']
                firsts = [min(ff) for ff in msdict['SPW']['CHAN_FREQ']]
                lasts = [max(ff) for ff in msdict['SPW']['CHAN_FREQ']]
                nrs = msdict['SPW']['NUM_CHAN']
                nspws = len(nrs)
                found_valid_data = 0
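                # If requested, parse the spw/channel selection (channel indices or
                # frequencies with units) and check that it overlaps the frequency range
                # of this MS; otherwise the flagdata step below is skipped.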
                if config['flag_spw']['ensure_valid']:
                    scalefactor, scalefactor_dict = 1, {
                        'GHz': 1e+9, 'MHz': 1e+6, 'kHz': 1e+3 }
                    for ff in flagspwselection.split(','):
                        found_units = False
                        for dd in scalefactor_dict:
                            if dd.lower() in ff.lower():
                                ff, scalefactor = ff.lower().replace(
                                    dd.lower(), ''), scalefactor_dict[dd]
                                found_units = True
                        if 'hz' in ff.lower():
                          ff = ff.lower().replace('hz', '')
                          found_units = True
                        ff = ff.split(':')
                        if len(ff) > 1:
                            spws = ff[0]
                        else:
                            spws = '*'
                        edges = [
                            ii*scalefactor for ii in map(float, ff[-1].split('~'))]
                        if '*' in spws:
                            spws = list(range(nspws))
                        elif '~' in spws:
                            spws = list(
                                range(int(spws.split('~')[0]), int(spws.split('~')[1])+1))
                        else:
                            spws = [int(spws), ]
                        edges = [edges for uu in range(len(spws))]
                        for ss in spws:
                            if found_units and ss < nspws and min(edges[ss][1], lasts[ss]) - max(edges[ss][0], firsts[ss]) > 0:
                                found_valid_data = 1
                            elif not found_units and ss < nspws and edges[ss][0]>=0 and edges[ss][1] < nrs[ss]:
                                found_valid_data = 1
                    if not found_valid_data:
                        caracal.log.warn(
                            'The following channel selection has been made in the flag_spw module of the flagging worker: "{1:s}". This selection would result in no valid data in {0:s}. This would lead to the FATAL error "No valid SPW & Chan combination found" in CASA/FLAGDATA. To avoid this error the corresponding cab {2:s} will not be added to the Stimela recipe of the flagging worker.'.format(msname, flagspwselection, step))

                if found_valid_data or not config['flag_spw']['ensure_valid']:
                    recipe.add('cab/casa_flagdata', step,
                               {
                                   "vis": msname,
                                   "mode": 'manual',
                                   "spw": flagspwselection,
                                   "field": fields,
                                   "flagbackup": False,
                               },
                               input=pipeline.input,
                               output=pipeline.output,
                               label='{0:s}::Flag out channels ms={1:s}'.format(step, msname))

            if pipeline.enable_task(config, 'flag_time'):
                step = '{0:s}-time-ms{1:d}'.format(wname, msiter)
                found_valid_data = 0
                if config['flag_time']['ensure_valid']:
                    if pipeline.startdate[i]:
                        start_flagrange,end_flagrange=config['flag_time']['timerange'].split('~')
                        flag_start = float(''.join(re.split('/|:', start_flagrange)))
                        flag_end  = float(''.join(re.split('/|:', end_flagrange)))
                        if (flag_start <= pipeline.enddate[i]) and (pipeline.startdate[i] <= flag_end):
                            found_valid_data = 1
                    else:
                        raise ValueError("You wanted to ensure a valid time range but we could not find a start and end time")
                    if not found_valid_data:
                        caracal.log.warn(
                            'The following time selection has been made in the flag_time module of the flagging worker: "{1:s}". This selection would result in no valid data in {0:s}. This would lead to the FATAL error " The selected table has zero rows" in CASA/FLAGDATA. To avoid this error the corresponding cab {2:s} will not be added to the Stimela recipe of the flagging worker.'.format(msname, config['flag_time']['timerange'], step))
                if found_valid_data or not config['flag_time']['ensure_valid']:
                    recipe.add('cab/casa_flagdata', step,
                               {
                                    "vis": msname,
                                    "mode": 'manual',
                                    "timerange": config['flag_time']['timerange'],
                                    "flagbackup": False,
                                    "field": fields,
                                },
                                input=pipeline.input,
                                output=pipeline.output,
                                 label='{0:s}:: Flag out time range ms={1:s}'.format(step, msname))

            if pipeline.enable_task(config, 'flag_scan'):
                step = '{0:s}-scan-ms{1:d}'.format(wname, msiter)
                recipe.add('cab/casa_flagdata', step,
                           {
                               "vis": msname,
                               "mode": 'manual',
                               "scan": config['flag_scan']['scans'],
                               "flagbackup": False,
                               "field": fields,
                           },
                           input=pipeline.input,
                           output=pipeline.output,
                           label='{0:s}:: Flag out scans ms={1:s}'.format(step, msname))

            if pipeline.enable_task(config, 'flag_antennas'):
                # step = '{0:s}-antennas-ms{1:d}'.format(wname, msiter)
                antennas = [config['flag_antennas']['antennas']]
                times = [config['flag_antennas']['timerange']]
                found_valid_data = [0]
                ensure = config['flag_antennas']['ensure_valid']
                if times[0] == '':
                    ensure = False
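                # When validity checking is on, pair each antenna selection with a
                # timerange (repeating the last timerange if fewer were given) and keep
                # only those that overlap the observation's start/end times.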
                if ensure:
                    if pipeline.startdate[i]:
                        antennas = config['flag_antennas']['antennas'].split(',')
                        times = config['flag_antennas']['timerange'].split(',')
                        while len(times) < len(antennas):
                            times.append(times[-1])
                        while len(found_valid_data) < len(antennas):
                            found_valid_data.append(0)
                        for nn,time_range in enumerate(times):
                            start_flagrange,end_flagrange=time_range.split('~')
                            flag_start = float(''.join(re.split('/|:', start_flagrange)))
                            flag_end  = float(''.join(re.split('/|:', end_flagrange)))
                            if (flag_start <= pipeline.enddate[i]) and (pipeline.startdate[i] <= flag_end):
                                found_valid_data[nn] = 1
                    else:
                        raise ValueError("You wanted to ensure a valid time range but we could not find a start and end time")
                for nn,antenna in enumerate(antennas):
                    antstep = 'ant-{0:s}-ms{1:d}-antsel{2:d}'.format(wname, i, nn)
                    if found_valid_data[nn] or not ensure:
                        recipe.add('cab/casa_flagdata', antstep,
                                    {
                                        "vis": msname,
                                        "mode": 'manual',
                                        "antenna": antenna,
                                        "timerange": times[nn],
                                        "field": fields,
                                        "flagbackup": False,
                                    },
                                    input=pipeline.input,
                                    output=pipeline.output,
                                    label='{0:s}:: Flagging bad antenna {2:s} ms={1:s}'.format(antstep, msname,antenna))
                    elif ensure and not found_valid_data[nn]:
                        caracal.log.warn(
                            'The following time selection has been made in the flag_antennas module of the flagging worker: "{1:s}". This selection would result in no valid data in {0:s}. This would lead to the FATAL error " The selected table has zero rows" in CASA/FLAGDATA. To avoid this error the corresponding cab {2:s} will not be added to the Stimela recipe of the flagging worker.'.format(msname, times[nn], antstep))

            if pipeline.enable_task(config, 'flag_mask'):
                step = '{0:s}-mask-ms{1:d}'.format(wname, msiter)
                recipe.add('cab/rfimasker', step,
                           {
                               "msname": msname,
                               "mask": config['flag_mask']['mask'],
                               "accumulation_mode": 'or',
                               "uvrange": sdm.dismissable(config['flag_mask']['uvrange'] or None),
                               "memory": 4096,
                           },
                           input=pipeline.input,
                           output=pipeline.output,
                           label='{0:s}:: Apply flag mask ms={1:s}'.format(step, msname))

            if pipeline.enable_task(config, 'flag_manual'):
                rules = config['flag_manual']['rules']
                for irule, rule in enumerate(rules):
                    # a manual flagging rule has a pattern to match the MS name, followed by key:value pairs
                    rule_elements = rule.split()
                    if len(rule_elements) < 2 or not all(':' in el for el in rule_elements[1:]):
                        raise ValueError(f"invalid flag_manual rule '{rule}'")
                    pattern = rule_elements[0]
                    keywords = {tuple(elem.split(":", 1)) for elem in rule_elements[1:]}
                    # end of parsing block. Replace this with file if you like
                    if not fnmatch.fnmatch(msname, pattern):
                        continue
                    caracal.log.info(f"adding manual flagging rule for {pattern}")
                    step = f'{wname}-manual-ms{msiter}-{irule}'
                    args = {
                                   "vis": msname,
                                   "mode": 'manual',
                                   "flagbackup": False,
                                   "field": fields,
                           }
                    args.update(keywords)
                    recipe.add('cab/casa_flagdata', step, args,
                               input=pipeline.input,
                               output=pipeline.output,
                               label=f'{step}::Flag ms={msname} using {rule}')

            if pipeline.enable_task(config, 'flag_rfi'):
                step = '{0:s}-rfi-ms{1:d}'.format(wname, msiter)
                if config['flag_rfi']["flagger"] == "aoflagger":
                    if config['flag_rfi']['aoflagger']['ensure_valid']:
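                        # Parse the AOFlagger strategy and make sure every correlation or
                        # Stokes product it flags on can be formed from the correlations
                        # present in this MS.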
                        ms_corr = msdict['CORR']['CORR_TYPE']
                        flag_corr=[]
                        with open('{0:s}/{1:s}'.format(pipeline.input,config['flag_rfi']['aoflagger']['strategy'])) as stdr:
                            for ss in stdr.readlines():
                                for pp in 'xx,xy,yx,yy,stokes-i,stokes-q,stokes-u,stokes-v'.split(','):
                                    if '<on-{0:s}>1</on-{0:s}>'.format(pp) in ss: flag_corr.append(pp)
                        if ('stokes-u' in flag_corr and (('XY' not in ms_corr and 'RL' not in ms_corr) or ('YX' not in ms_corr and 'LR' not in ms_corr))) or\
                             ('stokes-v' in flag_corr and (('XY' not in ms_corr and 'RR' not in ms_corr) or ('YX' not in ms_corr and 'LL' not in ms_corr))) or\
                             ('stokes-i' in flag_corr and (('XX' not in ms_corr and 'RR' not in ms_corr) or ('YY' not in ms_corr and 'LL' not in ms_corr))) or\
                             ('stokes-q' in flag_corr and (('XX' not in ms_corr and 'RL' not in ms_corr) or ('YY' not in ms_corr and 'LR' not in ms_corr))) or\
                             ('xy' in flag_corr and ('XY' not in ms_corr and 'RL' not in ms_corr )) or\
                             ('yx' in flag_corr and ('YX' not in ms_corr and 'LR' not in ms_corr)) or\
                             ('xx' in flag_corr and ('XX' not in ms_corr and 'RR' not in ms_corr)) or\
                             ('yy' in flag_corr and ('YY' not in ms_corr and 'LL' not in ms_corr)):
                            raise ValueError("The selected flagging strategy {0:s}/{1:s} will attempt to flag on {2:} but this is"\
                                             " not compatible with the {3:} correlations available in {4:s}. To proceed you can edit the flagging"\
                                             " strategy or, if you know what you are doing, disable aoflagger: ensure_valid.".format(
                                             pipeline.input,config['flag_rfi']['aoflagger']['strategy'],flag_corr,ms_corr,msname))

                    recipe.add('cab/autoflagger', step,
                               {
                                   "msname": msname,
                                   "column": config['flag_rfi']['col'],
                                   "fields": ",".join(map(str, field_ids)),
                                   "strategy": config['flag_rfi']['aoflagger']['strategy'],
                                   "indirect-read": True if config['flag_rfi']['aoflagger']['readmode'] == 'indirect' else False,
                                   "memory-read": True if config['flag_rfi']['aoflagger']['readmode'] == 'memory' else False,
                                   "auto-read-mode": True if config['flag_rfi']['aoflagger']['readmode'] == 'auto' else False,
                               },
                               input=pipeline.input,
                               output=pipeline.output,
                               label='{0:s}:: AOFlagger auto-flagging flagging pass ms={1:s} fields={2:s}'.format(step, msname, fields))

                elif config['flag_rfi']["flagger"] == "tricolour":
                    tricolour_strat=config['flag_rfi']['tricolour']['strategy']
                    if config['flag_rfi']['tricolour']['mode'] == 'auto':
                        bandwidth = msdict['SPW']['TOTAL_BANDWIDTH'][0]/10.0**6
                        caracal.log.info("Total Bandwidth = {0:} MHz".format(bandwidth))
                        if bandwidth <= 20.0:
                            caracal.log.info("Narrowband data detected, selecting appropriate flagging strategy")
                            tricolour_strat = config['flag_rfi']['tricolour']['strat_narrow']

                    caracal.log.info("Flagging strategy in use: {0:}".format(tricolour_strat))
                    recipe.add('cab/tricolour', step,
                               {
                                   "ms": msname,
                                   "data-column": config['flag_rfi']['col'],
                                   "window-backend": config['flag_rfi']['tricolour']['backend'],
                                   "field-names": fields,
                                   "flagging-strategy": 'polarisation',
                                   "config" : tricolour_strat,
                               },
                               input=pipeline.input,
                               output=pipeline.output,
                               label='{0:s}:: Tricolour auto-flagging flagging pass ms={1:s} fields={2:s}'.format(step, msname, fields))

                elif config['flag_rfi']["flagger"] == "tfcrop":
                    col = config['flag_rfi']['col'].split("_DATA")[0].lower()
                    recipe.add('cab/casa_flagdata', step,
                               {
                                   "vis" : msname,
                                   "datacolumn" : col,
                                   "mode" : "tfcrop",
                                   "field" : fields,
                                   "usewindowstats" : config["flag_rfi"]["tfcrop"]["usewindowstats"],
                                   "combinescans" : config["flag_rfi"]["tfcrop"]["combinescans"],
                                   "flagdimension" : config["flag_rfi"]["tfcrop"]["flagdimension"],
                                   "flagbackup" : False,
                                   "timecutoff" : config["flag_rfi"]["tfcrop"]["timecutoff"],
                                   "freqcutoff" : config["flag_rfi"]["tfcrop"]["freqcutoff"],
                                   "correlation" : config["flag_rfi"]["tfcrop"]["correlation"],
                               },
                               input=pipeline.input,
                               output=pipeline.output,
                               label='{0:s}:: Tfcrop auto-flagging flagging pass ms={1:s} fields={2:s}'.format(step, msname, fields))
                else:
                    raise RuntimeError(
                        "Flagger, {0:s} is not available. Options are 'aoflagger, tricolour, tfcrop'.")

            if pipeline.enable_task(config, 'inspect'):
                step = '{0:s}-inspect-ms{1:d}'.format(wname,msiter)
                if config['field'] == 'target':
                    field = '0'
                else:
                    field = ",".join(map(str, utils.get_field_id(msdict, manfields.get_field(
                        pipeline, i, config['inspect']['field']).split(","))))
                for f in field.split(','):
                    outlabel = '_{0:d}'.format(i) if len(field.split(',')) == 1 else '_{0:d}_{1:s}'.format(i,f)
                    recipe.add('cab/rfinder', step,
                               {
                                   "msname": msname,
                                   "field": int(f),
                                   "plot_noise": "noise",
                                   "RFInder_mode": "use_flags",
                                   "outlabel": outlabel,  # The output will be rfi_<pol>_<outlabel>
                                   "polarization": config['inspect']['polarization'],
                                   "spw_width": config['inspect']['spw_width'],
                                   "time_step": config['inspect']['time_step'],
                                   "time_enable": config['inspect']['time_enable'],
                                   "spw_enable": config['inspect']['spw_enable'],
                                   "1d_gif": config['inspect']['time_enable'],
                                   "2d_gif": config['inspect']['time_enable'],
                                   "altaz_gif": config['inspect']['spw_enable'],
                                   "movies_in_report": config['inspect']['time_enable'] or config['spw_enable']
                               },
                               input=pipeline.input,
                               output=pipeline.output,
                               label='{0:s}:: Investigate presence of rfi in ms={1:s}'.format(step, msname))

            if pipeline.enable_task(config, 'summary'):
                __label = config['label_in']
                step = '{0:s}-summary-ms{1:d}'.format(wname,msiter)
                recipe.add('cab/casa_flagdata', step,
                           {
                               "vis": msname,
                               "mode": 'summary',
                               "field": fields,
                               "flagbackup": False,
                           },
                           input=pipeline.input,
                           output=pipeline.output,
                           label='{0:s}:: Flagging summary ms={1:s}'.format(step, msname))
                recipe.run()
                # Empty the job queue after execution
                recipe.jobs = []
                summary_log = glob.glob("{0:s}/log-flag-{1:s}-*.txt".format(pipeline.logs,
                    step))[0]
                json_summary = manflags.get_json_flag_summary(pipeline, summary_log,
                                                              prefix_msbase, wname)
                manflags.flag_summary_plots(pipeline, json_summary, prefix_msbase, wname, i)


            substep = 'save-{0:s}-ms{1:d}'.format(flags_after_worker, msiter)
            manflags.add_cflags(pipeline, recipe, flags_after_worker, msname, cab_name=substep, overwrite=config['overwrite_flagvers'])
            msiter+=1
Example #6
def image(
        incol="DATA",
        label='initial',
        tmpimlabel="",
        nfacets=19,
        masksig=25,
        briggs=args.cal_briggs,
        do_mask=True,
        restore=None,
        do_taper=False,
        taper_inner_cut=100,  #meters
        taper_outer_cut=1500,  #meters
        taper_gamma=200,
        rime_forward=None,
        model_data="MODEL_DATA",
        weight_col="WEIGHT"):
    steps = []
    for ti, t in enumerate(TARGET):
        image_opts = {
            "Data-MS":
            DATASET,
            "Data-ColName":
            incol,
            "Data-ChunkHours":
            args.imaging_data_chunk_hours,
            "Selection-Field":
            int(FDB[t]),
            "Output-Mode":
            "Clean" if not restore else "Predict",
            "Output-Name":
            t + tmpimlabel,
            "Output-Images":
            "dDmMcCrRiInNSoekz",
            "Output-Cubes":
            "all",
            "Image-NPix":
            args.npix,
            "Image-Cell":
            args.cellsize,
            "Facets-NFacets":
            nfacets,
            "Weight-ColName":
            weight_col,
            "Weight-Mode":
            "Briggs",
            "Weight-Robust":
            briggs,
            #"Beam-Model": "FITS",
            #"Beam-FITSFile": "'MeerKAT_VBeam_10MHz_53Chans_$(corr)_$(reim).fits':output",
            "Freq-NBand":
            args.mfs_bands,
            "Freq-NDegridBand":
            args.mfs_predictbands,
            "Deconv-RMSFactor":
            0,
            "Deconv-PeakFactor":
            0.25,
            "Deconv-Mode":
            "Hogbom",
            "Deconv-MaxMinorIter":
            args.ncc,
            "Hogbom-PolyFitOrder":
            6,
            "Deconv-Gain":
            0.1,
            "Deconv-FluxThreshold":
            1.0e-6,
            "Deconv-AllowNegative":
            True,
            "Log-Boring":
            True,
            "Log-Memory":
            True,
            "RIME-ForwardMode":
            sdm.dismissable(rime_forward),
            "Predict-ColName":
            model_data,
            "Predict-FromImage":
            sdm.dismissable(t + restore +
                            ":output" if restore is not None else restore),
        }
        if do_taper:

            def taper_weigh(ms):
                from pyrap.tables import table as tbl
                with tbl(ms, readonly=False) as t:
                    uvw = t.getcol("UVW")
                    max_uv = np.sqrt(np.max(uvw[:, 0]**2 + uvw[:, 1]**2))
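                    # Annular uv taper: a combination of logistic (sigmoid) roll-offs
                    # around the inner and outer uv-distance cuts; taper_gamma sets the
                    # steepness relative to the longest baseline.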
                    taper = lambda u, v, a, b, gamma: (1.0 / (1 + np.exp(
                        (np.sqrt(u**2 + v**2) - b) /
                        (2.0 * max_uv / gamma))) + 1.0 / (1 + np.exp(
                            (-np.sqrt(u**2 + v**2) + a) /
                            (2.0 * max_uv / gamma))) + 1.0 / (1 + np.exp(
                                (-np.sqrt(u**2 + v**2) - b) /
                                (2.0 * max_uv / gamma))) + 1.0 / (1 + np.exp(
                                    (np.sqrt(u**2 + v**2) + a) /
                                    (2.0 * max_uv / gamma)))) - 2.0

                    weight = t.getcol("WEIGHT")
                    weight_new = weight.copy()
                    tp_weight = taper(
                        uvw[:, 0],
                        uvw[:, 1],
                        taper_inner_cut,  # inner cut
                        taper_outer_cut,  # outer cut
                        taper_gamma)
                    weight_new *= tp_weight[:, None]
                    import matplotlib
                    matplotlib.use('Agg')
                    from matplotlib import pyplot as plt
                    import os
                    from scipy.interpolate import griddata
                    plt.figure()
                    x = np.linspace(np.min(uvw), np.max(uvw), 1024)
                    xx, xy = np.meshgrid(x, x, sparse=False)
                    ###grid = griddata(uvw[:, 0:2], tp_weight, (xx, xy), method="linear")
                    grid = taper(xx, xy, taper_inner_cut, taper_outer_cut,
                                 taper_gamma)
                    plt.imshow(grid,
                               cmap="magma",
                               extent=[
                                   np.min(xx),
                                   np.max(xx),
                                   np.max(xx),
                                   np.min(xx)
                               ])
                    plt.xlabel("u (m)")
                    plt.ylabel("v (m)")
                    plt.savefig(
                        os.path.join(OUTPUT, "uvtaper_{0:d}.png".format(ti)))

                    t.putcol("WEIGHT", weight_new)

            recipe.add(taper_weigh,
                       "taper_target_%d" % ti,
                       {"ms": "%s/%s.%s.1GC.ms" % (MSDIR, PREFIX, t)},
                       input=INPUT,
                       output=OUTPUT,
                       label="taper %s %s" % (label, t))
            steps.append("taper %s %s" % (label, t))

        recipe.add("cab/ddfacet",
                   "image_target_%d" % ti,
                   image_opts,
                   input=INPUT,
                   output=OUTPUT,
                   label="image %s %s" % (label, t),
                   shared_memory="500g")
        steps.append("image %s %s" % (label, t))

        if not restore:
            if do_mask:
                recipe.add("cab/cleanmask",
                           "mask_target_%d" % ti, {
                               'image': "%s.app.restored.fits:output" % t,
                               'output': "%s.mask.fits" % t,
                               'sigma': masksig,
                               'boxes': 9,
                               'iters': 20,
                               'overlap': 0.3,
                               'no-negative': True,
                               'tolerance': 0.75,
                           },
                           input=INPUT,
                           output=OUTPUT,
                           label='mask %s %s' % (label, t))
                steps.append('mask %s %s' % (label, t))

                maskimage_opts = copy.deepcopy(image_opts)
                maskimage_opts["Predict-ColName"] = "MODEL_DATA"
                maskimage_opts["Mask-External"] = "%s.mask.fits:output" % t
                maskimage_opts["Output-Name"] = t + "_" + label

                recipe.add("cab/ddfacet",
                           "image_target_%d" % ti,
                           maskimage_opts,
                           input=INPUT,
                           output=OUTPUT,
                           label="mask image %s %s" % (label, t),
                           shared_memory="500g")
                steps.append("mask image %s %s" % (label, t))
    return steps
def worker(pipeline, recipe, config):
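    """Split/average worker: use CASA mstransform to split the selected
    calibrator or target fields out of each input MS, optionally applying
    on-the-fly calibration through a callib, then optionally change the phase
    centre and collect observation info."""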
    wname = pipeline.CURRENT_WORKER
    flags_before_worker = '{0:s}_{1:s}_before'.format(pipeline.prefix, wname)
    flags_after_worker = '{0:s}_{1:s}_after'.format(pipeline.prefix, wname)
    label_in = config['label_in']
    label_out = config['label_out']
    from_target = bool(label_in and config['field'] == 'target')
    field_to_split = get_fields_to_split(config)
    # are we splitting calibrators
    splitting_cals = field_to_split.intersection(_cal_fields)

    for i, (msbase, prefix_msbase) in enumerate(zip(pipeline.msbasenames, pipeline.prefix_msbases)):
        # if splitting from target, we have multiple MSs to iterate over
        from_mslist = pipeline.get_mslist(i, label_in, target=from_target)
        to_mslist  = pipeline.get_mslist(i, label_out, target=not splitting_cals)

        # if splitting cals, we'll split one (combined) target to one output MS
        if splitting_cals:
           calfields = set()
           for fd in field_to_split:
               for elem in getattr(pipeline, fd)[i]:
                   calfields.add(elem)
           target_ls = [','.join(calfields)]
        # else splitting target -- we'll split a list of targets to a list of output MSs
        else:
           target_ls = pipeline.target[i]
           # repeat the from-ms once per target, if not splitting from the target MS
           if not from_target:
               from_mslist = from_mslist * len(target_ls)

        #use existing calibration library if user gives one
        if pipeline.enable_task(config['split_field'], 'otfcal') and config['split_field']['otfcal']['callib']:
            callib = 'caltables/callibs/{}'.format(config['split_field']['otfcal']['callib'])

            if not os.path.exists(os.path.join(pipeline.output,callib)):
                raise IOError(
                    "Callib file {0:s} does not exist. Please check that it is where it should be.".format(callib))

            docallib = True

            if config['split_field']['col'] != 'corrected':
                caracal.log.info("Datacolumn was set to '{}'. by the user." \
                                   "Will be changed to 'corrected' for OTF calibration to work.".format(config['split_field']['col']))
            dcol = 'corrected'

        # write calibration library file for OTF cal
        elif pipeline.enable_task(config['split_field'], 'otfcal'):
            caltablelist, gainfieldlist, interplist = [], [], []
            calprefix = '{0:s}-{1:s}'.format(prefix_msbase,
                                             config['split_field']['otfcal']['label_cal'])
            callib = 'caltables/callibs/callib_{0:s}.txt'.format(calprefix)

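            # Collect the caltable names, field maps and interpolation modes recorded in
            # the JSON callib, then rewrite them below in CASA's text callib format.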
            with open(os.path.join('{}/callibs'.format(pipeline.caltables),
                                  'callib_{0:s}-{1:s}.json'.format(prefix_msbase,
                                  config['split_field']['otfcal']['label_cal']))) as f:
                callib_dict = json.load(f)

            for applyme in callib_dict: 
                caltablelist.append(callib_dict[applyme]['caltable'])
                gainfieldlist.append(callib_dict[applyme]['fldmap'])
                interplist.append(callib_dict[applyme]['interp'])

            with open(os.path.join(pipeline.output, callib), 'w') as stdw:
                for j in range(len(caltablelist)):
                    stdw.write('caltable="{0:s}/{1:s}/{2:s}"'.format(
                        stimela.recipe.CONT_IO["output"], 'caltables',  caltablelist[j]))
                    stdw.write(' calwt=False')
                    stdw.write(' tinterp=\''+str(interplist[j])+'\'')
                    stdw.write(' finterp=\'linear\'')
                    stdw.write(' fldmap=\'' + str(gainfieldlist[j])+'\'')
                    stdw.write(' spwmap=0\n')

            docallib = True
            if config['split_field']['col'] != 'corrected':
                caracal.log.info("Datacolumn was set to '{}'. by the user." \
                                   "Will be changed to 'corrected' for OTF calibration to work.".format(config['split_field']['col']))
            dcol = 'corrected'

        else:
            docallib = False
            dcol = config['split_field']['col']

        for target_iter, (target, from_ms, to_ms) in enumerate(zip(target_ls, from_mslist, to_mslist)):
            # Rewind flags
            available_flagversions = manflags.get_flags(pipeline, from_ms)
            if config['rewind_flags']['enable'] and label_in:
                version = config['rewind_flags']['version']
                if version in available_flagversions:
                    substep = 'rewind-{0:s}-ms{1:d}'.format(version, target_iter)
                    manflags.restore_cflags(pipeline, recipe, version, from_ms, cab_name=substep)
                    if available_flagversions[-1] != version:
                        substep = 'delete-flag_versions-after-{0:s}-ms{1:d}'.format(version, target_iter)
                        manflags.delete_cflags(pipeline, recipe,
                            available_flagversions[available_flagversions.index(version)+1],
                            from_ms, cab_name=substep)
                else:
                    manflags.conflict('rewind_to_non_existing', pipeline, wname, from_ms,
                        config, flags_before_worker, flags_after_worker)

            flagv = to_ms + '.flagversions'

            if pipeline.enable_task(config, 'split_field'):
                step = 'split_field-ms{0:d}-{1:d}'.format(i,target_iter)
                # If the output of this run of mstransform exists, delete it first
                if os.path.exists('{0:s}/{1:s}'.format(pipeline.msdir, to_ms)) or \
                        os.path.exists('{0:s}/{1:s}'.format(pipeline.msdir, flagv)):
                    os.system(
                        'rm -rf {0:s}/{1:s} {0:s}/{2:s}'.format(pipeline.msdir, to_ms, flagv))

                recipe.add('cab/casa_mstransform', step,
                           {
                               "vis": from_ms if label_in else from_ms + ":input",
                               "outputvis": to_ms,
                               "timeaverage": True if (config['split_field']['time_avg'] != '' and config['split_field']['time_avg'] != '0s') else False,
                               "timebin": config['split_field']['time_avg'],
                               "chanaverage": True if config['split_field']['chan_avg'] > 1 else False,
                               "chanbin": config['split_field']['chan_avg'],
                               "spw": config['split_field']['spw'],
                               "datacolumn": dcol,
                               "correlation": config['split_field']['correlation'],
                               "usewtspectrum": config['split_field']['create_specweights'],
                               "field": target,
                               "keepflags": True,
                               "docallib": docallib,
                               "callib": sdm.dismissable(callib+':output' if pipeline.enable_task(config['split_field'], 'otfcal') else None),
                           },
                           input=pipeline.input if label_in else pipeline.rawdatadir,
                           output=pipeline.output,
                           label='{0:s}:: Split and average data ms={1:s}'.format(step, from_ms))

                substep = 'save-{0:s}-ms{1:d}'.format(flags_after_worker, target_iter)
                manflags.add_cflags(pipeline, recipe, 'caracal_legacy', to_ms,
                    cab_name=substep, overwrite=False)

            obsinfo_msname = to_ms if pipeline.enable_task(config, 'split_field') else from_ms

            if pipeline.enable_task(config, 'changecentre'):
                if config['changecentre']['ra'] == '' or config['changecentre']['dec'] == '':
                    caracal.log.error(
                        'Wrong format for RA and/or Dec you want to change to. Check your settings of split_target:changecentre:ra and split_target:changecentre:dec')
                    caracal.log.error('Current settings for ra,dec are {0:s},{1:s}'.format(
                        config['changecentre']['ra'], config['changecentre']['dec']))
                    sys.exit(1)
                step = 'changecentre-ms{0:d}-{1:d}'.format(i,target_iter)
                recipe.add('cab/casa_fixvis', step,
                           {
                               "msname": to_ms,
                               "outputvis": to_ms,
                               "phasecenter": 'J2000 {0:s} {1:s}'.format(config['changecentre']['ra'], config['changecentre']['dec']),
                           },
                           input=pipeline.input,
                           output=pipeline.output,
                           label='{0:s}:: Change phase centre ms={1:s}'.format(step, to_ms))

            if pipeline.enable_task(config, 'obsinfo'):
                if (config['obsinfo']['listobs']):
                    if pipeline.enable_task(config, 'split_field'):
                        listfile = '{0:s}-obsinfo.txt'.format(os.path.splitext(to_ms)[0])
                    else:
                        listfile = '{0:s}-obsinfo.txt'.format(pipeline.msbasenames[i])

                    step = 'listobs-ms{0:d}-{1:d}'.format(i,target_iter)
                    recipe.add('cab/casa_listobs', step,
                               {
                                   "vis": obsinfo_msname,
                                   "listfile": listfile+":msfile",
                                   "overwrite": True,
                               },
                               input=pipeline.input,
                               output=pipeline.obsinfo,
                               label='{0:s}:: Get observation information ms={1:s}'.format(step, obsinfo_msname))

                if (config['obsinfo']['summary_json']):
                    if pipeline.enable_task(config, 'split_field'):
                        listfile = '{0:s}-summary.json'.format(os.path.splitext(to_ms)[0])
                    else:
                        listfile = '{0:s}-summary.json'.format(pipeline.msbasenames[i])

                    step = 'summary_json-ms{0:d}-{1:d}'.format(i,target_iter)
                    recipe.add('cab/msutils', step,
                               {
                                   "msname": obsinfo_msname,
                                   "command": 'summary',
                                   "display": False,
                                   "outfile": listfile+":msfile"
                               },
                               input=pipeline.input,
                               output=pipeline.obsinfo,
                               label='{0:s}:: Get observation information as a json file ms={1:s}'.format(step, obsinfo_msname))