Example #1
    if not os.path.exists(qa_validation_dir):
        print("Directory {0:s} does not exist and will be created".format(
            qa_validation_dir))
        os.mkdir(qa_validation_dir)

    # # create another directory to store the pybdsf output
    # qa_validation_dir = '{0:s}/validation'.format(qa_validation_dir)
    # if not os.path.exists(qa_validation_dir):
    #     print("Directory {0:s} does not exist and will be created".format(
    #         qa_validation_dir))
    #     os.mkdir(qa_validation_dir)

    # Run validation depending on the chosen mode
    # +++++++++++++++++++++++++++++++++++++++++++

    # Create logging file
    lib.setup_logger('debug',
                     logfile='{0:s}/{1:s}_{2:s}_validation.log'.format(
                         qa_validation_dir, obs_id, run_mode))
    logger = logging.getLogger(__name__)

    # logging.basicConfig(filename='{0:s}/{1:d}_{2:s}_pybdsf.log'.format(qa_validation_dir, obs_id, run_mode), level=logging.DEBUG,
    #                     format='%(asctime)s - %(levelname)s: %(message)s')

    # logger = logging.getLogger(__name__)

    # run through continuum mode
    if run_mode == 'continuum':

        # base directory for data
        if args.trigger_mode:
            logger.info(
                "--> Running continuum QA in trigger mode. Looking only for data processed by Apercal on {0:s} <--"
Example #2
if args.path is None:
    if args.basedir is not None:
        qa_dir = get_default_imagepath(args.scan, basedir=args.basedir)
    else:
        qa_dir = get_default_imagepath(args.scan)

    # check that the preflag qa directory exists
    qa_preflag_dir = os.path.join(qa_dir, "preflag")

    if not os.path.exists(qa_preflag_dir):
        os.mkdir(qa_preflag_dir)
else:
    qa_preflag_dir = args.path

# Create log file
lib.setup_logger('info',
                 logfile=os.path.join(qa_preflag_dir, 'run_preflag_qa.log'))
logger = logging.getLogger(__name__)

logger.info("Running preflag QA")

# now combine the plots
try:
    start_time = time.time()
    preflag_plots.combine_preflag_plots(qa_preflag_dir,
                                        trigger_mode=args.trigger_mode)
except Exception as e:
    logger.warning("Running preflag QA failed")
    logger.exception(e)
else:
    logger.info(
        "Running preflag QA ... Done ({0:.0f}s)".format(time.time() -
Example #3
args = parser.parse_args()

# If no path is given change to default QA path
if args.path is None:
    output_path = get_default_imagepath(args.scan, basedir=args.basedir)

    # check that selfcal qa directory exists
    output_path = os.path.join(output_path, "selfcal/")

    if not os.path.exists(output_path):
        os.mkdir(output_path)
else:
    output_path = args.path

# Create log file
lib.setup_logger('info',
                 logfile=os.path.join(output_path, 'run_scal_plots.log'))
logger = logging.getLogger(__name__)

# Get selfcal maps
if args.maps:
    try:
        logger.info("#### Creating selfcal maps ...")
        start_time_maps = time.time()
        get_selfcal_maps(args.scan,
                         output_path,
                         trigger_mode=args.trigger_mode)
        logger.info("#### Creating selfcal maps. Done ({0:.0f}s)".format(
            time.time() - start_time_maps))
    except Exception as e:
        logger.error(e)
        logger.error("#### Creating selfcal maps failed")
Example #4
def main():

    start_time = time.time()

    args = parse_args()

    obs_id = args.obs_id
    flux_cal = args.calibrator
    qa_dir = args.path
    base_dir = args.base_dir
    n_threads = args.threads
    subband_step = args.subband_step

    # set output directory
    if qa_dir is None:
        if base_dir is not None:
            qa_dir = get_default_imagepath(obs_id, basedir=base_dir)
        else:
            qa_dir = get_default_imagepath(obs_id)

        # check that path exists
        if not os.path.exists(qa_dir):
            print("Directory {0:s} does not exist and will be created".format(
                qa_dir))
            os.makedirs(qa_dir)

    data_dir = os.path.dirname(qa_dir).rsplit("qa")[0]
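    # note: assuming qa_dir looks like <basedir>/<obs_id>/qa/, the line above
    # strips the trailing "qa" part to recover the observation's data directory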

    # set the beamweights QA directory
    qa_beamweights_dir = os.path.join(qa_dir, "beamweights")

    # check that this directory exists (just in case)
    if not os.path.exists(qa_beamweights_dir):
        print("Directory {0:s} does not exist and will be created".format(
            qa_beamweights_dir))
        os.makedirs(qa_beamweights_dir)

    lib.setup_logger(
        'debug',
        logfile='{0:s}/create_beamweights.log'.format(qa_beamweights_dir))
    logger = logging.getLogger(__name__)

    logger.info("Getting beamweight plots for {}".format(flux_cal))

    # get a list of beams if no beam was provided
    if args.beam is None:
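        # look for two-digit beam directories (00-39) in the data directory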
        data_dir_beam_list = glob.glob(os.path.join(data_dir, "[0-3][0-9]"))
        # check that there are beams
        if len(data_dir_beam_list) == 0:
            logger.warning("No beams found in {}".format(data_dir))
            return None
        else:
            beam_list = [
                int(os.path.basename(beam)) for beam in data_dir_beam_list
            ]
    else:
        beam_list = [args.beam]

    # now go through the beams
    for beam_nr in beam_list:

        start_time_beam = time.time()

        logger.info("Processing beam {}".format(beam_nr))

        # check that the given calibrator exists
        data_cal_dir = os.path.join(data_dir, "{0:02d}".format(beam_nr))

        # calibrator file
        cal_file = os.path.join(data_cal_dir, "raw/{}.MS".format(flux_cal))

        # check that it exists
        if not os.path.exists(cal_file):
            logger.warning(
                "Could not find calibrator {}. Continue with next beam".format(
                    cal_file))
            continue
        else:
            logger.info("Found calibrator {}".format(cal_file))

        # set output directory for plots
        qa_beamweights_beam_dir = os.path.join(qa_beamweights_dir,
                                               "{0:02d}".format(beam_nr))
        # check that this directory exists (just in case)
        if not os.path.exists(qa_beamweights_beam_dir):
            logger.info(
                "Directory {0:s} does not exist and will be created".format(
                    qa_beamweights_beam_dir))
            os.makedirs(qa_beamweights_beam_dir)

        # Start with one measurement set to set up the size of the array
        #
        # cal = pt.table(
        #     "/data/hess/apertif/{}/{}/WSRTA{}_B000.MS/APERTIF_CALIBRATION".format(args.cal_date, args.taskid, args.taskid),
        #     ack=False)
        cal = pt.table(os.path.join(cal_file, "APERTIF_CALIBRATION"),
                       ack=False)

        num_beams = 40
        num_subbands = pt.taql(
            'select distinct SPECTRAL_WINDOW_ID FROM $cal').nrows()
        num_antennas = pt.taql('select distinct ANTENNA_ID FROM $cal').nrows()
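        # note: pt.taql substitutes the Python table object 'cal' for $cal in the query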

        beamweights = np.zeros((num_beams, num_subbands, num_antennas, 11, 11),
                               dtype=np.complex64)
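        # one 11x11 complex weight grid per (beam, subband, antenna); only the
        # entries for the current beam_nr are filled in the loop below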

        logger.info("Number of subbands in {0} is {1}".format(
            os.path.basename(cal_file), num_subbands))

        # sanity check in case there are no subbands or antennas
        if num_subbands != 0 and num_antennas != 0:

            # Old implementation looped over beams (and I just picked a subband for simplicity, but this could be expanded to loop over subbands)
            #
            # plot_sub = 350
            # for beam_nr in range(40):
            #     ms_name = "/data/hess/apertif/{}/{}/WSRTA{}_B0{:02}.MS/APERTIF_CALIBRATION".format(args.cal_date, args.taskid,
            #                                                                                         args.taskid, beam_nr)
            #     print(ms_name)
            #     cal = pt.table(ms_name, ack=False)
            #     weights_gershape = cal.getcol('BEAM_FORMER_WEIGHTS').reshape((num_subbands, -1, 2, 64))
            #
            #     for subband in range(num_subbands):
            #         for antenna in range(num_antennas):
            #             beamweights[beam_nr, subband, antenna] = convert_weights(weights_gershape[subband, antenna])
            #
            #     print("BEAM NUMBER {}".format(beam_nr))
            #     # fig, axs = plt.subplots(3, 4, figsize=(15, 11))
            #     fig, axs = plt.subplots(3, 4, figsize=(10, 7))
            #     fig.suptitle("Beam {}; Subband {}".format(beam_nr, plot_sub), fontsize=14)
            #     for ax, plot_ant in zip(np.array(axs).flatten(), range(num_antennas)):
            #         ax.imshow(np.abs(beamweights[beam_nr, plot_sub, plot_ant]), cmap='plasma')
            #         ax.set_title("Antenna " + str(plot_ant))
            #         if plot_ant < 8:
            #             ax.set_xticklabels([])
            #         for i in range(61):
            #             x, y = give_coord('X', i)
            #             ax.text(x - 0.35, y + 0.18, 'X' + str(i), color='white', fontsize=5)
            #             x, y = give_coord('Y', i)
            #             ax.text(x - 0.35, y + 0.18, 'Y' + str(i), color='white', fontsize=5)
            #
            #     plt.savefig('/data/hess/apertif/{}/{}_B0{:02}_S{:03}_weights.png'.format(args.cal_date, args.cal_date,
            #                                                                              beam_nr, plot_sub))
            #     plt.close()

            # New implementation because I was just thinking of using a single beam and plotting a bunch of subbands. (quick and dirty solution)
            # Beam is chosen by the user and saved in args.beam
            # ms_name = "/home/hess/apertif/{}/{:02}/3C147.MS/APERTIF_CALIBRATION".format(
            #     args.taskid, beam_nr)
            # cal = pt.table(ms_name, ack=False)

            logger.info("Getting weights")
            weights_gershape = cal.getcol('BEAM_FORMER_WEIGHTS').reshape(
                (num_subbands, -1, 2, 64))
            logger.info("Getting weights ... Done")

            # parallelise it to plot faster
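            # pymp forks n_threads worker processes and p.range splits the subband
            # loop (stepped by subband_step) across them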
            with pymp.Parallel(n_threads) as p:
                # go through the subbands in steps of subband_step
                for subband in p.range(0, num_subbands, subband_step):
                    for antenna in range(num_antennas):
                        beamweights[beam_nr, subband,
                                    antenna] = convert_weights(
                                        weights_gershape[subband, antenna])

                    fig, axs = plt.subplots(3, 4, figsize=(10, 7))
                    fig.suptitle("Beam {}; Subband {}".format(
                        beam_nr, subband),
                                 fontsize=14)
                    for ax, plot_ant in zip(
                            np.array(axs).flatten(), range(num_antennas)):
                        ax.imshow(np.abs(beamweights[beam_nr, subband,
                                                     plot_ant]),
                                  cmap='plasma')
                        ax.set_title("Antenna " + str(plot_ant))
                        if plot_ant < 8:
                            ax.set_xticklabels([])
                        for i in range(61):
                            x, y = give_coord('X', i)
                            ax.text(x - 0.35,
                                    y + 0.18,
                                    'X' + str(i),
                                    color='white',
                                    fontsize=5)
                            x, y = give_coord('Y', i)
                            ax.text(x - 0.35,
                                    y + 0.18,
                                    'Y' + str(i),
                                    color='white',
                                    fontsize=5)

                    plot_name = os.path.join(
                        qa_beamweights_beam_dir,
                        "{0}_{1}_B{2:02d}_S{3:03d}_weights.png".format(
                            obs_id, flux_cal, beam_nr, subband))
                    # plt.savefig('/home/hess/apertif/{}/{}_B0{:02}_S{:03}_weights.png'.format(args.taskid, args.cal_date,
                    #                                                                          beam_nr, subband))
                    plt.savefig(plot_name)
                    logger.info("Saving plot {}".format(plot_name))
                    plt.close('all')

            logger.info("Processing beam {0} ... Done ({1:.0f}s)".format(
                beam_nr,
                time.time() - start_time_beam))
        else:
            logger.warning(
                "Found {0} subbands and {1} antennas for beam {2} in {3}".
                format(num_subbands, num_antennas, beam_nr, flux_cal))

    logger.info("Getting beamweight plots for {0} ... Done ({1:.0f}s)".format(
        flux_cal,
        time.time() - start_time))
Example #5
    """
    Trigger the start of a fluxcal pipeline. Returns immediately.

    Args:
        taskid_* (int): something like 181204020
        name_* (str): something like '3C295'
        beamlist_* (List[int]): something like [0, 1, 2, ..., 9]

    Returns:
        bool: True if apercal started successfully
    """
    basedir = '/data/apertif/{}/'.format(taskid_target)
    if not os.path.exists(basedir):
        os.mkdir(basedir)

    lib.setup_logger('debug', logfile=os.path.join(basedir, 'apercal.log'))
    logger = logging.getLogger(__name__)
    gitinfo = subprocess.check_output('cd ' + os.path.dirname(apercal.__file__) +
                                      ' && git describe --tag; cd',
                                      shell=True).strip().decode()
    logger.info("Apercal version: " + gitinfo)

    name_fluxcal = str(name_fluxcal).strip()
    name_polcal = str(name_polcal).strip()
    name_target = str(name_target).strip()

    p0 = prepare()
    p0.basedir = basedir
    p0.fluxcal = name_fluxcal + ".MS"
    p0.polcal = name_polcal + ".MS"
    p0.target = name_target + ".MS"
Example #6
def run_triggered_qa(targets,
                     fluxcals,
                     polcals,
                     steps=None,
                     basedir=None,
                     osa=''):
    """Function to run all QA steps.

    | Args:
    |   targets (list(int, str, list(str))): the target field to be reduced
    |   fluxcals (list(int, str, int)): the flux calibrators for the beams to be reduced
    |   polcals (list(int, str, int)): the polarisation calibrator for the beams to be reduced
    |   steps (list(str)): The QA steps that should be executed
    |   basedir (str): The directory where the taskid is located
    |   osa (str): The OSA of the given taskid

    | Function is called from autocal as
    |   ``run_triggered_qa(tdict['target'], tdict['cal1'], tdict['cal2'])``

    | With the first three variables defined (the same way as autocal) as
    |    targets = (190505048, 'LH_WSRT', array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]))

        fluxcals = [(190505017, '3C147_9', 9), (190505016, '3C147_8', 8), (190505015, '3C147_7', 7), (190505014, '3C147_6', 6), (190505013, '3C147_5', 5), (190505012, '3C147_4', 4), (190505011, '3C147_3', 3), (190505010, '3C147_2', 2), (190505009, '3C147_1', 1), (190505008, '3C147_0', 0)]

        polcals = [(190506001, '3C286_0', 0), (190506002, '3C286_1', 1), (190506003, '3C286_2', 2), (190506004, '3C286_3', 3), (190506005,'3C286_4', 4), (190506006, '3C286_5', 5), (190506007, '3C286_6', 6), (190506008, '3C286_7', 7), (190506009, '3C286_8', 8), (190506010, '3C286_9', 9)]

    | If steps is not provided then all steps except mosaic will be performed:
    |    steps = ['inspection_plots', 'preflag', 'crosscal', 'selfcal', 'continuum', 'line', 'report']

    | It is possible to select a certain step:
    |    steps = ['inspection_plots']

    | test call can look like this: 
    |    ``from dataqa.run_qa import run_triggered_qa``
    |    ``run_triggered_qa((190505048, 'LH_WSRT', [0]), [(190505048, '3C147_10', 10)], [(190505048, '3C286_10', 10)], steps=['report'])``
    """

    # for time measurement
    start_time = time.time()

    # Process input parameters
    # (same as in start_apercal_pipeline)
    # ========================

    (taskid_target, name_target, beamlist_target) = targets

    if fluxcals:
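        # strip the beam suffix from the first entry, e.g. '3C147_9' -> '3C147'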
        name_fluxcal = str(fluxcals[0][1]).strip().split('_')[0].upper()
    else:
        name_fluxcal = ''
    if polcals:
        name_polcal = str(polcals[0][1]).strip().split('_')[0].upper()
    else:
        name_polcal = ''

    if steps is None:
        # steps = ['preflag', 'crosscal', 'selfcal',
        #          'continuum', 'line', 'mosaic', 'report']
        # steps = ['inspection_plots', 'beamweights', 'preflag', 'crosscal', 'selfcal',
        #          'continuum', 'line', 'report']

        # due to an issue with the beamweights script, this step is currently not performed
        steps = [
            'inspection_plots', 'preflag', 'crosscal', 'selfcal', 'continuum',
            'line', 'report'
        ]

    # Set up
    # ======

    # Get host name
    host_name = socket.gethostname()

    # QA directory
    if basedir is not None:
        qa_dir = get_default_imagepath(taskid_target, basedir=basedir)
    else:
        qa_dir = get_default_imagepath(taskid_target)
        basedir = "/data/apertif"

    # check that path exists
    if not os.path.exists(qa_dir):
        print("Directory {0:s} does not exist and will be created".format(
            qa_dir))
        try:
            os.mkdir(qa_dir)
        except Exception as e:
            print(e)

    # start log file
    # logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s',
    #                     filename='{0:s}{1:s}_triggered_qa.log'.format(qa_dir, host_name), level=logging.DEBUG)

    lib.setup_logger('debug',
                     logfile='{0:s}{1:s}_triggered_qa.log'.format(
                         qa_dir, host_name))
    logger = logging.getLogger(__name__)

    logger.info("#######################")
    logger.info("Input parameters:")
    logger.info("target={0:s}".format(str(targets)))
    logger.info("fluxcals={0:s}".format(str(fluxcals)))
    logger.info("polcals={0:s}".format(str(polcals)))
    logger.info("#######################")

    logger.info('#######################')
    logger.info('#### Running all QA steps on {0:s}'.format(host_name))
    logger.info('#######################')

    # If both fluxcal and polcal are polarised, remove polcal
    # (taken from start_pipeline)
    if subs_calmodels.is_polarised(
            name_polcal) and subs_calmodels.is_polarised(name_fluxcal):
        name_polcal = ""

    if (fluxcals and fluxcals != '') and (polcals and polcals != ''):
        assert (len(fluxcals) == len(polcals))
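        # the calibrator lists are expected to have one entry per beam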

    # avoid symmetry bias, if there is only a polcal but no fluxcal, switch them
    if fluxcals is None and polcals is not None:
        logger.info(
            "Only polcal was provided. Setting polcal {} to fluxcal".format(
                name_polcal))
        fluxcals, polcals = polcals, fluxcals
        name_fluxcal, name_polcal = name_polcal, name_fluxcal
        name_polcal = ""
    # Exchange polcal and fluxcal if specified in the wrong order
    # (taken from start_pipeline)
    # (except for how the names are switched)
    elif not subs_calmodels.is_polarised(name_polcal) and name_polcal != '':
        if subs_calmodels.is_polarised(name_fluxcal):
            logger.debug("Switching polcal and fluxcal because " +
                         name_polcal + " is not polarised")
            fluxcals, polcals = polcals, fluxcals
            name_fluxcal, name_polcal = name_polcal, name_fluxcal
            #name_polcal = str(polcals[0][1]).strip()
        else:
            logger.debug("Setting polcal to '' since " + name_polcal +
                         " is not polarised")
            name_polcal = ""
    elif name_polcal != '':
        logger.debug("Polcal " + name_polcal + " is polarised, all good")

    logger.info(
        "## Observation of target: {0:s}, flux calibrator: {1:s}, polarisation calibrator: {2:s}"
        .format(name_target, name_fluxcal, name_polcal))

    # Write information about the observation into a txt file for later
    # This information is important for the OSA report
    # =================================================================

    # flux calibrator tid list
    flux_cal_tid_list = [cal[0] for cal in fluxcals]

    # pol calibrator tid list
    if name_polcal != '':
        pol_cal_tid_list = [cal[0] for cal in polcals]
    else:
        pol_cal_tid_list = []

    summary_table = Table(
        [[taskid_target], [name_target], [name_fluxcal],
         [str(flux_cal_tid_list).replace("]", "").replace("[", "")],
         [name_polcal],
         [str(pol_cal_tid_list).replace("]", "").replace("[", "")], [osa]],
        names=('Obs_ID', 'Target', 'Flux_Calibrator',
               'Flux_Calibrator_Obs_IDs', 'Pol_Calibrator',
               'Pol_Calibrator_Obs_IDs', 'OSA'))

    table_name = "{0}_obs.ecsv".format(taskid_target)

    table_name_with_path = os.path.join(qa_dir, table_name)

    try:
        summary_table.write(table_name_with_path,
                            format='ascii.ecsv',
                            overwrite=True)
    except Exception as e:
        logger.warning("Saving observation information in {0} failed.".format(
            table_name_with_path))
        logger.exception(e)
    else:
        logger.info(("Saving observation information in {0} ... Done.".format(
            table_name_with_path)))

    # Inspection Plots
    # ================

    if 'inspection_plots' in steps:

        start_time_inspection_plot = time.time()

        # for the target it is enough to do it only for happili-01
        # as they do not depend on the beam
        # for the flux and pol calibrator, they have to be run on every node

        # get inspection plots for target
        if host_name == "happili-01":

            logger.info(
                "#### Inspection plot QA for {}...".format(name_target))

            try:
                inspection_plot_msg = os.system(
                    'python /home/apercal/dataqa/run_inspection_plot.py {0:d} {1:s} --basedir={2}'
                    .format(taskid_target, name_target, basedir))
                logger.info(
                    "Getting inspection plots finished with msg {0}".format(
                        inspection_plot_msg))
                logger.info(
                    "#### Inspection plot QA {0}... Done ".format(name_target))
            except Exception as e:
                logger.warning(
                    "Inspection plot QA for {} failed.".format(name_target))
                logger.exception(e)

        # get inspection plot for flux calibrator
        logger.info("#### Inspection plot QA for {}...".format(name_fluxcal))

        for (taskid_cal, name_cal, beamnr_cal) in fluxcals:

            try:
                inspection_plot_msg = os.system(
                    'python /home/apercal/dataqa/run_inspection_plot.py {0:d} {1:s} -c --beam={2:d} --cal_id={3:d} --basedir={4}'
                    .format(taskid_target, name_fluxcal, beamnr_cal,
                            taskid_cal, basedir))
                logger.info(
                    "Getting inspection plots finished with msg {0}".format(
                        inspection_plot_msg))
                logger.info(
                    "#### Inspection plot QA for {0} beam {1} ... Done".format(
                        name_fluxcal, beamnr_cal))
            except Exception as e:
                logger.warning(
                    "Inspection plot QA for {0} beam {1} failed.".format(
                        name_fluxcal, beamnr_cal))
                logger.exception(e)

        # get inspection plot for pol calibrator if it exists
        if name_polcal != '':
            logger.info(
                "#### Inspection plot QA for {}...".format(name_polcal))

            for (taskid_cal, name_cal, beamnr_cal) in polcals:

                try:
                    inspection_plot_msg = os.system(
                        'python /home/apercal/dataqa/run_inspection_plot.py {0:d} {1:s} -c --beam={2:d} --cal_id={3:d} --basedir={4}'
                        .format(taskid_target, name_polcal, beamnr_cal,
                                taskid_cal, basedir))
                    logger.info(
                        "Getting inspection plots finished with msg {0}".
                        format(inspection_plot_msg))
                    logger.info(
                        "#### Inspection plot QA for {0} beam {1} ... Done".
                        format(name_polcal, beamnr_cal))
                except Exception as e:
                    logger.warning(
                        "Inspection plot QA for {0} beam {1} failed.".format(
                            name_polcal, beamnr_cal))
                    logger.exception(e)

        logger.info("#### Inspection plot QA ... Done (time {0:.1f}s)".format(
            time.time() - start_time_inspection_plot))
    else:
        logger.warning("#### Did not perform inspection plot QA")

    # Beamweights Plots
    # =================

    if 'beamweights' in steps:

        start_time_beamweights = time.time()

        # this needs to run on every node

        logger.info("#### Beamweights QA for {}...".format(name_fluxcal))

        try:
            beamweights_msg = os.system(
                'python /home/apercal/dataqa/run_beamweights_plots.py {0:d} {1:s} -t 20'
                .format(taskid_target, name_fluxcal))
            logger.info("Getting Beamweightss finished with msg {0}".format(
                beamweights_msg))

        except Exception as e:
            logger.warning(
                "Beamweights QA for {} failed.".format(name_fluxcal))
            logger.exception(e)
        else:
            logger.info("#### Beamweights QA ... Done (time {0:.1f}s)".format(
                time.time() - start_time_beamweights))
    else:
        logger.warning("#### Did not perform Beamweights QA")

    # Preflag QA
    # ==========

    if 'preflag' in steps and host_name == "happili-01":

        logger.info("#### Running preflag QA ...")

        start_time_preflag = time.time()

        try:
            preflag_msg = os.system(
                'python /home/apercal/dataqa/run_preflag_qa.py {0:d} --basedir={1}'
                .format(taskid_target, basedir))
            logger.info("Preflag QA finished with msg {0}".format(preflag_msg))
            logger.info(
                "#### Running preflag QA ... Done (time {0:.1f}s)".format(
                    time.time() - start_time_preflag))
        except Exception as e:
            logger.warning("Preflag QA failed. Continue with next QA")
            logger.exception(e)

        # Disabled rfinder
        # try:
        #     preflag_msg = os.system(
        #         'python /home/apercal/dataqa/run_rfinder.py {0:d} {1:s} --trigger_mode'.format(taskid_target, name_fluxcal))
        #     logger.info(
        #         "Preflag QA finished with msg {0}".format(preflag_msg))
        #     logger.info("#### Running preflag QA ... Done (time {0:.1f}s)".format(
        #         time.time()-start_time_preflag))
        # except Exception as e:
        #     logger.warning("Preflag QA failed. Continue with next QA")
        #     logger.exception(e)
    else:
        logger.warning("#### Did not perform preflag QA")

    # Crosscal QA
    # ===========

    if 'crosscal' in steps and name_fluxcal != '':

        logger.info('#### Running crosscal QA ...')

        start_time_crosscal = time.time()

        try:
            crosscal_msg = os.system(
                'python /home/apercal/dataqa/run_ccal_plots.py {0:d} "{1:s}" "{2:s}" --basedir={3} --trigger_mode'
                .format(taskid_target, name_fluxcal, name_polcal, basedir))
            logger.info(
                "Crosscal QA finished with msg {0}".format(crosscal_msg))
            logger.info(
                "#### Running crosscal QA ... Done (time {0:.1f}s)".format(
                    time.time() - start_time_crosscal))
        except Exception as e:
            logger.warning("Crosscal QA failed. Continue with next QA")
            logger.exception(e)
    else:
        logger.warning("#### Did not perform crosscal QA")

    # Selfcal QA
    # ==========

    if 'selfcal' in steps:

        logger.info('#### Running selfcal QA ...')

        start_time_selfcal = time.time()

        try:
            selfcal_msg = os.system(
                'python /home/apercal/dataqa/run_scal_plots.py {0:d} {1:s} --basedir={2} --trigger_mode'
                .format(taskid_target, name_target, basedir))
            logger.info("Selfcal QA finished with msg {0}".format(selfcal_msg))
            logger.info(
                "#### Running selfcal QA ... Done (time {0:.1f}s)".format(
                    time.time() - start_time_selfcal))
        except Exception as e:
            logger.warning("Selfcal QA failed. Continue with next QA")
            logger.exception(e)
    else:
        logger.warning("#### Did not perform selfcal QA")

    # Mosaic QA
    # ==========

    if 'mosaic' in steps and host_name == 'happili-01':

        logger.info('#### Mosaic QA is currently not available ...')

        # logger.info('#### Running mosaic QA ...')

        # start_time_mosaic = time.time()

        # try:
        #     # Create the mosaic
        #     logger.info('## Making the mosaic ...')
        #     start_time_make_mosaic = time.time()
        #     make_mosaic_msg = os.system(
        #         'python /home/apercal/dataqa/make_mosaic_image.py {0:d}'.format(taskid_target))
        #     logger.info(
        #         "Making mosaic finished with msg {0}".format(make_mosaic_msg))
        #     logger.info("## Making the mosaic ... Done (time {0:.1f}s)".format(
        #         time.time()-start_time_make_mosaic))

        #     # Run the validation tool
        #     logger.info('## Run validation ...')
        #     start_time_mosaic_validation = time.time()
        #     mosaic_validation_msg = os.system(
        #         'python /home/apercal/dataqa/run_continuum_validation.py {0:d} --for_mosaic'.format(taskid_target))
        #     logger.info(
        #         "Mosaic validation finished with msg {0}".format(mosaic_validation_msg))
        #     logger.info("## Run validation ... Done (time {0:.1f}s)".format(
        #         time.time()-start_time_mosaic_validation))

        #     logger.info("#### Running mosaic QA ... Done (time {0:.1f}s)".format(
        #         time.time()-start_time_mosaic))
        # except Exception as e:
        #     logger.warning("Mosaic QA failed. Continue with next QA")
        #     logger.exception(e)
    else:
        logger.warning("#### Did not perform mosaic QA")

    # Line QA
    # =======

    if 'line' in steps:

        logger.info('#### Running line QA ...')

        start_time_line = time.time()

        try:
            # Get cube statistic without continuum subtraction
            logger.info('## Get cube statistic ...')
            start_time_get_cube_stat = time.time()
            cube_stat_msg = os.system(
                'python /home/apercal/dataqa/run_cube_stats.py {0:d} --basedir={1} --trigger_mode'
                .format(taskid_target, basedir))
            logger.info(
                "Cube stat finished with msg {0}".format(cube_stat_msg))
            logger.info(
                "## Get cube statistic ... Done (time {0:.1f}s)".format(
                    time.time() - start_time_get_cube_stat))

            # Subtract continuum
            # logger.info('## Subtract continuum ...')
            # start_time_subtract_continuum = time.time()
            # subtract_cont_msg = os.system(
            #     'python /home/apercal/dataqa/subtract_continuum.py {0:d} --trigger_mode'.format(taskid_target))
            # logger.info(
            #     "Continuum subtraction finished with msg {0}".format(subtract_cont_msg))
            # logger.info("## Subtract continuum ... Done (time {0:.1f}s)".format(
            #     time.time()-start_time_subtract_continuum))

            # # Get cube statistic after continuum subtraction
            # logger.info(
            #     '## Get cube statistic after continuum subtraction ...')
            # start_time_get_cube_stat_cont = time.time()
            # get_cube_stat_cont_msg = os.system(
            #     'python /home/apercal/dataqa/run_cube_stats_cont.py {0:d} --trigger_mode'.format(taskid_target))
            # logger.info(
            #     "Cube stat cont finished with msg {0}".format(get_cube_stat_cont_msg))
            # logger.info("## Get cube statistic after continuum subtraction ... Done (time {0:.1f}s)".format(
            #     time.time()-start_time_get_cube_stat_cont))

            logger.info("#### Running line QA ... Done (time {0:.1f}s)".format(
                time.time() - start_time_line))
        except Exception as e:
            logger.warning("Line QA failed. Continue with next QA")
            logger.exception(e)
    else:
        logger.warning("#### Did not perform line QA")

    # Continuum QA
    # ============

    if 'continuum' in steps:

        logger.info('#### Running continuum QA ...')

        start_time_continuum = time.time()

        try:
            continuum_msg = os.system(
                'python /home/apercal/dataqa/run_continuum_validation.py {0:d} --basedir={1} --trigger_mode'
                .format(taskid_target, basedir))
            logger.info(
                "Continuum QA finished with msg {0}".format(continuum_msg))
            logger.info(
                "#### Running continuum QA ... Done (time {0:.1f}s)".format(
                    time.time() - start_time_continuum))
        except Exception as e:
            logger.warning("Continuum QA failed. Continue with next QA")
            logger.exception(e)
    else:
        logger.warning("#### Did not perform continuum QA")

    # Create report
    # =============

    if 'report' in steps:

        # merge the crosscal and selfcal plots for the report
        if host_name == 'happili-01':
            logger.info('#### Merge crosscal and selfcal plots...')

            start_time_merge = time.time()

            try:
                report_msg = os.system(
                    'python /home/apercal/dataqa/run_merge_plots.py {0:d} --basedir={1} --do_ccal --do_scal --run_parallel'
                    .format(taskid_target, basedir))
                logger.info("Merging finished with msg {0}".format(report_msg))
                logger.info(
                    "#### Merge crosscal and selfcal plots ... Done (time {0:.1f}s)"
                    .format(time.time() - start_time_merge))
            except Exception as e:
                logger.warning("Merge crosscal and selfcal plots failed.")
                logger.exception(e)

        # now create the report
        logger.info('#### Create report ...')

        start_time_report = time.time()

        try:
            report_msg = os.system(
                'python /home/apercal/dataqa/create_report.py {0:d} --basedir={1} --trigger_mode'
                .format(taskid_target, basedir))
            logger.info("Report finished with msg {0}".format(report_msg))
            logger.info("#### Create report ... Done (time {0:.1f}s)".format(
                time.time() - start_time_report))
        except Exception as e:
            logger.warning("Creating report failed.")
            logger.exception(e)
    else:
        logger.warning("#### Did not create a report")

    # Finish
    # ======
    logger.info('#######################')
    logger.info(
        '#### Running all QA steps on {0:s} ... Done (time {1:.1f}s)'.format(
            host_name,
            time.time() - start_time))
    logger.info('#######################')
Example #7
def run_manual_processing():
    """Function to run Apercal and QA"""

    # make sure a base directory was specified
    if basedir is None:
        print("ERROR: No basedir specified. Abort.")
        return -1

    start_time = time.time()

    host_name = socket.gethostname()

    cwd = os.getcwd()

    name_fluxcal = str(fluxcals[0][1]).strip().split('_')[0].upper()

    (task_id, name_target, beamlist_target) = targets

    logfile = os.path.join(cwd,
                           "{0}_{1}_apercal_qa.log".format(task_id, host_name))

    # Setting up log file
    lib.setup_logger('debug', logfile=logfile)
    logger = logging.getLogger(__name__)

    if do_apercal:
        logger.info("Running apercal manually")
        logger.info(
            "When apercal runs, the logging information will be in the apercal log"
        )

        logger.info("Using task id: {0}".format(task_id))
        logger.info("target = {}".format(str(targets)))
        logger.info("flux_cal = {}".format(str(fluxcals)))
        logger.info("pol_cal = {}".format(str(polcals)))

        # logger.info("Steps: {}".format(str(steps)))

        try:
            start_time = time.time()
            logger.info('Running apercal')
            # return_msg = start_apercal_pipeline(
            # targets, fluxcals, polcals, basedir=basedir, steps=steps, configfilename=configfile)
            return_msg = start_apercal_pipeline(targets,
                                                fluxcals,
                                                polcals,
                                                basedir=basedir,
                                                steps=steps,
                                                configfilename=configfile)
            # return_msg = start_apercal_pipeline(
            #     targets, fluxcals, polcals, steps=steps)
        except Exception as e:
            lib.setup_logger('debug', logfile=logfile)
            logger = logging.getLogger(__name__)
            logger.warning("Running apercal failed ({0:.3f}h)".format(
                (time.time() - start_time) / 3600.))
            logger.exception(e)
        else:
            lib.setup_logger('debug', logfile=logfile)
            logger = logging.getLogger(__name__)
            logger.info("Running apercal ... Done ({0:.3f}h)".format(
                (time.time() - start_time) / 3600.))

    if do_qa:
        logger.info("Running QA manually")
        logger.info(
            "When apercal runs, the logging information will be in the apercal log"
        )

        logger.info("Using task id: {0}".format(task_id))
        logger.info("target = {}".format(str(targets)))
        logger.info("flux_cal = {}".format(str(fluxcals)))
        logger.info("pol_cal = {}".format(str(polcals)))

        logger.info("Running all QA steps")
        try:
            start_time = time.time()
            logger.info('Running QA')
            return_msg = run_triggered_qa(targets,
                                          fluxcals,
                                          polcals,
                                          osa=osa,
                                          basedir=os.path.dirname(basedir),
                                          steps=steps_qa)
        except Exception as e:
            lib.setup_logger('debug', logfile=logfile)
            logger = logging.getLogger(__name__)
            logger.warning("Running QA failed ({0:.3f}h)".format(
                (time.time() - start_time) / 3600.))
            logger.exception(e)
        else:
            lib.setup_logger('debug', logfile=logfile)
            logger = logging.getLogger(__name__)
            logger.info("Running QA ... Done ({0:.3f}h)".format(
                (time.time() - start_time) / 3600.))
Example #8
def run_manual_processing():
    """Function to run Apercal, QA and Apergest"""

    start_time = time.time()

    host_name = socket.gethostname()

    cwd = os.getcwd()

    name_fluxcal = str(fluxcals[0][1]).strip().split('_')[0].upper()

    (task_id, name_target, beamlist_target) = targets

    logfile = os.path.join(
        cwd, "{0}_{1}_apercal_qa_apergest.log".format(task_id, host_name))

    # Setting up log file
    lib.setup_logger('debug', logfile=logfile)
    logger = logging.getLogger(__name__)

    if do_apercal:
        logger.info("Running apercal manually")
        logger.info(
            "When apercal runs, the logging information will be in the apercal log"
        )

        logger.info("Using task id: {0}".format(task_id))
        logger.info("target = {}".format(str(targets)))
        logger.info("flux_cal = {}".format(str(fluxcals)))
        logger.info("pol_cal = {}".format(str(polcals)))

        # logger.info("Steps: {}".format(str(steps)))

        try:
            start_time = time.time()
            logger.info('Running apercal')
            # return_msg = start_apercal_pipeline(
            # targets, fluxcals, polcals, basedir=basedir, steps=steps, configfilename=configfile)
            start_apercal_pipeline(targets,
                                   fluxcals,
                                   polcals,
                                   configfilename=configfile,
                                   steps=steps)
            # return_msg = start_apercal_pipeline(
            #     targets, fluxcals, polcals, steps=steps)
        except Exception as e:
            lib.setup_logger('debug', logfile=logfile)
            logger = logging.getLogger(__name__)
            logger.warning("Running apercal failed ({0:.3f}h)".format(
                (time.time() - start_time) / 3600.))
            logger.exception(e)
        else:
            lib.setup_logger('debug', logfile=logfile)
            logger = logging.getLogger(__name__)
            logger.info("Running apercal ... Done ({0:.3f}h)".format(
                (time.time() - start_time) / 3600.))

    if do_qa:
        logger.info("Running QA manually")
        logger.info(
            "When apercal runs, the logging information will be in the apercal log"
        )

        logger.info("Using task id: {0}".format(task_id))
        logger.info("target = {}".format(str(targets)))
        logger.info("flux_cal = {}".format(str(fluxcals)))
        logger.info("pol_cal = {}".format(str(polcals)))

        logger.info("Running all QA steps")
        try:
            start_time = time.time()
            logger.info('Running QA')
            run_triggered_qa(targets, fluxcals, polcals, osa=osa)
        except Exception as e:
            lib.setup_logger('debug', logfile=logfile)
            logger = logging.getLogger(__name__)
            logger.warning("Running QA failed ({0:.3f}h)".format(
                (time.time() - start_time) / 3600.))
            logger.exception(e)
        else:
            lib.setup_logger('debug', logfile=logfile)
            logger = logging.getLogger(__name__)
            logger.info("Running QA ... Done ({0:.3f}h)".format(
                (time.time() - start_time) / 3600.))

    if do_apergest:
        logger.info("Running Apergest manually")
        apergest_dir = "/home/schulz/apercal/apergest/{}".format(host_name)
        os.chdir(apergest_dir)
        try:
            apergest(task_id,
                     do_make_jsons=True,
                     do_prepare_ingest=True,
                     do_run_ingest=True,
                     do_delete_data=False)
        except Exception as e:
            os.chdir(cwd)
            lib.setup_logger('debug', logfile=logfile)
            logger = logging.getLogger(__name__)
            logger.warning("Running Apergest failed ({0:.3f}h)".format(
                (time.time() - start_time) / 3600.))
            logger.exception(e)
        else:
            os.chdir(cwd)
            lib.setup_logger('debug', logfile=logfile)
            logger = logging.getLogger(__name__)
            logger.info("Running Apergest ... Done ({0:.3f}h)".format(
                (time.time() - start_time) / 3600.))

    # just to make sure to get back
    os.chdir(cwd)
Example #9
def main():
    start = timer()

    parser = argparse.ArgumentParser(description='Generate inspection plots')

    # 1st argument: File name
    parser.add_argument("obs_id", help='ID of observation of target field')

    parser.add_argument(
        "src_name", help='Name of the calibrator or target of the plots')

    parser.add_argument("-c", "--calibrator", action="store_true", default=False,
                        help='Set if a calibrator is used. Also requires beam and cal_id')

    parser.add_argument("--beam", type=int, default=None,
                        help='If src_name is a calibrator set the beam number')

    parser.add_argument("--cal_id", type=str, default=None,
                        help='Obs ID of the calibrator')

    parser.add_argument('-p', '--path', default=None,
                        help='Destination for images')
    parser.add_argument('-b', '--basedir', default=None,
                        help='Directory of obs id')

    # this mode will make the script look only for the beams processed by Apercal on a given node
    # parser.add_argument("--trigger_mode", action="store_true", default=False,
    #                     help='Set it to run Autocal triggering mode automatically after Apercal.')

    args = parser.parse_args()

    # If no path is given change to default QA path
    if args.path is None:
        if args.basedir is not None:
            output_path = get_default_imagepath(
                args.obs_id, basedir=args.basedir)
        else:
            output_path = get_default_imagepath(args.obs_id)

        # check that the inspection plot directory exists
        qa_plot_dir = os.path.join(output_path, "inspection_plots")

        if not os.path.exists(qa_plot_dir):
            os.mkdir(qa_plot_dir)
    else:
        qa_plot_dir = args.path

    # create a directory with the src_name to put the plots in
    if args.src_name is not None:
        qa_plot_dir = os.path.join(qa_plot_dir, args.src_name)

        if not os.path.exists(qa_plot_dir):
            os.mkdir(qa_plot_dir)

    # if it is a calibrator then put the plots into a beam directory
    if args.calibrator:
        if args.beam is None:
            print("ERROR: Please specify beam of calibrator")
            return -1
        elif args.cal_id is None:
            print("ERROR: Please specify id of calibrator")
            return -1
        else:
            is_calibrator = True

            qa_plot_dir = os.path.join(
                qa_plot_dir, "{0:02d}".format(args.beam))

            if not os.path.exists(qa_plot_dir):
                os.mkdir(qa_plot_dir)
    else:
        is_calibrator = False

    # Create log file
    lib.setup_logger(
        'info', logfile=os.path.join(qa_plot_dir, 'get_inspection_plot.log'))
    logger = logging.getLogger(__name__)

    # Run function to get plots
    try:
        logger.info("#### Getting inspection plots ...")
        start_time_plots = time.time()
        get_inspection_plots(args.obs_id, qa_plot_dir,
                             is_calibrator=is_calibrator, cal_id=args.cal_id)
    except Exception as e:
        logger.error(e)
        logger.error("#### Getting inspection plots failed")
    else:
        logger.info("#### Getting inspection plots... Done ({0:.0f}s)".format(
            time.time()-start_time_plots))
Example #10
                        default=False,
                        help='Set to run the script in parallel')

    parser.add_argument('-b', '--basedir', default=None, help='Data directory')

    parser.add_argument('--n_cores', default=5, type=int,
                        help='Number of cores to use for parallel merging')

    args = parser.parse_args()

    # get the QA directory
    qa_dir = get_default_imagepath(args.scan, basedir=args.basedir)
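# get_default_imagepath presumably returns the QA directory for this scan,
# e.g. <basedir>/<scan>/qa/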

    # start logging
    # Create logging file

    lib.setup_logger('info', logfile=os.path.join(qa_dir, 'merge_plots.log'))
    logger = logging.getLogger(__name__)

    start_time = time()

    logger.info("#### Merging plots ...")

    try:
        run_merge_plots(qa_dir,
                        do_ccal=args.do_ccal,
                        do_scal=args.do_scal,
                        run_parallel=args.run_parallel,
                        n_cores=args.n_cores)
    except Exception as e:
        logger.warning(
            "#### Merging plots ... Failed ({0:.0f}s)".format(time() -
Example #11
from merge_ccal_scal_plots import run_merge_plots
import numpy as np
import os
from apercal.libs import lib
import logging

lib.setup_logger('debug', logfile='test_merge_plots.log')
logger = logging.getLogger(__name__)

basedir = '/data/apertif/190602049_flag-strategy-test/qa'

do_ccal = True

do_scal = False

# file_list = np.array([os.path.join(basedir, img) for img in img_list])

# new_file_name = os.path.join(basedir, "merge_test.png")

run_merge_plots(basedir, do_ccal=do_ccal, do_scal=do_scal)
Example #12
def main():
    start_time = time.time()

    # Create and parse argument list
    # ++++++++++++++++++++++++++++++
    parser = argparse.ArgumentParser(
        description='Create overview for QA')

    # 1st argument: Observation number
    parser.add_argument("obs_id", type=str,
                        help='Observation Number')

    parser.add_argument("--target", type=str, default='',
                        help='Name of the target')

    parser.add_argument("--fluxcal", type=str, default='',
                        help='Name of the flux calibrator')

    parser.add_argument("--polcal", type=str, default='',
                        help='Name of the polarisation calibrator')

    parser.add_argument("--osa", type=str, default='',
                        help='Name of the OSA')

    parser.add_argument("-p", "--path", type=str,
                        help='Path to QA output')

    parser.add_argument("-b", "--basedir", type=str,
                        help='Base directory where the obs id is')

    parser.add_argument("--tank", action="store_true", default=False,
                        help='Create the report on new volume')

    parser.add_argument("-a", "--add_osa_report", action="store_true", default=False,
                        help='Add only the osa report to the webpage')

    parser.add_argument("-c", "--combine", action="store_true", default=False,
                        help='(Deprecated) Set to create a combined report from all happilis on happili-01. It will overwrite the report on happili-01')

    parser.add_argument("--no_merge", action="store_true", default=False,
                        help='Set to skip merging selfcal and crosscal plots')

    parser.add_argument("--do_not_read_timing", action="store_true", default=False,
                        help='Set to avoid reading timing information. Makes only sense if script is run multiple times or for debugging')

    parser.add_argument("--page_only", action="store_true", default=False,
                        help='Set to only create the webpages themselves')

    # this mode will make the script look only for the beams processed by Apercal on a given node
    parser.add_argument("--trigger_mode", action="store_true", default=False,
                        help='Set it to run Autocal triggering mode automatically after Apercal.')

    parser.add_argument("--single_node", action="store_true", default=False,
                        help='Set it to run QA on a single node and get same result as if running like the OSA. Note, this is different from trigger mode.')

    args = parser.parse_args()

    obs_id = args.obs_id
    qa_dir = args.path
    base_dir = args.basedir
    do_combine = args.combine
    add_osa_report = args.add_osa_report

    # directory where the output of pybdsf will be stored
    if qa_dir is None:
        if base_dir is not None:
            qa_dir = get_default_imagepath(obs_id, basedir=base_dir)
        else:
            qa_dir = get_default_imagepath(obs_id)

        # check that path exists
        if not os.path.exists(qa_dir):
            print(
                "Directory {0:s} does not exist and will be created".format(qa_dir))
            os.makedirs(qa_dir)

    # change the base directory from /data to /tank
    if args.tank and "/data" in qa_dir:
        print("Switching to /tank")
        qa_dir = qa_dir.replace("/data", "/tank")

    # report directory
    qa_report_dir = "{0:s}report".format(qa_dir)

    # check that this directory exists (just in case)
    if not os.path.exists(qa_report_dir):
        print("Directory {0:s} does not exist and will be created".format(
            qa_report_dir))
        os.makedirs(qa_report_dir)

    lib.setup_logger(
        'debug', logfile='{0:s}/create_report.log'.format(qa_report_dir))
    logger = logging.getLogger(__name__)

    # if osa report should be added, check it is available
    if add_osa_report:
        # name of the osa report for this observation
        osa_report = os.path.join(
            qa_report_dir, "OSA_Report/{}_OSA_report.ecsv".format(obs_id))

        # check that the file is actually there
        if not os.path.exists(osa_report):
            logger.error("No OSA report found. Abort")
            return -1
    else:
        osa_report = ''

    # Save observation information if it does not exist yet
    # =======================================================

    table_name = "{0}_obs.ecsv".format(obs_id)

    table_name_with_path = os.path.join(qa_dir, table_name)

    if not os.path.exists(table_name_with_path):

        obs_info = Table([
            [obs_id],
            [args.target],
            [args.fluxcal],
            [''],
            [args.polcal],
            [''],
            [args.osa]], names=(
            'Obs_ID', 'Target', 'Flux_Calibrator', 'Flux_Calibrator_Obs_IDs', 'Pol_Calibrator', 'Pol_Calibrator_Obs_IDs', 'OSA'))

        try:
            obs_info.write(
                table_name_with_path, format='ascii.ecsv', overwrite=True)
        except Exception as e:
            logger.warning("Saving observation information in {0} failed.".format(
                table_name_with_path))
            logger.exception(e)
        else:
            logger.info(
                ("Saving observation information in {0} ... Done.".format(table_name_with_path)))
    else:
        logger.info(
            ("Observation information already exists. Reading {0}.".format(table_name_with_path)))
        obs_info = Table.read(table_name_with_path, format="ascii.ecsv")

    # check on which happili we are:
    host_name = socket.gethostname()

    if args.trigger_mode:
        logger.info(
            "--> Running report QA in trigger mode. Looking only for data processed by Apercal on {0:s} <--".format(host_name))
    elif args.single_node:
        logger.info(
            "--> Running report QA in single-node mode. Looking only for data processed by Apercal on {0:s} <--".format(host_name))
    elif do_combine:
        logger.info("Combining QAs from different happilis")
        if host_name != "happili-01":
            logger.warning("You are not working on happili-01.")
            logger.warning("Cannot combine QA from different happilis")
            do_combine = False
    elif host_name != "happili-01" and not args.trigger_mode:
        logger.warning("You are not working on happili-01.")
        logger.warning("The script will not process all beams")
        logger.warning("Please switch to happili-01")

    apercal_log_file = "/data/apertif/{0:s}/apercal.log".format(
        obs_id)

    # logging.basicConfig(filename='{0:s}/create_report.log'.format(qa_dir), level=logging.DEBUG,
    #                     format='%(asctime)s - %(levelname)s: %(message)s')

    # getting timing measurement for apercal (only in trigger or single-node mode)
    # if not add_osa_report and not args.do_not_read_timing:
    if args.trigger_mode or args.single_node:
        try:
            get_pipeline_run_time(obs_id, trigger_mode=args.trigger_mode)
        except Exception as e:
            logger.exception(e)

    # the subpages to be created
    subpages = ['observing_log', 'summary',  'beamweights', 'inspection_plots', 'preflag', 'crosscal',
                'selfcal', 'continuum', 'polarisation', 'line', 'mosaic', 'apercal_log']

    logger.info("#### Create report directory structure")

    # copy the js and css files
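    # (hp.__file__ points at html_report.py, so stripping the file name below is
    # assumed to give the directory holding the report's js/css templates)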
    js_file_name = "{0:s}/report_fct.js".format(
        hp.__file__.split("/html_report.py")[0])
    css_file_name = "{0:s}/report_style.css".format(
        hp.__file__.split("/html_report.py")[0])

    # for copying osa_files:
    osa_nb_file = "{0:s}/OSA_report.ipynb".format(
        hp.__file__.split("/html_report.py")[0])
    osa_py_file = "{0:s}/osa_functions.py".format(
        hp.__file__.split("/html_report.py")[0])

    osa_files = [osa_nb_file, osa_py_file]

    # Check that directory of the qa exists
    if not os.path.exists(qa_dir):
        logger.error(
            "Directory {0:s} does not exist. Abort".format(qa_dir))
        return -1
    else:
        # do things that should only happen on happili-01 when the OSA runs this function
        if not args.trigger_mode and not args.page_only:
            if host_name == "happili-01" or args.single_node:
                # go through some of the subpages and process numpy files
                for page in subpages:
                    # exclude non-apercal modules (and mosaic)
                    if page != "apercal_log" or page != "inspection_plots" or page != "summary" or page != "mosaic":
                        # just run it on preflag for now
                        if page == "preflag" or page == "crosscal" or page == "convert" or page == "selfcal" or page == "continuum":
                            # get information from numpy files
                            try:
                                logger.info(
                                    "## Getting summary table for {}".format(page))
                                make_nptabel_csv(
                                    obs_id, page, qa_dir, output_path=os.path.join(qa_dir, page))
                            except Exception as e:
                                logger.warning(
                                    "## Getting summary table for {} failed".format(page))
                                logger.exception(e)
                            else:
                                logger.info(
                                    "## Getting summary table for {} ... Done".format(page))

                            # merge plots
                            if not args.no_merge and not args.single_node:
                                try:
                                    logger.info(
                                        "## Merging selfcal and crosscal plots")
                                    run_merge_plots(
                                        qa_dir, do_ccal=True, do_scal=True, run_parallel=True, n_cores=5)
                                except Exception as e:
                                    logger.warning(
                                        "## Merging selfcal and crosscal plots ... Failed")
                                    logger.exception(e)
                                else:
                                    logger.info(
                                        "## Merging selfcal and crosscal plots ... Done")

                    # merge the continuum image properties
                    if page == 'continuum':
                        try:
                            merge_continuum_image_properties_table(
                                obs_id, qa_dir, single_node=args.single_node)
                        except Exception as e:
                            logger.warning(
                                "Merging continuum image properties ... Failed")
                            logger.exception(e)
                        else:
                            logger.info(
                                "Merging continuum image properties ... Done")

                    # get line statistics
                    if page == 'line':
                        try:
                            combine_cube_stats(
                                obs_id, qa_dir, single_node=args.single_node)
                        except Exception as e:
                            logger.warning(
                                "Getting cube statistics ... Failed")
                            logger.exception(e)
                        else:
                            logger.info(
                                "Getting cube statistics ... Done")

                # create dish delay plot
                try:
                    logger.info("Getting dish delay plot")
                    get_dish_delay_plots(
                        obs_id, obs_info['Flux_Calibrator'][0], basedir=args.basedir)
                except Exception as e:
                    logger.warning("Getting dish delay plot ... Failed")
                    logger.exception(e)
                else:
                    logger.info("Getting dish delay plot ... Done")

                # create compound beam plots
                try:
                    logger.info("Getting compound beam plots")
                    make_cb_plots_for_report(obs_id, qa_dir)
                except Exception as e:
                    logger.warning("Getting compound beam plots ... Failed")
                    logger.exception(e)
                else:
                    logger.info("Getting compound beam plots ... Done")

    # Create directory structure for the report
    if not add_osa_report:
        logger.info("#### Creating directory structrure")
        try:
            hpd.create_report_dirs(
                obs_id, qa_dir, subpages, css_file=css_file_name, js_file=js_file_name, trigger_mode=args.trigger_mode, single_node=args.single_node, do_combine=do_combine, obs_info=obs_info, osa_files=osa_files)
        except Exception as e:
            logger.error(e)
        else:
            logger.info("#### Creating directory structure ... Done")

    logger.info("#### Creating report")

    try:
        hp.create_main_html(qa_report_dir, obs_id, subpages,
                            css_file=css_file_name, js_file=js_file_name, obs_info=obs_info, osa_report=osa_report)
    except Exception as e:
        logger.error(e)

    logger.info("#### Report. Done ({0:.0f}s)".format(
        time.time()-start_time))
from apercal.modules.mosaic_v2 import mosaic
# lib (setup_logger) and logging are used below; import path for lib assumed
from apercal.libs import lib
import logging
import os
import socket
import time

start_time = time.time()

# set path to configfile
# note: remove settings below if configfile is used
# configfile = "/home/schulz/pipeline/apercal_tests/mosaic/mosaic_v2_190428055.cfg"
configfile = None

# Setting up log file
task_id = 190428055
logfile = os.path.join(os.getcwd(), "{0}_mosaic.log".format(task_id))
lib.setup_logger('debug', logfile=logfile)
logger = logging.getLogger(__name__)

# Mosaic module
mo = mosaic(file_=configfile)

# List of settings (Change as necessary)
# ======================================

# set the output directory
mo.basedir = '/data/schulz/mosaic_test/190428055/'

# enable continuum mosaic
mo.mosaic_continuum_mf = True

# set the taskid of observation to mosaic
Example No. 14
    # get taskid/obs_id/scan
    obs_id = args.obs_id

    # get the QA directory for this observation
    qa_dir = get_default_imagepath(obs_id, basedir=args.basedir)

    # get the line QA directory for this observation
    qa_line_dir = "{0:s}line".format(qa_dir)

    if not os.path.exists(qa_line_dir):
        print("Creating directory {0:s}".format(qa_line_dir))
        os.mkdir(qa_line_dir)

    # Create logging file
    lib.setup_logger('debug',
                     logfile='{0:s}/get_cube_stats.log'.format(qa_line_dir))
    logger = logging.getLogger(__name__)

    # check host name
    host_name = socket.gethostname()

    # get data directories depending on the host name
    if args.trigger_mode:
        logger.info(
            "--> Running line QA in trigger mode. Looking only for data processed by Apercal on {0:s} <--"
            .format(host_name))
        data_base_dir_list = ['/data/apertif/{0:s}'.format(obs_id)]
    elif host_name != "happili-01" and not args.trigger_mode:
        logger.warning("You are not working on happili-01.")
        logger.warning("The script will not process all beams")
        logger.warning("Please switch to happili-01")
Example No. 15
    args = parser.parse_args()

    # If no path is given change to default QA path
    if args.path is None:
        output_path = get_default_imagepath(args.taskID, basedir=args.basedir)

        # check that preflag qa directory exists
        output_path = "{0:s}preflag/".format(output_path)

        if not os.path.exists(output_path):
            os.mkdir(output_path)
    else:
        output_path = args.path

    # Create logging file
    lib.setup_logger('debug',
                     logfile='{0:s}run_rfinder.log'.format(output_path))
    logger = logging.getLogger(__name__)

    # get data directories depending on the host name
    host_name = socket.gethostname()
    if args.trigger_mode:
        logger.info(
            "--> Running line QA in trigger mode. Looking only for data processed by Apercal on {0:s} <--"
            .format(host_name))
        data_beam_dir_list = glob.glob("/data/apertif/{}/[0-3][0-9]".format(
            args.taskID))
    elif host_name != "happili-01" and not args.trigger_mode:
        logger.warning("You are not working on happili-01.")
        logger.warning("The script will not process all beams")
        logger.warning("Please switch to happili-01")
        data_beam_dir_list = glob.glob("/data/apertif/{}/[0-3][0-9]".format(
Example No. 16
def make_mosaic(task_id,
                basedir,
                centre_ra=None,
                centre_dec=None,
                mosaic_beams=None,
                primary_beam_map_dir=None,
                step_limit=None,
                use_noise_correlation=False,
                do_validation=False,
                continuum_image_dir=None,
                do_not_cleanup=False):

    start_time = time.time()

    # configfile
    # configfile = "/home/schulz/pipeline/apercal_tests/mosaic/mosaic_v2_190428055.cfg"
    configfile = None

    # Setting up log file
    # task_id = 190822046
    logfile = os.path.join(os.getcwd(), "{0}_mosaic.log".format(task_id))
    lib.setup_logger('debug', logfile=logfile)
    logger = logging.getLogger(__name__)

    host_name = socket.gethostname()

    # Mosaic module
    mo = mosaic(file_=configfile)

    # Settings
    # ========

    # set the number of steps for the mosaic to go through
    mo.mosaic_step_limit = step_limit

    # set the output directory
    mo.basedir = os.path.join(basedir, '{}'.format(task_id))

    # enable continuum mosaic
    mo.mosaic_continuum_mf = True

    # disable line and polarisation mosaics
    mo.mosaic_line = False
    mo.mosaic_polarisation = False

    # set the taskid of observation to mosaic
    mo.mosaic_taskid = "{}".format(task_id)

    # set the list of beams or all
    if mosaic_beams is None:
        mo.mosaic_beams = 'all'
    else:
        mo.mosaic_beams = mosaic_beams

    # set location of continuum images, default is ALTA
    # continuum images must be located in <path>/<beam_nr>/<image_name>.fits
    if continuum_image_dir is None:
        mo.mosaic_continuum_image_origin = "ALTA"
    else:
        mo.mosaic_continuum_image_origin = continuum_image_dir
    # mo.mosaic_continuum_image_origin = "/data/pisano/190428055/continuum/raw/"

    # set the type of primary beam to be used
    mo.mosaic_primary_beam_type = 'Correct'
    # if the correct primary beam is supposed to be used
    # set the path to files (do not change)
    if primary_beam_map_dir is None:
        mo.mosaic_primary_beam_shape_files_location = "/tank/apertif/driftscans/fits_files/191023/chann_5"
    else:
        mo.mosaic_primary_beam_shape_files_location = primary_beam_map_dir
    # set the cutoff
    mo.mosaic_beam_map_cutoff = 0.1

    # set the projection centre
    if centre_ra is None and centre_dec is None:
        # using a given beam
        mo.mosaic_continuum_projection_centre_beam = '00'
    else:
        # using ra and dec (untested)
        mo.mosaic_continuum_projection_centre_ra = centre_ra
        mo.mosaic_continuum_projection_centre_dec = centre_dec

    # type of beam for convolution
    mo.mosaic_common_beam_type = 'circular'

    # turn on noise correlation
    if use_noise_correlation:
        mo.mosaic_use_askap_based_matrix = True
    else:
        mo.mosaic_use_askap_based_matrix = False

    # run the image validation tool on the mosaic
    if do_validation:
        mo.mosaic_continuum_image_validation = True
    else:
        mo.mosaic_continuum_image_validation = False

    # clean up
    mo.mosaic_continuum_clean_up_level = 1
    if do_not_cleanup:
        mo.mosaic_continuum_clean_up = False
    else:
        mo.mosaic_continuum_clean_up = True

    # create the mosaic
    # =================
    mo.go()

    logger.info("Finished after {0:.0f}s".format(time.time() - start_time))
Example No. 17
                    help='Set it to run Autocal triggering mode automatically after Apercal.')

args = parser.parse_args()

# If no path is given change to default QA path
if args.path is None:
    output_path = get_default_imagepath(args.scan, basedir=args.basedir)

    # check that crosscal qa directory exists
    output_path = "{0:s}crosscal/".format(output_path)

    if not os.path.exists(output_path):
        os.mkdir(output_path)
else:
    output_path = args.path

# Create logging file
lib.setup_logger(
    'debug', logfile='{0:s}run_ccal_plots.log'.format(output_path))
logger = logging.getLogger(__name__)
# logging.basicConfig(level=logging.DEBUG)

# Create crosscal plots
crosscal_plots.make_all_ccal_plots(
    args.scan, args.fluxcal, args.polcal, output_path=output_path, basedir=args.basedir, trigger_mode=args.trigger_mode)

end = timer()
logger.info('Elapsed time to generate cross-calibration data QA inspection plots is {} minutes'.format(
    (end - start)/60.))
# time in minutes
Example No. 18
def start_apercal_pipeline(targets,
                           fluxcals,
                           polcals,
                           dry_run=False,
                           basedir=None,
                           flip_ra=False,
                           steps=None,
                           configfilename=None):
    """
    Trigger the start of a fluxcal pipeline. Returns when pipeline is done.
    Example for taskid, name, beamnr: (190108926, '3C147_36', 36)
    Fluxcals and polcals can be specified in the wrong order, if the polcal is not polarised
    they will be flipped.
    If both polcals and fluxcals are set, they should both be the same length.
    A list of config files can be provided, i.e., one for each beam. If a single config file 
    is given, copies of it will be created so that there is one config per beam. If no
    config file is given, the default one is used and copies for each beam are made.

    Args:
        targets (Tuple[int, str, List[int]]): taskid, name, list of beamnrs
        fluxcals (List[Tuple[int, str, int]]): fluxcals: taskid, name, beamnr
        polcals (List[Tuple[int, str, int]]): polcals: taskid, name, beamnr (can be None)
        dry_run (bool): interpret arguments, do not actually run pipeline
        basedir (str): base directory; if not specified will be /data/apertif/{target_taskid}
        flip_ra (bool): flip RA (for old measurement sets where beamweights were flipped)
        steps (List[str]): list of steps to perform
        configfilename (List[str]): Custom configfile (should be full path for now)

    Returns:
        Tuple[Dict[int, List[str]], str, str]: a dict, the formatted runtime, and possibly
            an exception. The dict contains beam numbers (ints) as keys and a list of
            failed steps as values. Failed is defined here as 'threw an exception',
            only for target steps. Please also read the logs.
    """
    if steps is None:
        steps = [
            "prepare", "split", "preflag", "ccal", "convert", "scal",
            "continuum", "polarisation", "line", "transfer"
        ]

    (taskid_target, name_target, beamlist_target) = targets

    # set the base directory if none was provided
    if not basedir:
        basedir = '/data/apertif/{}/'.format(taskid_target)
    elif len(basedir) > 0 and basedir[-1] != '/':
        basedir = basedir + '/'
    if not os.path.exists(basedir):
        os.mkdir(basedir)

    logfilepath = os.path.join(basedir, 'apercal.log')

    lib.setup_logger('debug', logfile=logfilepath)
    logger = logging.getLogger(__name__)
    gitinfo = subprocess.check_output('cd ' +
                                      os.path.dirname(apercal.__file__) +
                                      '&& git describe --tag; cd',
                                      shell=True).strip()
    logger.info("Apercal version: " + gitinfo)

    logger.info(
        "start_apercal called with arguments targets={}; fluxcals={}; polcals={}"
        .format(targets, fluxcals, polcals))
    logger.info("steps = {}".format(steps))

    # number of beams to process
    n_beams = len(beamlist_target)

    # check the input config file
    # get the default configfile if none was provided
    if not configfilename:
        logger.info("No config file provided, getting default config")
        # create a list of config file name
        configfilename_list = [
            os.path.join(
                basedir,
                "{0}_B{1}_Apercal_settings.cfg".format(taskid_target,
                                                       str(beam).zfill(2)))
            for beam in beamlist_target
        ]
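        # e.g. for taskid 190108926 and beam 0 with the default basedir this
        # resolves to /data/apertif/190108926/190108926_B00_Apercal_settings.cfg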
        # get the default config settings
        config = lib.get_default_config()
        # go through the config files and create them
        for beam_index in range(n_beams):
            with open(configfilename_list[beam_index], "w") as fp:
                config.write(fp)
            logger.info("Beam {} config file saved to {}".format(
                beamlist_target[beam_index], configfilename_list[beam_index]))
    # if configfile(s) are given as a list
    elif type(configfilename) is list:
        # if it is just one, create copies for each beam in the base directory
        if len(configfilename) == 1:
            logger.info(
                "A single config file was provided. Creating copies of {}".
                format(configfilename[0]))
            configfilename_list = [
                os.path.join(
                    basedir, "{0}_B{1}_Apercal_settings.cfg".format(
                        taskid_target,
                        str(beam).zfill(2))) for beam in beamlist_target
            ]
            # make the copies
            for config in configfilename_list:
                lib.basher("cp " + str(configfilename[0]) + " " + str(config))
        elif len(configfilename) == n_beams:
            logger.info("Number of config files and target beams match.")
            configfilename_list = configfilename
        else:
            error = "Number of config files and target beams did not match. Abort"
            logger.error(error)
            raise RuntimeError(error)
    # if configfilename is just a string
    elif type(configfilename) is str:
        logger.info(
            "A single config file was provided. Creating copies of {}".format(
                configfilename))
        configfilename_list = [
            os.path.join(
                basedir,
                "{0}_B{1}_Apercal_settings.cfg".format(taskid_target,
                                                       str(beam).zfill(2)))
            for beam in beamlist_target
        ]
        # make the copies
        for config in configfilename_list:
            lib.basher("cp " + str(configfilename) + " " + str(config))
    else:
        error = "Unknown input for configfilename. Abort"
        logger.error(error)
        raise RuntimeError(error)

    status = pymp.shared.dict({beamnr: [] for beamnr in beamlist_target})

    if fluxcals:
        name_fluxcal = str(fluxcals[0][1]).strip().split('_')[0].upper()
    else:
        name_fluxcal = ''
    if polcals:
        name_polcal = str(polcals[0][1]).strip().split('_')[0].upper()
    else:
        name_polcal = ''
    name_target = str(name_target).strip()  # .upper()

    # If both fluxcal and polcal polarized, remove polcal
    if subs_calmodels.is_polarised(
            name_polcal) and subs_calmodels.is_polarised(name_fluxcal):
        name_polcal = ""

    if (fluxcals and fluxcals != '') and (polcals and polcals != ''):
        assert (len(fluxcals) == len(polcals))

    # avoid symmetry bias, if there is only a polcal but no fluxcal, switch them
    if fluxcals is None and polcals is not None:
        logger.info(
            "Only polcal was provided. Setting polcal {} to fluxcal".format(
                name_polcal))
        fluxcals, polcals = polcals, fluxcals
        name_polcal = ""
    # Exchange polcal and fluxcal if specified in the wrong order
    elif not subs_calmodels.is_polarised(name_polcal) and name_polcal != '':
        if subs_calmodels.is_polarised(name_fluxcal):
            logger.info("Switching polcal and fluxcal because " + name_polcal +
                        " is not polarised")
            fluxcals, polcals = polcals, fluxcals
            name_polcal = str(polcals[0][1]).strip()
        else:
            logger.info("Setting polcal to '' since " + name_polcal +
                        " is not polarised")
            name_polcal = ""
    elif name_polcal != '':
        logger.info("Polcal " + name_polcal + " is polarised, all good")

    def name_to_ms(name):
        if not name:
            return ''
        elif '3C' in name:
            return name.upper().strip().split('_')[0] + '.MS'
        else:
            return name + '.MS'

    def name_to_mir(name):
        if not name:
            return ''
        elif '3C' in name:
            return name.upper().strip().split('_')[0] + '.mir'
        else:
            return name + '.mir'
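
    # Expected behaviour of the two helpers above, e.g.:
    #   name_to_ms('3C147_36') -> '3C147.MS'    name_to_mir('3C147_36') -> '3C147.mir'
    #   name_to_ms('NGC1234')  -> 'NGC1234.MS'  (hypothetical target name)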

    def set_files(p):
        """
        Set the basedir, fluxcal, polcal, target properties

        Args:
            p (BaseModule): apercal step object (e.g. prepare)

        Returns:
            None
        """

        p.basedir = basedir
        p.fluxcal = name_to_ms(name_fluxcal)
        p.polcal = name_to_ms(name_polcal)
        p.target = name_to_ms(name_target)

        # debug_msg = """
        # p.basedir = basedir = {0};
        # p.fluxcal = name_to_ms(name_fluxcal) = {1};
        # p.polcal = name_to_ms(name_polcal) = {2};
        # p.target = name_to_ms(name_target) = {3};
        # """.format(basedir, name_to_ms(name_fluxcal), name_to_ms(name_polcal), name_to_ms(name_target))
        # logger.debug(debug_msg)

    beamnrs_fluxcal = [f[2] for f in fluxcals]
    if len(fluxcals) > 1:
        # Check every target beam has a fluxcal beam
        for beamnr_target in beamlist_target:
            assert (beamnr_target in beamnrs_fluxcal)

    # creating a copy of the target beamlist as a normal array
    # to avoid using np.where() for such a small thing
    if type(beamlist_target) == np.ndarray:
        beamlist_target_for_config = beamlist_target.tolist()
    else:
        beamlist_target_for_config = beamlist_target

    time_start = time()
    try:
        # =======
        # Prepare
        # =======

        # keep a start-finish record of the step in the main log file
        if "prepare" in steps:
            logger.info("Running prepare")
            start_time_prepare = time()
        else:
            logger.info("Skipping prepare")

        # Prepare fluxcals
        for (taskid_fluxcal, name_fluxcal, beamnr_fluxcal) in fluxcals:
            p0 = prepare(file_=configfilename_list[
                beamlist_target_for_config.index(beamnr_fluxcal)])
            p0.basedir = basedir
            #set_files(p0)
            p0.prepare_flip_ra = flip_ra
            # the following two need to be empty strings for prepare
            p0.fluxcal = ''
            p0.polcal = ''
            p0.target = name_to_ms(name_fluxcal)
            p0.prepare_target_beams = str(beamnr_fluxcal)
            p0.prepare_date = str(taskid_fluxcal)[:6]
            p0.prepare_obsnum_target = validate_taskid(taskid_fluxcal)
            if "prepare" in steps and not dry_run:
                try:
                    p0.go()
                except Exception as e:
                    logger.warning("Prepare failed for fluxcal " +
                                   str(taskid_fluxcal) + " beam " +
                                   str(beamnr_fluxcal))
                    logger.exception(e)

        if 'prepare' in steps:
            # copy the param file generated here
            param_file = os.path.join(basedir, 'param.npy')
            director(p0,
                     'rn',
                     param_file.replace(
                         ".npy",
                         "_prepare_{}.npy".format(name_fluxcal.split('_')[0])),
                     file_=param_file,
                     ignore_nonexistent=True)

        # Prepare polcals
        if name_polcal != '':
            for (taskid_polcal, name_polcal, beamnr_polcal) in polcals:
                p0 = prepare(file_=configfilename_list[
                    beamlist_target_for_config.index(beamnr_polcal)])
                p0.basedir = basedir
                #set_files(p0)
                p0.prepare_flip_ra = flip_ra
                # the following two need to be empty strings for prepare
                p0.fluxcal = ''
                p0.polcal = ''
                p0.target = name_to_ms(name_polcal)
                p0.prepare_target_beams = str(beamnr_polcal)
                p0.prepare_date = str(taskid_polcal)[:6]
                p0.prepare_obsnum_target = validate_taskid(taskid_polcal)
                if "prepare" in steps and not dry_run:
                    try:
                        p0.go()
                    except Exception as e:
                        logger.warning("Prepare failed for polcal " +
                                       str(taskid_polcal) + " beam " +
                                       str(beamnr_polcal))
                        logger.exception(e)

            if 'prepare' in steps:
                # copy the param file generated here
                param_file = os.path.join(basedir, 'param.npy')
                director(p0,
                         'rn',
                         param_file.replace(
                             ".npy", "_prepare_{}.npy".format(
                                 name_polcal.split('_')[0])),
                         file_=param_file,
                         ignore_nonexistent=True)

        # Prepare target
        for beamnr in beamlist_target:
            p0 = prepare(file_=configfilename_list[
                beamlist_target_for_config.index(beamnr)])
            p0.basedir = basedir
            # set_files(p0)
            p0.prepare_flip_ra = flip_ra
            # the following two need to be empty strings for prepare
            p0.fluxcal = ''
            p0.polcal = ''
            p0.target = name_to_ms(name_target)
            p0.prepare_date = str(taskid_target)[:6]
            p0.prepare_obsnum_target = validate_taskid(taskid_target)
            p0.prepare_target_beams = ','.join(
                ['{:02d}'.format(beamnr) for beamnr in beamlist_target])
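            # e.g. beamlist_target = [0, 1, 10] gives prepare_target_beams = '00,01,10'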
            if "prepare" in steps and not dry_run:
                try:
                    p0.go()
                except Exception as e:
                    logger.warning("Prepare failed for target " +
                                   str(taskid_target) + " beam " + str(beamnr))
                    logger.exception(e)
                    status[beamnr] += ['prepare']

        # keep a start-finish record of the step in the main log file
        if "prepare" in steps:
            logger.info("Running prepare ... Done ({0:.0f}s)".format(
                time() - start_time_prepare))

            # copy the param file generated here
            param_file = os.path.join(basedir, 'param.npy')
            director(p0,
                     'rn',
                     param_file.replace(".npy",
                                        "_prepare_{}.npy".format(name_target)),
                     file_=param_file,
                     ignore_nonexistent=True)

        # =====
        # Split
        # =====

        # keep a start-finish record of the step in the main log file
        if 'split' in steps:
            logger.info("Running split")
            start_time_split = time()
        else:
            logger.info("Skipping split")

        # Splitting a small chunk of data for the quicklook pipeline.
        # At the moment this relies entirely on the target beams.
        # What if there are more calibrator beams than target beams? Is that realistic?
        with pymp.Parallel(5) as p:
            for beam_index in p.range(n_beams):
                beamnr = beamlist_target[beam_index]

                # individual logfiles for each process
                logfilepath = os.path.join(basedir,
                                           'apercal{:02d}.log'.format(beamnr))
                lib.setup_logger('debug', logfile=logfilepath)
                logger = logging.getLogger(__name__)

                logger.debug("Starting logfile for beam " + str(beamnr))
                try:
                    s0 = split(file_=configfilename_list[beam_index])
                    set_files(s0)
                    s0.beam = "{:02d}".format(beamnr)
                    if "split" in steps and not dry_run:
                        s0.go()
                except Exception as e:
                    logger.warning("Split failed for {0} beam {1}".format(
                        str(taskid_target), str(beamnr)))
                    logger.exception(e)
                    # not sure if following line is necessary
                    status[beamnr] += ['split']

        # keep a start-finish record of the step in the main log file
        if "split" in steps:
            logfilepath = os.path.join(basedir, 'apercal.log')
            lib.setup_logger('debug', logfile=logfilepath)
            logger = logging.getLogger(__name__)
            logger.info(
                "Running split ... Done ({0:.0f}s)".format(time() -
                                                           start_time_split))

            # copy the param file generated here
            # param_file = os.path.join(basedir, 'param.npy')
            # director(
            #     p0, 'rn', param_file.replace(".npy", "_split.npy"), file_=param_file, ignore_nonexistent=True)

        # =======
        # Preflag
        # =======

        # keep a record of the parallelised step in the main log file
        if "preflag" in steps:
            logger.info("Running preflag")
            start_time_preflag = time()
        else:
            logger.info("Skipping preflag")

        # In order to run in parallel, the bandpass table needs to exist.
        # Doing it here is not elegant, but it requires the least amount of changes
        # to preflag.
        # with pymp.Parallel(10) as p:
        #     for beam_index in p.range(n_beams):
        #         beamnr = beamlist_target[beam_index]
        #         # individual logfiles for each process
        #         logfilepath = os.path.join(
        #             basedir, 'apercal{:02d}.log'.format(beamnr))
        #         lib.setup_logger('debug', logfile=logfilepath)
        #         logger = logging.getLogger(__name__)

        #         logger.debug("Starting logfile for beam " + str(beamnr))
        #         p1 = preflag(filename=configfilename)
        #         p1.paramfilename = 'param_{:02d}.npy'.format(beamnr)
        #         p1.basedir = basedir
        #         p1.fluxcal = ''
        #         p1.polcal = ''
        #         p1.target = name_to_ms(name_fluxcal)

        #         p1.beam = "{:02d}".format(beamnr)
        #         p1.preflag_targetbeams = "{:02d}".format(beamnr)
        #         if "preflag" in steps and not dry_run:
        #             try:
        #                 bandpass_start_time = time()
        #                 logger.info("Running aoflagger bandpass for flux calibrator {0} in beam {1}".format(
        #                     p1.target, p1.beam))
        #                 # director(
        #                 #     p1, 'rm', basedir + '/param_{:02d}.npy'.format(beamnr), ignore_nonexistent=True)
        #                 p1.go()
        #                 # director(p1, 'rm', basedir + '/param.npy',
        #                 #         ignore_nonexistent=True)

        #                 # it is necessary to move the param files in order to keep them
        #                 param_file = basedir + \
        #                     '/param_{:02d}.npy'.format(beamnr)
        #                 director(
        #                     p1, 'mv', param_file, file_=param_file.replace(".npy", "_preflag_{0}.npy".format(name_fluxcal)), ignore_nonexistent=True)

        #                 p1.aoflagger_bandpass()
        #             except Exception as e:
        #                 logger.warning("Running aoflagger bandpass for flux calibrator {0} in beam {1} ... Failed ({2:.0f}s)".format(
        #                     p1.target, p1.beam, time() - bandpass_start_time))
        #                 logger.exception(e)
        #                 status[beamnr] += ['preflag_bandpass']
        #             else:
        #                 logger.info("Running aoflagger bandpass for flux calibrator {0} in beam {1} ... Done ({2:.0f}s)".format(
        #                     p1.target, p1.beam, time() - bandpass_start_time))

        # Flag fluxcal (pretending it's a target, parallelised version)
        # 5 in parallel
        with pymp.Parallel(5) as p:
            for beam_index in p.range(n_beams):
                beamnr = beamlist_target[beam_index]

                # individual logfiles for each process
                logfilepath = os.path.join(basedir,
                                           'apercal{:02d}.log'.format(beamnr))
                lib.setup_logger('debug', logfile=logfilepath)
                logger = logging.getLogger(__name__)

                logger.debug("Starting logfile for beam " + str(beamnr))

                try:
                    p1 = preflag(filename=configfilename_list[beam_index])
                    p1.paramfilename = 'param_{:02d}.npy'.format(beamnr)
                    p1.basedir = basedir
                    p1.fluxcal = ''
                    p1.polcal = ''
                    p1.target = name_to_ms(name_fluxcal)
                    p1.beam = "{:02d}".format(beamnr)
                    p1.preflag_targetbeams = "{:02d}".format(beamnr)
                    if beam_index < 2:
                        p1.preflag_aoflagger_threads = 9
                    else:
                        p1.preflag_aoflagger_threads = 10
                    if "preflag" in steps and not dry_run:
                        logger.info(
                            "Running preflag for flux calibrator {0} in beam {1}"
                            .format(p1.target, p1.beam))
                        preflag_flux_cal_start_time = time()
                        # director(
                        #     p1, 'rm', basedir + '/param_{:02d}.npy'.format(beamnr), ignore_nonexistent=True)
                        p1.go()

                        # it is necessary to move the param files in order to keep them
                        param_file = os.path.join(
                            basedir, 'param_{:02d}.npy'.format(beamnr))
                        director(p1,
                                 'rn',
                                 param_file.replace(
                                     ".npy", "_preflag_{0}.npy".format(
                                         name_fluxcal.split('_')[0])),
                                 file_=param_file,
                                 ignore_nonexistent=True)
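                        # e.g. param_01.npy is renamed to param_01_preflag_3C147.npy
                        # (assuming the flux calibrator is 3C147)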

                        logger.info(
                            "Running preflag for flux calibrator {0} in beam {1} ... Done ({2:.0f}s)"
                            .format(p1.target, p1.beam,
                                    time() - preflag_flux_cal_start_time))
                except Exception as e:
                    logger.warning(
                        "Running preflag for flux calibrator {0} in beam {1} ... Failed ({2:.0f}s)"
                        .format(p1.target, p1.beam,
                                time() - preflag_flux_cal_start_time))
                    logger.exception(e)
                    status[beamnr] += ['preflag']

        # Flag polcal (pretending it's a target, parallel version)
        # 5 in parallel
        with pymp.Parallel(5) as p:
            for beam_index in p.range(n_beams):
                beamnr = beamlist_target[beam_index]

                # individual logfiles for each process
                logfilepath = os.path.join(basedir,
                                           'apercal{:02d}.log'.format(beamnr))
                lib.setup_logger('debug', logfile=logfilepath)
                logger = logging.getLogger(__name__)

                logger.debug("Starting logfile for beam " + str(beamnr))

                try:
                    p1 = preflag(filename=configfilename_list[beam_index])
                    # remove next line in final version
                    p1.preflag_aoflagger_version = 'local'
                    p1.paramfilename = 'param_{:02d}.npy'.format(beamnr)
                    p1.basedir = basedir
                    if name_polcal != '':
                        p1.fluxcal = ''
                        p1.polcal = ''
                        p1.target = name_to_ms(name_polcal)
                        p1.beam = "{:02d}".format(beamnr)
                        p1.preflag_targetbeams = "{:02d}".format(beamnr)
                        if beam_index < 2:
                            p1.preflag_aoflagger_threads = 9
                        else:
                            p1.preflag_aoflagger_threads = 10
                        if "preflag" in steps and not dry_run:
                            logger.info(
                                "Running preflag for pol calibrator {0} in beam {1}"
                                .format(p1.target, p1.beam))
                            preflag_pol_cal_start_time = time()
                            # director(
                            #     p1, 'rm', basedir + '/param_{:02d}.npy'.format(beamnr), ignore_nonexistent=True)
                            p1.go()

                            # it is necessary to move the param files in order to keep them
                            param_file = os.path.join(
                                basedir, 'param_{:02d}.npy'.format(beamnr))
                            director(p1,
                                     'rn',
                                     param_file.replace(
                                         ".npy", "_preflag_{0}.npy".format(
                                             name_polcal.split('_')[0])),
                                     file_=param_file,
                                     ignore_nonexistent=True)

                            logger.info(
                                "Running preflag for pol calibrator {0} in beam {1} ... Done ({2:.0f}s)"
                                .format(p1.target, p1.beam,
                                        time() - preflag_pol_cal_start_time))
                except Exception as e:
                    logger.warning(
                        "Running preflag for pol calibrator {0} in beam {1} ... Failed ({2:.0f}s)"
                        .format(p1.target, p1.beam,
                                time() - preflag_pol_cal_start_time))
                    logger.exception(e)
                    status[beamnr] += ['preflag']

        # Flag target
        # 5 in parallel
        with pymp.Parallel(5) as p:
            for beam_index in p.range(n_beams):
                beamnr = beamlist_target[beam_index]

                # individual logfiles for each process
                logfilepath = os.path.join(basedir,
                                           'apercal{:02d}.log'.format(beamnr))
                lib.setup_logger('debug', logfile=logfilepath)
                logger = logging.getLogger(__name__)

                logger.debug("Starting logfile for beam " + str(beamnr))

                try:
                    p1 = preflag(filename=configfilename_list[beam_index])
                    # remove next line in final version
                    p1.preflag_aoflagger_version = 'local'
                    p1.paramfilename = 'param_{:02d}.npy'.format(beamnr)
                    p1.basedir = basedir
                    p1.fluxcal = ''
                    p1.polcal = ''
                    p1.target = name_to_ms(name_target)
                    p1.beam = "{:02d}".format(beamnr)
                    p1.preflag_targetbeams = "{:02d}".format(beamnr)
                    if beam_index < 2:
                        p1.preflag_aoflagger_threads = 9
                    else:
                        p1.preflag_aoflagger_threads = 10
                    if "preflag" in steps and not dry_run:
                        logger.info(
                            "Running preflag for target {0} in beam {1}".
                            format(p1.target, p1.beam))
                        preflag_target_start_time = time()
                        # director(
                        #     p1, 'rm', basedir + '/param_{:02d}.npy'.format(beamnr), ignore_nonexistent=True)
                        p1.go()

                        # it is necessary to move the param files in order to keep them
                        param_file = os.path.join(
                            basedir, 'param_{:02d}.npy'.format(beamnr))
                        director(p1,
                                 'rn',
                                 param_file.replace(
                                     ".npy",
                                     "_preflag_{0}.npy".format(name_target)),
                                 file_=param_file,
                                 ignore_nonexistent=True)

                        logger.info(
                            "Running preflag for target {0} in beam {1} ... Done ({2:.0f}s)"
                            .format(p1.target, p1.beam,
                                    time() - preflag_target_start_time))
                except Exception as e:
                    logger.warning(
                        "Running preflag for target {0} in beam {1} ... Failed ({2:.0f}s)"
                        .format(p1.target, p1.beam,
                                time() - preflag_target_start_time))
                    logger.exception(e)
                    status[beamnr] += ['preflag']

        # keep a record of the parallelised step in the main log file
        if "preflag" in steps:
            logfilepath = os.path.join(basedir, 'apercal.log')
            lib.setup_logger('debug', logfile=logfilepath)
            logger = logging.getLogger(__name__)

            logger.info("Running preflag ... Done ({0:.0f}s)".format(
                time() - start_time_preflag))

        # ===============
        # Crosscal
        # ===============

        # keep a record of the parallelised step in the main log file
        if 'ccal' in steps:
            logfilepath = os.path.join(basedir, 'apercal.log')
            lib.setup_logger('debug', logfile=logfilepath)
            logger = logging.getLogger(__name__)

            logger.info("Running crosscal")
            start_time_crosscal = time()
        else:
            logfilepath = os.path.join(basedir, 'apercal.log')
            lib.setup_logger('debug', logfile=logfilepath)
            logger = logging.getLogger(__name__)

            logger.info("Skipping crosscal")

        if len(fluxcals) == 1 and fluxcals[0][-1] == 0 and n_beams > 1:
            raise ApercalException(
                "Sorry, one fluxcal is not supported anymore at the moment")

        with pymp.Parallel(10) as p:
            for beam_index in p.range(n_beams):

                beamnr = beamlist_target[beam_index]
                logfilepath = os.path.join(basedir,
                                           'apercal{:02d}.log'.format(beamnr))
                lib.setup_logger('debug', logfile=logfilepath)
                logger = logging.getLogger(__name__)

                logger.debug("Starting logfile for beam " + str(beamnr))
                try:
                    p2 = ccal(file_=configfilename_list[beam_index])
                    p2.paramfilename = 'param_{:02d}.npy'.format(beamnr)
                    set_files(p2)
                    p2.beam = "{:02d}".format(beamnr)
                    p2.crosscal_transfer_to_target_targetbeams = "{:02d}".format(
                        beamnr)
                    if "ccal" in steps and not dry_run:
                        # director(
                        #     p2, 'rm', basedir + '/param_{:02d}.npy'.format(beamnr), ignore_nonexistent=True)
                        p2.go()
                        # it is necessary to move the param files in order to keep them
                        param_file = os.path.join(
                            basedir, 'param_{:02d}.npy'.format(beamnr))
                        director(p2,
                                 'rn',
                                 param_file.replace(".npy", "_crosscal.npy"),
                                 file_=param_file,
                                 ignore_nonexistent=True)
                except Exception as e:
                    # Exception was already logged just before
                    logger.warning(
                        "Failed beam {}, skipping that from crosscal".format(
                            beamnr))
                    logger.exception(e)
                    status[beamnr] += ['crosscal']

        # keep a record of the parallelised step in the main log file
        if 'ccal' in steps:
            logfilepath = os.path.join(basedir, 'apercal.log')
            lib.setup_logger('debug', logfile=logfilepath)
            logger = logging.getLogger(__name__)

            logger.info("Running crosscal ... Done ({0:.0f}s)".format(
                time() - start_time_crosscal))

        # =======
        # Convert
        # =======

        # keep a record of the parallelised step in the main log file
        if 'convert' in steps:
            logfilepath = os.path.join(basedir, 'apercal.log')
            lib.setup_logger('debug', logfile=logfilepath)
            logger = logging.getLogger(__name__)

            logger.info("Running convert")
            start_time_convert = time()
        else:
            logfilepath = os.path.join(basedir, 'apercal.log')
            lib.setup_logger('debug', logfile=logfilepath)
            logger = logging.getLogger(__name__)

            logger.info("Skipping convert")

        # 5 threads to not hammer the disks too much, convert is only IO
        with pymp.Parallel(5) as p:
            for beam_index in p.range(n_beams):
                beamnr = beamlist_target[beam_index]

                logfilepath = os.path.join(basedir,
                                           'apercal{:02d}.log'.format(beamnr))
                lib.setup_logger('debug', logfile=logfilepath)
                logger = logging.getLogger(__name__)

                try:
                    p3 = convert(file_=configfilename_list[beam_index])
                    p3.paramfilename = 'param_{:02d}.npy'.format(beamnr)
                    set_files(p3)
                    p3.beam = "{:02d}".format(beamnr)
                    p3.convert_targetbeams = "{:02d}".format(beamnr)
                    if "convert" in steps and not dry_run:
                        # director(
                        #     p3, 'rm', basedir + '/param_{:02d}.npy'.format(beamnr), ignore_nonexistent=True)
                        p3.go()

                        # it is necessary to move the param files in order to keep them
                        param_file = os.path.join(
                            basedir, 'param_{:02d}.npy'.format(beamnr))
                        director(p3,
                                 'rn',
                                 param_file.replace(".npy", "_convert.npy"),
                                 file_=param_file,
                                 ignore_nonexistent=True)
                        # director(
                        #     p3, 'rm', basedir + '/param_{:02d}.npy'.format(beamnr), ignore_nonexistent=True)
                except Exception as e:
                    logger.warning(
                        "Failed beam {}, skipping that from convert".format(
                            beamnr))
                    logger.exception(e)
                    status[beamnr] += ['convert']

        if 'convert' in steps:
            # keep a record of the parallelised step in the main log file
            logfilepath = os.path.join(basedir, 'apercal.log')
            lib.setup_logger('debug', logfile=logfilepath)
            logger = logging.getLogger(__name__)

            logger.info("Running convert ... Done ({0:.0f}s)".format(
                time() - start_time_convert))

        # ==================================
        # Selfcal + Continuum + Polarisation
        # ==================================

        # keep a record of the parallelised step in the main log file
        if 'scal' in steps or 'continuum' in steps or 'polarisation' in steps:
            logfilepath = os.path.join(basedir, 'apercal.log')
            lib.setup_logger('debug', logfile=logfilepath)
            logger = logging.getLogger(__name__)

            logger.info("Running selfcal and/or continuum and/or polarisation")
            start_time_selfcal_continuum_polarisation = time()
        else:
            logfilepath = os.path.join(basedir, 'apercal.log')
            lib.setup_logger('debug', logfile=logfilepath)
            logger = logging.getLogger(__name__)

            logger.info("Skipping selfcal and continuum and polarisation")

        with pymp.Parallel(10) as p:
            for beam_index in p.range(n_beams):
                beamnr = beamlist_target[beam_index]

                logfilepath = os.path.join(basedir,
                                           'apercal{:02d}.log'.format(beamnr))
                lib.setup_logger('debug', logfile=logfilepath)
                logger = logging.getLogger(__name__)

                try:
                    p4 = scal(file_=configfilename_list[beam_index])
                    p4.paramfilename = 'param_{:02d}.npy'.format(beamnr)
                    p4.basedir = basedir
                    p4.beam = "{:02d}".format(beamnr)
                    p4.target = name_target + '.mir'
                    if "scal" in steps and not dry_run:
                        p4.go()
                except Exception as e:
                    # Exception was already logged just before
                    logger.warning(
                        "Failed beam {}, skipping that from scal".format(
                            beamnr))
                    logger.exception(e)
                    status[beamnr] += ['scal']

                try:
                    p5 = continuum(file_=configfilename_list[beam_index])
                    p5.paramfilename = 'param_{:02d}.npy'.format(beamnr)
                    p5.basedir = basedir
                    p5.beam = "{:02d}".format(beamnr)
                    p5.target = name_target + '.mir'
                    if "continuum" in steps and not dry_run:
                        p5.go()
                except Exception as e:
                    # Exception was already logged just before
                    logger.warning(
                        "Failed beam {}, skipping that from continuum".format(
                            beamnr))
                    logger.exception(e)
                    status[beamnr] += ['continuum']

                try:
                    p6 = polarisation(file_=configfilename_list[beam_index])
                    p6.paramfilename = 'param_{:02d}.npy'.format(beamnr)
                    p6.basedir = basedir
                    p6.beam = "{:02d}".format(beamnr)
                    p6.polcal = name_to_mir(name_polcal)
                    p6.target = name_to_mir(name_target)
                    if "polarisation" in steps and not dry_run:
                        p6.go()
                except Exception as e:
                    # Exception was already logged just before
                    logger.warning(
                        "Failed beam {}, skipping that from polarisation".
                        format(beamnr))
                    logger.exception(e)
                    status[beamnr] += ['polarisation']

        # keep a record of the parallelised step in the main log file
        if 'scal' in steps or 'continuum' in steps or 'polarisation' in steps:
            logfilepath = os.path.join(basedir, 'apercal.log')
            lib.setup_logger('debug', logfile=logfilepath)
            logger = logging.getLogger(__name__)

            logger.info(
                "Running selfcal and/or continuum and/or polarisation ... Done ({0:.0f}s)"
                .format(time() - start_time_selfcal_continuum_polarisation))

        # ====
        # Line
        # ====

        # keep a record of the parallelised step in the main log file
        if 'line' in steps:
            logfilepath = os.path.join(basedir, 'apercal.log')
            lib.setup_logger('debug', logfile=logfilepath)
            logger.info("Running line")
            start_time_line = time()
        else:
            logfilepath = os.path.join(basedir, 'apercal.log')
            lib.setup_logger('debug', logfile=logfilepath)
            logger.info("Skipping line")

        for beamnr in beamlist_target:

            # Because of the amount of information coming from line
            # this module gets its own logfile
            logfilepath = os.path.join(basedir,
                                       'apercal{:02d}_line.log'.format(beamnr))
            lib.setup_logger('debug', logfile=logfilepath)
            try:
                p7 = line(file_=configfilename_list[
                    beamlist_target_for_config.index(beamnr)])
                if beamnr not in p7.line_beams:
                    logger.debug(
                        "Skipping line imaging for beam {}".format(beamnr))
                    continue
                p7.basedir = basedir
                p7.beam = "{:02d}".format(beamnr)
                p7.target = name_target + '.mir'
                if "line" in steps and not dry_run:
                    p7.go()
            except Exception as e:
                # Exception was already logged just before
                logger.warning(
                    "Failed beam {}, skipping that from line".format(beamnr))
                logger.exception(e)
                status[beamnr] += ['line']

        # with pymp.Parallel(5) as p:
        #     for beam_index in p.range(n_beams):
        #         beamnr = beamlist_target[beam_index]

        #         logfilepath = os.path.join(
        #             basedir, 'apercal{:02d}.log'.format(beamnr))
        #         lib.setup_logger('debug', logfile=logfilepath)
        #         logger = logging.getLogger(__name__)

        #         try:
        #             p7 = line(file_=configfilename)
        #             if beamnr not in p7.line_beams:
        #                 logger.debug(
        #                     "Skipping line imaging for beam {}".format(beamnr))
        #                 continue
        #             p7.basedir = basedir
        #             p7.beam = "{:02d}".format(beamnr)
        #             p7.target = name_target + '.mir'
        #             if "line" in steps and not dry_run:
        #                 p7.go()
        #         except Exception as e:
        #             # Exception was already logged just before
        #             logger.warning(
        #                 "Failed beam {}, skipping that from line".format(beamnr))
        #             logger.exception(e)
        #             status[beamnr] += ['line']

        # keep a record of the parallelised step in the main log file
        if 'line' in steps:
            logfilepath = os.path.join(basedir, 'apercal.log')
            lib.setup_logger('debug', logfile=logfilepath)
            logger = logging.getLogger(__name__)

            logger.info(
                "Running line ... Done ({0:.0f}s)".format(time() -
                                                          start_time_line))

        # ========
        # Transfer
        # ========

        # keep a record of the parallelised step in the main log file
        if 'transfer' in steps:
            logfilepath = os.path.join(basedir, 'apercal.log')
            lib.setup_logger('debug', logfile=logfilepath)
            logger.info("Running transfer")
            start_time_transfer = time()
        else:
            logfilepath = os.path.join(basedir, 'apercal.log')
            lib.setup_logger('debug', logfile=logfilepath)
            logger.info("Skipping transfer")

        # 5 threads to not hammer the disks too much during copying
        with pymp.Parallel(5) as p:
            for beam_index in p.range(n_beams):
                beamnr = beamlist_target[beam_index]

                logfilepath = os.path.join(basedir,
                                           'apercal{:02d}.log'.format(beamnr))
                lib.setup_logger('debug', logfile=logfilepath)
                logger = logging.getLogger(__name__)

                try:
                    p8 = transfer(file_=configfilename_list[beam_index])
                    p8.paramfilename = 'param_{:02d}.npy'.format(beamnr)
                    p8.basedir = basedir
                    p8.target = name_target + '.mir'
                    p8.beam = "{:02d}".format(beamnr)
                    if "transfer" in steps and not dry_run:
                        # director(
                        #     p8, 'rm', basedir + '/param_{:02d}.npy'.format(beamnr), ignore_nonexistent=True)
                        p8.go()
                except Exception as e:
                    logger.warning(
                        "Failed beam {}, skipping that from transfer".format(
                            beamnr))
                    logger.exception(e)
                    status[beamnr] += ['transfer']

        # keep a record of the parallelised step in the main log file
        if 'transfer' in steps:
            logfilepath = os.path.join(basedir, 'apercal.log')
            lib.setup_logger('debug', logfile=logfilepath)
            logger = logging.getLogger(__name__)

            logger.info("Running transfer ... Done ({0:.0f}s)".format(
                time() - start_time_transfer))

        # Polarisation
        # ============
        # keep a record of the parallelised step in the main log file
        # if 'polarisation' in steps:
        #     logfilepath = os.path.join(basedir, 'apercal.log')
        #     lib.setup_logger('debug', logfile=logfilepath)
        #     logger = logging.getLogger(__name__)

        #     logger.info("Running polarisation")
        #     start_time_polarisation = time()
        # else:
        #     logfilepath = os.path.join(basedir, 'apercal.log')
        #     lib.setup_logger('debug', logfile=logfilepath)
        #     logger = logging.getLogger(__name__)

        #     logger.info("Skipping polarisation")

        # with pymp.Parallel(5) as p:
        #     for beam_index in p.range(n_beams):
        #         beamnr = beamlist_target[beam_index]

        #         logfilepath = os.path.join(
        #             basedir, 'apercal{:02d}.log'.format(beamnr))
        #         lib.setup_logger('debug', logfile=logfilepath)
        #         logger = logging.getLogger(__name__)

        #         try:
        #             p7 = polarisation(file_=configfilename)
        #             p7.paramfilename = 'param_{:02d}.npy'.format(beamnr)
        #             p7.basedir = basedir
        #             p7.beam = "{:02d}".format(beamnr)
        #             p7.target = name_to_mir(name_target)
        #             if "polarisation" in steps and not dry_run:
        #                 p7.go()
        #         except Exception as e:
        #             # Exception was already logged just before
        #             logger.warning(
        #                 "Failed beam {}, skipping that from polarisation".format(beamnr))
        #             logger.exception(e)
        #             status[beamnr] += ['polarisation']

        # # keep a record of the parallelised step in the main log file
        # if 'polarisation' in steps:
        #     logfilepath = os.path.join(basedir, 'apercal.log')
        #     lib.setup_logger('debug', logfile=logfilepath)
        #     logger = logging.getLogger(__name__)

        #     logger.info("Running polarisation ... Done ({0:.0f}s)".format(
        #         time() - start_time_polarisation))

        # if "ccalqa" in steps and not dry_run:
        #     logger.info("Starting crosscal QA plots")
        #     try:
        #         make_all_ccal_plots(
        #             taskid_target, name_fluxcal.upper().strip().split('_')[0])
        #     except Exception as e:
        #         logger.warning("Failed crosscal QA plots")
        #         logger.exception(e)
        #     logger.info("Done with crosscal QA plots")

        status = status.copy()  # Convert pymp shared dict to a normal one
        msg = "Apercal finished after " + \
            str(timedelta(seconds=time() - time_start))
        logger.info(msg)
        return status, str(timedelta(seconds=time() - time_start)), None
    except Exception as e:
        msg = "Apercal threw an error after " + \
            str(timedelta(seconds=time() - time_start))
        logger.exception(msg)
        return status, str(timedelta(seconds=time() - time_start)), str(e)
Example #19
0
def apercc(cal_list=None,
           task_id=None,
           cal_name=None,
           base_dir=None,
           search_all_nodes=False,
           steps=None):
    """
    Main function to run the cross-calibration stability evaluation.

    For a list of calibrator scans or a given task id and calibrator name, 
    this function runs the cross-calibration evaluation.
    The function can retrieve the data from ALTA and flag it using the Apercal modules
    prepare and preflag. It compares the bandpass solutions and gain factors between beams
    and between observations of the same calibrators.
    The different steps can be selected individually.

    Example:
        scan id, source name, beam: [190108926, '3C147_36', 36]
        steps: ['prepare', 'preflag', 'crosscal', 'bpass_compare', 'gain_compare', 'bpass_compare_obs', 'gain_compare_obs']
        function call: apercc(cal_list=[[190108926, '3C147_36', 36], [190108927, '3C147_37', 37]]) or apercc(task_id=190409056, cal_name='3C196')

    Args:
        cal_list (List(List(int, str, int))): scan id, source name, beam, optional
        base_dir (str): Name of directory to store the data,
            if not specified it will be /data/apertif/crosscal/<scanid> when new data is fetched
            or /data/apertif/<scanid> when existing data is used
        task_id (int): ID of scan to be used as main ID and for the directory,
            if not specified it will be the first scan id
        cal_name (str): Name of the calibrator,
            if not specified the first name in the calibrator list will be used
        search_all_nodes (bool): 
        steps (List(str)): List of steps in this task

    To Do: Use existing data using the task_id option and the name of the calibrator?

    """

    # General setup
    # =============

    # start time of this function
    start_time = time()

    # check input
    # if no list of calibrators is given
    cal_list_mode = True
    if cal_list is None:
        # then it needs the task id and the calibrator name to look for existing data
        if task_id is not None and cal_name is not None:
            print(
                "Using task id and calibrator name. Assuming existing data; will not run prepare, preflag and crosscal"
            )
            # check that if steps were provided, they don't contain preflag, prepare and crosscal
            if steps is not None:
                if 'prepare' in steps:
                    steps.remove('prepare')
                if 'preflag' in steps:
                    steps.remove('preflag')
                if 'crosscal' in steps:
                    steps.remove('crosscal')
            else:
                steps = [
                    'bpass_compare', 'gain_compare', 'bpass_compare_obs',
                    'gain_compare_obs'
                ]
            # using existing data
            cal_list_mode = False
        # otherwise it won't do anything
        else:
            print(
                "Input parameters incorrect. Please specify either cal_list or task_id and cal_name. Abort"
            )
            return -1
    else:
        print("Using list of calibrators")
        if not steps:
            steps = [
                'prepare', 'preflag', 'crosscal', 'bpass_compare',
                'gain_compare', 'bpass_compare_obs', 'gain_compare_obs'
            ]

    # # check that preflag is in it if prepare is run
    # else:
    #     if 'prepare' in steps and not 'preflag' in steps:
    #         steps.insert(1, 'preflag')

    # get the scan id to be used as the task id
    if not task_id:
        task_id = cal_list[0][0]

    # create the data directory, named after the first scan id unless otherwise specified
    # if no directory is specified
    if not base_dir:
        # and existing data is used, assume the base dir is an apercal processed data directory
        if task_id is not None and cal_name is not None:
            base_dir = '/data/apertif/{}/'.format(task_id)
        # if new data is fetched from the archive use a different default directory
        else:
            base_dir = '/data/apertif/crosscal/{}/'.format(task_id)
    elif len(base_dir) > 0 and base_dir[-1] != '/':
        base_dir = base_dir + '/'
    if not os.path.exists(base_dir) and cal_list_mode:
        try:
            os.mkdir(base_dir)
        except Exception as e:
            print("Creating the base directory failed. Abort")
            return -1
    elif not os.path.exists(base_dir):
        print("Directory was not found. Abort")
        return -1

    logfilepath = os.path.join(base_dir, 'apercc.log')

    lib.setup_logger('debug', logfile=logfilepath)
    logger = logging.getLogger(__name__)
    # gitinfo = subprocess.check_output('cd ' + os.path.dirname(apercal.__file__) +
    #                                   '&& git describe --tag; cd', shell=True).strip()

    # logger.info("Apercal version: " + gitinfo)

    logger.info("Apertif cross-calibration stability evaluation")

    if cal_list_mode:
        logger.info("Using list of calibrators as input !!!!")
    else:
        logger.info("Using task id and calibrator name as input !!!!")

    logger.debug("apercc called with arguments ...")
    logger.debug("cal_list={}".format(cal_list))
    logger.debug("task_id = {}".format(task_id))
    logger.debug("cal_name = {}".format(cal_name))
    logger.debug("base_dir = {}".format(base_dir))
    logger.debug("search_all_nodes = {}".format(search_all_nodes))
    logger.debug("steps = {}".format(steps))

    # number of calibrators
    if cal_list is not None:
        n_cals = len(cal_list)
        # get a list of beams
        beam_list = np.array([cal_list[k][2] for k in range(n_cals)])
    else:
        n_cals = 1

    # get the name of the flux calibrator
    if cal_name is None:
        name_cal = str(cal_list[0][1]).strip().split('_')[0]
    else:
        name_cal = cal_name

    # Getting the data using prepare
    # ==============================

    if "prepare" in steps:

        start_time_prepare = time()

        logger.info("Getting data for calibrators")

        # go through the list of calibrators and run prepare
        for (task_id_cal, name_cal, beamnr_cal) in cal_list:
            logger.info("Running prepare for {0} of beam {1}".format(
                name_cal, beamnr_cal))
            # create prepare object without config file
            prep = prepare(filename=None)
            # where to store the data
            prep.basedir = base_dir
            # give the calibrator as a target to prepare
            prep.fluxcal = ''
            prep.polcal = ''
            prep.target = name_cal.upper().strip().split('_')[0] + '.MS'
            prep.prepare_target_beams = str(beamnr_cal)
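            # an Apertif task id encodes the observing date in its first six
            # digits (YYMMDD) and the scan number in its last three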
            prep.prepare_date = str(task_id_cal)[:6]
            prep.prepare_obsnum_target = str(task_id_cal)[-3:]
            try:
                prep.go()
            except Exception as e:
                logger.warning(
                    "Prepare failed for calibrator {0} ({1}) beam {2}".format(
                        str(task_id), name_cal, beamnr_cal))
                logger.exception(e)
            else:
                logger.info("Prepare successful for {0} of beam {1}".format(
                    name_cal, beamnr_cal))

        logger.info("Getting data for calibrators ... Done ({0:.0f}s)".format(
            time() - start_time_prepare))
    else:
        logger.info("Skipping getting data for calibrators")

    # Running preflag for calibrators
    # ===============================

    if 'preflag' in steps:
        start_time_flag = time()

        logger.info("Flagging data of calibrators")

        # Flag fluxcal (pretending it's a target)
        # needs to be changed to run preflag in parallel and to loop over all calibrators (see the sketch below)
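        # A possible loop form (a sketch only, not active code) would mirror the
        # prepare loop above:
        #   for (task_id_cal, name_cal_i, beamnr_cal) in cal_list:
        #       flag = preflag(filename=None)
        #       flag.basedir = base_dir
        #       flag.fluxcal = ''
        #       flag.polcal = ''
        #       flag.target = name_cal_i.upper().strip().split('_')[0] + '.MS'
        #       flag.beam = "{:02d}".format(beamnr_cal)
        #       flag.preflag_targetbeams = "{:02d}".format(beamnr_cal)
        #       flag.go()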
        flag = preflag(filename=None)
        flag.basedir = base_dir
        flag.fluxcal = ''
        flag.polcal = ''
        flag.target = name_cal.upper().strip().split('_')[0] + '.MS'
        flag.beam = "{:02d}".format(beam_list[0])
        flag.preflag_targetbeams = "{:02d}".format(beam_list[0])
        try:
            director(flag,
                     'rm',
                     base_dir + '/param.npy',
                     ignore_nonexistent=True)
            flag.go()
        except Exception as e:
            logger.warning("Preflag failed")
            logger.exception(e)
        else:
            logger.info(
                "Flagging data of calibrators ... Done ({0:.0f}s)".format(
                    time() - start_time_flag))
    else:
        logger.info("Skipping running preflag for calibrators")

    # Running crosscal for calibrators
    # ===============================

    if 'crosscal' in steps:
        start_time_crosscal = time()

        logger.info("Running crosscal for calibrators")

        for beam_nr in beam_list:
            logger.info("Running crosscal for beam {0}".format(beam_nr))
            crosscal = ccal(file_=None)
            crosscal.basedir = base_dir
            crosscal.fluxcal = name_cal.upper().strip().split('_')[0] + '.MS'
            # p.polcal = name_to_ms(name_polcal)
            # p.target = name_to_ms(name_target)
            # p2.paramfilename = 'param_{:02d}.npy'.format(beamnr)
            crosscal.beam = "{:02d}".format(beam_nr)
            crosscal.crosscal_transfer_to_target = False
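            # only calibrator scans are processed here, so the solutions are
            # not transferred to a target field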
            # p2.crosscal_transfer_to_target_targetbeams = "{:02d}".format(
            #    beamnr)
            try:
                director(crosscal,
                         'rm',
                         base_dir + '/param.npy',
                         ignore_nonexistent=True)
                # director(
                #     p2, 'rm', basedir + '/param_{:02d}.npy'.format(beamnr), ignore_nonexistent=True)
                crosscal.go()
            except Exception as e:
                # Exception was already logged just before
                logger.warning("Failed crosscal for beam {}".format(beam_nr))
                logger.exception(e)
            else:
                logger.info(
                    "Running crosscal for beam {0} ... Done".format(beam_nr))

        logger.info(
            "Running crosscal for calibrators ... Done ({0:.0f}s)".format(
                time() - start_time_crosscal))
    else:
        logger.info("Skipping running crosscal for calibrators")

    # Running bandpass comparison
    # ============================

    if 'bpass_compare' in steps:

        start_time_bpass = time()

        logger.info("Comparing bandpass")

        logger.info("#### Doing nothing here yet ####")

        logger.info("Comparing bandpass ... Done ({0:.0f}s)".format(
            time() - start_time_bpass))
    else:
        logger.info("Skipping comparing bandpass")

    # Running gain comparison
    # ========================

    if 'gain_compare' in steps:

        start_time_gain = time()

        logger.info("Comparing gain solutions")

        logger.info("#### Doing nothing here yet ####")

        logger.info("Comparing gain solutions ... Done ({0:.0f})".format(
            time() - start_time_gain))
    else:
        logger.info("Skipping comparing gain solutions")

    # Running bandpass comparison between observations
    # =================================================
    if 'bpass_compare_obs' in steps:

        start_time_bandpass = time()

        logger.info("Comparing banpdass solutions across observations")

        logger.info("#### Doing nothing here yet ####")

        logger.info(
            "Comparing banpdass solutions across observations ... Done ({0:.0f})"
            .format(time() - start_time_bandpass))
    else:
        logger.info(
            "Skipping comparing banpdass solutions across observations")

    # Running gain comparison between observations
    # =============================================
    if 'gain_compare_obs' in steps:

        start_time_gain_obs = time()

        logger.info("Comparing gain solutions across observations")

        logger.info("#### Doing nothing here yet ####")

        logger.info(
            "Comparing gain solutions across observations ... Done ({0:.0f}s)"
            .format(time() - start_time_gain_obs))
    else:
        logger.info(
            "Skipping comparing gain solutions across observations")

    logger.info(
        "Apertif cross-calibration stability evaluation ... Done ({0:.0f}s)".
        format(time() - start_time))