コード例 #1
0
ファイル: run_preflag_qa.py プロジェクト: apertif/dataqa
                    default=None,
                    help='Directory where scan is located')

# this mode will make the script look only for the beams processed by Apercal on a given node
parser.add_argument(
    "--trigger_mode",
    action="store_true",
    default=False,
    help='Set it to run Autocal triggering mode automatically after Apercal.')

args = parser.parse_args()

# If no path is given change to default QA path
if args.path is None:
    # derive the QA directory from the scan id; honour an explicit
    # base directory when one was given on the command line
    if args.basedir is not None:
        qa_dir = get_default_imagepath(args.scan, basedir=args.basedir)
    else:
        qa_dir = get_default_imagepath(args.scan)

    # check that the preflag qa directory exists and create it when
    # missing (os.mkdir assumes qa_dir itself already exists)
    qa_preflag_dir = os.path.join(qa_dir, "preflag")

    if not os.path.exists(qa_preflag_dir):
        os.mkdir(qa_preflag_dir)
else:
    # an explicit output path overrides the default QA location
    qa_preflag_dir = args.path

# Create log file in the preflag QA directory; module loggers below
# inherit this configuration
lib.setup_logger('info',
                 logfile=os.path.join(qa_preflag_dir, 'run_preflag_qa.log'))
logger = logging.getLogger(__name__)
コード例 #2
0
def get_pipeline_run_time(obs_id, trigger_mode=False):
    """Read the run time of the Apercal steps from the main logfile.

    Since parselog is broken and the apercal logfiles have changed
    due to the parallelisation, this function reads the timing
    information directly from the main apercal logfile(s) and writes
    one csv table per happili node into the QA
    "apercal_performance" directory.

    Parameters
    ----------
    obs_id :
        ID of the observation, used to locate the QA directory.
    trigger_mode : bool, optional
        If True, read only the data processed by Apercal on the
        current node (Autocal trigger mode). Default: False.
    """
    logger.info("## Reading apercal timing measurements")

    # get the QA path
    qa_dir = get_default_imagepath(obs_id)

    host_name = socket.gethostname()

    # determine which data directories (and hence which nodes) to scan;
    # only on happili-01 outside trigger mode all four nodes are covered
    if trigger_mode or host_name != "happili-01":
        data_dir_list = [qa_dir.replace("qa/", "")]
        host_name_list = [host_name]
    else:
        data_dir_list = [
            qa_dir.replace("qa/", ""),
            qa_dir.replace("qa/", "").replace("/data", "/data2"),
            qa_dir.replace("qa/", "").replace("/data", "/data3"),
            qa_dir.replace("qa/", "").replace("/data", "/data4")
        ]
        host_name_list = [
            "happili-01", "happili-02", "happili-03", "happili-04"
        ]

    # Create an apercal QA directory
    qa_apercal_dir = "{0:s}apercal_performance/".format(qa_dir)

    if not os.path.exists(qa_apercal_dir):
        logger.info("Creating directory {0:s}".format(qa_apercal_dir))
        try:
            os.mkdir(qa_apercal_dir)
        except Exception as e:
            logger.error(e)

    # the log lines that carry timing information; the last entry marks
    # the total pipeline run time and is treated specially below
    original_useful_lines = [
        "Running prepare ... Done", "Running split ... Done",
        "Running preflag ... Done", "Running crosscal ... Done",
        "Running convert ... Done",
        "Running selfcal and/or continuum and/or polarisation ... Done",
        "Running line ... Done", "Running transfer ... Done",
        "Apercal finished after"
    ]

    # go through the list of data directories
    for k, data_dir in enumerate(data_dir_list):

        # get the log files
        apercal_log_list = glob.glob("{0:s}apercal.log".format(data_dir))

        if len(apercal_log_list) == 0:
            logger.warning(
                "Could not find any apercal log file in {0:s}".format(
                    data_dir))
            continue

        # sort log list
        apercal_log_list.sort()

        # go through the log files
        for log_counter, log_name in enumerate(apercal_log_list):

            logger.info("Reading out timing measurement for {0:s}".format(
                log_name))

            # to store the results from reading the information
            results = []
            lines_found = []

            # make a copy of useful_lines to use for next log file
            useful_lines = list(original_useful_lines)

            # read logfile
            with open(log_name, "r") as logfile:
                # go through the lines
                for logline in logfile:

                    # abort when we are out of useful lines
                    if len(useful_lines) == 0:
                        break

                    # for each line check that a useful line is in there
                    for line in useful_lines:
                        # if useful line is found, get value and remove
                        # it from the list
                        if line in logline:
                            if line == original_useful_lines[-1]:
                                # total run time: keep the raw remainder
                                # of the log line
                                results.append(logline.split(line)[1])
                            else:
                                # step time: extract the "(...s)"
                                # seconds value and render it as h:mm:ss
                                time_in_s = int(
                                    logline.rstrip().lstrip().split(
                                        line)[1].split("(")[1].split(
                                            ")")[0].split("s")[0])
                                results.append(
                                    str(timedelta(seconds=time_in_s)))

                            # shorten the combined
                            # selfcal/continuum/polarisation step label
                            if line == original_useful_lines[5]:
                                lines_found.append(
                                    line.replace(" and/or ", "+"))
                            else:
                                lines_found.append(line)

                            # remove the useful line that was found and
                            # move to the next logline
                            useful_lines.remove(line)
                            break

            # the second word of each found line is the module name
            step_info = np.array(
                [step.split(" ")[1] for step in lines_found])

            # create a column with the file name, one entry per result
            file_name_col = np.array(
                [os.path.basename(log_name)] * len(results))

            # create table with the above columns
            timing_table = Table([file_name_col, step_info, results],
                                 names=('file_name', 'step', 'time'))

            if log_counter == 0:
                complete_table = timing_table.copy()
            else:
                complete_table = vstack([complete_table, timing_table])

        table_output_name = os.path.join(
            qa_apercal_dir,
            "apercal_log_timeinfo_{0:s}.csv".format(host_name_list[k]))

        try:
            complete_table.write(table_output_name,
                                 format="csv",
                                 overwrite=True)
        except Exception as e:
            logger.error(e)

    logger.info("## Reading apercal timing measurements. Done")
コード例 #3
0
def main():
    """Create beam-weight plots for a flux calibrator.

    Reads the APERTIF_CALIBRATION subtable of the calibrator
    measurement set for each beam, converts the beamformer weights and
    saves one png per (beam, subband) into the beamweights QA
    directory. Plotting over subbands is parallelised with pymp.
    """

    start_time = time.time()

    args = parse_args()

    obs_id = args.obs_id
    flux_cal = args.calibrator
    qa_dir = args.path
    base_dir = args.base_dir
    n_threads = args.threads
    subband_step = args.subband_step

    # set output directory
    if qa_dir is None:
        if base_dir is not None:
            qa_dir = get_default_imagepath(obs_id, basedir=base_dir)
        else:
            qa_dir = get_default_imagepath(obs_id)

        # check that path exists
        if not os.path.exists(qa_dir):
            print("Directory {0:s} does not exist and will be created".format(
                qa_dir))
            os.makedirs(qa_dir)

    # the data directory is everything before the "qa" part of the path
    data_dir = os.path.dirname(qa_dir).rsplit("qa")[0]

    # output directory for the beamweight plots
    qa_beamweights_dir = os.path.join(qa_dir, "beamweights")

    # check that this directory exists (just in case)
    if not os.path.exists(qa_beamweights_dir):
        print("Directory {0:s} does not exist and will be created".format(
            qa_beamweights_dir))
        os.makedirs(qa_beamweights_dir)

    lib.setup_logger(
        'debug',
        logfile='{0:s}/create_beamweights.log'.format(qa_beamweights_dir))
    logger = logging.getLogger(__name__)

    logger.info("Getting beamweight plots for {}".format(flux_cal))

    # get a list of beams if no beam was provided
    if args.beam is None:
        data_dir_beam_list = glob.glob(os.path.join(data_dir, "[0-3][0-9]"))
        # check that there are beams
        if len(data_dir_beam_list) == 0:
            logger.warning("No beams found in {}".format(data_dir))
            return None
        else:
            beam_list = [
                int(os.path.basename(beam)) for beam in data_dir_beam_list
            ]
    else:
        beam_list = [args.beam]

    # now go through the beams
    for beam_nr in beam_list:

        start_time_beam = time.time()

        logger.info("Processing beam {}".format(beam_nr))

        # check that the given calibrator exists
        data_cal_dir = os.path.join(data_dir, "{0:02d}".format(beam_nr))

        # calibrator file
        cal_file = os.path.join(data_cal_dir, "raw/{}.MS".format(flux_cal))

        # check that it exists
        if not os.path.exists(cal_file):
            logger.warning(
                "Could not find calibrator {}. Continue with next beam".format(
                    cal_file))
            continue
        else:
            logger.info("Found calibrator {}".format(cal_file))

        # set output directory for plots
        qa_beamweights_beam_dir = os.path.join(qa_beamweights_dir,
                                               "{0:02d}".format(beam_nr))
        # check that this directory exists (just in case)
        if not os.path.exists(qa_beamweights_beam_dir):
            logger.info(
                "Directory {0:s} does not exist and will be created".format(
                    qa_beamweights_beam_dir))
            os.makedirs(qa_beamweights_beam_dir)

        # open the calibration subtable of the measurement set;
        # "$cal" in the TaQL queries below refers to this variable
        cal = pt.table(os.path.join(cal_file, "APERTIF_CALIBRATION"),
                       ack=False)

        num_beams = 40
        num_subbands = pt.taql(
            'select distinct SPECTRAL_WINDOW_ID FROM $cal').nrows()
        num_antennas = pt.taql('select distinct ANTENNA_ID FROM $cal').nrows()

        # one 11x11 complex weight matrix per (beam, subband, antenna)
        beamweights = np.zeros((num_beams, num_subbands, num_antennas, 11, 11),
                               dtype=np.complex64)

        logger.info("Number of subbands in {0} is {1}".format(
            os.path.basename(cal_file), num_subbands))

        # in case there are no subbands or antennas better check
        if num_subbands != 0 and num_antennas != 0:

            logger.info("Getting weights")
            weights_gershape = cal.getcol('BEAM_FORMER_WEIGHTS').reshape(
                (num_subbands, -1, 2, 64))
            logger.info("Getting weights ... Done")

            # parallelise it to plot faster
            with pymp.Parallel(n_threads) as p:
                # go through the subbands
                for subband in p.range(0, num_subbands, subband_step):
                    for antenna in range(num_antennas):
                        beamweights[beam_nr, subband,
                                    antenna] = convert_weights(
                                        weights_gershape[subband, antenna])

                    fig, axs = plt.subplots(3, 4, figsize=(10, 7))
                    fig.suptitle("Beam {}; Subband {}".format(
                        beam_nr, subband),
                                 fontsize=14)
                    for ax, plot_ant in zip(
                            np.array(axs).flatten(), range(num_antennas)):
                        ax.imshow(np.abs(beamweights[beam_nr, subband,
                                                     plot_ant]),
                                  cmap='plasma')
                        ax.set_title("Antenna " + str(plot_ant))
                        if plot_ant < 8:
                            ax.set_xticklabels([])
                        # annotate the position of each X and Y element
                        for i in range(61):
                            x, y = give_coord('X', i)
                            ax.text(x - 0.35,
                                    y + 0.18,
                                    'X' + str(i),
                                    color='white',
                                    fontsize=5)
                            x, y = give_coord('Y', i)
                            ax.text(x - 0.35,
                                    y + 0.18,
                                    'Y' + str(i),
                                    color='white',
                                    fontsize=5)

                    plot_name = os.path.join(
                        qa_beamweights_beam_dir,
                        "{0}_{1}_B{2:02d}_S{3:03d}_weights.png".format(
                            obs_id, flux_cal, beam_nr, subband))
                    # savefig overwrites existing files by default; the
                    # former "overwrite=True" keyword is not a valid
                    # matplotlib savefig argument and errors out on
                    # current matplotlib versions
                    plt.savefig(plot_name)
                    logger.info("Saving plot {}".format(plot_name))
                    plt.close('all')

            logger.info("Processing beam {0} ... Done ({1:.0f}s)".format(
                beam_nr,
                time.time() - start_time_beam))
        else:
            logger.warning(
                "Found {0} subbands and {1} antennas for beam {2} in {3}".
                format(num_subbands, num_antennas, beam_nr, flux_cal))

    logger.info("Getting beamweight plots for {0} ... Done ({1:.0f}s)".format(
        flux_cal,
        time.time() - start_time))
コード例 #4
0
ファイル: run_scal_plots.py プロジェクト: apertif/dataqa
                    default=True,
                    action='store_false',
                    help='Do not generate amplitude plots')

# this mode will make the script look only for the beams processed by Apercal on a given node
parser.add_argument(
    "--trigger_mode",
    action="store_true",
    default=False,
    help='Set it to run Autocal triggering mode automatically after Apercal.')

args = parser.parse_args()

# If no path is given change to default QA path
if args.path is None:
    # derive the QA directory from the scan id and base directory
    output_path = get_default_imagepath(args.scan, basedir=args.basedir)

    # check that the selfcal qa directory exists and create it when
    # missing (os.mkdir assumes the parent QA directory already exists)
    output_path = os.path.join(output_path, "selfcal/")

    if not os.path.exists(output_path):
        os.mkdir(output_path)
else:
    # an explicit output path overrides the default QA location
    output_path = args.path

# Create log file in the selfcal QA directory; module loggers below
# inherit this configuration
lib.setup_logger('info', logfile='{0:s}run_scal_plots.log'.format(output_path))
logger = logging.getLogger(__name__)

# Get selfcal maps
if args.maps:
コード例 #5
0
ファイル: run_inspection_plot.py プロジェクト: rs1701/dataqa
def main():
    """Create inspection plots for an observation.

    Parses the command line, builds the QA output directory for the
    inspection plots (one sub-directory per source, plus one per beam
    for calibrators) and runs get_inspection_plots().

    Returns
    -------
    int or None
        -1 when required calibrator options (beam, cal_id) are
        missing; None otherwise.
    """

    parser = argparse.ArgumentParser(description='Generate selfcal QA plots')

    # 1st argument: File name
    parser.add_argument("obs_id", help='ID of observation of target field')

    parser.add_argument(
        "src_name", help='Name of the calibrator or target of the plots')

    parser.add_argument("-c", "--calibrator", action="store_true", default=False,
                        help='Set if a calibrator is used. Also requires beam and cal_id')

    parser.add_argument("--beam", type=int, default=None,
                        help='If src_name is a calibrator set the beam number')

    parser.add_argument("--cal_id", type=str, default=None,
                        help='Obs ID of the calibrator')

    parser.add_argument('-p', '--path', default=None,
                        help='Destination for images')
    parser.add_argument('-b', '--basedir', default=None,
                        help='Directory of obs id')

    args = parser.parse_args()

    # If no path is given change to default QA path
    if args.path is None:
        if args.basedir is not None:
            output_path = get_default_imagepath(
                args.obs_id, basedir=args.basedir)
        else:
            output_path = get_default_imagepath(args.obs_id)

        # check that the inspection plot qa directory exists
        qa_plot_dir = os.path.join(output_path, "inspection_plots")

        if not os.path.exists(qa_plot_dir):
            os.mkdir(qa_plot_dir)
    else:
        qa_plot_dir = args.path

    # create a sub-directory named after src_name to put the plots into
    if args.src_name is not None:
        qa_plot_dir = os.path.join(qa_plot_dir, args.src_name)

        if not os.path.exists(qa_plot_dir):
            os.mkdir(qa_plot_dir)

    # if it is a calibrator then put the plots into a beam directory
    if args.calibrator:
        if args.beam is None:
            print("ERROR: Please specify beam of calibrator")
            return -1
        elif args.cal_id is None:
            print("ERROR: Please specify id of calibrator")
            return -1
        else:
            is_calibrator = True

            qa_plot_dir = os.path.join(
                qa_plot_dir, "{0:02d}".format(args.beam))

            if not os.path.exists(qa_plot_dir):
                os.mkdir(qa_plot_dir)
    else:
        is_calibrator = False

    # Create log file
    lib.setup_logger(
        'info', logfile=os.path.join(qa_plot_dir, 'get_inspection_plot.log'))
    logger = logging.getLogger(__name__)

    # Run function to get plots
    try:
        logger.info("#### Getting inspection plots ...")
        start_time_plots = time.time()
        get_inspection_plots(args.obs_id, qa_plot_dir,
                             is_calibrator=is_calibrator, cal_id=args.cal_id)
    except Exception as e:
        logger.error(e)
        logger.error("#### Getting inspection plots failed")
    else:
        logger.info("#### Getting inspection plots... Done ({0:.0f}s)".format(
            time.time()-start_time_plots))
コード例 #6
0
ファイル: create_report.py プロジェクト: apertif/dataqa
def main():
    start_time = time.time()

    # Create and parse argument list
    # ++++++++++++++++++++++++++++++
    parser = argparse.ArgumentParser(
        description='Create overview for QA')

    # 1st argument: Observation number
    parser.add_argument("obs_id", type=str,
                        help='Observation Number')

    parser.add_argument("--target", type=str, default='',
                        help='Name of the target')

    parser.add_argument("--fluxcal", type=str, default='',
                        help='Name of the flux calibrator')

    parser.add_argument("--polcal", type=str, default='',
                        help='Name of the polarisation calibrator')

    parser.add_argument("--osa", type=str, default='',
                        help='Name of the OSA')

    parser.add_argument("-p", "--path", type=str,
                        help='Path to QA output')

    parser.add_argument("-b", "--basedir", type=str,
                        help='Base directory where the obs id is')

    parser.add_argument("--tank", action="store_true", default=False,
                        help='Create the report on new volume')

    parser.add_argument("-a", "--add_osa_report", action="store_true", default=False,
                        help='Add only the osa report to the webpage')

    parser.add_argument("-c", "--combine", action="store_true", default=False,
                        help='(Depracated) Set to create a combined report from all happilis on happili-01. It will overwrite the report on happili-01')

    parser.add_argument("--no_merge", action="store_true", default=False,
                        help='Set to merge selfcal and crosscal plots')

    parser.add_argument("--do_not_read_timing", action="store_true", default=False,
                        help='Set to avoid reading timing information. Makes only sense if script is run multiple times or for debugging')

    parser.add_argument("--page_only", action="store_true", default=False,
                        help='Set only create the webpages themselves')

    # this mode will make the script look only for the beams processed by Apercal on a given node
    parser.add_argument("--trigger_mode", action="store_true", default=False,
                        help='Set it to run Autocal triggering mode automatically after Apercal.')

    parser.add_argument("--single_node", action="store_true", default=False,
                        help='Set it to run QA on a single node and get same result as if running like the OSA. Note, this is different from trigger mode.')

    args = parser.parse_args()

    obs_id = args.obs_id
    qa_dir = args.path
    base_dir = args.basedir
    do_combine = args.combine
    add_osa_report = args.add_osa_report

    # directory where the output will be of pybdsf will be stored
    if qa_dir is None:
        if base_dir is not None:
            qa_dir = get_default_imagepath(obs_id, basedir=base_dir)
        else:
            qa_dir = get_default_imagepath(obs_id)

        # check that path exists
        if not os.path.exists(qa_dir):
            print(
                "Directory {0:s} does not exist and will be created".format(qa_dir))
            os.makedirs(qa_dir)

    # change the base directory from /data to /tank
    if args.tank and "/data" in qa_dir:
        print("Switching to /tank")
        qa_dir = qa_dir.replace("/data", "/tank")

    # check the mode to run the validation
    qa_report_dir = "{0:s}report".format(
        qa_dir)

    # check that this directory exists (just in case)
    if not os.path.exists(qa_report_dir):
        print("Directory {0:s} does not exist and will be created".format(
            qa_report_dir))
        os.makedirs(qa_report_dir)

    lib.setup_logger(
        'debug', logfile='{0:s}/create_report.log'.format(qa_report_dir))
    logger = logging.getLogger(__name__)

    # if osa report should be added, check it is available
    if add_osa_report:
        # name of the osa report for this observation
        osa_report = os.path.join(
            qa_report_dir, "OSA_Report/{}_OSA_report.ecsv".format(obs_id))

        # check that the file is actually there
        if not os.path.exists(osa_report):
            logger.error("No OSA report found. Abort")
            return -1
    else:
        osa_report = ''

    # Saving observation information if they do not exist yet
    # =======================================================

    table_name = "{0}_obs.ecsv".format(obs_id)

    table_name_with_path = os.path.join(qa_dir, table_name)

    if not os.path.exists(table_name_with_path):

        obs_info = Table([
            [obs_id],
            [args.target],
            [args.fluxcal],
            [''],
            [args.polcal],
            [''],
            [args.osa]], names=(
            'Obs_ID', 'Target', 'Flux_Calibrator', 'Flux_Calibrator_Obs_IDs', 'Pol_Calibrator', 'Pol_Calibrator_Obs_IDs', 'OSA'))

        try:
            obs_info.write(
                table_name_with_path, format='ascii.ecsv', overwrite=True)
        except Exception as e:
            logger.warning("Saving observation information in {0} failed.".format(
                table_name_with_path))
            logger.exception(e)
        else:
            logger.info(
                ("Saving observation information in {0} ... Done.".format(table_name_with_path)))
    else:
        logger.info(
            ("Observation information already exists. Reading {0}.".format(table_name_with_path)))
        obs_info = Table.read(table_name_with_path, format="ascii.ecsv")

    # check on which happili we are:
    host_name = socket.gethostname()

    if args.trigger_mode:
        logger.info(
            "--> Running report QA in trigger mode. Looking only for data processed by Apercal on {0:s} <--".format(host_name))
    elif args.single_node:
        logger.info(
            "--> Running report QA in single-node mode. Looking only for data processed by Apercal on {0:s} <--".format(host_name))
    elif do_combine:
        logger.info("Combining QAs from different happilis")
        if host_name != "happili-01":
            logger.warning("You are not working on happili-01.")
            logger.warning("Cannot combine QA from different happilis")
            do_combine = False
    elif host_name != "happili-01" and not args.trigger_mode:
        logger.warning("You are not working on happili-01.")
        logger.warning("The script will not process all beams")
        logger.warning("Please switch to happili-01")

    apercal_log_file = "/data/apertif/{0:s}/apercal.log".format(
        obs_id)

    # logging.basicConfig(filename='{0:s}/create_report.log'.format(qa_dir), level=logging.DEBUG,
    #                     format='%(asctime)s - %(levelname)s: %(message)s')

    # getting timing measurment for apercal only in trigger mode
    # if not add_osa_report and not args.do_not_read_timing:
    if args.trigger_mode or args.single_node:
        try:
            get_pipeline_run_time(obs_id, trigger_mode=args.trigger_mode)
        except Exception as e:
            logger.exception(e)

    # the subpages to be created
    subpages = ['observing_log', 'summary',  'beamweights', 'inspection_plots', 'preflag', 'crosscal',
                'selfcal', 'continuum', 'polarisation', 'line', 'mosaic', 'apercal_log']

    logger.info("#### Create report directory structure")

    # copy the js and css files
    js_file_name = "{0:s}/report_fct.js".format(
        hp.__file__.split("/html_report.py")[0])
    css_file_name = "{0:s}/report_style.css".format(
        hp.__file__.split("/html_report.py")[0])

    # for copying osa_files:
    osa_nb_file = "{0:s}/OSA_report.ipynb".format(
        hp.__file__.split("/html_report.py")[0])
    osa_py_file = "{0:s}/osa_functions.py".format(
        hp.__file__.split("/html_report.py")[0])

    osa_files = [osa_nb_file, osa_py_file]

    # Check that directory of the qa exists
    if not os.path.exists(qa_dir):
        logger.error(
            "Directory {0:s} does not exists. Abort".format(qa_report_dir))
        return -1
    else:
        # do things that should only happen on happili-01 when the OSA runs this function
        if not args.trigger_mode and not args.page_only:
            if host_name == "happili-01" or args.single_node:
                # go through some of the subpages and process numpy files
                for page in subpages:
                    # exclude non-apercal modules (and mosaic)
                    if page != "apercal_log" or page != "inspection_plots" or page != "summary" or page != "mosaic":
                        # just run it on preflag for now
                        if page == "preflag" or page == "crosscal" or page == "convert" or page == "selfcal" or page == "continuum":
                            # get information from numpy files
                            try:
                                logger.info(
                                    "## Getting summary table for {}".format(page))
                                make_nptabel_csv(
                                    obs_id, page, qa_dir, output_path=os.path.join(qa_dir, page))
                            except Exception as e:
                                logger.warning(
                                    "## Getting summary table for {} failed".format(page))
                                logger.exception(e)
                            else:
                                logger.info(
                                    "## Getting summary table for {} ... Done".format(page))

                            # merge plots
                            if not args.no_merge and not args.single_node:
                                try:
                                    logger.info(
                                        "## Merging selfcal and crosscal plots")
                                    run_merge_plots(
                                        qa_dir, do_ccal=True, do_scal=True, run_parallel=True, n_cores=5)
                                except Exception as e:
                                    logger.warning(
                                        "## Merging selfcal and crosscal plots ... Failed")
                                    logger.exception(e)
                                else:
                                    logger.info(
                                        "## Merging selfcal and crosscal plots ... Done")

                    # merge the continuum image properties
                    if page == 'continuum':
                        try:
                            merge_continuum_image_properties_table(
                                obs_id, qa_dir, single_node=args.single_node)
                        except Exception as e:
                            logger.warning(
                                "Merging continuum image properties ... Failed")
                            logger.exception(e)
                        else:
                            logger.info(
                                "Merging continuum image properties ... Done")

                    # get line statistics
                    if page == 'line':
                        try:
                            combine_cube_stats(
                                obs_id, qa_dir, single_node=args.single_node)
                        except Exception as e:
                            logger.warning(
                                "Getting cube statistics ... Failed")
                            logger.exception(e)
                        else:
                            logger.info(
                                "Getting cube statistics ... Done")

                # create dish delay plot
                try:
                    logger.info("Getting dish delay plot")
                    get_dish_delay_plots(
                        obs_id, obs_info['Flux_Calibrator'][0], basedir=args.basedir)
                except Exception as e:
                    logger.warning("Getting dish delay plot ... Failed")
                    logger.exception(e)
                else:
                    logger.info("Getting dish delay plot ... Done")

                # create compound beam plots
                try:
                    logger.info("Getting compound beam plots")
                    make_cb_plots_for_report(obs_id, qa_dir)
                except Exception as e:
                    logger.warning("Getting compound beam plots ... Failed")
                    logger.exception(e)
                else:
                    logger.info("Getting compound beam plots ... Done")

    # Create directory structure for the report (skipped when an OSA report
    # is being added to an already existing report tree)
    if not add_osa_report:
        logger.info("#### Creating directory structrure")
        try:
            hpd.create_report_dirs(
                obs_id, qa_dir, subpages, css_file=css_file_name, js_file=js_file_name, trigger_mode=args.trigger_mode, single_node=args.single_node, do_combine=do_combine, obs_info=obs_info, osa_files=osa_files)
        except Exception as e:
            # NOTE(review): a failure here is only logged; the report
            # creation below is still attempted
            logger.error(e)
        else:
            logger.info("#### Creating directory structrure ... Done")

    logger.info("#### Creating report")

    # build the main html report pages from the prepared directory structure
    try:
        hp.create_main_html(qa_report_dir, obs_id, subpages,
                            css_file=css_file_name, js_file=js_file_name, obs_info=obs_info, osa_report=osa_report)
    except Exception as e:
        # errors are logged but do not abort; the elapsed time is still reported
        logger.error(e)

    # total wall-clock time since start_time (set before this section)
    logger.info("#### Report. Done ({0:.0f}s)".format(
        time.time()-start_time))