Example 1
File: qa.py Project: rs1701/apercal
def checkimagegaussianity(self, image, alpha):
    """
    Subroutine to check if an image has gaussian distribution
    image (string): The path/name of the image to check in FITS-format
    returns (boolean): True if image is ok, False otherwise
    """
    setinit.setinitdirs(self)
    char_set = string.ascii_uppercase + string.digits
    if os.path.isdir(image) or os.path.isfile(image):
        with tempfile.TemporaryDirectory() as tempdir:
            if os.path.isdir(image):
                temp_string = ''.join(random.sample(char_set * 8, 8))
                fits = lib.miriad('fits')
                fits.op = 'xyout'
                fits.in_ = image
                fits.out = tempdir + '/' + temp_string + '.fits'
                fits.go()
                pyfile = pyfits.open(tempdir + '/' + temp_string + '.fits')
            elif os.path.isfile(image):
                pyfile = pyfits.open(image)
            else:
                error = 'Image format not supported. Only MIRIAD and FITS formats are supported!'
                logger.error(error)
                raise ApercalException(error)
            image = pyfile[0].data[0][0]
            pyfile.close()
            k2, p = scipy.stats.normaltest(image, nan_policy='omit', axis=None)
            return bool(p < alpha)
    else:
        error = 'Image {} does not seem to exist!'.format(image)
        logger.error(error)
        raise ApercalException(error)
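For reference, a minimal, self-contained sketch (not part of apercal) of the scipy.stats.normaltest call used above, run on synthetic data; note that a p-value below alpha means the normality hypothesis is rejected.

import numpy as np
import scipy.stats

# Synthetic Gaussian pixel values stand in for the image data read above.
rng = np.random.default_rng(42)
pixels = rng.normal(0.0, 1.0, size=(128, 128))
k2, p = scipy.stats.normaltest(pixels, nan_policy='omit', axis=None)
print(k2, p)  # a large p-value is consistent with a Gaussian distribution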
Example 2
def uvflag(vis=None, select=None):
    """
    vis: visibility file to be flagged
    select: semi-colon separated list of data selections to be flagged
    """
    logger = logging.getLogger('uvflag')
    logger.info("uvflag: Flagging Tool")
    if vis is None or select is None:
        raise ApercalException("Vis or select not specified. Check parameters.")
    # Setup the path and move to it.
    path2vis = os.path.split(vis)[0]
    vis = os.path.split(vis)[1]
    try:
        os.chdir(path2vis)
        logger.info("Moved to path " + path2vis)
    except Exception:
        raise ApercalException("Path to vis does not exist!")
    # Flag each selection in a for-loop
    for s in select.split(';'):
        o = masher(task='uvflag',
                   vis=vis,
                   select='"' + s + '"',
                   flagval='flag')
        logger.info(o)
    logger.info("Appears to have ended successfully.")
Example 3
def pgflag(vis=None, flagpar='6,2,2,2,5,3', settings=None, stokes='qq'):
    """
    Wrapper around the MIRIAD task PGFLAG, which in turn is a wrapper for the AOFlagger
    SumThreshold algorithm.
    Defaults:  flagpar='6,2,2,2,5,3',  stokes='qq'
    Uses parameters from a settings object if this is provided.
    Outputs are written to a log file, which is in the same directory as vis, and has name
    <vis>.pgflag.txt.
    Note: The considerably long output of PGFLAG is written out with the logger at debug level.
    This may not be ideal if you're having a quick look, so switch the level to info if you want
    to avoid the output of the task appearing in your console.
    Beware: You could lose a LOT of data if you're not careful!!!
    """
    # Exception handling and checking
    logger = logging.getLogger('pgflag')
    logger.info("PGFLAG: Automated Flagging using SumThresholding")
    if vis is None and settings is None:
        raise ApercalException(
            "No inputs - please provide either vis and flagpar or settings.")
    path2vis = os.path.split(vis)[0]
    vis = os.path.split(vis)[1]
    try:
        os.chdir(path2vis)
        logger.info("Moved to path " + path2vis)
    except Exception:
        raise ApercalException("Error: path to vis does not exist!")

    o = None

    # Do pgflag with the settings parameters if provided.
    if settings and vis:
        params = settings.get('pgflag')
        logger.info("Doing PGFLAG on " + vis + " using stokes=" +
                    params.stokes + " with flagpar=" + params.flagpar)
        logger.info("Output written to " + vis + '.pgflag.txt')
        o = masher(task='pgflag',
                   vis=vis,
                   stokes=params.stokes,
                   flagpar=params.flagpar,
                   options='nodisp',
                   command="'<'")
    # Do PGFLAG with input settings, i.e. no settings file provided.
    if vis and settings is None:
        logger.info("Doing PGFLAG on " + vis + " using stokes " + stokes +
                    " with flagpar=" + flagpar)
        o = masher(task='pgflag',
                   vis=vis,
                   stokes=stokes,
                   flagpar=flagpar,
                   options='nodisp',
                   command="'<'")

    if o:
        logger.info("Writing output " + path2vis + '/' + vis + '.pgflag.txt')
        write2file('pgflag', o, vis + '.pgflag.txt')
    logger.info("PGFLAG: DONE.")
Example 4
def get_theoretical_noise(self, dataset, gausslimit, startchan=None, endchan=None):
    """
    Subroutine to create a Stokes V image from a dataset and measure the noise, which should be similar to the theoretical one
    image (string): The path to the dataset file.
    startchan(int): First channel to use for imaging, zero-based
    endchan(int): Last channel to use for imaging, zero-based
    returns (numpy array): The rms of the image
    """
    invert = lib.miriad('invert')
    invert.vis = dataset
    invert.map = 'vrms'
    invert.beam = 'vbeam'
    invert.imsize = 1024
    invert.cell = 5
    invert.stokes = 'v'
    invert.slop = 1
    invert.robust = -2
    invert.options = 'mfs'
    if startchan is not None and endchan is not None:
        invert.line = 'channel,1,' + str(startchan + 1) + ',' + str(endchan - startchan + 1) + ',' + str(endchan - startchan + 1)
    invert.go()
    vmax, vmin, vstd = imstats.getimagestats(self, 'vrms')
    gaussianity = qa.checkimagegaussianity(self, 'vrms', gausslimit)
    if os.path.isdir('vrms') and os.path.isdir('vbeam'):
        managefiles.director(self, 'rm', 'vrms')
        managefiles.director(self, 'rm', 'vbeam')
    else:
        raise ApercalException('Stokes V image was not created successfully. Cannot calculate theoretical noise! No iterative selfcal possible!')
    return gaussianity, vstd
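A worked example of how the invert.line string above is assembled, for a hypothetical channel range.

# Hypothetical channel range: image channels 0..63 (zero-based) as a single MFS plane.
startchan, endchan = 0, 63
line = 'channel,1,' + str(startchan + 1) + ',' + str(endchan - startchan + 1) + ',' + str(endchan - startchan + 1)
print(line)  # channel,1,1,64,64 -> start at channel 1 (one-based), width 64, step 64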
Example 5
def calc_mask_threshold(theoretical_noise_threshold, noise_threshold,
                        dynamic_range_threshold):
    """
    Function to calculate the actual mask_threshold and the type of mask threshold from the
    theoretical noise threshold, noise threshold, and the dynamic range threshold
    theoretical_noise_threshold (float): The theoretical noise threshold calculated by
                                         calc_theoretical_noise_threshold
    noise_threshold (float): The noise threshold calculated by calc_noise_threshold
    dynamic_range_threshold (float): The dynamic range threshold calculated by calc_dynamic_range_threshold
    returns (float, string): The maximum of the three thresholds, the type of the maximum threshold
    """
    # if np.isinf(dynamic_range_threshold) or np.isnan(dynamic_range_threshold):
    #     dynamic_range_threshold = noise_threshold
    mask_threshold = np.max([
        theoretical_noise_threshold, noise_threshold, dynamic_range_threshold
    ])
    mask_argmax = np.argmax([
        theoretical_noise_threshold, noise_threshold, dynamic_range_threshold
    ])
    if mask_argmax == 0:
        mask_threshold_type = 'Theoretical noise threshold'
    elif mask_argmax == 1:
        mask_threshold_type = 'Noise threshold'
    elif mask_argmax == 2:
        mask_threshold_type = 'Dynamic range threshold'
    else:
        raise ApercalException("Unknown mask thresholdtype")

    return mask_threshold, mask_threshold_type
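A quick usage sketch with made-up threshold values, just to show which of the three inputs wins.

threshold, threshold_type = calc_mask_threshold(2.5e-4, 1.0e-4, 6.0e-4)
print(threshold, threshold_type)  # 0.0006 Dynamic range threshold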
Example 6
def qimplot(image=None, rmin=-2, rmax=2, cmap='gray'):
    """
    qimplot: Quick Image Plot
    Plots image in grayscale. Colorscale is from rmin*RMS to rmax*RMS.
    Defaults:
        rmin = -2
        rmax = +2
        cmap = 'gray'
            Can be any of the usual cmap values, e.g. 'YlOrRd' or 'jet'
    """
    logger = logging.getLogger('QIMPLOT')
    logger.info("Quick Image Plot")
    if image is None:
        error = "Please provide an input image!"
        logger.critical(error)
        raise ApercalException(error)
    plt.figure(figsize=(10, 10))
    fits = miriad('fits')
    if not os.path.exists(image):
        raise ApercalException(image + " not found!")
    fits.in_ = image
    fits.out = image + '.fits'
    fits.op = 'xyout'
    fits.go(rmfiles=True)
    imheader = pyfits.open(image + '.fits')
    imdata = imheader[0].data
    rms = np.sqrt(np.mean(np.abs(imdata[0, 0, :, :])**2))
    logger.info('RMS = ' + "{:2.2}".format(rms))
    logger.info("Plotting from " + str(rmin) + "*RMS to " + str(rmax) +
                str("*RMS"))
    plt.imshow(np.flipud(imdata[0, 0, :, :]),
               cmap=cmap,
               vmin=rmin * rms,
               vmax=rmax * rms)
    plt.colorbar()
    plt.xticks(())
    plt.yticks(())
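A hypothetical usage sketch; 'image_00' stands for a MIRIAD image directory on disk.

qimplot(image='image_00', rmin=-3, rmax=3, cmap='YlOrRd')
plt.show()  # qimplot sets up the figure but does not call show() itself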
Example 7
def calc_dr_min(dr_maj, majc, minorcycles, function):
    """
    Function to calculate the dynamic range limits during minor cycles
    dr_maj (list of floats): List with dynamic range limits for major cycles. Usually from calc_dr_maj
    majc (int): The major cycles you want to calculate the minor cycle dynamic ranges for
    minorcycles (int): The number of minor cycles to use
    function (string): The function to follow for increasing the dynamic ranges. Currently 'square', 'power', and
                       'linear' are supported.
    returns (list of floats): A list of floats for the dynamic range limits within the minor cycles.
    """
    if majc == 0:  # Take care about the first major cycle
        prevdr = 0
    else:
        prevdr = dr_maj[majc - 1]
    # The different options to increase the minor cycle threshold
    if function == 'square':
        dr_min = [
            prevdr + ((dr_maj[majc] - prevdr) * (n**2.0)) /
            ((minorcycles - 1)**2.0) for n in range(minorcycles)
        ]
    elif function == 'power':
        # Not exact; needs more work, but close
        dr_min = [
            prevdr + np.power((dr_maj[majc] - prevdr), (1.0 / n))
            for n in range(minorcycles)
        ][::-1]
    elif function == 'linear':
        dr_min = [(prevdr + ((dr_maj[majc] - prevdr) / (minorcycles - 1)) * n)
                  for n in range(minorcycles)]
    else:
        raise ApercalException('Function for minor cycles not supported!')
    return dr_min
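A worked example with made-up dynamic range limits, showing the 'square' growth from the previous major-cycle limit to the current one.

# dr_maj = [10, 50], current major cycle 1, four minor cycles:
print(calc_dr_min([10.0, 50.0], majc=1, minorcycles=4, function='square'))
# [10.0, 14.44..., 27.77..., 50.0]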
Example 8
def getimagestats(self, image):
    """
    Subroutine to calculate the min, max and rms of an image
    image (string): The absolute path to the image file.
    returns (numpy array): The min, max and rms of the image
    """
    setinit.setinitdirs(self)
    char_set = string.ascii_uppercase + string.digits
    if os.path.isdir(image) or os.path.isfile(image):
        if os.path.isdir(image):
            temp_string = ''.join(random.sample(char_set * 8, 8))
            fits = lib.miriad('fits')
            fits.op = 'xyout'
            fits.in_ = image
            with tempfile.TemporaryDirectory() as tempdir:
                fits.out = tempdir + '/' + temp_string + '.fits'
                fits.go()
                image_data = pyfits.open(tempdir + '/' + temp_string + '.fits')
        elif os.path.isfile(image):
            image_data = pyfits.open(image)
        else:
            error = 'Image format not supported. Only MIRIAD and FITS formats are supported!'
            logger.error(error)
            raise ApercalException(error)

        data = image_data[0].data
        imagestats = np.full(3, np.nan)
        if data.shape[-3] == 2:
            imagestats[0] = np.nanmin(
                data[0, 0, :, :])  # Get the minimum of the image
            imagestats[1] = np.nanmax(
                data[0, 0, :, :])  # Get the maximum of the image
            imagestats[2] = np.nanstd(
                data[0, 0, :, :])  # Get the standard deviation
        else:
            imagestats[0] = np.nanmin(data)  # Get the minimum of the image
            imagestats[1] = np.nanmax(data)  # Get the maximum of the image
            imagestats[2] = np.nanstd(data)  # Get the standard deviation
        image_data.close()  # Close the image
    else:
        error = 'Image does not seem to exist!'
        logger.error(error)
        raise ApercalException(error)

    return imagestats
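For context, a minimal sketch (not part of apercal) of the three statistics assembled above, computed on synthetic data; the returned order is [min, max, std].

import numpy as np

data = np.random.default_rng(0).normal(0.0, 1.0e-4, size=(1, 1, 256, 256))
stats = np.array([np.nanmin(data[0, 0]), np.nanmax(data[0, 0]), np.nanstd(data[0, 0])])
print(stats)  # [minimum, maximum, standard deviation] in image units, e.g. Jy/beam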
Example 9
def importuvfitsys(uvfits=None, uv=None, tsys=True):
    """
    Imports UVFITS file and does Tsys correction on the output MIRIAD UV file.
    Uses the MIRIAD task WSRTFITS to import the UVFITS file and convert it to MIRIAD UV format.
    Uses the MIRIAD task ATTSYS to do the Tsys correction.
    """
    logger = logging.getLogger('importuvfitsys')
    if uvfits is None:
        raise ApercalException("UVFITS not specified. Please check parameters")
    # NOTE: Import the fits file
    path2uvfits = os.path.split(uvfits)[0]
    uvfits = os.path.split(uvfits)[1]
    if uv is None:
        # Default output name if a custom name isn't provided.
        uv = uvfits.split('.')[0] + '.UV'

    if path2uvfits != '':
        try:
            os.chdir(path2uvfits)
            logger.info("Moved to path " + path2uvfits)
        except Exception:
            raise ApercalException("Error: Directory does not exist!")

    # cmd = 'wsrtfits in='+uvf+' op=uvin velocity=optbary out='+uv
    if uvfits.split('.')[1] == 'MS':
        uvfits = uvfits.split('.')[0] + '.UVF'
    if not os.path.exists(uvfits):
        raise ApercalException(uvfits + " does not exist!")
    if os.path.exists(uv):
        logger.warning(uv + ' exists! I won\'t clobber. Skipping this part...')
        logger.info("Exiting gracefully.")
        return
    masher(task='wsrtfits', in_=uvfits, out=uv, op='uvin', velocity='optbary')

    # NOTE: Tsys Calibration
    # basher("attsys vis="+uv+" out=temp")
    if tsys is True:
        if os.path.exists('temp'):
            basher("rm -r temp")
        masher(task='attsys', vis=uv, out='temp')
        basher('rm -r ' + uv)
        basher('mv temp ' + uv)
    logger.info('Appears to have ended successfully...')
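A hypothetical usage sketch; the path is illustrative. With uv left unset, the output name is derived from the UVFITS name.

# Imports 3C147.UVF into 3C147.UV in the same directory and applies the Tsys correction.
importuvfitsys(uvfits='/data/apertif/190108926/00/3C147.UVF')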
Example 10
def ms2uvfits(ms=None, uvf=None):
    """
    ms2uvfits(ms=None)
    Utility to convert ms to a uvfits file
    """
    logger = logging.getLogger('ms2uvfits')
    logger.info("ms2uvfits: Converting MS to UVFITS Format")
    if ms is None:
        raise ApercalException("MS not specified. Please check parameters")
    # Setup the path and input file name.
    path2ms = os.path.split(ms)[0]
    ms = os.path.split(ms)[1]
    if path2ms != '':
        try:
            os.chdir(path2ms)
            logger.info("Moved to path " + path2ms)
        except Exception:
            raise ApercalException("Directory or MS does not exist!")

    # Start the processing by setting up an output name and reporting the status.
    if uvf is None:
        uvfits = ms.replace(".MS", ".UVF")
    else:
        uvfits = uvf
    if os.path.exists(uvfits):
        logger.error(uvfits + " exists! Skipping this part....")
        logger.info("Exiting gracefully.")
        return
    # TODO: Decide whether to replace logger.info with logger.debug, since this module is
    # wrapped up.
    logger.info("MS: " + ms)
    logger.info("UVFITS: " + uvfits)
    logger.info("Directory: " + path2ms)
    # NOTE: Here I'm using masher to call ms2uvfits.
    o = masher(task='ms2uvfits',
               ms=ms,
               fitsfile=uvfits,
               writesyscal='T',
               multisource='T',
               combinespw='T')
    logger.info("Appears to have ended successfully.")
Example 11
def calc_dr_maj(drinit, dr0, majorcycles, func):
    """
    Function to calculate the dynamic range limits during major cycles
    drinit (float): The initial dynamic range
    dr0 (float): Coefficient for increasing the dynamic range threshold at each major cycle
    majorcycles (int): The number of major cycles to execute
    func (string): The function to follow for increasing the dynamic ranges. Currently 'square' is supported.
    returns (list of floats): A list of floats for the dynamic range limits within the major cycles.
    """
    if func == 'square':
        dr_maj = [drinit * np.power(dr0, m) for m in range(majorcycles)]
    else:
        raise ApercalException('Function for major cycles not supported')
    return dr_maj
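A worked example with made-up values, showing the geometric progression drinit * dr0**m.

print(calc_dr_maj(10.0, 2.0, 4, 'square'))  # [10.0, 20.0, 40.0, 80.0]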
Example 12
def getmaskstats(self, image, size):
    """
    Subroutine to calculate the number of pixels in a mask and its percentage of the full image
    image (string): The absolute path to the image file.
    size (int): Number of pixels along an axis of the original image. Assumes square images.
    returns (numpy array): The number of pixels and their percentage of the full image
    """
    setinit.setinitdirs(self)
    char_set = string.ascii_uppercase + string.digits
    if os.path.isdir(image) or os.path.isfile(image):
        if os.path.isdir(image):
            temp_string = ''.join(random.sample(char_set * 8, 8))
            fits = lib.miriad('fits')
            fits.op = 'xyout'
            fits.in_ = image
            with tempfile.TemporaryDirectory() as tempdir:
                fits.out = tempdir + '/' + temp_string + '.fits'
                fits.go()
                mask_data = pyfits.open(tempdir + '/' + temp_string + '.fits')
        elif os.path.isfile(image):
            mask_data = pyfits.open(image)
        else:
            error = 'Image format not supported. Only MIRIAD and FITS formats are supported!'
            logger.error(error)
            raise ApercalException(error)

        data = mask_data[0].data
        maskstats = np.full(2, np.nan)
        maskstats[0] = np.count_nonzero(~np.isnan(data))
        maskstats[1] = maskstats[0] / (size**2)
        mask_data.close()
    else:
        error = 'Image does not seem to exist!'
        logger.error(error)
        raise ApercalException(error)

    return maskstats
Example 13
def getmodelstats(self, image):
    """
    Subroutine to calculate the number of clean components and their flux
    image (string): The absolute path to the image file.
    returns (numpy array): The number of pixels with clean components and their summed flux in Jy
    """
    setinit.setinitdirs(self)
    char_set = string.ascii_uppercase + string.digits
    if os.path.isdir(image) or os.path.isfile(image):
        if os.path.isdir(image):
            temp_string = ''.join(random.sample(char_set * 8, 8))
            fits = lib.miriad('fits')
            fits.op = 'xyout'
            fits.in_ = image
            with tempfile.TemporaryDirectory() as tempdir:
                fits.out = tempdir + '/' + temp_string + '.fits'
                fits.go()
                model_data = pyfits.open(tempdir + '/' + temp_string + '.fits')
        elif os.path.isfile(image):
            model_data = pyfits.open(image)
        else:
            error = 'Image format not supported. Only MIRIAD and FITS formats are supported!'
            logger.error(error)
            raise ApercalException(error)

        data = model_data[0].data[:, 0, :, :]
        modelstats = np.full(2, np.nan)
        modelstats[0] = np.count_nonzero(data)
        modelstats[1] = np.sum(data)
        model_data.close()
    else:
        error = 'Image does not seem to exist!'
        logger.error(error)
        raise ApercalException(error)

    return modelstats
Example 14
def get_source_names(vis=None):
    """
    get_source_names (vis=None)
    Helper function that uses the MIRIAD task UVINDEX to grab the name of the
    sources from a MIRIAD visibility file.
    """
    if vis:
        u = masher(task='uvindex', vis=vis)
        i = [i for i in range(0, len(u)) if "pointing" in u[i]]
        N = len(u)
        s_raw = u[int(i[0] + 2):N - 2]
        sources = []
        for s in s_raw:
            sources.append(s.replace('  ', ' ').split(' ')[0])
        return sources[0:-1]
    else:
        raise ApercalException("get_source_names needs a vis!")
Example 15
def calc_dr_amp(drstart, dr0, minorcycles, function_):
    """
    Function to calculate the dynamic range limits during the amplitude self-calibration
    drstart (float): Dynamic range of the last phase calibration mask
    dr0 (float): Coefficient for increasing the dynamic range threshold at each major cycle
    minorcycles (int): Number of maximum minor cycles during cleaning for amplitude calibration
    function_ (string): The function to follow for increasing the dynamic ranges. Currently 'square', 'power', and 'linear' are supported.
    returns (list of floats): A list of floats for the dynamic range limits within the minor cycles.
    """
    dr_limits = [drstart * np.power(dr0, m) for m in range(2)]
    dr_start = dr_limits[0]
    dr_end = dr_limits[1]
    # The different options to increase the minor cycle threshold
    if function_ == 'square':
        dr_min = [dr_start + ((dr_end - dr_start) * (n ** 2.0)) / ((minorcycles - 1) ** 2.0) for n in
                  range(minorcycles)]
    elif function_ == 'power':
        dr_min = [dr_start + np.power((dr_end - dr_start), (1.0 / n)) for n in range(minorcycles)][
                 ::-1]  # Not exact; needs more work, but close
    elif function_ == 'linear':
        dr_min = [(dr_start + ((dr_end - dr_start) / (minorcycles - 1)) * n) for n in range(minorcycles)]
    else:
        raise ApercalException(' Function for minor cycles not supported! Exiting!')
    return dr_min
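A worked example with made-up values: start from a dynamic range of 50 with dr0 = 2 and three minor cycles of 'linear' growth.

print(calc_dr_amp(50.0, 2.0, 3, 'linear'))  # [50.0, 75.0, 100.0]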
Example 16
    logger.debug("Done getting data from ALTA")


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    logging.basicConfig()

    args = sys.argv
    # Get date
    try:
        date = args[1]
    except Exception:
        raise ApercalException("Date required! Format: YYMMDD e.g. 180309")

    # Get ID range
    try:
        irange = args[2]
    except Exception:
        raise ApercalException("ID range required! Format: NNN-NNN e.g. 002-010")

    # Get beams
    try:
        brange = args[3]
    except Exception:
        raise ApercalException("Beam range required! Format: NN-NN e.g. 00-37")

    # Get beams
    try:
Example 17
def start_apercal_pipeline(targets,
                           fluxcals,
                           polcals,
                           dry_run=False,
                           basedir=None,
                           flip_ra=False,
                           steps=None,
                           configfilename=None):
    """
    Trigger the start of an apercal pipeline run. Returns when the pipeline is done.
    Example for taskid, name, beamnr: (190108926, '3C147_36', 36)
    Fluxcals and polcals can be specified in the wrong order; if the polcal is not polarised,
    they will be flipped.
    If both polcals and fluxcals are set, they should both be the same length.
    A list of config files can be provided, i.e., one for each beam. If a single config file 
    is given, copies of it will be created so that there is one config per beam. If no
    config file is given, the default one is used and copies for each beam are made.

    Args:
        targets (Tuple[int, str, List[int]]): taskid, name, list of beamnrs
        fluxcals (List[Tuple[int, str, int]]): fluxcals: taskid, name, beamnr
        polcals (List[Tuple[int, str, int]]): polcals: taskid, name, beamnr (can be None)
        dry_run (bool): interpret arguments, do not actually run pipeline
        basedir (str): base directory; if not specified will be /data/apertif/{target_taskid}
        flip_ra (bool): flip RA (for old measurement sets where beamweights were flipped)
        steps (List[str]): list of steps to perform
        configfilename (List[str]): Custom configfile (should be full path for now)

    Returns:
        Tuple[Dict[int, List[str]], str, str]: a dict, the formatted runtime, and possibly an
                                               exception. The dict contains beam numbers (ints)
                                               as keys and a list of failed steps as values.
                                               Failed is defined here as 'threw an exception',
                                               only for target steps. Please also read the logs.
    """
    if steps is None:
        steps = [
            "prepare", "split", "preflag", "ccal", "convert", "scal",
            "continuum", "polarisation", "line", "transfer"
        ]

    (taskid_target, name_target, beamlist_target) = targets

    # set the base directory if none was provided
    if not basedir:
        basedir = '/data/apertif/{}/'.format(taskid_target)
    elif len(basedir) > 0 and basedir[-1] != '/':
        basedir = basedir + '/'
    if not os.path.exists(basedir):
        os.mkdir(basedir)

    logfilepath = os.path.join(basedir, 'apercal.log')

    lib.setup_logger('debug', logfile=logfilepath)
    logger = logging.getLogger(__name__)
    gitinfo = subprocess.check_output('cd ' +
                                      os.path.dirname(apercal.__file__) +
                                      '&& git describe --tag; cd',
                                      shell=True).strip()
    logger.info("Apercal version: " + gitinfo)

    logger.info(
        "start_apercal called with arguments targets={}; fluxcals={}; polcals={}"
        .format(targets, fluxcals, polcals))
    logger.info("steps = {}".format(steps))

    # number of beams to process
    n_beams = len(beamlist_target)

    # check the input config file
    # get the default configfile if none was provided
    if not configfilename:
        logger.info("No config file provided, getting default config")
        # create a list of config file name
        configfilename_list = [
            os.path.join(
                basedir,
                "{0}_B{1}_Apercal_settings.cfg".format(taskid_target,
                                                       str(beam).zfill(2)))
            for beam in beamlist_target
        ]
        # get the default config settings
        config = lib.get_default_config()
        # go through the config files and create them
        for beam_index in range(n_beams):
            with open(configfilename_list[beam_index], "w") as fp:
                config.write(fp)
            logger.info("Beam {} config file saved to {}".format(
                beamlist_target[beam_index], configfilename_list[beam_index]))
    # if configfile(s) are given as a list
    elif type(configfilename) is list:
        # if it is just one, create copies for each beam in the base directory
        if len(configfilename) == 1:
            logger.info(
                "A single config file was provided. Creating copies of {}".
                format(configfilename[0]))
            configfilename_list = [
                os.path.join(
                    basedir, "{0}_B{1}_Apercal_settings.cfg".format(
                        taskid_target,
                        str(beam).zfill(2))) for beam in beamlist_target
            ]
            # make the copies
            for config in configfilename_list:
                lib.basher("cp " + str(configfilename[0]) + " " + str(config))
        elif len(configfilename) == n_beams:
            logger.info("Number of config files and target beams match.")
            configfilename_list = configfilename
        else:
            error = "Number of config files and target beams did not match. Abort"
            logger.error(error)
            raise RuntimeError(error)
    # if configfilename is just a string
    elif type(configfilename) is str:
        logger.info(
            "A single config file was provided. Creating copies of {}".format(
                configfilename))
        configfilename_list = [
            os.path.join(
                basedir,
                "{0}_B{1}_Apercal_settings.cfg".format(taskid_target,
                                                       str(beam).zfill(2)))
            for beam in beamlist_target
        ]
        # make the copies
        for config in configfilename_list:
            lib.basher("cp " + str(configfilename) + " " + str(config))
    else:
        error = "Unknown input for configfilename. Abort"
        logger.error(error)
        raise RuntimeError(error)

    status = pymp.shared.dict({beamnr: [] for beamnr in beamlist_target})

    if fluxcals:
        name_fluxcal = str(fluxcals[0][1]).strip().split('_')[0].upper()
    else:
        name_fluxcal = ''
    if polcals:
        name_polcal = str(polcals[0][1]).strip().split('_')[0].upper()
    else:
        name_polcal = ''
    name_target = str(name_target).strip()  # .upper()

    # If both fluxcal and polcal polarized, remove polcal
    if subs_calmodels.is_polarised(
            name_polcal) and subs_calmodels.is_polarised(name_fluxcal):
        name_polcal = ""

    if (fluxcals and fluxcals != '') and (polcals and polcals != ''):
        assert (len(fluxcals) == len(polcals))

    # avoid symmetry bias, if there is only a polcal but no fluxcal, switch them
    if fluxcals is None and polcals is not None:
        logger.info(
            "Only polcal was provided. Setting polcal {} to fluxcal".format(
                name_polcal))
        fluxcals, polcals = polcals, fluxcals
        name_polcal = ""
    # Exchange polcal and fluxcal if specified in the wrong order
    elif not subs_calmodels.is_polarised(name_polcal) and name_polcal != '':
        if subs_calmodels.is_polarised(name_fluxcal):
            logger.info("Switching polcal and fluxcal because " + name_polcal +
                        " is not polarised")
            fluxcals, polcals = polcals, fluxcals
            name_polcal = str(polcals[0][1]).strip()
        else:
            logger.info("Setting polcal to '' since " + name_polcal +
                        " is not polarised")
            name_polcal = ""
    elif name_polcal != '':
        logger.info("Polcal " + name_polcal + " is polarised, all good")

    def name_to_ms(name):
        if not name:
            return ''
        elif '3C' in name:
            return name.upper().strip().split('_')[0] + '.MS'
        else:
            return name + '.MS'

    def name_to_mir(name):
        if not name:
            return ''
        elif '3C' in name:
            return name.upper().strip().split('_')[0] + '.mir'
        else:
            return name + '.mir'

    def set_files(p):
        """
        Set the basedir, fluxcal, polcal, target properties

        Args:
            p (BaseModule): apercal step object (e.g. prepare)

        Returns:
            None
        """

        p.basedir = basedir
        p.fluxcal = name_to_ms(name_fluxcal)
        p.polcal = name_to_ms(name_polcal)
        p.target = name_to_ms(name_target)

        # debug_msg = """
        # p.basedir = basedir = {0};
        # p.fluxcal = name_to_ms(name_fluxcal) = {1};
        # p.polcal = name_to_ms(name_polcal) = {2};
        # p.target = name_to_ms(name_target) = {3};
        # """.format(basedir, name_to_ms(name_fluxcal), name_to_ms(name_polcal), name_to_ms(name_target))
        # logger.debug(debug_msg)

    beamnrs_fluxcal = [f[2] for f in fluxcals]
    if len(fluxcals) > 1:
        # Check every target beam has a fluxcal beam
        for beamnr_target in beamlist_target:
            assert (beamnr_target in beamnrs_fluxcal)

    # creating a copy of the target beamlist as a normal array
    # to avoid using np.where() for such a small thing
    if type(beamlist_target) == np.ndarray:
        beamlist_target_for_config = beamlist_target.tolist()
    else:
        beamlist_target_for_config = beamlist_target

    time_start = time()
    try:
        # =======
        # Prepare
        # =======

        # keep a start-finish record of step in the main log file
        if "prepare" in steps:
            logger.info("Running prepare")
            start_time_prepare = time()
        else:
            logger.info("Skipping prepare")

        # Prepare fluxcals
        for (taskid_fluxcal, name_fluxcal, beamnr_fluxcal) in fluxcals:
            p0 = prepare(file_=configfilename_list[
                beamlist_target_for_config.index(beamnr_fluxcal)])
            p0.basedir = basedir
            #set_files(p0)
            p0.prepare_flip_ra = flip_ra
            # the following two need to be empty strings for prepare
            p0.fluxcal = ''
            p0.polcal = ''
            p0.target = name_to_ms(name_fluxcal)
            p0.prepare_target_beams = str(beamnr_fluxcal)
            p0.prepare_date = str(taskid_fluxcal)[:6]
            p0.prepare_obsnum_target = validate_taskid(taskid_fluxcal)
            if "prepare" in steps and not dry_run:
                try:
                    p0.go()
                except Exception as e:
                    logger.warning("Prepare failed for fluxcal " +
                                   str(taskid_fluxcal) + " beam " +
                                   str(beamnr_fluxcal))
                    logger.exception(e)

        if 'prepare' in steps:
            # copy the param file generated here
            param_file = os.path.join(basedir, 'param.npy')
            director(p0,
                     'rn',
                     param_file.replace(
                         ".npy",
                         "_prepare_{}.npy".format(name_fluxcal.split('_')[0])),
                     file_=param_file,
                     ignore_nonexistent=True)

        # Prepare polcals
        if name_polcal != '':
            for (taskid_polcal, name_polcal, beamnr_polcal) in polcals:
                p0 = prepare(file_=configfilename_list[
                    beamlist_target_for_config.index(beamnr_polcal)])
                p0.basedir = basedir
                #set_files(p0)
                p0.prepare_flip_ra = flip_ra
                # the following two need to be empty strings for prepare
                p0.fluxcal = ''
                p0.polcal = ''
                p0.target = name_to_ms(name_polcal)
                p0.prepare_target_beams = str(beamnr_polcal)
                p0.prepare_date = str(taskid_polcal)[:6]
                p0.prepare_obsnum_target = validate_taskid(taskid_polcal)
                if "prepare" in steps and not dry_run:
                    try:
                        p0.go()
                    except Exception as e:
                        logger.warning("Prepare failed for polcal " +
                                       str(taskid_polcal) + " beam " +
                                       str(beamnr_polcal))
                        logger.exception(e)

            if 'prepare' in steps:
                # copy the param file generated here
                param_file = os.path.join(basedir, 'param.npy')
                director(p0,
                         'rn',
                         param_file.replace(
                             ".npy", "_prepare_{}.npy".format(
                                 name_polcal.split('_')[0])),
                         file_=param_file,
                         ignore_nonexistent=True)

        # Prepare target
        for beamnr in beamlist_target:
            p0 = prepare(file_=configfilename_list[
                beamlist_target_for_config.index(beamnr)])
            p0.basedir = basedir
            # set_files(p0)
            p0.prepare_flip_ra = flip_ra
            # the following two need to be empty strings for prepare
            p0.fluxcal = ''
            p0.polcal = ''
            p0.target = name_to_ms(name_target)
            p0.prepare_date = str(taskid_target)[:6]
            p0.prepare_obsnum_target = validate_taskid(taskid_target)
            p0.prepare_target_beams = ','.join(
                ['{:02d}'.format(beamnr) for beamnr in beamlist_target])
            if "prepare" in steps and not dry_run:
                try:
                    p0.go()
                except Exception as e:
                    logger.warning("Prepare failed for target " +
                                   str(taskid_target) + " beam " + str(beamnr))
                    logger.exception(e)
                    status[beamnr] += ['prepare']

        # keep a start-finish record of step in the main log file
        if "prepare" in steps:
            logger.info("Running prepare ... Done ({0:.0f}s)".format(
                time() - start_time_prepare))

            # copy the param file generated here
            param_file = os.path.join(basedir, 'param.npy')
            director(p0,
                     'rn',
                     param_file.replace(".npy",
                                        "_prepare_{}.npy".format(name_target)),
                     file_=param_file,
                     ignore_nonexistent=True)

        # =====
        # Split
        # =====

        # keep a start-finish record of step in the main log file
        if 'split' in steps:
            logger.info("Running split")
            start_time_split = time()
        else:
            logger.info("Skipping split")

        # Splitting a small chunk of data for quicklook pipeline
        # at the moment it all relies on the target beams
        # what if there are more calibrator beams than target beams -> is that realistic?
        with pymp.Parallel(5) as p:
            for beam_index in p.range(n_beams):
                beamnr = beamlist_target[beam_index]

                # individual logfiles for each process
                logfilepath = os.path.join(basedir,
                                           'apercal{:02d}.log'.format(beamnr))
                lib.setup_logger('debug', logfile=logfilepath)
                logger = logging.getLogger(__name__)

                logger.debug("Starting logfile for beam " + str(beamnr))
                try:
                    s0 = split(file_=configfilename_list[beam_index])
                    set_files(s0)
                    s0.beam = "{:02d}".format(beamnr)
                    if "split" in steps and not dry_run:
                        s0.go()
                except Exception as e:
                    logger.warning("Split failed for {0} beam {1}".format(
                        str(taskid_target), str(beamnr)))
                    logger.exception(e)
                    # not sure if following line is necessary
                    status[beamnr] += ['split']

        # keep a start-finish record of step in the main log file
        if "split" in steps:
            logfilepath = os.path.join(basedir, 'apercal.log')
            lib.setup_logger('debug', logfile=logfilepath)
            logger = logging.getLogger(__name__)
            logger.info(
                "Running split ... Done ({0:.0f}s)".format(time() -
                                                           start_time_split))

            # copy the param file generated here
            # param_file = os.path.join(basedir, 'param.npy')
            # director(
            #     p0, 'rn', param_file.replace(".npy", "_split.npy"), file_=param_file, ignore_nonexistent=True)

        # =======
        # Preflag
        # =======

        # keep a record of the parallelised step in the main log file
        if "preflag" in steps:
            logger.info("Running preflag")
            start_time_preflag = time()
        else:
            logger.info("Skipping preflag")

        # In order to run in parallel, the bandpass table needs to exist.
        # Doing it here is not elegant, but requires the least amount of changes
        # to preflag
        # with pymp.Parallel(10) as p:
        #     for beam_index in p.range(n_beams):
        #         beamnr = beamlist_target[beam_index]
        #         # individual logfiles for each process
        #         logfilepath = os.path.join(
        #             basedir, 'apercal{:02d}.log'.format(beamnr))
        #         lib.setup_logger('debug', logfile=logfilepath)
        #         logger = logging.getLogger(__name__)

        #         logger.debug("Starting logfile for beam " + str(beamnr))
        #         p1 = preflag(filename=configfilename)
        #         p1.paramfilename = 'param_{:02d}.npy'.format(beamnr)
        #         p1.basedir = basedir
        #         p1.fluxcal = ''
        #         p1.polcal = ''
        #         p1.target = name_to_ms(name_fluxcal)

        #         p1.beam = "{:02d}".format(beamnr)
        #         p1.preflag_targetbeams = "{:02d}".format(beamnr)
        #         if "preflag" in steps and not dry_run:
        #             try:
        #                 bandpass_start_time = time()
        #                 logger.info("Running aoflagger bandpass for flux calibrator {0} in beam {1}".format(
        #                     p1.target, p1.beam))
        #                 # director(
        #                 #     p1, 'rm', basedir + '/param_{:02d}.npy'.format(beamnr), ignore_nonexistent=True)
        #                 p1.go()
        #                 # director(p1, 'rm', basedir + '/param.npy',
        #                 #         ignore_nonexistent=True)

        #                 # it is necessary to move the param files in order to keep them
        #                 param_file = basedir + \
        #                     '/param_{:02d}.npy'.format(beamnr)
        #                 director(
        #                     p1, 'mv', param_file, file_=param_file.replace(".npy", "_preflag_{0}.npy".format(name_fluxcal)), ignore_nonexistent=True)

        #                 p1.aoflagger_bandpass()
        #             except Exception as e:
        #                 logger.warning("Running aoflagger bandpass for flux calibrator {0} in beam {1} ... Failed ({2:.0f}s)".format(
        #                     p1.target, p1.beam, time() - bandpass_start_time))
        #                 logger.exception(e)
        #                 status[beamnr] += ['preflag_bandpass']
        #             else:
        #                 logger.info("Running aoflagger bandpass for flux calibrator {0} in beam {1} ... Done ({2:.0f}s)".format(
        #                     p1.target, p1.beam, time() - bandpass_start_time))

        # Flag fluxcal (pretending it's a target, parallelised version)
        # 5 in parallel
        with pymp.Parallel(5) as p:
            for beam_index in p.range(n_beams):
                beamnr = beamlist_target[beam_index]

                # individual logfiles for each process
                logfilepath = os.path.join(basedir,
                                           'apercal{:02d}.log'.format(beamnr))
                lib.setup_logger('debug', logfile=logfilepath)
                logger = logging.getLogger(__name__)

                logger.debug("Starting logfile for beam " + str(beamnr))

                try:
                    p1 = preflag(filename=configfilename_list[beam_index])
                    p1.paramfilename = 'param_{:02d}.npy'.format(beamnr)
                    p1.basedir = basedir
                    p1.fluxcal = ''
                    p1.polcal = ''
                    p1.target = name_to_ms(name_fluxcal)
                    p1.beam = "{:02d}".format(beamnr)
                    p1.preflag_targetbeams = "{:02d}".format(beamnr)
                    if beam_index < 2:
                        p1.preflag_aoflagger_threads = 9
                    else:
                        p1.preflag_aoflagger_threads = 10
                    if "preflag" in steps and not dry_run:
                        logger.info(
                            "Running preflag for flux calibrator {0} in beam {1}"
                            .format(p1.target, p1.beam))
                        preflag_flux_cal_start_time = time()
                        # director(
                        #     p1, 'rm', basedir + '/param_{:02d}.npy'.format(beamnr), ignore_nonexistent=True)
                        p1.go()

                        # it is necessary to move the param files in order to keep them
                        param_file = os.path.join(
                            basedir, 'param_{:02d}.npy'.format(beamnr))
                        director(p1,
                                 'rn',
                                 param_file.replace(
                                     ".npy", "_preflag_{0}.npy".format(
                                         name_fluxcal.split('_')[0])),
                                 file_=param_file,
                                 ignore_nonexistent=True)

                        logger.info(
                            "Running preflag for flux calibrator {0} in beam {1} ... Done ({2:.0f}s)"
                            .format(p1.target, p1.beam,
                                    time() - preflag_flux_cal_start_time))
                except Exception as e:
                    logger.warning(
                        "Running preflag for flux calibrator {0} in beam {1} ... Failed ({2:.0f}s)"
                        .format(p1.target, p1.beam,
                                time() - preflag_flux_cal_start_time))
                    logger.exception(e)
                    status[beamnr] += ['preflag']

        # Flag polcal (pretending it's a target, parallel version)
        # 5 in parallel
        with pymp.Parallel(5) as p:
            for beam_index in p.range(n_beams):
                beamnr = beamlist_target[beam_index]

                # individual logfiles for each process
                logfilepath = os.path.join(basedir,
                                           'apercal{:02d}.log'.format(beamnr))
                lib.setup_logger('debug', logfile=logfilepath)
                logger = logging.getLogger(__name__)

                logger.debug("Starting logfile for beam " + str(beamnr))

                try:
                    p1 = preflag(filename=configfilename_list[beam_index])
                    # remove next line in final version
                    p1.preflag_aoflagger_version = 'local'
                    p1.paramfilename = 'param_{:02d}.npy'.format(beamnr)
                    p1.basedir = basedir
                    if name_polcal != '':
                        p1.fluxcal = ''
                        p1.polcal = ''
                        p1.target = name_to_ms(name_polcal)
                        p1.beam = "{:02d}".format(beamnr)
                        p1.preflag_targetbeams = "{:02d}".format(beamnr)
                        if beam_index < 2:
                            p1.preflag_aoflagger_threads = 9
                        else:
                            p1.preflag_aoflagger_threads = 10
                        if "preflag" in steps and not dry_run:
                            logger.info(
                                "Running preflag for pol calibrator {0} in beam {1}"
                                .format(p1.target, p1.beam))
                            preflag_pol_cal_start_time = time()
                            # director(
                            #     p1, 'rm', basedir + '/param_{:02d}.npy'.format(beamnr), ignore_nonexistent=True)
                            p1.go()

                            # it is necessary to move the param files in order to keep them
                            param_file = os.path.join(
                                basedir, 'param_{:02d}.npy'.format(beamnr))
                            director(p1,
                                     'rn',
                                     param_file.replace(
                                         ".npy", "_preflag_{0}.npy".format(
                                             name_polcal.split('_')[0])),
                                     file_=param_file,
                                     ignore_nonexistent=True)

                            logger.info(
                                "Running preflag for pol calibrator {0} in beam {1} ... Done ({2:.0f}s)"
                                .format(p1.target, p1.beam,
                                        time() - preflag_pol_cal_start_time))
                except Exception as e:
                    logger.warning(
                        "Running preflag for pol calibrator {0} in beam {1} ... Failed ({2:.0f}s)"
                        .format(p1.target, p1.beam,
                                time() - preflag_pol_cal_start_time))
                    logger.exception(e)
                    status[beamnr] += ['preflag']

        # Flag target
        # 5 in parallel
        with pymp.Parallel(5) as p:
            for beam_index in p.range(n_beams):
                beamnr = beamlist_target[beam_index]

                # individual logfiles for each process
                logfilepath = os.path.join(basedir,
                                           'apercal{:02d}.log'.format(beamnr))
                lib.setup_logger('debug', logfile=logfilepath)
                logger = logging.getLogger(__name__)

                logger.debug("Starting logfile for beam " + str(beamnr))

                try:
                    p1 = preflag(filename=configfilename_list[beam_index])
                    # remove next line in final version
                    p1.preflag_aoflagger_version = 'local'
                    p1.paramfilename = 'param_{:02d}.npy'.format(beamnr)
                    p1.basedir = basedir
                    p1.fluxcal = ''
                    p1.polcal = ''
                    p1.target = name_to_ms(name_target)
                    p1.beam = "{:02d}".format(beamnr)
                    p1.preflag_targetbeams = "{:02d}".format(beamnr)
                    if beam_index < 2:
                        p1.preflag_aoflagger_threads = 9
                    else:
                        p1.preflag_aoflagger_threads = 10
                    if "preflag" in steps and not dry_run:
                        logger.info(
                            "Running preflag for target {0} in beam {1}".
                            format(p1.target, p1.beam))
                        preflag_target_start_time = time()
                        # director(
                        #     p1, 'rm', basedir + '/param_{:02d}.npy'.format(beamnr), ignore_nonexistent=True)
                        p1.go()

                        # it is necessary to move the param files in order to keep them
                        param_file = os.path.join(
                            basedir, 'param_{:02d}.npy'.format(beamnr))
                        director(p1,
                                 'rn',
                                 param_file.replace(
                                     ".npy",
                                     "_preflag_{0}.npy".format(name_target)),
                                 file_=param_file,
                                 ignore_nonexistent=True)

                        logger.info(
                            "Running preflag for target {0} in beam {1} ... Done ({2:.0f}s)"
                            .format(p1.target, p1.beam,
                                    time() - preflag_target_start_time))
                except Exception as e:
                    logger.warning(
                        "Running preflag for target {0} in beam {1} ... Failed ({2:.0f}s)"
                        .format(p1.target, p1.beam,
                                time() - preflag_target_start_time))
                    logger.exception(e)
                    status[beamnr] += ['preflag']

        # keep a record of the parallelised step in the main log file
        if "preflag" in steps:
            logfilepath = os.path.join(basedir, 'apercal.log')
            lib.setup_logger('debug', logfile=logfilepath)
            logger = logging.getLogger(__name__)

            logger.info("Running preflag ... Done ({0:.0f}s)".format(
                time() - start_time_preflag))

        # ===============
        # Crosscal
        # ===============

        # keep a record of the parallelised step in the main log file
        if 'ccal' in steps:
            logfilepath = os.path.join(basedir, 'apercal.log')
            lib.setup_logger('debug', logfile=logfilepath)
            logger = logging.getLogger(__name__)

            logger.info("Running crosscal")
            start_time_crosscal = time()
        else:
            logfilepath = os.path.join(basedir, 'apercal.log')
            lib.setup_logger('debug', logfile=logfilepath)
            logger = logging.getLogger(__name__)

            logger.info("Skipping crosscal")

        if len(fluxcals) == 1 and fluxcals[0][-1] == 0 and n_beams > 1:
            raise ApercalException(
                "Sorry, one fluxcal is not supported anymore at the moment")

        with pymp.Parallel(10) as p:
            for beam_index in p.range(n_beams):

                beamnr = beamlist_target[beam_index]
                logfilepath = os.path.join(basedir,
                                           'apercal{:02d}.log'.format(beamnr))
                lib.setup_logger('debug', logfile=logfilepath)
                logger = logging.getLogger(__name__)

                logger.debug("Starting logfile for beam " + str(beamnr))
                try:
                    p2 = ccal(file_=configfilename_list[beam_index])
                    p2.paramfilename = 'param_{:02d}.npy'.format(beamnr)
                    set_files(p2)
                    p2.beam = "{:02d}".format(beamnr)
                    p2.crosscal_transfer_to_target_targetbeams = "{:02d}".format(
                        beamnr)
                    if "ccal" in steps and not dry_run:
                        # director(
                        #     p2, 'rm', basedir + '/param_{:02d}.npy'.format(beamnr), ignore_nonexistent=True)
                        p2.go()
                        # it is necessary to move the param files in order to keep them
                        param_file = os.path.join(
                            basedir, 'param_{:02d}.npy'.format(beamnr))
                        director(p2,
                                 'rn',
                                 param_file.replace(".npy", "_crosscal.npy"),
                                 file_=param_file,
                                 ignore_nonexistent=True)
                except Exception as e:
                    # Exception was already logged just before
                    logger.warning(
                        "Failed beam {}, skipping that from crosscal".format(
                            beamnr))
                    logger.exception(e)
                    status[beamnr] += ['crosscal']

        # keep a record of the parallelised step in the main log file
        if 'ccal' in steps:
            logfilepath = os.path.join(basedir, 'apercal.log')
            lib.setup_logger('debug', logfile=logfilepath)
            logger = logging.getLogger(__name__)

            logger.info("Running crosscal ... Done ({0:.0f}s)".format(
                time() - start_time_crosscal))

        # =======
        # Convert
        # =======

        # keep a record of the parallelised step in the main log file
        if 'convert' in steps:
            logfilepath = os.path.join(basedir, 'apercal.log')
            lib.setup_logger('debug', logfile=logfilepath)
            logger = logging.getLogger(__name__)

            logger.info("Running convert")
            start_time_convert = time()
        else:
            logfilepath = os.path.join(basedir, 'apercal.log')
            lib.setup_logger('debug', logfile=logfilepath)
            logger = logging.getLogger(__name__)

            logger.info("Skipping convert")

        # 5 threads to not hammer the disks too much, convert is only IO
        with pymp.Parallel(5) as p:
            for beam_index in p.range(n_beams):
                beamnr = beamlist_target[beam_index]

                logfilepath = os.path.join(basedir,
                                           'apercal{:02d}.log'.format(beamnr))
                lib.setup_logger('debug', logfile=logfilepath)
                logger = logging.getLogger(__name__)

                try:
                    p3 = convert(file_=configfilename_list[beam_index])
                    p3.paramfilename = 'param_{:02d}.npy'.format(beamnr)
                    set_files(p3)
                    p3.beam = "{:02d}".format(beamnr)
                    p3.convert_targetbeams = "{:02d}".format(beamnr)
                    if "convert" in steps and not dry_run:
                        # director(
                        #     p3, 'rm', basedir + '/param_{:02d}.npy'.format(beamnr), ignore_nonexistent=True)
                        p3.go()

                        # it is necessary to move the param files in order to keep them
                        param_file = os.path.join(
                            basedir, 'param_{:02d}.npy'.format(beamnr))
                        director(p3,
                                 'rn',
                                 param_file.replace(".npy", "_convert.npy"),
                                 file_=param_file,
                                 ignore_nonexistent=True)
                        # director(
                        #     p3, 'rm', basedir + '/param_{:02d}.npy'.format(beamnr), ignore_nonexistent=True)
                except Exception as e:
                    logger.warning(
                        "Failed beam {}, skipping that from convert".format(
                            beamnr))
                    logger.exception(e)
                    status[beamnr] += ['convert']

        if 'convert' in steps:
            # keep a record of the parallelised step in the main log file
            logfilepath = os.path.join(basedir, 'apercal.log')
            lib.setup_logger('debug', logfile=logfilepath)
            logger = logging.getLogger(__name__)

            logger.info("Running convert ... Done ({0:.0f}s)".format(
                time() - start_time_convert))

        # ==================================
        # Selfcal + Continuum + Polarisation
        # ==================================

        # keep a record of the parallelised step in the main log file
        if 'scal' in steps or 'continuum' in steps or 'polarisation' in steps:
            logfilepath = os.path.join(basedir, 'apercal.log')
            lib.setup_logger('debug', logfile=logfilepath)
            logger = logging.getLogger(__name__)

            logger.info("Running selfcal and/or continuum and/or polarisation")
            start_time_selfcal_continuum_polarisation = time()
        else:
            logfilepath = os.path.join(basedir, 'apercal.log')
            lib.setup_logger('debug', logfile=logfilepath)
            logger = logging.getLogger(__name__)

            logger.info("Skipping selfcal and continuum and polarisation")

        with pymp.Parallel(10) as p:
            for beam_index in p.range(n_beams):
                beamnr = beamlist_target[beam_index]

                logfilepath = os.path.join(basedir,
                                           'apercal{:02d}.log'.format(beamnr))
                lib.setup_logger('debug', logfile=logfilepath)
                logger = logging.getLogger(__name__)

                try:
                    p4 = scal(file_=configfilename_list[beam_index])
                    p4.paramfilename = 'param_{:02d}.npy'.format(beamnr)
                    p4.basedir = basedir
                    p4.beam = "{:02d}".format(beamnr)
                    p4.target = name_target + '.mir'
                    if "scal" in steps and not dry_run:
                        p4.go()
                except Exception as e:
                    # Exception was already logged just before
                    logger.warning(
                        "Failed beam {}, skipping that from scal".format(
                            beamnr))
                    logger.exception(e)
                    status[beamnr] += ['scal']

                try:
                    p5 = continuum(file_=configfilename_list[beam_index])
                    p5.paramfilename = 'param_{:02d}.npy'.format(beamnr)
                    p5.basedir = basedir
                    p5.beam = "{:02d}".format(beamnr)
                    p5.target = name_target + '.mir'
                    if "continuum" in steps and not dry_run:
                        p5.go()
                except Exception as e:
                    # Exception was already logged just before
                    logger.warning(
                        "Failed beam {}, skipping that from continuum".format(
                            beamnr))
                    logger.exception(e)
                    status[beamnr] += ['continuum']

                try:
                    p6 = polarisation(file_=configfilename_list[beam_index])
                    p6.paramfilename = 'param_{:02d}.npy'.format(beamnr)
                    p6.basedir = basedir
                    p6.beam = "{:02d}".format(beamnr)
                    p6.polcal = name_to_mir(name_polcal)
                    p6.target = name_to_mir(name_target)
                    if "polarisation" in steps and not dry_run:
                        p6.go()
                except Exception as e:
                    # Exception was already logged just before
                    logger.warning(
                        "Failed beam {}, skipping that from polarisation".
                        format(beamnr))
                    logger.exception(e)
                    status[beamnr] += ['polarisation']

        # keep a record of the parallelised step in the main log file
        if 'scal' in steps or 'continuum' in steps or 'polarisation' in steps:
            logfilepath = os.path.join(basedir, 'apercal.log')
            lib.setup_logger('debug', logfile=logfilepath)
            logger = logging.getLogger(__name__)

            logger.info(
                "Running selfcal and/or continuum and/or polarisation ... Done ({0:.0f}s)"
                .format(time() - start_time_selfcal_continuum_polarisation))

        # ====
        # Line
        # ====

        # keep a record of the parallelised step in the main log file
        if 'line' in steps:
            logfilepath = os.path.join(basedir, 'apercal.log')
            lib.setup_logger('debug', logfile=logfilepath)
            logger.info("Running line")
            start_time_line = time()
        else:
            logfilepath = os.path.join(basedir, 'apercal.log')
            lib.setup_logger('debug', logfile=logfilepath)
            logger.info("Skipping line")

        for beamnr in beamlist_target:

            # Because of the amount of information coming from line
            # this module gets its own logfile
            logfilepath = os.path.join(basedir,
                                       'apercal{:02d}_line.log'.format(beamnr))
            lib.setup_logger('debug', logfile=logfilepath)
            try:
                p7 = line(file_=configfilename_list[
                    beamlist_target_for_config.index(beamnr)])
                if beamnr not in p7.line_beams:
                    logger.debug(
                        "Skipping line imaging for beam {}".format(beamnr))
                    continue
                p7.basedir = basedir
                p7.beam = "{:02d}".format(beamnr)
                p7.target = name_target + '.mir'
                if "line" in steps and not dry_run:
                    p7.go()
            except Exception as e:
                # Exception was already logged just before
                logger.warning(
                    "Failed beam {}, skipping that from line".format(beamnr))
                logger.exception(e)
                status[beamnr] += ['line']

        # with pymp.Parallel(5) as p:
        #     for beam_index in p.range(n_beams):
        #         beamnr = beamlist_target[beam_index]

        #         logfilepath = os.path.join(
        #             basedir, 'apercal{:02d}.log'.format(beamnr))
        #         lib.setup_logger('debug', logfile=logfilepath)
        #         logger = logging.getLogger(__name__)

        #         try:
        #             p7 = line(file_=configfilename)
        #             if beamnr not in p7.line_beams:
        #                 logger.debug(
        #                     "Skipping line imaging for beam {}".format(beamnr))
        #                 continue
        #             p7.basedir = basedir
        #             p7.beam = "{:02d}".format(beamnr)
        #             p7.target = name_target + '.mir'
        #             if "line" in steps and not dry_run:
        #                 p7.go()
        #         except Exception as e:
        #             # Exception was already logged just before
        #             logger.warning(
        #                 "Failed beam {}, skipping that from line".format(beamnr))
        #             logger.exception(e)
        #             status[beamnr] += ['line']

        # keep a record of the parallelised step in the main log file
        if 'line' in steps:
            logfilepath = os.path.join(basedir, 'apercal.log')
            lib.setup_logger('debug', logfile=logfilepath)
            logger = logging.getLogger(__name__)

            logger.info(
                "Running line ... Done ({0:.0f}s)".format(time() -
                                                          start_time_line))

        # ========
        # Transfer
        # ========

        # keep a record of the parallelised step in the main log file
        if 'transfer' in steps:
            logfilepath = os.path.join(basedir, 'apercal.log')
            lib.setup_logger('debug', logfile=logfilepath)
            logger.info("Running transfer")
            start_time_transfer = time()
        else:
            logfilepath = os.path.join(basedir, 'apercal.log')
            lib.setup_logger('debug', logfile=logfilepath)
            logger.info("Skipping transfer")

        # Use 5 threads to avoid hammering the disks too much during copying
        with pymp.Parallel(5) as p:
            for beam_index in p.range(n_beams):
                beamnr = beamlist_target[beam_index]

                logfilepath = os.path.join(basedir,
                                           'apercal{:02d}.log'.format(beamnr))
                lib.setup_logger('debug', logfile=logfilepath)
                logger = logging.getLogger(__name__)

                try:
                    p8 = transfer(file_=configfilename_list[beam_index])
                    p8.paramfilename = 'param_{:02d}.npy'.format(beamnr)
                    p8.basedir = basedir
                    p8.target = name_target + '.mir'
                    p8.beam = "{:02d}".format(beamnr)
                    if "transfer" in steps and not dry_run:
                        # director(
                        #     p8, 'rm', basedir + '/param_{:02d}.npy'.format(beamnr), ignore_nonexistent=True)
                        p8.go()
                except Exception as e:
                    logger.warning(
                        "Failed beam {}, skipping that from transfer".format(
                            beamnr))
                    logger.exception(e)
                    status[beamnr] += ['transfer']

        # keep a record of the parallelised step in the main log file
        if 'transfer' in steps:
            logfilepath = os.path.join(basedir, 'apercal.log')
            lib.setup_logger('debug', logfile=logfilepath)
            logger = logging.getLogger(__name__)

            logger.info("Running transfer ... Done ({0:.0f}s)".format(
                time() - start_time_transfer))

        # Polarisation
        # ============
        # keep a record of the parallelised step in the main log file
        # if 'polarisation' in steps:
        #     logfilepath = os.path.join(basedir, 'apercal.log')
        #     lib.setup_logger('debug', logfile=logfilepath)
        #     logger = logging.getLogger(__name__)

        #     logger.info("Running polarisation")
        #     start_time_polarisation = time()
        # else:
        #     logfilepath = os.path.join(basedir, 'apercal.log')
        #     lib.setup_logger('debug', logfile=logfilepath)
        #     logger = logging.getLogger(__name__)

        #     logger.info("Skipping polarisation")

        # with pymp.Parallel(5) as p:
        #     for beam_index in p.range(n_beams):
        #         beamnr = beamlist_target[beam_index]

        #         logfilepath = os.path.join(
        #             basedir, 'apercal{:02d}.log'.format(beamnr))
        #         lib.setup_logger('debug', logfile=logfilepath)
        #         logger = logging.getLogger(__name__)

        #         try:
        #             p7 = polarisation(file_=configfilename)
        #             p7.paramfilename = 'param_{:02d}.npy'.format(beamnr)
        #             p7.basedir = basedir
        #             p7.beam = "{:02d}".format(beamnr)
        #             p7.target = name_to_mir(name_target)
        #             if "polarisation" in steps and not dry_run:
        #                 p7.go()
        #         except Exception as e:
        #             # Exception was already logged just before
        #             logger.warning(
        #                 "Failed beam {}, skipping that from polarisation".format(beamnr))
        #             logger.exception(e)
        #             status[beamnr] += ['polarisation']

        # # keep a record of the parallelised step in the main log file
        # if 'polarisation' in steps:
        #     logfilepath = os.path.join(basedir, 'apercal.log')
        #     lib.setup_logger('debug', logfile=logfilepath)
        #     logger = logging.getLogger(__name__)

        #     logger.info("Running polarisation ... Done ({0:.0f}s)".format(
        #         time() - start_time_polarisation))

        # if "ccalqa" in steps and not dry_run:
        #     logger.info("Starting crosscal QA plots")
        #     try:
        #         make_all_ccal_plots(
        #             taskid_target, name_fluxcal.upper().strip().split('_')[0])
        #     except Exception as e:
        #         logger.warning("Failed crosscal QA plots")
        #         logger.exception(e)
        #     logger.info("Done with crosscal QA plots")

        status = status.copy()  # Convert pymp shared dict to a normal one
        msg = "Apercal finished after " + \
            str(timedelta(seconds=time() - time_start))
        logger.info(msg)
        return status, str(timedelta(seconds=time() - time_start)), None
    except Exception as e:
        msg = "Apercal threw an error after " + \
            str(timedelta(seconds=time() - time_start))
        logger.exception(msg)
        return status, str(timedelta(seconds=time() - time_start)), str(e)
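
A minimal sketch (not part of apercal) of how the per-beam status dictionary returned by the pipeline function above could be summarised; the helper name report_failed_steps is an assumption used only for illustration.

def report_failed_steps(status):
    """
    Print a per-beam summary from the status dict returned above
    (beam number -> list of names of failed steps; an empty list means a clean run).
    """
    for beamnr in sorted(status):
        failed = status[beamnr]
        if failed:
            print("Beam {:02d} failed in: {}".format(beamnr, ", ".join(failed)))
        else:
            print("Beam {:02d} completed all requested steps".format(beamnr))
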
Example No. 18
    def ms2miriad(self):
        """
        Converts the data from MS to MIRIAD format via UVFITS using drivecasa. Does it for the flux calibrator,
        polarisation calibrator, and target field independently.
        """
        subs_setinit.setinitdirs(self)

        ccalbeam = 'ccal_B' + str(self.beam).zfill(2)
        cbeam = 'convert_B' + str(self.beam).zfill(2)

        # Read the parameters from crosscal
        # and check before doing anything

        # Status of the calibrator calibration and of the solution transfer to the target beams
        ccal_targetbeams_transfer = get_param_def(
            self, ccalbeam + '_targetbeams_transfer', False)
        ccal_calibration_calibrator_finished = get_param_def(
            self, ccalbeam + '_calibration_calibrator_finished', False)

        if not ccal_calibration_calibrator_finished:
            error = "Beam {}: Will not convert files to miriad format because cross-calibration failed.".format(
                str(self.beam).zfill(2))
            logger.error(error)
            raise ApercalException(error)
        elif not ccal_targetbeams_transfer:
            error = "Beam {}: Will not convert files to miriad format because cross-calibration solutions were not successfully applied to target.".format(
                str(self.beam).zfill(2))
            logger.error(error)
            raise ApercalException(error)

        # Create the parameters for the parameter file for converting from MS to UVFITS format

        # Flux calibrator MS dataset available?
        convertfluxcalmsavailable = get_param_def(
            self, cbeam + '_fluxcal_MSavailable', False)

        # Polarised calibrator MS dataset available?
        convertpolcalmsavailable = get_param_def(self,
                                                 cbeam + '_polcal_MSavailable',
                                                 False)

        # Target beam MS dataset available?
        converttargetbeamsmsavailable = get_param_def(
            self, cbeam + '_targetbeams_MSavailable', False)

        # Flux calibrator MS dataset converted to UVFITS?
        convertfluxcalms2uvfits = get_param_def(self,
                                                cbeam + '_fluxcal_MS2UVFITS',
                                                False)

        # Polarised calibrator MS dataset converted to UVFITS?
        convertpolcalms2uvfits = get_param_def(self,
                                               cbeam + '_polcal_MS2UVFITS',
                                               False)

        # Target beam MS dataset converted to UVFITS?
        converttargetbeamsms2uvfits = get_param_def(
            self, cbeam + '_targetbeams_MS2UVFITS', False)

        # Flux calibrator UVFITS dataset available?
        convertfluxcaluvfitsavailable = get_param_def(
            self, cbeam + '_fluxcal_UVFITSavailable', False)

        # Polarised calibrator UVFITS dataset available?
        convertpolcaluvfitsavailable = get_param_def(
            self, cbeam + '_polcal_UVFITSavailable', False)

        # Target beam UVFITS dataset available?
        converttargetbeamsuvfitsavailable = get_param_def(
            self, cbeam + '_targetbeams_UVFITSavailable', False)

        # Flux calibrator UVFITS dataset converted to MIRIAD?
        convertfluxcaluvfits2miriad = get_param_def(
            self, cbeam + '_fluxcal_UVFITS2MIRIAD', False)

        # Polarised calibrator UVFITS dataset converted to MIRIAD?
        convertpolcaluvfits2miriad = get_param_def(
            self, cbeam + '_polcal_UVFITS2MIRIAD', False)

        # Target beam UVFITS dataset converted to MIRIAD?
        converttargetbeamsuvfits2miriad = get_param_def(
            self, cbeam + '_targetbeams_UVFITS2MIRIAD', False)

        # Check which datasets are available in MS format #
        if self.fluxcal != '':
            convertfluxcalmsavailable = path.isdir(self.get_fluxcal_path())
        else:
            logger.warning(
                'Beam ' + self.beam +
                ': Flux calibrator dataset not specified. Cannot convert flux calibrator!'
            )
        if self.polcal != '':
            convertpolcalmsavailable = path.isdir(self.get_polcal_path())
        else:
            logger.warning(
                'Beam ' + self.beam +
                ': Polarised calibrator dataset not specified. Cannot convert polarised calibrator!'
            )
        if self.target != '':
            converttargetbeamsmsavailable = path.isdir(self.get_target_path())
        else:
            logger.warning(
                'Beam ' + self.beam +
                ': Target beam dataset not specified. Cannot convert target beams!'
            )

        # Save the derived availability parameters to the parameter file

        subs_param.add_param(self, cbeam + '_fluxcal_MSavailable',
                             convertfluxcalmsavailable)
        subs_param.add_param(self, cbeam + '_polcal_MSavailable',
                             convertpolcalmsavailable)
        subs_param.add_param(self, cbeam + '_targetbeams_MSavailable',
                             converttargetbeamsmsavailable)

        # Convert the flux calibrator
        if self.convert_fluxcal:
            if self.fluxcal != '':
                if not convertfluxcaluvfits2miriad:
                    if convertfluxcalmsavailable:
                        logger.debug(
                            'Beam ' + self.beam +
                            ': Converting flux calibrator dataset from MS to UVFITS format.'
                        )
                        subs_managefiles.director(
                            self,
                            'mk',
                            self.get_crosscalsubdir_path(),
                            verbose=False)
                        fluxcal_ms = self.get_fluxcal_path()

                        # convert only if corrected data column exists
                        if subs_msutils.has_correcteddata(fluxcal_ms):
                            datacolumn = "corrected"

                            fluxcal_fits = mspath_to_fitspath(
                                self.get_crosscalsubdir_path(), fluxcal_ms)

                            fc_convert = exportuvfits_cmd.format(
                                vis=self.get_fluxcal_path(),
                                fits=fluxcal_fits,
                                datacolumn=datacolumn)

                            lib.run_casa([fc_convert], timeout=3600)
                            if path.isfile(fluxcal_fits):
                                convertfluxcalms2uvfits = True
                                logger.info(
                                    'Beam ' + self.beam +
                                    ': Converted flux calibrator dataset from MS to UVFITS format!'
                                )
                            else:
                                convertfluxcalms2uvfits = False
                                logger.warning(
                                    'Beam ' + self.beam +
                                    ': Could not convert flux calibrator dataset {} '
                                    'from MS to UVFITS format!'.format(
                                        fluxcal_fits))
                        else:
                            logger.warning(
                                'Beam ' + self.beam +
                                ': Flux calibrator does not have a corrected_data column! Not '
                                'converting flux calibrator dataset!')
                    else:
                        logger.warning(
                            'Beam ' + self.beam +
                            ': Flux calibrator dataset {} not available!'.
                            format(self.get_fluxcal_path()))
                else:
                    logger.info(
                        'Beam ' + self.beam +
                        ': Flux calibrator dataset was already converted from MS to UVFITS format'
                    )
            else:
                logger.warning(
                    'Beam ' + self.beam +
                    ': Flux calibrator dataset not specified. Cannot convert flux calibrator!'
                )
        else:
            logger.warning('Beam ' + self.beam +
                           ': Not converting flux calibrator dataset!')

        # Convert the polarised calibrator
        if self.convert_polcal:
            if self.polcal != '':
                if not convertpolcaluvfits2miriad:
                    if convertpolcalmsavailable:
                        logger.debug(
                            'Beam ' + self.beam +
                            ': Converting polarised calibrator dataset from MS to UVFITS format.'
                        )
                        subs_managefiles.director(
                            self,
                            'mk',
                            self.get_crosscalsubdir_path(),
                            verbose=False)
                        polcal_ms = self.get_polcal_path()

                        # convert only if corrected data column exists
                        if subs_msutils.has_correcteddata(polcal_ms):
                            datacolumn = "corrected"

                            polcal_fits = mspath_to_fitspath(
                                self.get_crosscalsubdir_path(), polcal_ms)

                            pc_convert = exportuvfits_cmd.format(
                                vis=polcal_ms,
                                fits=polcal_fits,
                                datacolumn=datacolumn)

                            lib.run_casa([pc_convert], timeout=3600)
                            if path.isfile(polcal_fits):
                                convertpolcalms2uvfits = True
                                logger.info(
                                    'Beam ' + self.beam +
                                    ': Converted polarised calibrator dataset from MS to UVFITS format!'
                                )
                            else:
                                convertpolcalms2uvfits = False
                                logger.warning(
                                    'Beam ' + self.beam +
                                    ': Could not convert polarised calibrator dataset from MS to UVFITS format!'
                                )
                        else:
                            logger.warning(
                                'Beam ' + self.beam +
                                ': Polarised calibrator does not have a corrected_data column! Not '
                                'converting polarised calibrator dataset!')

                    else:
                        logger.warning(
                            'Beam ' + self.beam +
                            ': Polarised calibrator dataset not available!')
                else:
                    logger.info(
                        'Beam ' + self.beam +
                        ': Polarised calibrator dataset was already converted from MS to UVFITS format'
                    )
            else:
                logger.warning(
                    'Beam ' + self.beam +
                    ': Polarised calibrator dataset not specified. Cannot convert polarised calibrator!'
                )
        else:
            logger.warning('Beam ' + self.beam +
                           ': Not converting polarised calibrator dataset!')

        # Convert the target beams
        if self.convert_target:
            if self.target != '':
                logger.info(
                    'Beam ' + self.beam +
                    ': Converting target beam dataset from MS to UVFITS format.'
                )
                if not converttargetbeamsuvfits2miriad:
                    if converttargetbeamsmsavailable:
                        subs_managefiles.director(
                            self,
                            'mk',
                            self.get_crosscalsubdir_path(),
                            verbose=False)

                        target_ms = self.get_target_path()
                        target_fits = mspath_to_fitspath(
                            self.get_crosscalsubdir_path(), target_ms)

                        # only convert if corrected data column exists
                        if subs_msutils.has_correcteddata(target_ms):
                            datacolumn = "corrected"

                            tg_convert = exportuvfits_cmd.format(
                                vis=target_ms,
                                fits=target_fits,
                                datacolumn=datacolumn)

                            lib.run_casa([tg_convert], timeout=10000)
                            if path.isfile(target_fits):
                                converttargetbeamsms2uvfits = True
                                logger.debug(
                                    'Beam ' + self.beam +
                                    ': Converted target beam dataset from MS to UVFITS format!'
                                )
                            else:
                                converttargetbeamsms2uvfits = False
                                logger.warning(
                                    'Beam ' + self.beam +
                                    ': Could not convert dataset for target beam from MS to UVFITS format!'
                                )
                        else:
                            logger.warning(
                                'Beam ' + self.beam +
                                ': Target beam dataset does not have a corrected_data column! Not '
                                'converting target beam dataset!')

                    else:
                        logger.warning('Beam ' + self.beam +
                                       ': Target beam dataset not available!')
                else:
                    logger.info('Beam ' + self.beam +
                                ': Target beam dataset was already '
                                'converted from MS to UVFITS format')
            else:
                logger.warning(
                    'Beam ' + self.beam +
                    ': Target beam dataset not specified. Cannot convert target beam dataset!'
                )
        else:
            logger.warning('Beam ' + self.beam +
                           ': Not converting target beam dataset!')

        # Save the derived parameters for the MS to UVFITS conversion to the parameter file

        subs_param.add_param(self, cbeam + '_fluxcal_MS2UVFITS',
                             convertfluxcalms2uvfits)
        subs_param.add_param(self, cbeam + '_polcal_MS2UVFITS',
                             convertpolcalms2uvfits)
        subs_param.add_param(self, cbeam + '_targetbeams_MS2UVFITS',
                             converttargetbeamsms2uvfits)

        # Check which datasets are available in UVFITS format #
        if self.fluxcal != '':
            crosscal_fluxcal = mspath_to_fitspath(
                self.get_crosscalsubdir_path(), self.fluxcal)
            convertfluxcaluvfitsavailable = path.isfile(crosscal_fluxcal)
        else:
            logger.warning(
                'Beam ' + self.beam +
                ': Flux calibrator dataset not specified. Cannot convert flux calibrator!'
            )
        if self.polcal != '':
            crosscal_polcal = mspath_to_fitspath(
                self.get_crosscalsubdir_path(), self.polcal)
            convertpolcaluvfitsavailable = path.isfile(crosscal_polcal)
        else:
            logger.warning(
                'Beam ' + self.beam +
                ': Polarised calibrator dataset not specified. Cannot convert polarised calibrator!'
            )
        if self.target != '':
            crosscal_target = mspath_to_fitspath(
                self.get_crosscalsubdir_path(), self.target)
            converttargetbeamsuvfitsavailable = path.isfile(crosscal_target)
        else:
            logger.warning(
                'Beam ' + self.beam +
                ': Target beam dataset not specified. Cannot convert target beam!'
            )

        # Save the derived availability parameters to the parameter file

        subs_param.add_param(self, cbeam + '_fluxcal_UVFITSavailable',
                             convertfluxcaluvfitsavailable)
        subs_param.add_param(self, cbeam + '_polcal_UVFITSavailable',
                             convertpolcaluvfitsavailable)
        subs_param.add_param(self, cbeam + '_targetbeams_UVFITSavailable',
                             converttargetbeamsuvfitsavailable)

        # Convert the available UVFITS-datasets to MIRIAD format #

        # Convert the flux calibrator
        if self.convert_fluxcal:
            if self.fluxcal != '':
                if not convertfluxcaluvfits2miriad:
                    if convertfluxcaluvfitsavailable:
                        logger.debug(
                            'Beam ' + self.beam +
                            ': Converting flux calibrator dataset from UVFITS to MIRIAD format.'
                        )
                        subs_managefiles.director(
                            self,
                            'ch',
                            self.get_crosscalsubdir_path(),
                            verbose=False)
                        fits = lib.miriad('fits')
                        fits.op = 'uvin'
                        fits.in_ = mspath_to_fitspath(
                            self.get_crosscalsubdir_path(), self.fluxcal)
                        fits.out = mspath_to_fitspath(
                            self.get_crosscalsubdir_path(),
                            self.fluxcal,
                            ext='mir')
                        fits.go()
                        if path.isdir(fits.out):
                            convertfluxcaluvfits2miriad = True
                            logger.info(
                                'Beam ' + self.beam +
                                ': Converted flux calibrator dataset from UVFITS to MIRIAD format!'
                            )
                        else:
                            convertfluxcaluvfits2miriad = False
                            logger.warning(
                                'Beam ' + self.beam +
                                ': Could not convert flux calibrator dataset {} from UVFITS to '
                                'MIRIAD format!'.format(fits.out))
                    else:
                        logger.warning(
                            'Beam ' + self.beam +
                            ': Flux calibrator dataset not available!')
                else:
                    logger.info(
                        'Beam ' + self.beam +
                        ': Flux calibrator dataset was already converted from UVFITS to MIRIAD format'
                    )
            else:
                logger.warning(
                    'Beam ' + self.beam +
                    ': Flux calibrator dataset not specified. Cannot convert flux calibrator!'
                )
        else:
            logger.warning('Beam ' + self.beam +
                           ': Not converting flux calibrator dataset!')
        # Convert the polarised calibrator
        if self.convert_polcal:
            if self.polcal != '':
                if not convertpolcaluvfits2miriad:
                    if convertpolcaluvfitsavailable:
                        logger.debug(
                            'Beam ' + self.beam +
                            ': Converting polarised calibrator dataset from UVFITS to MIRIAD format.'
                        )
                        subs_managefiles.director(
                            self,
                            'ch',
                            self.get_crosscalsubdir_path(),
                            verbose=False)
                        fits = lib.miriad('fits')
                        fits.op = 'uvin'
                        fits.in_ = mspath_to_fitspath(
                            self.get_crosscalsubdir_path(), self.polcal)
                        fits.out = mspath_to_fitspath(
                            self.get_crosscalsubdir_path(),
                            self.polcal,
                            ext='mir')
                        fits.go()
                        if path.isdir(fits.out):
                            convertpolcaluvfits2miriad = True
                            logger.info(
                                'Beam ' + self.beam +
                                ': Converted polarised calibrator dataset from UVFITS to MIRIAD format!'
                            )
                        else:
                            convertpolcaluvfits2miriad = False
                            logger.warning(
                                'Beam ' + self.beam +
                                ': Could not convert polarised calibrator dataset from UVFITS to MIRIAD format!'
                            )
                    else:
                        logger.warning(
                            'Beam ' + self.beam +
                            ': Polarised calibrator dataset not available!')
                else:
                    logger.info(
                        'Beam ' + self.beam +
                        ': Polarised calibrator dataset was already converted from UVFITS to MIRIAD format'
                    )
            else:
                logger.warning(
                    'Beam ' + self.beam +
                    ': Polarised calibrator dataset not specified. Cannot convert polarised calibrator!'
                )
        else:
            logger.warning('Beam ' + self.beam +
                           ': Not converting polarised calibrator dataset!')
        # Convert the target beams
        if self.convert_target:
            if self.target != '':
                logger.info(
                    'Beam ' + self.beam +
                    ': Converting target beam dataset from UVFITS to MIRIAD format.'
                )
                if not converttargetbeamsuvfits2miriad:
                    if converttargetbeamsuvfitsavailable:
                        subs_managefiles.director(
                            self,
                            'ch',
                            self.get_crosscalsubdir_path(),
                            verbose=False)
                        fits = lib.miriad('fits')
                        fits.op = 'uvin'
                        fits.in_ = mspath_to_fitspath(
                            self.get_crosscalsubdir_path(), self.target)
                        fits.out = mspath_to_fitspath(
                            self.get_crosscalsubdir_path(),
                            self.target,
                            ext='mir')
                        fits.go()
                        if path.isdir(fits.out):
                            converttargetbeamsuvfits2miriad = True
                            logger.debug(
                                'Beam ' + self.beam +
                                ': Converted target beam dataset from '
                                'UVFITS to MIRIAD format!')
                        else:
                            converttargetbeamsuvfits2miriad = False
                            logger.warning(
                                'Beam ' + self.beam +
                                ': Could not convert target beam dataset '
                                '{} from UVFITS to MIRIAD format!'.format(
                                    fits.out))
                    else:
                        logger.warning('Beam ' + self.beam +
                                       ': Target beam dataset not available!')
                else:
                    logger.info('Beam ' + self.beam +
                                ': Target beam dataset was already converted '
                                'from UVFITS to MIRIAD format')
            else:
                logger.warning(
                    'Beam ' + self.beam +
                    ': Target beam dataset not specified. Cannot convert target beam datasets!'
                )
        else:
            logger.warning('Beam ' + self.beam +
                           ': Not converting target beam dataset!')

        # Save the derived parameters for the UVFITS to MIRIAD conversion to the parameter file

        subs_param.add_param(self, cbeam + '_fluxcal_UVFITS2MIRIAD',
                             convertfluxcaluvfits2miriad)
        subs_param.add_param(self, cbeam + '_polcal_UVFITS2MIRIAD',
                             convertpolcaluvfits2miriad)
        subs_param.add_param(self, cbeam + '_targetbeams_UVFITS2MIRIAD',
                             converttargetbeamsuvfits2miriad)

        if self.convert_averagems and self.subdirification:
            logger.info('Beam ' + self.beam +
                        ': Averaging down target measurement set')
            average_cmd = 'mstransform(vis="{vis}", outputvis="{outputvis}", chanaverage=True, chanbin=64)'
            vis = self.get_target_path()
            outputvis = vis.replace(".MS", "_avg.MS")
            lib.run_casa([average_cmd.format(vis=vis, outputvis=outputvis)],
                         timeout=10000)

        # Remove measurement sets if wanted
        if self.convert_removems and self.subdirification:
            logger.info('Beam ' + self.beam + ': Removing measurement sets')
            vis = self.get_target_path()
            if path.exists(vis):
                subs_managefiles.director(self, 'rm', vis)

        # Remove the UVFITS files if wanted
        if self.convert_removeuvfits and self.subdirification:
            logger.info('Beam ' + self.beam + ': Removing all UVFITS files')
            if self.fluxcal != '' and path.exists(
                    mspath_to_fitspath(
                        self.get_crosscalsubdir_path(),
                        self.fluxcal)) and convertfluxcalms2uvfits:
                subs_managefiles.director(
                    self, 'rm',
                    mspath_to_fitspath(self.get_crosscalsubdir_path(),
                                       self.fluxcal))
                logger.info('Beam ' + self.beam +
                            ': Removed fluxcal UVFITS files')
            else:
                logger.warning(
                    'Beam ' + self.beam +
                    ': No fluxcal UVFITS file available for removing')
            if self.polcal != '' and path.exists(
                    mspath_to_fitspath(
                        self.get_crosscalsubdir_path(),
                        self.polcal)) and convertpolcalms2uvfits:
                subs_managefiles.director(
                    self, 'rm',
                    mspath_to_fitspath(self.get_crosscalsubdir_path(),
                                       self.polcal))
                logger.info('Beam ' + self.beam +
                            ': Removed polcal UVFITS files')
            else:
                logger.warning(
                    'Beam ' + self.beam +
                    ': No polcal UVFITS file available for removing')
            if self.target != '' and path.exists(
                    mspath_to_fitspath(
                        self.get_crosscalsubdir_path(),
                        self.target)) and converttargetbeamsms2uvfits:
                subs_managefiles.director(
                    self, 'rm',
                    mspath_to_fitspath(self.get_crosscalsubdir_path(),
                                       self.target))
                logger.info('Beam ' + self.beam +
                            ': Removed target UVFITS files')
            else:
                logger.warning(
                    'Beam ' + self.beam +
                    ': No target UVFITS file available for removing')
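
ms2miriad above relies on a module-level exportuvfits_cmd template that is not shown in this example. The sketch below is an illustrative reconstruction of such a template and of how it expands into a CASA call; the exact string used by apercal may differ, and the paths are hypothetical.

# Illustrative reconstruction only; apercal defines exportuvfits_cmd elsewhere.
exportuvfits_cmd = 'exportuvfits(vis="{vis}", fitsfile="{fits}", datacolumn="{datacolumn}")'

# Expanding the template for a hypothetical flux calibrator measurement set:
example_cmd = exportuvfits_cmd.format(
    vis='/data/00/raw/3C147.MS',             # hypothetical MS path
    fits='/data/00/crosscal/3C147.UVFITS',   # hypothetical UVFITS output path
    datacolumn='corrected')
# The resulting string would be passed to lib.run_casa([example_cmd], timeout=3600),
# as done for the flux calibrator, polarised calibrator and target above.
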
Example No. 19
    def splitdata(self):
        """
        Applies calibrator corrections to data, splits the data into chunks in frequency and bins it to the given
        frequency resolution for the self-calibration
        """
        if self.selfcal_splitdata:
            subs_setinit.setinitdirs(self)
            subs_setinit.setdatasetnamestomiriad(self)
            subs_managefiles.director(self, 'ch', self.selfcaldir)
            logger.info(' Splitting of target data into individual frequency chunks started')
            if os.path.exists(self.selfcaldir + '/' + self.target):
                logger.info('Calibrator corrections already seem to have been applied #')
            else:
                logger.info('Applying calibrator solutions to target data before averaging #')
                uvaver = lib.miriad('uvaver')
                uvaver.vis = self.crosscaldir + '/' + self.target
                uvaver.out = self.selfcaldir + '/' + self.target
                uvaver.go()
                logger.info('Calibrator solutions to target data applied #')
            if self.selfcal_flagantenna != '':
                uvflag = lib.miriad('uvflag')
                uvflag.vis = self.selfcaldir + '/' + self.target
                uvflag.flagval = 'flag'
                uvflag.select = 'antenna(' + str(self.selfcal_flagantenna) + ')'
                uvflag.go()
            else:
                pass
            try:
                uv = aipy.miriad.UV(self.selfcaldir + '/' + self.target)
            except RuntimeError:
                raise ApercalException(' No data in your selfcal directory!')

            try:
                nsubband = len(uv['nschan'])  # Number of subbands in data
            except TypeError:
                nsubband = 1  # Only one subband in data since exception was triggered
            logger.info('Found ' + str(nsubband) + ' subband(s) in target data #')
            counter = 0  # Counter for naming the chunks and directories
            for subband in range(nsubband):
                logger.info('Started splitting of subband ' + str(subband) + ' #')
                if nsubband == 1:
                    numchan = uv['nschan']
                    finc = np.fabs(uv['sdf'])
                else:
                    numchan = uv['nschan'][subband]  # Number of channels per subband
                    finc = np.fabs(uv['sdf'][subband])  # Frequency increment for each channel
                subband_bw = numchan * finc  # Bandwidth of one subband
                subband_chunks = round(subband_bw / self.selfcal_splitdata_chunkbandwidth)
                # Round up to the next power of 2 for frequency chunks with the same bandwidth over the frequency
                # range of a subband
                subband_chunks = int(np.power(2, np.ceil(np.log(subband_chunks) / np.log(2))))
                if subband_chunks == 0:
                    subband_chunks = 1
                chunkbandwidth = (numchan / subband_chunks) * finc
                logger.info('Adjusting chunk size to ' + str(
                    chunkbandwidth) + ' GHz for regular gridding of the data chunks over frequency #')
                for chunk in range(subband_chunks):
                    logger.info(
                        'Starting splitting of data chunk ' + str(chunk) + ' for subband ' + str(subband) + ' #')
                    binchan = round(
                        self.selfcal_splitdata_channelbandwidth / finc)  # Number of channels per frequency bin
                    chan_per_chunk = numchan / subband_chunks
                    if chan_per_chunk % binchan == 0:  # Check if the frequency bin exactly fits
                        logger.info('Using frequency binning of ' + str(
                            self.selfcal_splitdata_channelbandwidth) + ' for all subbands #')
                    else:
                        # Increase the frequency bin to keep a regular grid for the chunks
                        while chan_per_chunk % binchan != 0:
                            binchan = binchan + 1
                        # Make sure the calculated bin is not larger than the number of channels in the chunk
                        if binchan > chan_per_chunk:
                            # Set the frequency bin to the number of channels in the chunk of the subband
                            binchan = chan_per_chunk
                        logger.info('Increasing frequency bin of data chunk ' + str(
                            chunk) + ' to keep bandwidth of chunks equal over the whole bandwidth #')
                        logger.info('New frequency bin is ' + str(binchan * finc) + ' GHz #')
                    nchan = int(chan_per_chunk / binchan)  # Total number of output channels per chunk
                    start = 1 + chunk * chan_per_chunk
                    width = int(binchan)
                    step = int(width)
                    subs_managefiles.director(self, 'mk', self.selfcaldir + '/' + str(counter).zfill(2))
                    uvaver = lib.miriad('uvaver')
                    uvaver.vis = self.selfcaldir + '/' + self.target
                    uvaver.out = self.selfcaldir + '/' + str(counter).zfill(2) + '/' + str(counter).zfill(2) + '.mir'
                    uvaver.select = "'" + 'window(' + str(subband + 1) + ')' + "'"
                    uvaver.line = "'" + 'channel,' + str(nchan) + ',' + str(start) + ',' + str(width) + ',' + str(
                        step) + "'"
                    uvaver.go()
                    counter = counter + 1
                    logger.info('Splitting of data chunk ' + str(chunk) + ' for subband ' + str(subband) + ' done #')
                logger.info('Splitting of data for subband ' + str(subband) + ' done #')
            logger.info(' Splitting of target data into individual frequency chunks done')
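
A worked numeric sketch of the chunking arithmetic used in splitdata above; the channel count and bandwidth settings are illustrative assumptions, not Apertif defaults.

import numpy as np

numchan = 128                  # assumed number of channels in one subband
finc = 12.2e-6                 # assumed channel width in GHz
chunkbandwidth_cfg = 0.0008    # assumed selfcal_splitdata_chunkbandwidth in GHz
channelbandwidth_cfg = 0.0002  # assumed selfcal_splitdata_channelbandwidth in GHz

subband_bw = numchan * finc                              # ~0.00156 GHz
subband_chunks = round(subband_bw / chunkbandwidth_cfg)  # 2
# Round up to the next power of 2, exactly as splitdata does
subband_chunks = int(np.power(2, np.ceil(np.log(subband_chunks) / np.log(2))))  # 2
chan_per_chunk = numchan / subband_chunks                # 64 channels per chunk
binchan = round(channelbandwidth_cfg / finc)             # 16 channels per output bin
nchan = int(chan_per_chunk / binchan)                    # 4 output channels per chunk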