Example #1
def validateinput():
    """
    Parse the input config file (command line argument) and validate that the
    parameters look okay
    """

    logger.info('This is version {0} of the pipeline'.format(processMeerKAT.__version__))

    # Get the name of the config file
    args = config_parser.parse_args()

    # Parse config file
    taskvals, config = config_parser.parse_config(args['config'])

    visname = va(taskvals, 'data', 'vis', str)
    calcrefant = va(taskvals, 'crosscal', 'calcrefant', bool)
    refant = va(taskvals, 'crosscal', 'refant', str)
    fields = bookkeeping.get_field_ids(taskvals['fields'])

    # Check if the reference antenna exists, and complain and quit if it doesn't
    if not calcrefant:
        refant = va(taskvals, 'crosscal', 'refant', str)
        msmd.open(visname)
        get_fields.check_refant(MS=visname, refant=refant, warn=False)
        msmd.close()
        msmd.done()

    if not os.path.exists(visname):
        raise IOError("Path to MS %s not found" % (visname))
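
Most of the MeerKAT examples on this page call a helper named `va`; Example #12 below suggests it is an alias for `config_parser.validate_args`. A minimal sketch of such a validator, with the signature and behaviour inferred from the call sites rather than taken from the pipeline itself:

def va(taskvals, section, key, dtype=str, default=None):
    # Hypothetical stand-in for config_parser.validate_args, inferred from
    # how it is called in these examples: fetch taskvals[section][key],
    # check its type, and fall back to a default when one is supplied.
    if section not in taskvals or key not in taskvals[section]:
        if default is None:
            raise KeyError("Key '{0}' missing from section [{1}]".format(key, section))
        return default
    value = taskvals[section][key]
    if not isinstance(value, dtype):
        raise TypeError("Expected {0} for '{1}' in [{2}], got {3}".format(
            dtype.__name__, key, section, type(value).__name__))
    return value
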
Example #2
    def construct_from_config(self, config_filename):
        """
        Read and parse the config file,
        load the models and datasets
        """
        config = parse_config(config_filename)
        self.API_list = []
        for API_name in config['API_list']:
            API = import_module(API_name)
            self.API_list.append(API)

        self.names = config['names']

        self.class_name_list = config['class_name_list']
        self.num_classes = len(self.class_name_list)
        self.class_name_lists = config['class_name_lists']
        self.local_class_name_lists = config['local_class_name_lists']
        self.class_ratio_list = config['class_ratio_list']

        self.load(config['model_path_list'], config['data_path_list'])
        self.full_label_space_mapping()
        self.data_size = []
        for name in self.names:
            self.data_size.append(self.data_pool[name]['y_train'].shape[0])
        self.full_data_size = sum(self.data_size)
        self.model_weight = np.array(self.data_size) / self.full_data_size
        print(self.data_size)
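
Example #2 expects the parsed config to expose several parallel lists. A hypothetical config illustrating the keys it reads; all names and values here are invented for illustration:

# Hypothetical config contents matching the keys read in Example #2.
config = {
    'API_list': ['torchvision.models', 'sklearn.ensemble'],  # importable module names
    'names': ['site_a', 'site_b'],
    'class_name_list': ['cat', 'dog', 'bird'],
    'class_name_lists': [['cat', 'dog'], ['dog', 'bird']],
    'local_class_name_lists': [['cat', 'dog'], ['dog', 'bird']],
    'class_ratio_list': [0.5, 0.5],
    'model_path_list': ['models/site_a.pt', 'models/site_b.pt'],
    'data_path_list': ['data/site_a.npz', 'data/site_b.npz'],
}
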
Example #3
def main():

    args = processMeerKAT.parse_args()
    processMeerKAT.setup_logger(args.config,args.verbose)
    msmd.open(args.MS)

    dopol = args.dopol
    refant = config_parser.parse_config(args.config)[0]['crosscal']['refant']
    fields = get_fields(args.MS)
    logger.info('[fields] section written to "{0}". Edit this section if you need to change field IDs (comma-separated string for multiple IDs, not supported for calibrators).'.format(args.config))

    npol = msmd.ncorrforpol()[0]
    parang = 0
    if 'phasecalfield' in fields:
        parang = parang_coverage(args.MS, int(fields['phasecalfield'][1:-1])) #remove '' from field

    if npol < 4:
        logger.warn("Only {0} polarisations present in '{1}'. Any attempted polarisation calibration will fail, so setting dopol=False in [run] section of '{2}'.".format(npol,args.MS,args.config))
        dopol = False
    elif 0 < parang < 30:
        logger.warn("Parallactic angle coverage is < 30 deg. Polarisation calibration will most likely fail, so setting dopol=False in [run] section of '{0}'.".format(args.config))
        dopol = False

    check_refant(args.MS, refant, args.config, warn=True)
    threads = check_scans(args.MS,args.nodes,args.ntasks_per_node,dopol)
    SPW = check_spw(args.config)

    config_parser.overwrite_config(args.config, conf_dict={'dopol' : dopol}, conf_sec='run', sec_comment='# Internal variables for pipeline execution')
    config_parser.overwrite_config(args.config, conf_dict=threads, conf_sec='slurm')
    config_parser.overwrite_config(args.config, conf_dict=fields, conf_sec='fields')
    config_parser.overwrite_config(args.config, conf_dict={'spw' : "'{0}'".format(SPW)}, conf_sec='crosscal')

    msmd.done()
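
Across these examples `config_parser.parse_config` returns a 2-tuple: Examples #1 and #9 unpack it as `taskvals, config`, while Example #3 indexes `[0]` to get the dict of sections. A minimal sketch of a parser with that shape, built on the standard library; the use of `ast.literal_eval` is an assumption, not the pipeline's actual implementation:

import ast
from configparser import ConfigParser

def parse_config(filename):
    # Sketch: return (dict of sections, raw parser object), matching the
    # 'taskvals, config = parse_config(...)' unpacking seen above.
    parser = ConfigParser()
    parser.read(filename)
    taskvals = {}
    for section in parser.sections():
        taskvals[section] = {}
        for key, raw in parser.items(section):
            try:
                # literal_eval turns "True", "3", "[1, 2]" into Python objects
                taskvals[section][key] = ast.literal_eval(raw)
            except (ValueError, SyntaxError):
                taskvals[section][key] = raw  # keep plain strings as-is
    return taskvals, parser
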
Example #4
def get_config_kwargs(config,section,expected_keys):

    """Return kwargs from config section. Check section exists, and that all expected keys are present, otherwise raise KeyError.
    Arguments:
    ----------
    config : str
        Path to config file.
    section : str
        Config section from which to extract kwargs.
    expected_keys : list
        List of expected keys.
    Returns:
    --------
    kwargs : dict
        Keyword arguments from this config section."""

    config_dict = config_parser.parse_config(config)[0]

    #Ensure section exists, otherwise raise KeyError
    if section not in config_dict.keys():
        raise KeyError("Config file '{0}' has no section [{1}]. Please insert section or build new config with [-B --build].".format(config,section))

    kwargs = config_dict[section]

    #Check for any unknown keys and display warning
    unknown_keys = list(set(kwargs) - set(expected_keys))
    if len(unknown_keys) > 0:
        logger.warn("Unknown keys {0} present in section [{1}] in '{2}'.".format(unknown_keys,section,config))

    #Check that expected keys are present, otherwise raise KeyError
    missing_keys = list(set(expected_keys) - set(kwargs))
    if len(missing_keys) > 0:
        raise KeyError("Keys {0} missing from section [{1}] in '{2}'.".format(missing_keys,section,config))

    return kwargs
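
A short usage sketch for `get_config_kwargs`; the config path, section name and key names are illustrative:

# Hypothetical call: pull the [image] section and insist on these keys.
kwargs = get_config_kwargs('myconfig.txt', 'image',
                           expected_keys=['cell', 'imsize', 'niter'])
# The returned dict can then be passed straight through, e.g. tclean(**kwargs)
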
Example #5
def main():
    log.basicConfig(format='[ %(levelname)s ] %(message)s',
                    level=log.INFO,
                    stream=sys.stdout)
    args = build_parser()
    log.info('Parsing configuration file')
    machine_list = config_parser.parse_config(args.config)

    client_list = []
    log.info('Clients start executing')
    for machine in machine_list:
        client_list.append(
            client_execution(machine, args.server_ip, args.server_login,
                             args.server_psw, args.ftp_dir, log))

    log.info('Executor script is waiting for all experiments')
    for client in client_list:
        client.wait_all()

    ftp_connection = ftplib.FTP(args.server_ip, args.server_login,
                                args.server_psw)
    ftp_connection.cwd(args.ftp_dir)
    table_format.join_tables(ftp_connection, "benchmark",
                             args.benchmark_result_table)
    table_format.join_tables(ftp_connection, "accuracy_checker",
                             args.accuracy_checker_result_table)
    ftp_connection.close()
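
`build_parser` is not shown in Example #5; a minimal argparse sketch covering the attributes `main` reads, with argument names inferred from usage and defaults invented:

import argparse

def build_parser():
    # Hypothetical parser: one argument per attribute used in main() above.
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config', required=True)
    parser.add_argument('-s', '--server_ip', required=True)
    parser.add_argument('-l', '--server_login', required=True)
    parser.add_argument('-p', '--server_psw', required=True)
    parser.add_argument('-f', '--ftp_dir', required=True)
    parser.add_argument('--benchmark_result_table', default='benchmark.csv')
    parser.add_argument('--accuracy_checker_result_table', default='accuracy.csv')
    return parser.parse_args()
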
Example #6
def main():

    # Get the name of the config file
    args = config_parser.parse_args()

    # Parse config file
    taskvals, config = config_parser.parse_config(args['config'])

    visname = va(taskvals, 'data', 'vis', str)

    fields = bookkeeping.get_field_ids(taskvals['fields'])
    calcrefant = va(taskvals, 'crosscal', 'calcrefant', bool, default=False)

    # Calculate reference antenna
    if calcrefant:
        if len(fields.fluxfield.split(',')) > 1:
            field = fields.fluxfield.split(',')[0]
        else:
            field = fields.fluxfield

        refant, badants = get_ref_ant(visname, field)
        # Overwrite config file with new refant
        config_parser.overwrite_config(args['config'],
                                       conf_sec='crosscal',
                                       conf_dict={'refant': refant})
        config_parser.overwrite_config(args['config'],
                                       conf_sec='crosscal',
                                       conf_dict={'badants': badants})
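
Since `overwrite_config` accepts a whole dict in `conf_dict` (Example #3 passes `threads` and `fields` that way), the two calls above could presumably be merged into one; a sketch:

# Equivalent single call, assuming conf_dict accepts multiple keys at once.
config_parser.overwrite_config(args['config'],
                               conf_sec='crosscal',
                               conf_dict={'refant': refant,
                                          'badants': badants})
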
Example #7
    def test_parse_config(self):
        file_config = "TEST = 'run'"
        expected_config = {'TEST': 'run'}

        with provide_file_at_root('parse.py', file_config):
            out_config = parse_config('parse')

        self.assertEqual(out_config, expected_config)
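
The test above leans on a `provide_file_at_root` helper. A minimal sketch of such a context manager, assuming it writes the file into the working directory and always cleans up afterwards; the real helper may differ:

import os
from contextlib import contextmanager

@contextmanager
def provide_file_at_root(filename, contents):
    # Write the file, hand control to the test body, then remove the file
    # even if the test raises.
    with open(filename, 'w') as f:
        f.write(contents)
    try:
        yield
    finally:
        os.remove(filename)
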
Example #8
def start_analysis(project_pk):
    config_params = config_parser.parse_config()
    project, result_bucket_name, sample_mapping = setup(
        project_pk, config_params)
    print('done with setup')
    compute = googleapiclient.discovery.build('compute', 'v1')
    launch_workers(compute, project, result_bucket_name, sample_mapping,
                   config_params)
Example #9
def main(args, taskvals):

    visname = va(taskvals, 'data', 'vis', str)
    calcrefant = va(taskvals, 'crosscal', 'calcrefant', bool, default=False)
    refant = va(taskvals, 'crosscal', 'refant', str, default='m005')
    spw = va(taskvals, 'crosscal', 'spw', str, default='')
    nspw = va(taskvals, 'crosscal', 'nspw', int, default='')
    tasks = va(taskvals, 'slurm', 'ntasks_per_node', int)
    preavg = va(taskvals, 'crosscal', 'chanbin', int, default=1)
    include_crosshand = va(taskvals, 'run', 'dopol', bool, default=False)
    createmms = va(taskvals, 'crosscal', 'createmms', bool, default=True)

    # HPC Specific Configuration
    known_hpc_path = os.path.dirname(SCRIPT_DIR) + "/known_hpc.cfg"
    KNOWN_HPCS, HPC_CONFIG = config_parser.parse_config(known_hpc_path)
    HPC_NAME = taskvals["run"]["hpc"]
    HPC_NAME = HPC_NAME if HPC_NAME in KNOWN_HPCS.keys() else "unknown"
    CPUS_PER_NODE_LIMIT = va(KNOWN_HPCS,
                             HPC_NAME,
                             "CPUS_PER_NODE_LIMIT".lower(),
                             dtype=int)

    if nspw > 1:
        casalog.setlogfile(
            'logs/{SLURM_JOB_NAME}-{SLURM_ARRAY_JOB_ID}_{SLURM_ARRAY_TASK_ID}.casa'
            .format(**os.environ))
    else:
        logfile = casalog.logfile()
        casalog.setlogfile(
            'logs/{SLURM_JOB_NAME}-{SLURM_JOB_ID}.casa'.format(**os.environ))

    if ',' in spw:
        low, high, unit, dirs = config_parser.parse_spw(args['config'])
        spwname = '{0:.0f}~{1:.0f}MHz'.format(min(low), max(high))
    else:
        spwname = spw.replace('0:', '')

    msmd.open(visname)
    npol = msmd.ncorrforpol()[0]

    if not include_crosshand and npol == 4:
        npol = 2
    CPUs = npol if tasks * npol <= CPUS_PER_NODE_LIMIT else 1  # hard-coded to the number of polarisations

    mvis = do_partition(visname, spw, preavg, CPUs, include_crosshand,
                        createmms, spwname)
    mvis = "'{0}'".format(mvis)
    vis = "'{0}'".format(visname)

    config_parser.overwrite_config(args['config'],
                                   conf_sec='data',
                                   conf_dict={'vis': mvis})
    config_parser.overwrite_config(
        args['config'],
        conf_sec='run',
        sec_comment='# Internal variables for pipeline execution',
        conf_dict={'orig_vis': vis})
    msmd.done()
Example #10
def main():
    args = sys.argv
    if len(args) < 2:
        config_path = False
    else:
        config_path = args[1]

    instances, configs = parse_config(config_path)
    for i in instances:
        print(i.instance_name)
Example #11
def get_imaging_params():

    # Get the name of the config file
    args = config_parser.parse_args()

    # Parse config file
    taskvals, config = config_parser.parse_config(args['config'])
    params = taskvals['image']
    params['vis'] = taskvals['data']['vis']

    return args, params
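
Typical downstream use, assuming the returned `params` map directly onto an imaging task's keyword arguments (the `tclean` call is illustrative):

args, params = get_imaging_params()
print(params['vis'])   # the measurement set lifted from the [data] section
# e.g. tclean(**params) in the CASA-based examples on this page
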
Example #12
def run_script(func, logfile=''):

    # Get the name of the config file
    args = config_parser.parse_args()

    # Parse config file
    taskvals, config = config_parser.parse_config(args['config'])

    continue_run = config_parser.validate_args(taskvals,
                                               'run',
                                               'continue',
                                               bool,
                                               default=True)
    spw = config_parser.validate_args(taskvals, 'crosscal', 'spw', str)
    nspw = config_parser.validate_args(taskvals, 'crosscal', 'nspw', int)

    if continue_run:
        try:
            func(args, taskvals)
            #rename_logs(logfile)
        except Exception as err:
            logger.error(
                'Exception found in the pipeline of type {0}: {1}'.format(
                    type(err), err))
            logger.error(traceback.format_exc())
            config_parser.overwrite_config(
                args['config'],
                conf_dict={'continue': False},
                conf_sec='run',
                sec_comment='# Internal variables for pipeline execution')
            if nspw > 1:
                for SPW in spw.split(','):
                    spw_config = '{0}/{1}'.format(SPW.replace('0:', ''),
                                                  args['config'])
                    config_parser.overwrite_config(
                        spw_config,
                        conf_dict={'continue': False},
                        conf_sec='run',
                        sec_comment=
                        '# Internal variables for pipeline execution')
            rename_logs(logfile)
            sys.exit(1)
    else:
        logger.error(
            'Exception found in previous pipeline job, which set "continue=False" in [run] section of "{0}". Skipping "{1}".'
            .format(args['config'],
                    os.path.split(sys.argv[2])[1]))
        #os.system('./killJobs.sh') # and cancelling remaining jobs (scancel not found since /opt overwritten)
        rename_logs(logfile)
        sys.exit(1)
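
`run_script` wraps a script's own `main(args, taskvals)` (the shape used in Example #9): any exception flips `continue=False` in the [run] section so later jobs skip themselves. A usage sketch with an illustrative logfile path:

if __name__ == '__main__':
    run_script(main, logfile='logs/partition.log')  # hypothetical logfile name
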
Example #13
def main():

    args = processMeerKAT.parse_args()
    msmd.open(args.MS)

    refant = config_parser.parse_config(args.config)[0]['crosscal']['refant']
    check_refant(args.MS, refant, warn=True)

    threads = check_scans(args.MS,args.nodes,args.ntasks_per_node)
    config_parser.overwrite_config(args.config, conf_dict=threads, conf_sec='slurm')

    fields = get_fields(args.MS)
    config_parser.overwrite_config(args.config, conf_dict=fields, conf_sec='fields')
    logger.info('[fields] section written to "{0}". Edit this section to change field IDs (comma-separated string for multiple IDs).'.format(args.config))

    msmd.close()
    msmd.done()
Example #14
def main():
    log.basicConfig(format='[ %(levelname)s ] %(message)s',
                    level=log.INFO,
                    stream=sys.stdout)
    parser = build_parser()
    log.info('Parsing config file')
    machine_list = config_parser.parse_config(parser.config)
    proc_watcher = pw()
    proc_watcher.run_benchmark_on_all_machines(machine_list, parser.server_ip,
                                               parser.server_login,
                                               parser.server_psw)
    log.info('Waiting all benchmarks')
    proc_watcher.wait_all_benchmarks()
    ftp_con = ftplib.FTP(parser.server_ip, parser.server_login,
                         parser.server_psw)
    table_format.join_tables(ftp_con, parser.result_table)
    ftp_con.close()
Example #15
def main():

    config = config_parser.parse_args()['config']

    # Parse config file
    taskvals, config = config_parser.parse_config(config)

    visname = va(taskvals, 'data', 'vis', str)
    keepmms = va(taskvals, 'crosscal', 'keepmms', bool)

    calfiles, caldir = bookkeeping.bookkeeping(visname)
    fields = bookkeeping.get_field_ids(taskvals['fields'])

    msmd.open(visname)

    if not os.path.exists(PLOT_DIR):
        os.mkdir(PLOT_DIR)

    #Plot solutions for bandpass calibrator
    plotms(vis=calfiles.bpassfile, xaxis='Real', yaxis='Imag', coloraxis='corr', plotfile='{0}/bpass_real_imag.png'.format(PLOT_DIR),showgui=False)
    plotms(vis=calfiles.bpassfile, xaxis='freq', yaxis='Amp', coloraxis='antenna1', plotfile='{0}/bpass_freq_amp.png'.format(PLOT_DIR),showgui=False)
    plotms(vis=calfiles.bpassfile, xaxis='freq', yaxis='Phase', coloraxis='antenna1', plotfile='{0}/bpass_freq_phase.png'.format(PLOT_DIR),showgui=False)

    #Plot solutions for phase calibrator
    plotms(vis=calfiles.gainfile, xaxis='Real', yaxis='Imag', coloraxis='corr', plotfile='{0}/phasecal_real_imag.png'.format(PLOT_DIR),showgui=False)
    plotms(vis=calfiles.gainfile, xaxis='Time', yaxis='Amp', coloraxis='antenna1', plotfile='{0}/phasecal_time_amp.png'.format(PLOT_DIR),showgui=False)
    plotms(vis=calfiles.gainfile, xaxis='Time', yaxis='Phase', coloraxis='antenna1', plotfile='{0}/phasecal_time_phase.png'.format(PLOT_DIR),showgui=False)

    #Plot solutions for individual antennas of bandpass and phase calibrator in 3x2 panels
    plot_antennas('bpass',fields,calfiles,xaxis='freq',yaxis='amp')
    plot_antennas('bpass',fields,calfiles,xaxis='freq',yaxis='phase')
    plot_antennas('phasecal',fields,calfiles,xaxis='time',yaxis='amp')
    plot_antennas('phasecal',fields,calfiles,xaxis='time',yaxis='phase')


    extn = 'mms' if keepmms else 'ms'
    for field in fields:
        for subf in field.split(','):
            fname = msmd.namesforfields(int(subf))[0]
            inname = '%s.%s.%s' % (os.path.splitext(visname)[0], fname, extn)
            if not os.path.exists('{0}/{1}_freq_amp.png'.format(PLOT_DIR,fname)):
                plotms(vis=inname, xaxis='freq', yaxis='Amp', coloraxis='corr', plotfile='{0}/{1}_freq_amp.png'.format(PLOT_DIR,fname),showgui=False)
                plotms(vis=inname, xaxis='Real', yaxis='Imag', coloraxis='corr', plotfile='{0}/{1}_real_imag.png'.format(PLOT_DIR,fname),showgui=False)

    msmd.close()
    msmd.done()
Example #16
def main():

    config = config_parser.parse_args()['config']

    # Parse config file
    taskvals, config = config_parser.parse_config(config)

    visname = va(taskvals, 'data', 'vis', str)

    calfiles, caldir = bookkeeping.bookkeeping(visname)
    fields = bookkeeping.get_field_ids(taskvals['fields'])

    #Plot solutions for bandpass calibrator
    fastplot(calfiles.bpassfile,
             col='CPARAM',
             xaxis='Real',
             yaxis='Imag',
             fname='bpass_real_imag.png')
    fastplot(calfiles.bpassfile,
             col='CPARAM',
             xaxis='chan',
             yaxis='Amp',
             logy=True,
             fname='bpass_chan_amp.png')
    fastplot(calfiles.bpassfile,
             col='CPARAM',
             xaxis='chan',
             yaxis='Phase',
             fname='bpass_chan_phase.png')

    #Plot solutions for phase calibrator
    fastplot(calfiles.gainfile,
             col='CPARAM',
             xaxis='Amp',
             yaxis='Time',
             fname='phasecal_time_amp.png',
             markersize=2,
             extent=1e-8)
    fastplot(calfiles.gainfile,
             col='CPARAM',
             xaxis='Phase',
             yaxis='Time',
             fname='phasecal_time_phase.png',
             markersize=2,
             extent=1e-8)
Example #17
def setup_logger(config,verbose=False):

    """Setup logger at debug or info level according to whether verbose option selected (via command line or config file).
    Arguments:
    ----------
    config : str
        Path to config file.
    verbose : bool
        Verbose output? This will display all logger debug output."""

    #Overwrite with verbose mode if set to True in config file
    if not verbose:
        config_dict = config_parser.parse_config(config)[0]
        if 'slurm' in config_dict.keys() and 'verbose' in config_dict['slurm']:
            verbose = config_dict['slurm']['verbose']

    loglevel = logging.DEBUG if verbose else logging.INFO
    logging.basicConfig(format="%(asctime)-15s %(levelname)s: %(message)s", level=loglevel)
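
A usage sketch: pass the config path, and either force verbose mode or let the [slurm] section decide (the config filename is illustrative):

setup_logger('myconfig.txt', verbose=True)
logging.debug('only emitted when the level is DEBUG')
logging.info('always emitted at INFO or below')
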
Example #18
def lamport(config_file, node_index):
    nodes = parse_config(config_file)

    clean_log(node_index)

    node = LamportNode(nodes, node_index)
    party = Party(len(nodes))
    try:
        party.start()
        party.enter(node_index)
        while party.is_alive():
            sleep(0.1)

        node.start()

        print("Uploading log")
        upload_log(node_index)
        print("FIN")

    except KeyboardInterrupt:
        party.stop()
        node.stop()
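
Example #18 treats the parsed config as a sequence of nodes indexed by `node_index`. A hypothetical shape for what `parse_config(config_file)` might return here, with host/port pairs invented for illustration:

# Hypothetical return value of parse_config(config_file) in this example:
nodes = [('127.0.0.1', 9001),
         ('127.0.0.1', 9002),
         ('127.0.0.1', 9003)]
# len(nodes) sizes the Party; node_index picks this process's own entry.
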
Example #19
def main():

    # Get the name of the config file
    args = config_parser.parse_args()

    # Parse config file
    taskvals, config = config_parser.parse_config(args['config'])

    visname = va(taskvals, 'data', 'vis', str)
    calcrefant = va(taskvals, 'crosscal', 'calcrefant', bool, default=False)
    refant = va(taskvals, 'crosscal', 'refant', str, default='m005')
    spw = va(taskvals, 'crosscal', 'spw', str, default='')

    mvis = do_partition(visname, spw)
    mvis = "'{0}'".format(mvis)
    vis = "'{0}'".format(visname)

    config_parser.overwrite_config(args['config'],
                                   conf_sec='data',
                                   conf_dict={'vis': mvis})
    config_parser.overwrite_config(args['config'],
                                   conf_sec='data',
                                   conf_dict={'orig_vis': vis})
Example #20
import numpy as np
import os
import copy
from demos import demo_full
from lib import models, mesh_sampling
from lib.load_data import BodyData, load_graph_mtx
from config_parser import parse_config
from psbody.mesh import Mesh

args, args_dict = parse_config()
np.random.seed(args_dict['seed'])
project_dir = os.path.dirname(os.path.realpath(__file__))
reference_mesh_file = os.path.join(project_dir, 'data/template_mesh.obj')
reference_mesh = Mesh(filename=reference_mesh_file)

datadir_root = os.path.join(project_dir, 'data', 'datasets')
data_dir = os.path.join(datadir_root, args.dataset)

# load data for train and test
if args.mode in ['train', 'test']:
    print("Loading data from {} ..".format(data_dir))
    bodydata = BodyData(
        nVal=100,
        train_mesh_fn=data_dir + '/train/train_disp.npy',
        train_cond1_fn=data_dir + '/train/train_{}.npy'.format(args.pose_type),
        train_cond2_fn=data_dir + '/train/train_{}.npy'.format('clo_label'),
        test_mesh_fn=data_dir + '/test/test_disp.npy',
        test_cond1_fn=data_dir + '/test/test_{}.npy'.format(args.pose_type),
        test_cond2_fn=data_dir + '/test/test_{}.npy'.format('clo_label'),
        reference_mesh_file=reference_mesh_file)
Example #21
#!/usr/bin/python
import sys
import io
import uuid
import config_parser
import os

DIR = os.path.dirname(os.path.realpath(__file__)) + "/../"

print "---- setup-config.py"
print "Generating config.ts..."

templatePath = DIR + "scripts/config.template.ts"
configPath = DIR + "config.ts"

config = config_parser.parse_config()

if config is None:
    sys.exit(1)

prodPort = config["LISTEN_PORT"]
mqtt = config["LORA_BROKER"]
loraserver = config["LORA_API"]

print "* LoRaWAN mqtt server   : " + mqtt
print "* Server port           : " + str(prodPort)
secret = str(uuid.uuid1())

templateFile = open(templatePath, "r")
template = templateFile.read()
templateFile.close()
Example #22
                                          connections)
        html = "node_1: " + str(nodes[int(node_1)][1]) + "<br>"
        html += "node_2: " + str(nodes[int(node_2)][1]) + "<br>"
        html += str([nodes[a][1] for a in x])
        self.write(html)


application = tornado.web.Application([
    (r"/", MainHandler),
    (r"/openslice", OpenSliceHandler),
])

if __name__ == "__main__":

    config = False
    print "wow"
    if config:
        nodes, connections = config_parser.parse_config("config.txt")
    else:
        nodes, connections = get_graph.get_graph(daylight_ip, daylight_port,
                                                 user, passw)
    hosts = get_graph.get_hosts(daylight_ip, daylight_port, user, passw, nodes)
    print "Nodes: ", nodes
    print "Connections: ", connections
    print "Hosts: ", hosts

    topology_commands.build_base_rules(daylight_ip, daylight_port, user, passw,
                                       nodes, connections, hosts)

    application.listen(8888)
    tornado.ioloop.IOLoop.instance().start()
Example #23
def run_experiment(config_path):
    config = config_parser.parse_config("config", config_path)
    random.seed(config["random_seed"] if "random_seed" in config else 123)
    temp_model_path = config_path + ".model"

    if "load" in config and config["load"] is not None and len(config["load"]) > 0:
        model = WordPairClassifier.load(config["load"])
        data_test = read_dataset(config["path_test"])
        word2id = model.config["word2id"]
        config = model.config
        process_dataset(data_test, model, word2id, True, config, "test")
        sys.exit()

    data_train = read_dataset(config["path_train"])
    data_dev = read_dataset(config["path_dev"])
    data_test = read_dataset(config["path_test"])
    data_pretrain = read_dataset(config["path_pretrain"]) if ("path_pretrain" in config and config["path_pretrain"] is not None and len(config["path_pretrain"]) > 0) else []

    embedding_vocab_set = construct_embedding_vocab([config["word_embedding_path_a"], config["word_embedding_path_b"]])

#    if len(embedding_vocab_set) > 0 and len(data_pretrain) > 0:
#        data_pretrain = [x for x in data_pretrain if (x[1] in embedding_vocab_set and x[2] in embedding_vocab_set)]

    vocabulary = construct_vocabulary([data_train, data_dev, data_test, data_pretrain], embedding_vocab_set if config["restrict_to_embedded_vocab"] else None)
    if "extend_vocabulary" in config and config["extend_vocabulary"]:
        extend_vocabulary(vocabulary, config["word_embedding_path_a"], config["word_embedding_path_b"], "\t", True)
    word2id = collections.OrderedDict()
    for i in range(len(vocabulary)):
        word2id[vocabulary[i]] = i
    assert(len(word2id) == len(set(vocabulary)))
    config["n_words"] = len(vocabulary)
    config["word2id"] = word2id
    config["feature_count"] = len(data_train[0][3])

    model = WordPairClassifier(config)
    load_embeddings_into_matrix(config["word_embedding_path_a"], model.word_embedding_matrix_A, word2id)
    if config["late_fusion"] == True and config["word_embedding_size_b"] > 0 and config["word_embedding_path_b"] != None and len(config["word_embedding_path_b"]) > 0:
        load_embeddings_into_matrix(config["word_embedding_path_b"], model.word_embedding_matrix_B, word2id)

    for key, val in config.items():
        if key not in ["word2id"]:
            print(str(key) + ": " + str(val))

    if len(data_pretrain) > 0:
        for epoch in range(config["pretrain_epochs"]):
            print("pretrain_epoch: " + str(epoch))
            results_pretrain = process_dataset(data_pretrain, model, word2id, False, True, config, "pretrain")

    best_score = 0.0
    for epoch in range(config["epochs"]):
        print("epoch: " + str(epoch))
        results_train = process_dataset(data_train, model, word2id, False, False, config, "train")
        results_dev = process_dataset(data_dev, model, word2id, True, False, config, "dev")
        score_dev = results_dev[config["model_selector"]]

        if epoch == 0 or score_dev > best_score:
            best_epoch = epoch
            best_score = score_dev
            model.save(temp_model_path)
        print("best_epoch: " + str(best_epoch))
        print("best_measure: " + str(best_score))

        if config["stop_if_no_improvement_for_epochs"] > 0 and (epoch - best_epoch) >= config["stop_if_no_improvement_for_epochs"]:
            break

    if os.path.isfile(temp_model_path):
        model = WordPairClassifier.load(temp_model_path)
        os.remove(temp_model_path)

    if "save" in config and config["save"] is not None and len(config["save"]) > 0:
        model.save(config["save"])

    score_dev = process_dataset(data_dev, model, word2id, True, False, config, "dev_final")
    score_test = process_dataset(data_test, model, word2id, True, False, config, "test")
Example #24
def write_spw_master(filename,config,SPWs,precal_scripts,postcal_scripts,submit,dir='jobScripts',pad_length=5,dependencies='',timestamp='',slurm_kwargs={}):
    
    """Write master master script, which separately calls each of the master scripts in each SPW directory.
        
        filename : str
        Name of master pipeline submission script.
        config : str
        Path to config file.
        SPWs : str
        Comma-separated list of spw ranges.
        precal_scripts : list, optional
        List of sbatch scripts to call in order, before running pipeline in SPW directories.
        postcal_scripts : list, optional
        List of sbatch scripts to call in order, after running pipeline in SPW directories.
        submit : bool, optional
        Submit jobs to SLURM queue immediately?
        dir : str, optional
        Name of directory to output ancillary job scripts.
        pad_length : int, optional
        Length to pad the SLURM sacct output columns.
        dependencies : str, optional
        Comma-separated list of SLURM job dependencies.
        timestamp : str, optional
        Timestamp to put on this run and related runs in SPW directories.
        slurm_kwargs : list, optional
        Parameters parsed from [slurm] section of config."""
    
    master = open(filename,'w')
    master.write('#!/bin/bash\n')
    SPWs = SPWs.replace('0:','')
    toplevel = len(precal_scripts + postcal_scripts) > 0
    
    scripts = precal_scripts[:]
    if len(scripts) > 0:
        command = 'sbatch'
        if dependencies != '':
            master.write('\n#Run after these dependencies\nDep={0}\n'.format(dependencies))
            command += ' -d afterok:$Dep --kill-on-invalid-dep=yes'
            dependencies = '' #Remove dependencies so it isn't fed into launching SPW scripts
        master.write('\n#{0}\n'.format(scripts[0]))
        master.write("allSPWIDs=$({0} {1} | cut -d ' ' -f4)\n".format(command,scripts[0]))
        scripts.pop(0)
    for script in scripts:
        command = 'sbatch -d afterok:$allSPWIDs --kill-on-invalid-dep=yes'
        master.write('\n#{0}\n'.format(script))
        master.write("allSPWIDs+=,$({0} {1} | cut -d ' ' -f4)\n".format(command,script))
    
    if 'calc_refant.sbatch' in precal_scripts:
        master.write('echo Calculating reference antenna, and copying result to SPW directories.\n')
    if 'partition.sbatch' in precal_scripts:
        master.write('echo Running partition job array, iterating over {0} SPWs.\n'.format(len(SPWs.split(','))))
    
    partition = len(precal_scripts) > 0 and 'partition' in precal_scripts[-1]
    if partition:
        master.write('\npartitionID=$(echo $allSPWIDs | cut -d , -f{0})\n'.format(len(precal_scripts)))

    #Add time as extn to this pipeline run, to give unique filenames
    killScript = 'killJobs'
    summaryScript = 'summary'
    fullSummaryScript = 'fullSummary'
    errorScript = 'findErrors'
    timingScript = 'displayTimes'
    cleanupScript = 'cleanup'

    master.write('\n#Add time as extn to this pipeline run, to give unique filenames')
    master.write("\nDATE={0}\n".format(timestamp))
    master.write('mkdir -p {0}\n'.format(dir))
    master.write('mkdir -p {0}\n\n'.format(globals.LOG_DIR))
    extn = '_$DATE.sh'
    
    for i,spw in enumerate(SPWs.split(',')):
        master.write('echo Running pipeline in directory "{0}" for spectral window 0:{0}\n'.format(spw))
        master.write('cd {0}\n'.format(spw))
        master.write('output=$({0} --config ./{1} --run --submit --quiet'.format(os.path.split(globals.THIS_PROG)[1],config))
        if partition:
            master.write(' --dependencies=$partitionID\_{0}'.format(i))
        elif len(precal_scripts) > 0:
            master.write(' --dependencies=$allSPWIDs')
        elif dependencies != '':
            master.write(' --dependencies={0}'.format(dependencies))
        master.write(')\necho $output\n')
        if i == 0:
            master.write("IDs=$(echo $output | cut -d ' ' -f7)")
        else:
            master.write("IDs+=,$(echo $output | cut -d ' ' -f7)")
        master.write('\ncd ..\n\n')
    
    if 'concat.sbatch' in postcal_scripts:
        master.write('echo Will concatenate MSs/MMSs and create quick-look continuum cube across all SPWs for all fields from \"{0}\".\n'.format(config))
    scripts = postcal_scripts[:]

    #Hack to perform correct number of selfcal loops
    if 'selfcal_part1.sbatch' in scripts and 'selfcal_part2.sbatch' in scripts and 'run_bdsf.sbatch' in scripts and 'make_pixmask.sbatch' in scripts:
        selfcal_loops = config_parser.parse_config(config)[0]['selfcal']['nloops']
        scripts.extend(['selfcal_part1.sbatch','selfcal_part2.sbatch','run_bdsf.sbatch','make_pixmask.sbatch']*(selfcal_loops))
        scripts.append('selfcal_part1.sbatch')

    if len(scripts) > 0:
        command = 'sbatch -d afterany:$IDs {0}'.format(scripts[0])
        master.write('\n#{0}\n'.format(scripts[0]))
        scripts.pop(0)
        if len(precal_scripts) == 0:
            master.write("allSPWIDs=$({0} | cut -d ' ' -f4)\n".format(command))
        else:
            master.write("allSPWIDs+=,$({0} | cut -d ' ' -f4)\n".format(command))
        for script in scripts:
            command = 'sbatch -d afterok:$allSPWIDs'
            master.write('\n#{0}\n'.format(script))
            master.write("allSPWIDs+=,$({0} {1} | cut -d ' ' -f4)\n".format(command,script))
    master.write('\necho Submitted the following jobIDs within the {0} SPW directories: $IDs\n'.format(len(SPWs.split(','))))

    prefix = ''
    #Write bash job scripts for the jobs run in this top level directory
    if toplevel:
        master.write('\necho Submitted the following jobIDs over all SPWs: $allSPWIDs\n')
        master.write('\necho For jobs over all SPWs:\n')
        prefix = 'allSPW_'
        write_all_bash_jobs_scripts(master,extn,IDs='allSPWIDs',dir=dir,prefix=prefix,pad_length=pad_length,slurm_kwargs=slurm_kwargs)
        master.write('\nln -f -s {1}{2}{3} {0}/{1}{4}{3}\n'.format(dir,prefix,summaryScript,extn,fullSummaryScript))

    master.write('\necho For all jobs within the {0} SPW directories:\n'.format(len(SPWs.split(','))))
    header = '-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------' + '-'*pad_length
    do = """echo "for f in {%s,}; do if [ -d \$f ]; then cd \$f; ./%s/%s%s; cd ..; else echo Directory \$f doesn\\'t exist; fi; done;%s"""
    suffix = '' if toplevel else ' \"'
    write_bash_job_script(master, killScript, extn, do % (SPWs,dir,killScript,extn,suffix), 'kill all the jobs', dir=dir,prefix=prefix)
    write_bash_job_script(master, cleanupScript, extn, do % (SPWs,dir,cleanupScript,extn,' \"'), 'remove the MMSs/MSs within SPW directories \(after pipeline has run\), while leaving the concatenated data at the top level', dir=dir)
    
    do = """echo "counter=1; for f in {%s,}; do echo -n SPW \#\$counter:; echo -n ' '; if [ -d \$f ]; then cd \$f; pwd; ./%s/%s%s %s; cd ..; else echo Directory \$f doesn\\'t exist; fi; counter=\$((counter+1)); echo '%s'; done; """
    if toplevel:
        do += "echo -n 'All SPWs: '; pwd; "
    else:
        do += ' \"'
    write_bash_job_script(master, summaryScript, extn, do % (SPWs,dir,summaryScript,extn,"| grep -v 'PENDING\|COMPLETED'",header), 'view the progress \(for running or failed jobs\)', dir=dir,prefix=prefix)
    write_bash_job_script(master, fullSummaryScript, extn, do % (SPWs,dir,summaryScript,extn,'',header), 'view the progress \(for all jobs\)', dir=dir,prefix=prefix)
    header = '------------------------------------------------------------------------------------------' + '-'*pad_length
    write_bash_job_script(master, errorScript, extn, do % (SPWs,dir,errorScript,extn,'',header), 'find errors \(after pipeline has run\)', dir=dir,prefix=prefix)
    write_bash_job_script(master, timingScript, extn, do % (SPWs,dir,timingScript,extn,'',header), 'display start and end timestamps \(after pipeline has run\)', dir=dir,prefix=prefix)
    
    #Close master submission script and make executable
    master.close()
    os.chmod(filename, 509)  # 509 == 0o775 (rwxrwxr-x): make the script executable
    
    #Submit script or output that it will not run
    if submit:
        logger.logger.info('Running master script "{0}"'.format(filename))
        os.system('./{0}'.format(filename))
    else:
        logger.logger.info('Master script "{0}" written, but will not run.'.format(filename))
Example #25
from mailer import Mailer
from mod_auth.controllers import mod_auth
from mod_ci.controllers import mod_ci
from mod_customized.controllers import mod_customized
from mod_deploy.controllers import mod_deploy
from mod_home.controllers import mod_home
from mod_regression.controllers import mod_regression
from mod_sample.controllers import mod_sample
from mod_test.controllers import mod_test
from mod_upload.controllers import mod_upload

app = Flask(__name__)
app.wsgi_app = ProxyFix(app.wsgi_app)  # type: ignore
# Load config
try:
    config = parse_config('config')
except ImportStringError:
    traceback.print_exc()
    raise MissingConfigError()

app.config.from_mapping(config)
try:
    app.config['DEBUG'] = os.environ['DEBUG']
except KeyError:
    app.config['DEBUG'] = False

# embed flask-migrate in the app itself
try:
    app.config['SQLALCHEMY_DATABASE_URI'] = app.config['DATABASE_URI']
    Migrate(app, Base)
except KeyError:
    pass  # snippet truncated at this point in the source
Example #26
import config_parser
from db_connection import OverView_db

#config
args = config_parser.parse_arguments()
config = config_parser.parse_config(args.config)

overview_db = OverView_db(config['overview'])

post_1 = {
    "path_to_image": "imgs/adelaide-canola-flowers.jpg",
    "post_text": "Chicago is the most populous city...",
    "post_header": "Chicago and Lake Michigan",
    "coordinates": "41.881944°, -87.627778°",
    "posted": "FALSE",
    "epoch_id": 123
    # "link" : "https://www.over-view.com/overviews/adelaide-canola-flowers"
}

if __name__ == '__main__':
    overview_db.add_post(post_1)
    print(overview_db.count_posts())
    #overview_db.delete_all_posts()
Example #27
					"node":b["edge"]["tailNodeConnector"]["node"]["id"],
					"ingressPort":b["edge"]["tailNodeConnector"]["id"],
					"priority":"100",
					"etherType":"0x0800",
					"actions":["HW_PATH"]
				}
				url = 'http://' + str(daylight_ip) + ":" + str(daylight_port) + '/controller/nb/v2/flowprogrammer/default/node/OF/' + str(input_flow["node"]) + "/staticFlow/" + str(input_flow["name"]) 
				resp, content = h.request(url,"PUT", body=str(json.dumps(input_flow)), headers={'content-type':'application/json'} )
			if a[0] == b["edge"]["headNodeConnector"]["node"]["id"]:
				input_flow = {
					"name":"normal_"+b["edge"]["headNodeConnector"]["id"],
					"node":b["edge"]["headNodeConnector"]["node"]["id"],
					"ingressPort":b["edge"]["headNodeConnector"]["id"],
					"priority":"100",
					"etherType":"0x0800",
					"actions":["HW_PATH"]
				}
				url = 'http://' + str(daylight_ip) + ":" + str(daylight_port) + '/controller/nb/v2/flowprogrammer/default/node/OF/' + str(input_flow["node"]) + "/staticFlow/" + str(input_flow["name"]) 
				resp, content = h.request(url,"PUT", body=str(json.dumps(input_flow)), headers={'content-type':'application/json'} )




if __name__ == "__main__":
	import config_parser
	nodes,connections = config_parser.parse_config("config.txt")
	x = build_slice(nodes[0],nodes[2],nodes,connections)
	print(x)
	names = [nodes[a][1] for a in x]
	print(names)
Example #28
def write_master(filename,config,scripts=[],submit=False,dir='jobScripts',pad_length=5,verbose=False, echo=True, dependencies='',slurm_kwargs={}):
    
    """Write master pipeline submission script, calling various sbatch files, and writing ancillary job scripts.
        
        Arguments:
        ----------
        filename : str
        Name of master pipeline submission script.
        config : str
        Path to config file.
        scripts : list, optional
        List of sbatch scripts to call in order.
        submit : bool, optional
        Submit jobs to SLURM queue immediately?
        dir : str, optional
        Name of directory to output ancillary job scripts.
        pad_length : int, optional
        Length to pad the SLURM sacct output columns.
        verbose : bool, optional
        Verbose output (inserted into master script)?
        echo : bool, optional
        Echo the purpose of each job script for the user?
        dependencies : str, optional
        Comma-separated list of SLURM job dependencies.
        slurm_kwargs : list, optional
        Parameters parsed from [slurm] section of config."""
    
    master = open(filename,'w')
    master.write('#!/bin/bash\n')
    timestamp = config_parser.get_key(config,'run','timestamp')
    if timestamp == '':
        timestamp = datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
        config_parser.overwrite_config(config, conf_dict={'timestamp' : "'{0}'".format(timestamp)}, conf_sec='run', sec_comment='# Internal variables for pipeline execution')
    
    #Copy config file to TMP_CONFIG and inform user
    if verbose:
        master.write("\necho Copying \'{0}\' to \'{1}\', and using this to run pipeline.\n".format(config,globals.TMP_CONFIG))
    master.write('cp {0} {1}\n'.format(config, TMP_CONFIG))

    #Hack to perform correct number of selfcal loops
    if 'selfcal_part1.sbatch' in scripts and 'selfcal_part2.sbatch' in scripts and 'run_bdsf.sbatch' in scripts and 'make_pixmask.sbatch' in scripts:
        selfcal_loops = config_parser.parse_config(config)[0]['selfcal']['nloops']
        scripts.extend(['selfcal_part1.sbatch','selfcal_part2.sbatch','run_bdsf.sbatch','make_pixmask.sbatch']*(selfcal_loops))
        scripts.append('selfcal_part1.sbatch')
    
    command = 'sbatch'
    
    if dependencies != '':
        master.write('\n#Run after these dependencies\nDep={0}\n'.format(dependencies))
        command += ' -d afterok:$Dep --kill-on-invalid-dep=yes'
    master.write('\n#{0}\n'.format(scripts[0]))
    if verbose:
        master.write('echo Submitting {0} to the SLURM queue with the following command:\necho {1}\n'.format(scripts[0],command))
    master.write("IDs=$({0} {1} | cut -d ' ' -f4)\n".format(command,scripts[0]))
    scripts.pop(0)


    #Submit each script with dependency on all previous scripts, and extract job IDs
    for script in scripts:
        command = 'sbatch -d afterok:$IDs --kill-on-invalid-dep=yes'
        master.write('\n#{0}\n'.format(script))
        if verbose:
            master.write('echo Submitting {0} to the SLURM queue with the following command\necho {1} {0}\n'.format(script,command))
        master.write("IDs+=,$({0} {1} | cut -d ' ' -f4)\n".format(command,script))

    master.write('\n#Output message and create {0} directory\n'.format(dir))
    master.write('echo Submitted sbatch jobs with following IDs: $IDs\n')
    master.write('mkdir -p {0}\n'.format(dir))

    #Add time as extn to this pipeline run, to give unique filenames
    master.write('\n#Add time as extn to this pipeline run, to give unique filenames')
    master.write("\nDATE={0}".format(timestamp))
    extn = '_$DATE.sh'
    
    #Copy contents of config file to jobScripts directory
    master.write('\n#Copy contents of config file to {0} directory\n'.format(dir))
    master.write('cp {0} {1}/{2}_$DATE.txt\n'.format(config,dir,os.path.splitext(config)[0]))
    
    #Write each job script - kill script, summary script, error script, and timing script
    write_all_bash_jobs_scripts(master,extn,IDs='IDs',dir=dir,echo=echo,pad_length=pad_length,slurm_kwargs=slurm_kwargs)
    
    #Close master submission script and make executable
    master.close()
    os.chmod(filename, 509)  # 509 == 0o775 (rwxrwxr-x): make the script executable
    
    #Submit script or output that it will not run
    if submit:
        if echo:
            logger.logger.info('Running master script "{0}"'.format(filename))
        os.system('./{0}'.format(filename))
    else:
        logger.logger.info('Master script "{0}" written, but will not run.'.format(filename))
Example #29
from log_configuration import LogConfiguration
from mailer import Mailer
from mod_auth.controllers import mod_auth
from mod_ci.controllers import mod_ci
from mod_deploy.controllers import mod_deploy
from mod_home.controllers import mod_home
from mod_regression.controllers import mod_regression
from mod_sample.controllers import mod_sample
from mod_test.controllers import mod_test
from mod_upload.controllers import mod_upload
from mod_customized.controllers import mod_customized

app = Flask(__name__)
app.wsgi_app = ProxyFix(app.wsgi_app)
# Load config
config = parse_config('config')
app.config.from_mapping(config)
try:
    app.config['DEBUG'] = os.environ['DEBUG']
except KeyError:
    app.config['DEBUG'] = False

# Init logger
log_configuration = LogConfiguration(app.root_path, 'platform', app.config['DEBUG'])
log = log_configuration.create_logger("Platform")


def install_secret_keys(application, secret_session='secret_key', secret_csrf='secret_csrf'):
    """
    Configure the SECRET_KEY from a file in the instance directory.
Example #30
def main():

    # Parse Arguments
    args = processMeerKAT.parse_args()
    processMeerKAT.setup_logger(args.config, args.verbose)

    # Read in known_hpc and HPC_DEFAULTS from configuration file.
    known_hpc_path = "{0}/{1}".format(os.path.dirname(__file__),
                                      "known_hpc.cfg")
    if os.path.isfile(known_hpc_path):
        KNOWN_HPCS, _ = config_parser.parse_config(known_hpc_path)
    else:
        parser.error(
            "Known HPC config file ({0}) not found.".format(known_hpc_path))
    global HPC_DEFAULTS
    HPC_DEFAULTS = KNOWN_HPCS[args.hpc if args.hpc in
                              KNOWN_HPCS.keys() else "unknown"]

    # Open Measurement Set
    msmd.open(args.MS)

    dopol = args.dopol
    refant = config_parser.parse_config(args.config)[0]['crosscal']['refant']
    fields = get_fields(args.MS)
    logger.info(
        '[fields] section written to "{0}". Edit this section if you need to change field IDs (comma-separated string for multiple IDs, not supported for calibrators).'
        .format(args.config))

    npol = msmd.ncorrforpol()[0]
    parang = 0
    if 'phasecalfield' in fields:
        calfield = msmd.fieldsforname(fields['phasecalfield'][1:-1])[
            0]  #remove '' from field and convert to int
        parang = parang_coverage(args.MS, calfield)

    if npol < 4:
        logger.warning(
            "Only {0} polarisations present in '{1}'. Any attempted polarisation calibration will fail, so setting dopol=False in [run] section of '{2}'."
            .format(npol, args.MS, args.config))
        dopol = False
    elif 0 < parang < 30:
        logger.warning(
            "Parallactic angle coverage is < 30 deg. Polarisation calibration will most likely fail, so setting dopol=False in [run] section of '{0}'."
            .format(args.config))
        dopol = False

    check_refant(args.MS, refant, args.config, warn=True)
    threads = check_scans(args.MS, args.nodes, args.ntasks_per_node, dopol)
    SPW = check_spw(args.config)

    config_parser.overwrite_config(
        args.config,
        conf_dict={'dopol': dopol},
        conf_sec='run',
        sec_comment='# Internal variables for pipeline execution')
    config_parser.overwrite_config(args.config,
                                   conf_dict=threads,
                                   conf_sec='slurm')
    config_parser.overwrite_config(args.config,
                                   conf_dict=fields,
                                   conf_sec='fields')
    config_parser.overwrite_config(args.config,
                                   conf_dict={'spw': "'{0}'".format(SPW)},
                                   conf_sec='crosscal')

    msmd.done()
Example #31
            overwrite=True, writeflags=True)

    # now flag using 'rflag' option
    flagdata(vis=visname, mode="rflag", datacolumn="corrected",
            field=fields.targetfield, timecutoff=5.0, freqcutoff=5.0, timefit="poly",
            freqfit="poly", flagdimension="freqtime", extendflags=False,
            timedevscale=5.0, freqdevscale=5.0, spectralmax=500.0,
            extendpols=False, growaround=False, flagneartime=False,
            flagnearfreq=False, action="apply", flagbackup=True, overwrite=True,
            writeflags=True)

    # Now summary
    flagdata(vis=visname, mode="summary", datacolumn="corrected",
            extendflags=True, name=visname + 'summary.split', action="apply",
            flagbackup=True, overwrite=True, writeflags=True)


if __name__ == '__main__':
    # Get the name of the config file
    args = config_parser.parse_args()

    # Parse config file
    taskvals, config = config_parser.parse_config(args['config'])

    visname = va(taskvals, 'data', 'vis', str)

    calfiles, caldir = bookkeeping.bookkeeping(visname)
    fields = bookkeeping.get_field_ids(taskvals['fields'])

    do_pre_flag_2(visname, fields)
Example #32
def get_selfcal_params():

    #Flag for input errors
    exit = False

    # Get the name of the config file
    args = config_parser.parse_args()

    # Parse config file
    taskvals, config = config_parser.parse_config(args['config'])
    params = taskvals['selfcal']

    check_params = list(params.keys())
    check_params.remove('nloops')
    check_params.remove('restart_no')

    params['vis'] = taskvals['data']['vis']
    params['refant'] = taskvals['crosscal']['refant']
    if 'loop' not in params:
        params['loop'] = 0
    else:
        check_params.remove('loop')

    for arg in check_params:

        # Multiscale needs to be a list of lists (if specifying multiple scales)
        # or a simple list (if specifying a single scale). So make sure these two
        # cases are covered. Likewise for imsize.

        if arg in ['multiscale', 'imsize']:
            # Not a list of lists, so turn it into one of right length
            if type(params[arg]) is list and (len(params[arg]) == 0 or type(
                    params[arg][0]) is not list):
                params[arg] = [
                    params[arg],
                ] * (params['nloops'] + 1)
            # Not a list at all, so put it into a list
            elif type(params[arg]) is not list:
                params[arg] = [
                    [
                        params[arg],
                    ],
                ] * (params['nloops'] + 1)
            # A list of lists of length 1, so put into list of lists of right length
            elif type(params[arg]) is list and type(
                    params[arg][0]) is list and len(params[arg]) == 1:
                params[arg] = [
                    params[arg][0],
                ] * (params['nloops'] + 1)
            if len(params[arg]) != params['nloops'] + 1:
                logger.error(
                    "Parameter '{0}' in '{1}' is the wrong length. It is {2} but must be a single value or equal to 'nloops' + 1 ({3})."
                    .format(arg, args['config'], len(params[arg]),
                            params['nloops'] + 1))
                exit = True

        else:
            if type(params[arg]) is not list:
                if arg in ['solint', 'calmode']:
                    params[arg] = [params[arg]] * (params['nloops'])
                else:
                    params[arg] = [params[arg]] * (params['nloops'] + 1)

            if arg in ['solint', 'calmode'
                       ] and len(params[arg]) != params['nloops']:
                logger.error(
                    "Parameter '{0}' in '{1}' is the wrong length. It is {2} long but must be 'nloops' ({3}) long or a single value (not a list)."
                    .format(arg, args['config'], len(params[arg]),
                            params['nloops']))
                exit = True
            elif arg not in ['solint', 'calmode'
                             ] and len(params[arg]) != params['nloops'] + 1:
                logger.error(
                    "Parameter '{0}' in '{1}' is the wrong length. It is {2} long but must 'nloops' + 1 ({3}) long or a single value (not a list)."
                    .format(arg, args['config'], len(params[arg]),
                            params['nloops'] + 1))
                exit = True

    if exit:
        sys.exit(1)

    return args, params
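
After this normalisation every per-loop parameter is list-valued, so a loop index selects a consistent set of values. A usage sketch; parameter names are taken from the code above, values are illustrative:

args, params = get_selfcal_params()
loop = params['loop']
print(params['imsize'][loop], params['multiscale'][loop])
# solint/calmode have length nloops, the other parameters nloops + 1
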