Example #1
def main_parse_args():
    """Processes command line arguments.

    Expects one positional argument (infile) and a number of optional
    arguments. If arguments are missing, supplies default values.

    Returns:
        Namespace: args as populated namespace
    """
    parser = ArgumentParser()
    parser.add_argument('infile',
                        help='path to the file to be mapped. If mode \
                        is LIST, it should contain one identifier on each line. \
                        If mode is EDGE, it should be a single edge file \
                        produced in table, e.g. biogrid.PPI.edge.1.txt')
    parser.add_argument('-mo',
                        '--mode',
                        help='mode for running convert. "EDGE" \
                        if mapping an edge file, or "LIST" to map a list of \
                        names to the stable ids used in the Knowledge Network',
                        default='EDGE')
    parser.add_argument('-sh',
                        '--source_hint',
                        help='suggestion for ID source \
                        database used to resolve ambiguities in mapping',
                        default=DEFAULT_HINT)
    parser.add_argument('-t',
                        '--taxon',
                        help='taxon id of the species of all gene \
                        names',
                        default=DEFAULT_TAXON)
    parser = cf.add_config_args(parser)
    args = parser.parse_args()
    return args
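A minimal usage sketch (hypothetical; not part of the original module) showing
how the parsed namespace might be consumed:

if __name__ == '__main__':
    args = main_parse_args()
    # each option is populated from the command line or from the defaults above
    print(args.infile, args.mode, args.source_hint, args.taxon)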
Example #2
def main():
    """Parses arguments and then exports the specified subnetworks.
    """
    parser = ArgumentParser()
    parser = cf.add_config_args(parser)
    parser = su.add_config_args(parser)
    parser.add_argument("-e", "--edge_type", help="Edge type")
    parser.add_argument("-s", "--species", help="Species")
    args = parser.parse_args()

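    # connect to MySQL and select the KnowNet database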
    db = mu.get_database(args=args)
    db.use_db("KnowNet")

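    # derive output file names and their destinations under the storage bucket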
    cls, bidir = figure_out_class(db, args.edge_type)
    edges_fn = '{}.{}.edge'.format(args.species, args.edge_type)
    nodes_fn = '{}.{}.node_map'.format(args.species, args.edge_type)
    meta_fn = '{}.{}.metadata'.format(args.species, args.edge_type)
    bucket_dir = os.path.join(cls, args.species, args.edge_type)
    sync_dir = os.path.join(args.bucket, bucket_dir)
    sync_edges = os.path.join(sync_dir, edges_fn)
    sync_nodes = os.path.join(sync_dir, nodes_fn)
    sync_meta = os.path.join(sync_dir, meta_fn)

    if not args.force_fetch and all(
            map(os.path.exists, [sync_edges, sync_nodes, sync_meta])):
        print("Files already exist.  Skipping.")
        return

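    # fetch gene-gene or property-gene edges depending on the edge class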
    get = get_gg if cls == 'Gene' else get_pg
    res = get(db, args.edge_type, args.species)

    print("ProductionLines: " + str(len(res)))
    if not args.force_fetch and should_skip(cls, res):
        print('Skipping {}.{}'.format(args.species, args.edge_type))
        return
    res, lines = norm_edges(res, args)

    n1des = list(set(i[0] for i in res))
    n2des = list(set(i[1] for i in res))

    n1des_desc = convert_nodes(args, n1des)
    n2des_desc = convert_nodes(args, n2des)
    nodes_desc = set(n1des_desc) | set(n2des_desc)

    metadata = get_metadata(db, res, nodes_desc, lines, args.species,
                            args.edge_type, args)
    db.close()

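    # write the edges, node descriptions, and metadata to the sync directory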
    os.makedirs(sync_dir, exist_ok=True)
    with open(sync_edges, 'w') as file:
        csvw = csv.writer(file, delimiter='\t')
        csvw.writerows(res)
    with open(sync_nodes, 'w', encoding='utf-8') as file:
        csvw = csv.writer(file, delimiter='\t')
        csvw.writerows(nodes_desc)
    with open(sync_meta, 'w') as file:
        yaml.dump(metadata, file, default_flow_style=False)
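A quick sanity check for the exported files (a sketch only; the file names are
hypothetical but follow the '{species}.{edge_type}' naming used in main()):

import csv
import yaml

with open('9606.ppi.edge') as file:
    edges = list(csv.reader(file, delimiter='\t'))
with open('9606.ppi.metadata') as file:
    metadata = yaml.safe_load(file)
print(len(edges), 'edges; metadata keys:', sorted(metadata))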
Example #3
def main():
    """Deploy a MySQL container using marathon with the provided command line
    arguements.

    This uses the provided command line arguments and the defaults found in
    config_utilities to launch a MySQL docker container using marathon.
    """
    parser = ArgumentParser()
    parser = cf.add_config_args(parser)
    args = parser.parse_args()
    deploy_container(args)
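deploy_container is defined elsewhere in the module; as a rough sketch of the
idea, assuming marathon's standard /v2/apps REST endpoint and a stock MySQL
image (the app definition below is illustrative, not the project's actual one):

import requests

def deploy_container_sketch(args):
    """Hypothetical stand-in for deploy_container."""
    app = {
        'id': 'mysql',  # marathon application id
        'cpus': 0.5,
        'mem': 2048,
        'container': {
            'type': 'DOCKER',
            'docker': {'image': 'mysql:5.5', 'network': 'HOST'},
        },
        'env': {'MYSQL_ROOT_PASSWORD': 'changeme'},  # placeholder credential
    }
    resp = requests.post('http://localhost:8080/v2/apps', json=app)
    resp.raise_for_status()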
Example #4
def main_parse_args():
    """Processes command line arguments.

    Expects one positional argument (module) and a number of optional
    arguments. If arguments are missing, supplies default values.

    Returns:
        Namespace: args as populated namespace
    """
    parser = ArgumentParser()
    parser.add_argument('module', help='select SrcClass to check, e.g. dip')
    parser = cf.add_config_args(parser)
    args = parser.parse_args()
    return args
Example #5
def main_parse_args():
    """Processes command line arguments.

    Expects one positional argument (metadata_json) and a number of optional
    arguments. If arguments are missing, supplies default values.

    Returns:
        Namespace: args as populated namespace
    """
    parser = ArgumentParser()
    parser.add_argument('metadata_json',
                        help='json file produced from check, \
                        e.g. file_metadata.json')
    parser = cf.add_config_args(parser)
    args = parser.parse_args()
    return args
Example #6
def main_parse_args():
    """Processes command line arguments.

    Expects one positional argument (importfile) and a number of optional
    arguments. If arguments are missing, supplies default values.

    Returns:
        Namespace: args as populated namespace
    """
    parser = ArgumentParser()
    parser.add_argument('importfile',
                        help='import file produced from map step, \
                        or merged files, and must contain the table name e.g. \
                        kegg/ath/kegg.ath.unique.status.1.txt or \
                        unique.status.txt')
    parser = cf.add_config_args(parser)
    args = parser.parse_args()
    return args
Example #7
def main_parse_args():
    """Processes command line arguments.

    Expects two positional arguments (chunkfile, metadata_json) and a number
    of optional arguments. If arguments are missing, supplies default values.

    Returns:
        Namespace: args as populated namespace
    """
    parser = ArgumentParser()
    parser.add_argument('chunkfile',
                        help='path to a single chunk file produced \
                        in fetch, e.g. dip.PPI.raw_line.1.txt')
    parser.add_argument('metadata_json',
                        help='json file produced from check, \
                        e.g. file_metadata.json')
    parser = cf.add_config_args(parser)
    args = parser.parse_args()
    return args
Example #8
def main_parse_args():
    """Processes command line arguments.

    Expects one positional argument (start_step) and a number of optional
    arguments. If arguments are missing, supplies default values.

    .. csv-table::
        :header: parameter,argument type,flag,description
        :widths: 4,2,2,12
        :delim: |

        [start_step]      |    |    |string indicating which pipeline stage to start with
        --setup           |    |-su |run db inits instead of source specific pipelines
        --one_step        |    |-os |run for a single step instead of rest of pipeline
        --step_parameters |str |-p  |parameters to specify calls of a single step in pipeline
        --no_ensembl      |    |-ne |do not run ensembl in setup pipeline
        --dependencies    |str |-d  |names of parent jobs that must finish

    Returns:
        Namespace: args as populated namespace
    """
    parser = ArgumentParser()
    parser.add_argument('start_step',
                        default=DEFAULT_START_STEP,
                        help='start step, must be ' + str(POSSIBLE_STEPS))
    parser.add_argument(
        '-su',
        '--setup',
        default=False,
        action='store_true',
        help='run db inits instead of source specific pipelines')
    parser.add_argument('-os',
                        '--one_step',
                        default=False,
                        action='store_true',
                        help='run for a single step instead of pipeline')
    parser.add_argument(
        '-p',
        '--step_parameters',
        default='',
        help='parameters to specify calls of a single step in pipeline')
    parser.add_argument(
        '-ne',
        '--no_ensembl',
        action='store_true',
        default=False,
        help='do not run ensembl in setup pipeline',
    )
    parser.add_argument('-d',
                        '--dependencies',
                        default='',
                        help='names of parent jobs that must finish')
    parser = cf.add_config_args(parser)
    args = parser.parse_args()

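    # split the original argv: positional and per-step values consumed here
    # are dropped, workflow flags are collected separately, and everything
    # else is passed along as config options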
    config_opts = sys.argv[1:]
    for opt in [
            args.start_step, '-p', '--step_parameters', args.step_parameters,
            '-d', '--dependencies', args.dependencies
    ]:
        if opt in config_opts:
            config_opts.remove(opt)
    workflow_opts = []
    for opt in ['-su', '--setup', '-os', '--one_step', '-ne', '--no_ensembl']:
        if opt in config_opts:
            config_opts.remove(opt)
            workflow_opts.append(opt)
    args.time_stamp = time.strftime('_%m-%d_%H-%M-%S')
    args.config_opts = " ".join(config_opts)
    args.workflow_opts = " ".join(workflow_opts)
    args.working_dir = args.working_dir.rstrip('/')
    args.storage_dir = args.storage_dir.rstrip('/')
    return args
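To illustrate the argv splitting above, a standalone trace with a made-up
command line (all values hypothetical):

# e.g. pipeline.py CHECK -p dip -su --working_dir /tmp
argv = ['CHECK', '-p', 'dip', '-su', '--working_dir', '/tmp']
config_opts = list(argv)
for opt in ['CHECK', '-p', '--step_parameters', 'dip',
            '-d', '--dependencies', '']:
    if opt in config_opts:
        config_opts.remove(opt)
workflow_opts = []
for opt in ['-su', '--setup', '-os', '--one_step', '-ne', '--no_ensembl']:
    if opt in config_opts:
        config_opts.remove(opt)
        workflow_opts.append(opt)
print(config_opts)    # ['--working_dir', '/tmp']
print(workflow_opts)  # ['-su']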