Example #1
# imports for the launchers, scheduler, and shared 'defaults' dict used below
# (assumed to come from pyina.launchers, pyina.schedulers, and pyina.mpi,
#  matching the module layout shown in the later examples)
from pyina.launchers import SerialMapper, Mpi, TorqueMpi
from pyina.schedulers import Torque
from pyina.mpi import defaults


def test_launcher():
    # configured launch commands for selected launchers
    serial = SerialMapper()
    print("non-python serial launch:", serial)
    settings = {'python':'', 'program':"hostname"}
    print(serial._launcher(settings), "\n")

    print("serial python launch:", serial)
    defaults['program'] = "tools.py"
    defaults['progargs'] = "12345"
    print(serial._launcher(defaults), "\n")

    qsub = Torque()
    serial.scheduler = qsub
    print("scheduled serial launch:", serial)
    settings = {'program':"tools.py", 'progargs':'12345'}
    print(serial._launcher(settings), "\n")

    mpi = Mpi()
    print("non-scheduled parallel launch:", mpi)
    print(mpi._launcher(settings), "\n")

    qsub.nodes = '4:ppn=2'
    mpi.nodes = mpi.njobs(qsub.nodes)
    print("scheduled parallel launch:", mpi, "| Torque")
    print(qsub._submit(mpi._launcher(settings)), "\n")

    mpi.scheduler = qsub
    print("scheduled parallel launch:", mpi)
    print(mpi._launcher(settings), "\n")

    _mpi = Mpi(scheduler=Torque(nodes='4:ppn=2'))
    print("scheduled parallel launch:", _mpi)
    print(_mpi._launcher(settings), "\n")

    _mpi = TorqueMpi(nodes='4:ppn=2')
    print("scheduled parallel launch:", _mpi)
    print(_mpi._launcher(settings), "\n")

    qsub.nodes = 1
    serial = SerialMapper()
    print("scheduled serial launch:", serial, "| Torque")
    print(qsub._submit(serial._launcher(settings)), "\n")
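
If the snippet above is saved as a standalone script, a standard entry-point guard (not part of the original listing) runs the test directly:

if __name__ == '__main__':
    test_launcher()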
Example #2
def __init__(self, *args, **kwds):
    # build a Torque scheduler from the constructor arguments, drop 'nodes'
    # (it now lives on the scheduler), and pass the rest to Slurm.__init__
    kwds['scheduler'] = Torque(*args, **kwds)
    kwds.pop('nodes', None)
    Slurm.__init__(self, **kwds)
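
Example #1 exercises the same composition through TorqueMpi(nodes='4:ppn=2'). Below is a minimal sketch of how such a combined launcher can be written from the pieces shown in these examples; the class name TorqueMpiSketch is hypothetical, while Mpi, Torque, and the scheduler keyword come from the listings above.

from pyina.launchers import Mpi
from pyina.schedulers import Torque

class TorqueMpiSketch(Mpi):
    """hypothetical combined launcher: build the Torque scheduler
    from the constructor arguments, then hand it to the Mpi launcher"""
    def __init__(self, *args, **kwds):
        kwds['scheduler'] = Torque(*args, **kwds)  # e.g. nodes='4:ppn=2'
        kwds.pop('nodes', None)  # 'nodes' now belongs to the scheduler
        Mpi.__init__(self, **kwds)

# roughly equivalent to TorqueMpi(nodes='4:ppn=2') in Example #1
launcher = TorqueMpiSketch(nodes='4:ppn=2')
print(launcher._launcher({'program': "tools.py", 'progargs': '12345'}))

Keeping the scheduler as a separate object is what lets the same launcher run either unscheduled (Mpi()) or under a queue (Mpi(scheduler=Torque(...))), as Example #1 demonstrates.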
Example #3
# Copyright (c) 2016-2018 The Uncertainty Quantification Foundation.
# License: 3-clause BSD.  The full license text is available at:
#  - https://github.com/uqfoundation/pyina/blob/master/LICENSE

from pyina.launchers import Mpi
from pyina.schedulers import Torque
from pyina.mpi import _save, _debug


#_debug(True)
#_save(True)
def host(id):
    # report the worker id and the hostname it ran on
    import socket
    return "Rank: %d -- %s" % (id, socket.gethostname())


print "Submit an mpi job to torque in the 'productionQ' queue..."
print "Using 15 items over 5 nodes and the scatter-gather strategy"
torque = Torque('5:ppn=2',
                queue='productionQ',
                timelimit='20:00:00',
                workdir='.')
pool = Mpi(scheduler=torque, scatter=True)
res = pool.map(host, range(15))
print(pool)
print('\n'.join(res))

print("hello from master")

# end of file
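
For a quick local check without submitting to Torque, the same map can be run through a non-scheduled Mpi pool, as in the launcher test of Example #1. A minimal sketch, assuming Mpi's first argument is the number of local workers (the count of 4 is arbitrary):

from pyina.launchers import Mpi

def host(id):
    import socket
    return "Rank: %d -- %s" % (id, socket.gethostname())

# non-scheduled parallel launch: map the 15 inputs over 4 local MPI workers
pool = Mpi(4)
print('\n'.join(pool.map(host, range(15))))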
Example #4
import glob
import os
import subprocess

import click
import numpy as np
import pandas as pd


def batch_deploy_preprocess(n_cores, subtype_output_dir, meffil, torque, run,
                            series, pc_qc_parameters_csv, use_cache, qc_only,
                            chunk_size):
    """Deploy multiple preprocessing jobs in series or parallel."""
    pheno_csvs = glob.glob(os.path.join(subtype_output_dir, '*', '*.csv'))
    opts = {'-n': n_cores}
    try:
        pc_qc_parameters = pd.read_csv(
            pc_qc_parameters_csv).drop_duplicates().set_index('subtype')
    except Exception:
        pc_qc_parameters = pd.DataFrame(
            [[name, -1, 0.05, 0.05, 0.05, 0.05, 5, -2]
             for name in np.vectorize(lambda x: x.split('/')[-2])(pheno_csvs)],
            columns=[
                'subtype', 'n_pcs', 'p_beadnum_samples', 'p_detection_samples',
                'p_beadnum_cpgs', 'p_detection_cpgs', 'sex_sd', 'sex_cutoff'
            ]).drop_duplicates().set_index('subtype')
    if meffil:
        opts['-m'] = ''
    if use_cache:
        opts['-u'] = ''
    if qc_only:
        opts['-qc'] = ''
    commands = []
    for pheno_csv in pheno_csvs:
        pheno_path = os.path.abspath(pheno_csv)
        subtype = pheno_path.split('/')[-2]
        opts['-pc'] = int(pc_qc_parameters.loc[subtype, 'n_pcs'])
        opts['-bns'] = pc_qc_parameters.loc[subtype, 'p_beadnum_samples']
        opts['-pds'] = pc_qc_parameters.loc[subtype, 'p_detection_samples']
        opts['-bnc'] = pc_qc_parameters.loc[subtype, 'p_beadnum_cpgs']
        opts['-pdc'] = pc_qc_parameters.loc[subtype, 'p_detection_cpgs']
        opts['-sc'] = pc_qc_parameters.loc[subtype, 'sex_cutoff']
        opts['-sd'] = pc_qc_parameters.loc[subtype, 'sex_sd']
        opts['-i'] = pheno_path[:pheno_path.rfind('/') + 1]
        opts['-o'] = pheno_path[:pheno_path.rfind('/') + 1] + 'methyl_array.pkl'
        command = 'pymethyl-preprocess preprocess_pipeline {}'.format(' '.join(
            '{} {}'.format(k, v) for k, v in opts.items()))
        commands.append(command)
    if not torque:
        if not series and chunk_size != -1:
            #commands = np.array_split(commands,len(commands)//chunk_size)
            print(commands)
            with open('commands.txt', 'w') as f:
                f.write('\n'.join(commands))
            subprocess.call(
                'cat commands.txt | xargs -L 1 -I CMD -P {} bash -c CMD'.format(
                    chunk_size),
                shell=True)  # https://www.gnu.org/software/parallel/sem.html
            """for command_list in commands:
                subprocess.call('run_parallel {}'.format(' '.join(['"{}"'.format(command) for command in command_list])),shell=True)"""
        else:
            for command in commands:
                if not series:
                    command = "nohup {} &".format(command)
                if not run:
                    click.echo(command)
                else:
                    subprocess.call(command, shell=True)
    else:
        def run_command(command):
            # wrap each command with the cluster's module/env setup before running
            return subprocess.call(
                'module load cuda && module load python/3-Anaconda && '
                'source activate py36 && {}'.format(command),
                shell=True)
        from pyina.schedulers import Torque
        from pyina.launchers import Mpi
        config = {
            'nodes': '1:ppn=6',
            'queue': 'default',
            'timelimit': '01:00:00'
        }
        torque = Torque(**config)
        pool = Mpi(scheduler=torque)
        pool.map(run_command, commands)
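
For reference, a hypothetical invocation of batch_deploy_preprocess; the keyword names come from the signature above, but every value below is illustrative rather than taken from the original project:

# illustrative call: deploy per-subtype preprocessing through the Torque branch
batch_deploy_preprocess(
    n_cores=6,
    subtype_output_dir='./preprocess_outputs/',
    meffil=False,
    torque=True,
    run=True,
    series=False,
    pc_qc_parameters_csv='pc_qc_parameters.csv',
    use_cache=False,
    qc_only=False,
    chunk_size=-1)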