def run_tests(mpi_runner, runner_name, test_list_exargs, exp_list):
    """Configure a mocked-up system executor and run the test ensemble.

    :param mpi_runner: Runner to select (mpich, openmpi, aprun, srun, jsrun).
    :param runner_name: Replaces the run command if not None.
    :param test_list_exargs: Extra test entries appended to the base list.
    :param exp_list: Expected results for the combined test list.
    """
    # Describe the mocked-up system the executor should assume.
    system_info = dict(
        mpi_runner=mpi_runner,
        runner_name=runner_name,
        cores_on_node=(16, 64),  # (physical cores, logical cores)
        node_file=node_file,     # Name of file containing a node-list
    )

    executor = MPIExecutor(central_mode=True,
                           auto_resources=True,
                           custom_info=system_info)
    executor.register_calc(full_path=sim_app, calc_type='sim')

    # Combine base and extra tests, then hand them to the sim function.
    sim_specs['user'] = {'tests': test_list_base + test_list_exargs,
                         'expect': exp_list}

    # Perform the run
    H, pinfo, flag = libE(sim_specs, gen_specs, exit_criteria,
                          persis_info, libE_specs=libE_specs)
# ---- Example #2 ----
def register_rsmpi_executor(hosts='auto', cores_on_node=None, **kwargs):
    """
    Create an MPIExecutor that can use rsmpi. The executor is returned and may be used to register calculations
    for workers like any other libEnsemble executor.
    :param hosts: (str or int) If 'auto' then all rsmpi resources are detected and used. Otherwise specify number
                               of hosts as an int.
    :param cores_on_node: (tuple) Defaults to (16, 16). Number of physical cores and logical cores on the hosts.
    :param kwargs: Any other kwargs given will be passed to the MPIExecutor that is created.

    :return: libensemble.executors.mpi_executor.MPIExecutor object
    """

    # isinstance replaces the type(x) == T anti-pattern; bool must be
    # rejected explicitly because it is a subclass of int (the original
    # exact-type check also rejected bools). The redundant
    # `hosts = int(hosts)` for int inputs has been dropped.
    if isinstance(hosts, str):
        # NOTE(review): any string triggers auto-detection, not just 'auto',
        # matching the original behavior.
        hosts = _detect_rsmpi_resources()
    elif isinstance(hosts, bool) or not isinstance(hosts, int):
        raise TypeError('hosts must be str or int')

    # Write the node file used by the executor below.
    _generate_rsmpi_node_file(hosts)

    # Falsy values (None, (), 0) fall back to the default, as before.
    cores_on_node = cores_on_node or (16, 16)
    customizer = {
        'mpi_runner': 'mpich',
        'runner_name': 'libensemble-rsmpi',
        'cores_on_node': cores_on_node,
        'node_file': 'libe_nodes'
    }

    jobctrl = MPIExecutor(**kwargs, custom_info=customizer)
    # Set longer fail time - rsmpi is relatively slow to start
    jobctrl.fail_time = 8

    return jobctrl
# ---- Example #3 ----
def setup_executor_fakerunner():
    """Create an MPIExecutor configured with a non-existent custom MPI runner.

    Builds the simulation binary if needed, then registers it with an
    executor whose runner command does not exist - used to exercise the
    executor's failure path.
    """
    # sim_app = './my_simtask.x'
    if not os.path.isfile(sim_app):
        build_simfunc()

    if USE_BALSAM:
        # Fixed typo in user-facing message: 'Balsom' -> 'Balsam'.
        print('Balsam does not support this feature - running MPIExecutor')

    # Create non-existent MPI runner.
    customizer = {
        'mpi_runner': 'custom',
        'runner_name': 'non-existent-runner',
        'subgroup_launch': True
    }

    from libensemble.executors.mpi_executor import MPIExecutor
    exctr = MPIExecutor(auto_resources=False, custom_info=customizer)
    exctr.register_calc(full_path=sim_app, calc_type='sim')
# ---- Example #4 ----
def setup_executor_noapp():
    """Create an executor without registering any application.

    Builds the simulation binary on demand and instantiates either the
    Balsam or the plain MPI executor. For the MPI case, aborts if the
    fresh executor already carries a worker ID.
    """
    # Compile the simulation program if it is not already present.
    if not os.path.isfile(sim_app):
        build_simfunc()

    if USE_BALSAM:
        from libensemble.executors.balsam_executor import BalsamMPIExecutor as _Executor
        exctr = _Executor(auto_resources=False)
    else:
        from libensemble.executors.mpi_executor import MPIExecutor as _Executor
        exctr = _Executor(auto_resources=False)
        # A brand-new executor should not yet be bound to any worker.
        if exctr.workerID is not None:
            sys.exit("Something went wrong in creating Executor")
# ---- Example #5 ----
def setup_executor_noreg():
    """Create an executor and register the simulation application.

    Builds the simulation binary if missing, then creates either a
    Balsam-backed or plain MPI executor and registers the sim app.
    """
    # Compile the simulation program if it is not already present.
    if not os.path.isfile(sim_app):
        build_simfunc()

    # Pick the executor class, then instantiate it once.
    if USE_BALSAM:
        from libensemble.executors.balsam_executor import BalsamMPIExecutor as _Executor
    else:
        from libensemble.executors.mpi_executor import MPIExecutor as _Executor
    exctr = _Executor(auto_resources=False)

    exctr.register_calc(full_path=sim_app, calc_type='sim')
# ---- Example #6 ----
    mess_resources = 'Auto_resources set to True'

# Report requested vs. available cores on the master process only.
if is_master:
    print('\nCores req: {} Cores avail: {}\n  {}\n'.format(
        cores_all_tasks, logical_cores, mess_resources))

sim_app = './my_simtask.x'
# Build the simulation executable on demand.
if not os.path.isfile(sim_app):
    build_simfunc()

# Select the executor implementation: Balsam-backed or plain MPI.
if USE_BALSAM:
    from libensemble.executors.balsam_executor import BalsamMPIExecutor
    exctr = BalsamMPIExecutor(auto_resources=use_auto_resources)
else:
    from libensemble.executors.mpi_executor import MPIExecutor
    exctr = MPIExecutor(auto_resources=use_auto_resources)
# Register the simulation executable with the executor.
exctr.register_calc(full_path=sim_app, calc_type='sim')

# if nworkers == 3:
#    CalcInfo.keep_worker_stat_files = True # Testing this functionality
# else:
#    CalcInfo.keep_worker_stat_files = False # Testing this functionality

# Simulation specification: sim_f reads 'x' and returns 'f' plus a
# calculation status ('cstat'); per-task core count is passed via 'user'.
sim_specs = {
    'sim_f': sim_f,
    'in': ['x'],
    'out': [('f', float), ('cstat', int)],
    'user': {
        'cores': cores_per_task
    }
}
# ---- Example #7 ----
    dest='machinefile',
    help=
    'A machine file containing ordered list of nodes required for each libE rank'
)
args = parser.parse_args()

# Read the machine file if one was given; otherwise fall back to
# repeating the local processor name once per MPI rank.
try:
    libE_machinefile = open(args.machinefile).read().splitlines()
except (TypeError, NameError):
    if is_master:
        print("WARNING: No machine file provided - defaulting to local node")
    libE_machinefile = [MPI.Get_processor_name()] * MPI.COMM_WORLD.Get_size()

# Use the packaged helloworld script as the simulation application.
sim_app = pkg_resources.resource_filename('libensemble.sim_funcs',
                                          'helloworld.py')
exctr = MPIExecutor()
exctr.register_calc(full_path=sim_app, calc_type='sim')

n = 2
# Simulation specification: the node list is passed to sim_f via 'user'.
sim_specs = {
    'sim_f': sim_f,
    'in': ['x', 'num_nodes', 'ranks_per_node'],
    'out': [('f', float)],
    'user': {
        'nodelist': libE_machinefile
    }
}

gen_specs = {
    'gen_f':
    gen_f,
# temp
sim_app = './my_simtask.x'

# Build the test simulation executable if it does not exist yet.
if not os.path.isfile(sim_app):
    build_simfunc()

USE_BALSAM = False  # Take as arg
# USE_BALSAM = True # Take as arg

# Create and add exes to registry
if USE_BALSAM:
    # Fixed import path: was 'libensemble.baslam_executor' (typo), which
    # would raise ImportError; the Balsam executor lives under
    # libensemble.executors.balsam_executor, matching every other use
    # in this file.
    from libensemble.executors.balsam_executor import BalsamMPIExecutor
    exctr = BalsamMPIExecutor()
else:
    from libensemble.executors.mpi_executor import MPIExecutor
    exctr = MPIExecutor()

exctr.register_calc(full_path=sim_app, calc_type='sim')

# Alternative to IF could be using eg. fstring to specify: e.g:
# EXECUTOR = 'Balsam'
# registry = f"{EXECUTOR}Register()"

# --------------- Worker: sim func -------------------------------------------------------------
# Should work with Balsam or not

# Can also use an internal iterable list of tasks in EXECUTOR - along with all_done func etc...

def polling_loop(exctr, task_list, timeout_sec=40.0, delay=1.0):
    import time
# ---- Example #9 ----
if is_master:
    print('\nRunning with {} workers\n'.format(nworkers))

# Build forces.x if it is missing and the build script is present.
if not os.path.isfile('./forces.x'):
    if os.path.isfile('./build_forces.sh'):
        import subprocess
        subprocess.check_call(['./build_forces.sh'])
sim_app = os.path.abspath('./forces.x')

# Create executor and register sim to it.
# NOTE(review): this fragment is a Jinja template - the {{ ... }}
# executor arguments are substituted at render time.
if USE_BALSAM:
    from libensemble.executors.balsam_executor import BalsamMPIExecutor
    exctr = BalsamMPIExecutor({{ balsam_exctr_args }})  # Use allow_oversubscribe=False to prevent oversubscription
else:
    from libensemble.executors.mpi_executor import MPIExecutor
    exctr = MPIExecutor({{ mpi_exctr_args }})  # Use allow_oversubscribe=False to prevent oversubscription
exctr.register_calc(full_path=sim_app, calc_type='sim')

# Note: Attributes such as kill_rate are to control forces tests, this would not be a typical parameter.

# State the objective function, its arguments, output, and necessary parameters (and their sizes)
sim_specs = {'sim_f': run_forces,         # Function whose output is being minimized
             'in': ['x'],                 # Name of input for sim_f
             'out': [('energy', float)],  # Name, type of output from sim_f
             'user': {'keys': ['seed'],
                      {%- if cores is defined %} 'cores': {{ cores }}, {% endif %}
                      'sim_particles': {{ num_sim_particles }},
                      'sim_timesteps': 5,
                      'sim_kill_minutes': 10.0,
                      'particle_variance': 0.2,
                      'kill_rate': 0.5,
        f.flush()
        os.fsync(f)
# Synchronize all processes before proceeding when using MPI comms.
if comms == 'mpi':
    libE_specs['comm'].Barrier()

# Mock up system
customizer = {
    'mpi_runner': 'mpich',  # Select runner: mpich, openmpi, aprun, srun, jsrun
    'runner_name': 'mpirun',  # Runner name: Replaces run command if not None
    'cores_on_node': (16, 64),  # Tuple (physical cores, logical cores)
    'node_file': node_file
}  # Name of file containing a node-list

# Create executor and register sim to it.
exctr = MPIExecutor(zero_resource_workers=in_place,
                    central_mode=True,
                    auto_resources=True,
                    custom_info=customizer)
exctr.register_calc(full_path=sim_app, calc_type='sim')

# A persistent worker plus at least one sim worker are required.
if nworkers < 2:
    sys.exit(
        "Cannot run with a persistent worker if only one worker -- aborting..."
    )

n = 2
# Simulation specification: run-line checker reads 'x' and returns 'f'.
sim_specs = {
    'sim_f': runline_check,
    'in': ['x'],
    'out': [('f', float)],
}
# ---- Example #11 ----
def test_task_funcs():
    """Exercise the Task object's workdir, file, and timing helpers.

    Registers a dummy app with an MPIExecutor, creates Task objects with
    and without a workdir, and checks existence/read helpers before and
    after the expected files are created. Also checks timing bookkeeping.

    NOTE(review): this test chdirs into a scratch directory and removes
    it at the end; an assertion failure mid-test leaves the process in
    the scratch directory.
    """
    dummyappname = os.getcwd() + '/myapp.x'
    exctr = MPIExecutor(auto_resources=False)
    exctr.register_calc(full_path=dummyappname, calc_type='gen', desc='A dummy calc')
    exctr.register_calc(full_path=dummyappname, calc_type='sim', desc='A dummy calc')

    # Work in a fresh scratch directory.
    dirname = 'dir_taskc_tests'
    if os.path.exists(dirname):
        shutil.rmtree(dirname)
    os.mkdir(dirname)
    os.chdir(dirname)
    myworkdir = os.getcwd()

    # First try no app - check exception raised?
    jc_triggered = False
    try:
        _ = Task(workdir=myworkdir, stdout='stdout.txt', stderr='stderr.txt')
    except ExecutorException:
        jc_triggered = True

    assert jc_triggered, "Failed to raise exception if create task with no app"

    # Now with no workdir specified
    dummyapp = exctr.gen_default_app
    task1 = Task(app=dummyapp, stdout='stdout.txt', stderr='stderr.txt')
    wd_exist = task1.workdir_exists()
    assert not wd_exist  # , "No workdir specified, yet workdir_exists does not return False"
    stdout_exist = task1.stdout_exists()
    assert not stdout_exist
    f_exist = task1.file_exists_in_workdir('running_output.txt')
    assert not f_exist

    # Create task properly specified
    task2 = Task(app=dummyapp, workdir=myworkdir, stdout='stdout.txt', stderr='stderr.txt')

    # Workdir does exist
    wd_exist = task2.workdir_exists()
    assert wd_exist

    # Files do not exist
    stdout_exist = task2.stdout_exists()
    assert not stdout_exist
    stderr_exist = task2.stderr_exists()
    assert not stderr_exist
    f_exist = task2.file_exists_in_workdir('running_output.txt')
    assert not f_exist

    # Reading a missing stdout file must raise ValueError.
    valerr_triggered = False
    try:
        task2.read_stdout()
    except ValueError:
        valerr_triggered = True
    assert valerr_triggered

    # Reading a missing workdir file must raise ValueError.
    valerr_triggered = False
    try:
        task2.read_file_in_workdir('running_output.txt')
    except ValueError:
        valerr_triggered = True
    assert valerr_triggered

    # Now create files and check positive results
    with open("stdout.txt", "w") as f:
        f.write('This is stdout')
    with open("stderr.txt", "w") as f:
        f.write('This is stderr')
    with open("running_output.txt", "w") as f:
        f.write('This is running output')

    # task2 = Task(app = dummyapp, workdir = myworkdir, stdout = 'stdout.txt')
    # wd_exist = task2.workdir_exists()
    # assert wd_exist
    stdout_exist = task2.stdout_exists()
    assert stdout_exist
    stderr_exist = task2.stderr_exists()
    assert stderr_exist
    f_exist = task2.file_exists_in_workdir('running_output.txt')
    assert f_exist
    assert 'This is stdout' in task2.read_stdout()
    assert 'This is stderr' in task2.read_stderr()
    assert 'This is running output' in task2.read_file_in_workdir('running_output.txt')

    # Check if workdir does not exist
    task2.workdir = task2.workdir + '/bubbles'
    wd_exist = task2.workdir_exists()
    assert not wd_exist

    # Check timing
    # Before submission, all timing fields are falsy; calc_task_timing()
    # must not set them until submit_time exists.
    assert not task2.submit_time and not task2.runtime and not task2.total_time
    task2.calc_task_timing()
    assert not task2.submit_time and not task2.runtime and not task2.total_time
    task2.submit_time = time.time()
    task2.calc_task_timing()
    assert task2.runtime is not None and task2.runtime == task2.total_time
    # A second call must not change already-computed timings.
    save_runtime, save_total_time = task2.runtime, task2.total_time
    task2.calc_task_timing()
    assert save_runtime == task2.runtime
    assert save_total_time == task2.total_time

    # Clean up
    os.chdir('../')
    shutil.rmtree(dirname)
    from libensemble.alloc_funcs.persistent_aposmm_alloc \
        import persistent_aposmm_alloc as alloc_f
else:
    print("you shouldn' hit that")
    sys.exit()

from libensemble.tools import parse_args, save_libE_output, \
    add_unique_random_streams
from libensemble import libE_logger

# NOTE(review): this fragment is a Jinja template - the
# zero_resource_workers keyword is rendered in only when the template
# variable is defined.
if USE_BALSAM:
    from libensemble.executors.balsam_executor import BalsamMPIExecutor
    exctr = BalsamMPIExecutor(central_mode=True{% if zero_resource_workers is defined %}, zero_resource_workers=[{{ zero_resource_workers }}]{% endif %})
else:
    from libensemble.executors.mpi_executor import MPIExecutor
    exctr = MPIExecutor(central_mode=True{% if zero_resource_workers is defined %}, zero_resource_workers=[{{ zero_resource_workers }}]{% endif %})

libE_logger.set_level('DEBUG')

nworkers, is_master, libE_specs, _ = parse_args()

# Set to full path of warp executable
sim_app = os.path.abspath({{ sim_app }})

# Problem dimension. This is the number of input parameters exposed,
# that LibEnsemble will vary in order to minimize a single output parameter.
n = 4

exctr.register_calc(full_path=sim_app, calc_type='sim')

# State the objective function, its arguments, output, and necessary parameters
# ---- Example #13 ----
#!/usr/bin/env python
import os
import numpy as np
from tutorial_forces_simf import run_forces  # Sim func from current dir

from libensemble.libE import libE
from libensemble.gen_funcs.sampling import uniform_random_sample
from libensemble.tools import parse_args, add_unique_random_streams
from libensemble.executors.mpi_executor import MPIExecutor

nworkers, is_master, libE_specs, _ = parse_args()  # Convenience function

# Create executor and register sim to it
exctr = MPIExecutor(auto_resources=False)  # Use auto_resources=False to oversubscribe

# Register simulation executable with executor
# NOTE(review): forces.x is assumed to be pre-built in the current
# directory - no build step here, unlike other fragments in this file.
sim_app = os.path.join(os.getcwd(), 'forces.x')
exctr.register_calc(full_path=sim_app, calc_type='sim')

# State the sim_f, its arguments, output, and parameters (and their sizes)
sim_specs = {'sim_f': run_forces,         # sim_f, imported above
             'in': ['x'],                 # Name of input for sim_f
             'out': [('energy', float)],  # Name, type of output from sim_f
             'user': {'simdir_basename': 'forces',  # User parameters for the sim_f
                      'keys': ['seed'],
                      'cores': 2,
                      'sim_particles': 1e3,
                      'sim_timesteps': 5,
                      'sim_kill_minutes': 10.0,
                      'particle_variance': 0.2,
                      'kill_rate': 0.5}
else:
    print("you shouldn' hit that")
    sys.exit()

libE_logger.set_level('INFO')

nworkers, is_master, libE_specs, _ = parse_args()

# Set to full path of warp executable
# NOTE(review): path comes from a machine_specs mapping defined elsewhere.
sim_app = machine_specs['sim_app']

# Problem dimension. This is the number of input parameters exposed,
# that LibEnsemble will vary in order to minimize a single output parameter.
n = 4

# NOTE(review): central_mode=True matches the other executor setups in
# this file; see libEnsemble docs for its exact resource semantics.
exctr = MPIExecutor(central_mode=True)
exctr.register_calc(full_path=sim_app, calc_type='sim')

# State the objective function, its arguments, output, and necessary parameters
# (and their sizes). Here, the 'user' field is for the user's (in this case,
# the simulation) convenience. Feel free to use it to pass number of nodes,
# number of ranks per note, time limit per simulation etc.
sim_specs = {
    # Function whose output is being minimized. The parallel WarpX run is
    # launched from run_WarpX.
    'sim_f':
    run_warpx,
    # Name of input for sim_f, that LibEnsemble is allowed to modify.
    # May be a 1D array.
    'in': ['x'],
    'out': [
sim_app = os.path.join(os.getcwd(), 'forces.x')

# Normally would be pre-compiled
# Build forces.x on demand if the build script is available.
if not os.path.isfile('forces.x'):
    if os.path.isfile('build_forces.sh'):
        import subprocess
        subprocess.check_call(['./build_forces.sh'])

# Create executor and register sim to it.
if USE_BALSAM:
    from libensemble.executors.balsam_executor import BalsamMPIExecutor
    exctr = BalsamMPIExecutor(
    )  # Use allow_oversubscribe=False to prevent oversubscription
else:
    from libensemble.executors.mpi_executor import MPIExecutor
    exctr = MPIExecutor(
    )  # Use allow_oversubscribe=False to prevent oversubscription
exctr.register_calc(full_path=sim_app, calc_type='sim')

# Note: Attributes such as kill_rate are to control forces tests, this would not be a typical parameter.

# State the objective function, its arguments, output, and necessary parameters (and their sizes)
sim_specs = {
    'sim_f': run_forces,  # Function whose output is being minimized
    'in': ['x'],  # Name of input for sim_f
    'out': [('energy', float)],  # Name, type of output from sim_f
    'user': {
        'keys': ['seed'],
        'cores': 2,
        'sim_particles': 1e3,
        'sim_timesteps': 5,
        'sim_kill_minutes': 10.0,
# ---- Example #16 ----
# The number of concurrent evaluations of the objective function will be N-1.
# """

# Do not change these lines - they are parsed by run-tests.sh
# TESTSUITE_COMMS: mpi local tcp
# TESTSUITE_NPROCS: 2 4

import numpy as np

# Import libEnsemble items for this test
from libensemble.libE import libE
from libensemble.sim_funcs.comms_testing import float_x1000 as sim_f
from libensemble.gen_funcs.sampling import uniform_random_sample as gen_f
from libensemble.tools import parse_args, save_libE_output, add_unique_random_streams
from libensemble.executors.mpi_executor import MPIExecutor  # Only used to get workerID in float_x1000
# Executor created only so float_x1000 can read the workerID.
exctr = MPIExecutor(auto_resources=False)

nworkers, is_master, libE_specs, _ = parse_args()

array_size = int(1e6)  # Size of large array in sim_specs
rounds = 2  # Number of work units for each worker
sim_max = nworkers * rounds

# Simulation specification: sim_f returns a large float array plus a scalar.
sim_specs = {
    'sim_f': sim_f,
    'in': ['x'],
    'out': [('arr_vals', float, array_size), ('scal_val', float)]
}

gen_specs = {
    'gen_f': gen_f,