Example no. 1
import logging
import sys

log = logging.getLogger(__name__)  # module-level logger used inside choose_pool


def choose_pool(mpi=False, processes=1, **kwargs):
    """
    Extends the capabilities of the schwimmbad.choose_pool method.

    It handles the `use_dill` parameter in `kwargs`, which would otherwise raise an error when
    `processes > 1`. Any worker in the returned multiprocessing pool (i.e. when `processes > 1`)
    also defaults to using `dill` for serialization.

    The schwimmbad requirement relies on the master branch (as specified in requirements.txt);
    the `use_dill` functionality can raise an error if that requirement is not met.

    Choose between the different pools given options from, e.g., argparse.

    Parameters
    ----------
    mpi : bool, optional
        Use the MPI processing pool, :class:`~schwimmbad.mpi.MPIPool`. By
        default (``False``), the :class:`~schwimmbad.serial.SerialPool` is used.
    processes : int, optional
        Use the multiprocessing pool,
        :class:`~schwimmbad.multiprocessing.MultiPool`, with this number of
        processes. By default (``processes=1``), the
        :class:`~schwimmbad.serial.SerialPool` is used.

    Any additional kwargs are passed in to the pool class initializer selected by the arguments.

    """
    # Imports moved here to avoid crashing at import time if dependencies
    # are missing
    from lenstronomy.Sampling.Pool.multiprocessing import MultiPool
    from schwimmbad.serial import SerialPool
    from schwimmbad.mpi import MPIPool

    if mpi:
        if not MPIPool.enabled():
            raise SystemError("Tried to run with MPI but MPIPool not enabled.")
        try:
            pool = MPIPool(**kwargs)
        except Exception as e:
            raise ImportError(
                'MPIPool of schwimmbad cannot be generated. lenstronomy uses a specific branch of '
                'schwimmbad specified in the requirements.txt. Make sure you are using the correct '
                'version of schwimmbad. In particular the "use_dill" argument is not supported in the '
                'pypi version 0.3.0.') from e
        if not pool.is_master():
            pool.wait()
            sys.exit(0)

        log.info("Running with MPI on {0} cores".format(pool.size))
        return pool

    elif processes != 1 and MultiPool.enabled():
        if 'use_dill' in kwargs:
            # schwimmbad MultiPool does not support dill so we remove this option from the kwargs
            _ = kwargs.pop('use_dill')
        log.info("Running with MultiPool on {0} cores".format(processes))
        return MultiPool(processes=processes, **kwargs)

    else:
        log.info("Running with SerialPool")
        return SerialPool(**kwargs)
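A minimal usage sketch of choose_pool (illustrative only: the import path lenstronomy.Sampling.Pool.pool and the work helper are assumptions, not part of the example above):

from lenstronomy.Sampling.Pool.pool import choose_pool


def work(x):
    # hypothetical task function; it must be defined at module level so it pickles
    return x ** 2


if __name__ == "__main__":
    # processes > 1 selects the multiprocessing MultiPool; mpi=True would select
    # MPIPool instead, and worker ranks would then block inside choose_pool
    pool = choose_pool(mpi=False, processes=4)
    results = list(pool.map(work, range(10)))
    pool.close()
    print(results)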
Example no. 2
def test_mpi():
    with MPIPool() as pool:
        all_tasks = [[random.random() for i in range(1000)]]

        # test map alone
        for tasks in all_tasks:
            results = pool.map(_function, tasks)
            for r1, r2 in zip(results, [_function(x) for x in tasks]):
                assert isclose(r1, r2)

            assert len(results) == len(tasks)

        # test map with callback
        for tasks in all_tasks:
            results = pool.map(_function, tasks, callback=_callback)
            for r1, r2 in zip(results, [_function(x) for x in tasks]):
                assert isclose(r1, r2)

            assert len(results) == len(tasks)
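The helpers _function, _callback and isclose come from schwimmbad's own test module (imported in Example no. 4 below); a self-contained, purely illustrative stand-in could look like this:

import math


def _function(x):
    # hypothetical stand-in for the worker used above: any cheap,
    # deterministic, picklable function will do
    return x * 2.0 + 1.0


def _callback(result):
    # invoked on the master process for every result returned by pool.map
    assert result is not None


def isclose(a, b, rel_tol=1e-9, abs_tol=0.0):
    # thin wrapper around the standard-library float comparison
    return math.isclose(a, b, rel_tol=rel_tol, abs_tol=abs_tol)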
Example no. 3
def test_mpi_with_dill():
    pool = MPIPool(use_dill=True)

    pool.wait(lambda: sys.exit(0))

    all_tasks = [[random.random() for i in range(1000)]]

    # test map alone
    for tasks in all_tasks:
        results = pool.map(_function, tasks)
        for r1, r2 in zip(results, [_function(x) for x in tasks]):
            assert isclose(r1, r2)

        assert len(results) == len(tasks)

    # test map with callback
    for tasks in all_tasks:
        results = pool.map(_function, tasks, callback=_callback)
        for r1, r2 in zip(results, [_function(x) for x in tasks]):
            assert isclose(r1, r2)

        assert len(results) == len(tasks)

    pool.close()
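Since use_dill=True switches the MPI serialization to dill, task functions that plain pickle rejects (lambdas, closures) can also be mapped; a minimal sketch, assuming the master-branch schwimmbad with dill support is installed:

import sys

from schwimmbad.mpi import MPIPool

pool = MPIPool(use_dill=True)
pool.wait(lambda: sys.exit(0))  # worker ranks block here and exit once the pool is closed

# with dill serialization a lambda is an acceptable task function,
# which the default pickle-based transport would refuse
squares = pool.map(lambda x: x * x, range(8))
print(squares)
pool.close()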
Example no. 4
"""

# Standard library
import random
import sys

# Project
from schwimmbad.mpi import MPIPool, MPI
from schwimmbad.tests.test_pools import isclose, _function

def callback(x):
    assert MPI.COMM_WORLD.Get_rank() == 0

if MPI is not None and MPI.COMM_WORLD.Get_size() > 1:

    pool = MPIPool()

    pool.wait(lambda: sys.exit(0))

    all_tasks = [[random.random() for i in range(1000)]]

    # test map alone
    for tasks in all_tasks:
        results = pool.map(_function, tasks)
        for r1, r2 in zip(results, [_function(x) for x in tasks]):
            assert isclose(r1, r2)

    # test map with callback
    for tasks in all_tasks:
        results = pool.map(_function, tasks, callback=callback)
        for r1, r2 in zip(results, [_function(x) for x in tasks]):
            assert isclose(r1, r2)

    pool.close()
Example no. 5
def test_mpi_worker_error():
    with MPIPool() as pool:
        tasks = [random.random() for i in range(1000)]
        pool.map(worker_error, tasks)  # should fail
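worker_error is not shown in this excerpt; a hypothetical stand-in is simply a task function that raises, which is all the test needs in order to exercise the failure path:

def worker_error(task):
    # hypothetical stand-in: raising from a worker is exactly what the
    # test above relies on when it notes that the map "should fail"
    raise ValueError("intentional failure for task {!r}".format(task))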
Example no. 6
pars.set_matter_power(redshifts=[0.], kmax=200.0)
pars.NonLinearModel.set_params(halofit_version='takahashi')
camb.set_feedback_level(level=100)
results = camb.get_results(pars)

if shellnums is None:
    shellnum_min = int(
        results.comoving_radial_distance(zmin) * h // shellwidth)
    shellnum_max = int(
        results.comoving_radial_distance(zmax) * h // shellwidth + 1)
    shellnums = list(range(shellnum_min, shellnum_max + 1))
else:
    shellnums = list(map(int, shellnums.split(",")))

try:
    pool = MPIPool()
except Exception:
    # no usable MPI environment (e.g. only one rank): fall back to serial execution
    pool = None

if is_cutsky:
    args = zip(product([1, 2, 3], shellnums), repeat(snapshot_cutsky))
else:
    args = zip(product([1, 2, 3], shellnums), repeat(None))

if pool is not None:
    if not pool.is_master():
        pool.wait()
        sys.exit(0)
    pool.map(generate_lightcone_shell, args)
else:
    # map() is lazy in Python 3, so force evaluation with list()
    list(map(generate_lightcone_shell, args))
pars.set_matter_power(redshifts=[0.], kmax=200.0)
pars.NonLinearModel.set_params(halofit_version='takahashi')
camb.set_feedback_level(level=100)
results = camb.get_results(pars)

if shellnums is None:
    shellnum_min = int(
        results.comoving_radial_distance(zmin) * h // shellwidth)
    shellnum_max = int(
        results.comoving_radial_distance(zmax) * h // shellwidth + 1)
    shellnums = list(range(shellnum_min, shellnum_max + 1))
else:
    shellnums = list(map(int, shellnums.split(",")))

try:
    pool = MPIPool()
except Exception:
    # no usable MPI environment (e.g. only one rank): fall back to serial execution
    pool = None

### Loop over galtypes and shells for galaxies and randoms
args = product(shellnums, nz_pars, [nrandoms], [dir_out],
               [lightcone_name_template], [output_name_template],
               [random_name_template])
if pool is not None:
    if not pool.is_master():
        pool.wait()
        sys.exit(0)
    else:
        pool.map(process_shell, args)
else:
    # map() is lazy in Python 3, so force evaluation with list()
    list(map(process_shell, args))
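The try/except fallback plus the is_master()/wait() dance is repeated verbatim in both snippets above; a small helper that factors the pattern out (a sketch against schwimmbad's public API only, with run_parallel being a name introduced here) could look like this:

import sys

from schwimmbad.mpi import MPIPool


def run_parallel(func, args):
    """Map func over args with MPIPool when available, serially otherwise."""
    try:
        pool = MPIPool()
    except Exception:
        # no MPI (or a single rank): fall back to a plain serial loop
        return [func(a) for a in args]

    if not pool.is_master():
        # worker ranks sit in the event loop and exit when the pool closes
        pool.wait()
        sys.exit(0)

    results = pool.map(func, args)
    pool.close()
    return results

With this helper, the two call sites above collapse to run_parallel(generate_lightcone_shell, args) and run_parallel(process_shell, args).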