Example no. 1
def get_pool(mpi=False, threads=1):
    if mpi:  # using MPI
        from mpipool import MPIPool
        pool = MPIPool()
        pool.start()
        if not pool.is_master():
            sys.exit(0)
    elif threads > 1:  # using multiprocessing
        from multiprocessing import Pool
        pool = Pool(processes=threads)
    else:
        raise RuntimeError("Wrong arguments: either mpi=True or threads>1.")
    return pool
Example no. 2
def get_pool(mpi=False, threads=None, **kwargs):
    """
    Get a pool object to pass to emcee for parallel processing.
    If mpi is False and threads is None, pool is None.

    Parameters
    ----------
    mpi : bool
        Use MPI or not. If specified, ignores the threads kwarg.
    threads : int (optional)
        If mpi is False and threads is specified, use a Python
        multiprocessing pool with the specified number of threads.
    **kwargs
        Any other keyword arguments are passed through to the pool
        initializers.

    """

    if mpi:
        from mpipool import MPIPool

        # Initialize the MPI pool
        pool = MPIPool(**kwargs)

        # Make sure the thread we're running on is the master
        if not pool.is_master():
            pool.wait()
            sys.exit(0)
        logger.debug("Running with MPI...")

    elif threads is not None and threads > 1:
        logger.debug("Running with multiprocessing on {} cores..."
                     .format(threads))
        pool = multiprocessing.Pool(threads, **kwargs)

    else:
        logger.debug("Running serial...")
        pool = SerialPool(**kwargs)

    return pool
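A sketch of how the returned pool might be handed to emcee (this assumes emcee's standard EnsembleSampler interface and that the snippet's missing module-level imports, sys, logger, multiprocessing and a SerialPool implementation, are in place; log_prob and the walker setup below are illustrative placeholders):

import numpy as np
import emcee

def log_prob(theta):
    # toy isotropic Gaussian log-probability
    return -0.5 * np.sum(theta**2)

ndim, nwalkers = 3, 32
p0 = np.random.randn(nwalkers, ndim)

pool = get_pool(mpi=False, threads=2)
sampler = emcee.EnsembleSampler(nwalkers, ndim, log_prob, pool=pool)
sampler.run_mcmc(p0, 500)
pool.close()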
Example no. 3
def main():

    # Initialize the MPI pool
    pool = MPIPool()

    # Make sure we only run map() on the master process
    if not pool.is_master():
        pool.wait()
        sys.exit(0)

    # create some random input data
    x = np.random.uniform(size=10000)
    y = np.random.uniform(size=10000)
    tasks = np.vstack((x,y)).T

    vals = pool.map(worker, tasks)

    pool.close()
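The worker function mapped above is not defined in this snippet; a minimal placeholder consistent with the two-column (x, y) tasks, modeled on the later demos, could be:

def worker(task):
    # unpack one row of the task array and return a scalar result
    x, y = task
    return x**2 + y**2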
Example no. 4
def get_pool(mpi=False, threads=1):
    if mpi:  # using MPI
        from mpipool import MPIPool
        pool = MPIPool()
        pool.start()
        if not pool.is_master():
            sys.exit(0)
    elif threads > 1:  # using multiprocessing
        from multiprocessing import Pool
        pool = Pool(processes=threads)
    else:
        raise RuntimeError, "Wrong arguments: either mpi=True or threads>1."
    return pool
Example no. 5
def get_pool(mpi=False, threads=1):
    """
    Create a process pool for parallelizing DEM simulations within GrainLearning

    :param mpi: bool, default=False

    :param threads: int, default=1
    """
    if mpi:  # using MPI
        from mpipool import MPIPool
        pool = MPIPool()
        pool.start()
        if not pool.is_master():
            sys.exit(0)
    elif threads > 1:  # using multiprocessing
        from multiprocessing import Pool
        pool = Pool(processes=threads, maxtasksperchild=10)
    else:
        raise RuntimeError("Wrong arguments: either mpi=True or threads>1.")
    return pool
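A brief usage sketch of this helper (run_simulation and the parameter sets below are hypothetical placeholders standing in for an actual DEM run, not part of GrainLearning's API):

def run_simulation(params):
    # placeholder for a single DEM run; a real implementation would launch
    # the simulation with the given parameter set and return its output
    return sum(params)

if __name__ == "__main__":
    pool = get_pool(mpi=False, threads=4)
    results = pool.map(run_simulation, [[0.1, 0.2], [0.3, 0.4]])
    pool.close()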
Example no. 6
def get_pool(mpi=False, threads=None, **kwargs):
    """ Get a pool object to pass to emcee for parallel processing.
        If mpi is False and threads is None, pool is None.

        Parameters
        ----------
        mpi : bool
            Use MPI or not. If specified, ignores the threads kwarg.
        threads : int (optional)
            If mpi is False and threads is specified, use a Python
            multiprocessing pool with the specified number of threads.
        **kwargs
            Any other keyword arguments are passed through to the pool
            initializers.
    """

    if mpi:
        from mpipool import MPIPool

        # Initialize the MPI pool
        pool = MPIPool(**kwargs)

        # Make sure the thread we're running on is the master
        if not pool.is_master():
            pool.wait()
            sys.exit(0)
        logger.debug("Running with MPI...")

    elif threads is not None and threads > 1:
        logger.debug(
            "Running with multiprocessing on {} cores...".format(threads))
        pool = multiprocessing.Pool(threads, **kwargs)

    else:
        logger.debug("Running serial...")
        pool = SerialPool(**kwargs)

    return pool
Example no. 7
# mpipool_demo.py

import sys
import numpy as np
from mpipool import MPIPool


# define the function that will be applied to tasks
def worker(task):
    x, y = task
    return x**2 + 2 * y


# create the pool
pool = MPIPool()

# only run map() on the master process, all other processes wait for their work
if not pool.is_master():
    pool.wait()
    # worker processes exit after they have done their work
    sys.exit(0)

# the following code is executed by the master process only
# create some random input data
x = np.random.uniform(size=10)
y = np.random.uniform(size=10)
tasks = list(zip(x, y))


# create a callback function
def cb(x):
    print(x)

# map the function worker to tasks
# and execute them in parallel by processes other than the master
results = pool.map(worker, tasks, callback=cb)

# shut down the pool once the master is done
pool.close()
Example no. 8
# mpipool_demo1.py

import numpy as np
from mpipool import MPIPool


# define the function that will be applied to tasks
def worker(task):
    x, y = task
    return 5 * x + y**2


with MPIPool() as pool:
    # only run map() on the master process, all other processes wait for their work
    if not pool.is_master():
        pool.wait()
    else:
        # the following code is executed by the master process only
        # create some random input data
        x = np.random.uniform(size=10)
        y = np.random.uniform(size=10)
        tasks = list(zip(x, y))

        # create a callback function
        def cb(x):
            print(x)

        # map the function worker to tasks
        # and execute them in parallel by processes other than the master
        results = pool.map(worker, tasks, callback=cb)
Example no. 9
    output = stats_wrapper(dataset1, dataset2, statistics=stats)[0]

    for stat in output:
        stats_dict[stat].append([output[stat], os.path.basename(name1),
                                 os.path.basename(name2)])


print("Starting pool at {}".format(datetime.now()))

if multiprocess:

    use_mpi = False
    if use_mpi:
        from mpipool import MPIPool
        pool = MPIPool(loadbalance=False)

        if not pool.is_master():
            # Wait for instructions from the master process.
            pool.wait()
            sys.exit(0)
    else:
        from multiprocessing import cpu_count, Pool
        psize = cpu_count()
        print("Found {} CPUs to run on.".format(psize))
        pool = Pool(processes=psize)

print("Pool created at {}".format(datetime.now()))

# Run distance between comparisons
Example no. 10
    def lnposterior(self, x):
        return multivariate_normal.logpdf(x, mean=self.mean, cov=self.cov)

    def __call__(self, x):
        return self.lnposterior(x)

ndim = 3
A = np.random.rand(ndim, ndim)
mean = np.zeros(ndim)
cov = A*A.T + ndim*np.eye(ndim)

# create an ND Gaussian model
model = Model(mean, cov)

# define an MPI pool
pool = MPIPool()

# Make sure the thread we're running on is the master
if not pool.is_master():
    pool.wait()
    sys.exit(0)

nwalkers = 500
sampler = kombine.Sampler(nwalkers, ndim, model, pool=pool)

p0 = np.random.uniform(-10, 10, size=(nwalkers, ndim))
p, post, q = sampler.burnin(p0)
p, post, q = sampler.run_mcmc(100)

# close the MPI pool
pool.close()
Example no. 11
    if output_dir[-1] != "/":
        output_dir += "/"

    save_name = sys.argv[7]

    # Load the list of complete cubes in

    obs_cubes = [obs_dir + f for f in os.listdir(obs_dir) if f[-4:] == 'fits']

    # sim_dir = "/Volumes/RAIDers_of_the_lost_ark/SimSuite8/"

    # Toggle the pool on here

    if multiproc == "MPI":
        from mpipool import MPIPool
        pool = MPIPool(loadbalance=False)

        if not pool.is_master():
            # Wait for instructions from the master process.
            pool.wait()
            sys.exit(0)
    elif multiproc == "noMPI":
        from multiprocessing import Pool
        # pool = Pool(processes=12)
        pool = Pool(processes=4, maxtasksperchild=100)
    else:
        pool = None

    # Do the actual comparisons

    if comparison == "Obs_to_Fid":
Example no. 12
        return multivariate_normal.logpdf(x, mean=self.mean, cov=self.cov)

    def __call__(self, x):
        return self.lnposterior(x)


ndim = 3
A = np.random.rand(ndim, ndim)
mean = np.zeros(ndim)
cov = A * A.T + ndim * np.eye(ndim)

# create an ND Gaussian model
model = Model(mean, cov)

# define an MPI pool
pool = MPIPool()

# Make sure the thread we're running on is the master
if not pool.is_master():
    pool.wait()
    sys.exit(0)

nwalkers = 500
sampler = kombine.Sampler(nwalkers, ndim, model, pool=pool)

p0 = np.random.uniform(-10, 10, size=(nwalkers, ndim))
p, post, q = sampler.burnin(p0)
p, post, q = sampler.run_mcmc(100)

# close the MPI pool
pool.close()