Example #1
from IPython.parallel import Client   # required for the Client() call below


def map(r, func, args=None, modules=None):
    """
    Before you run parallel.map, start your cluster (e.g. ipcluster start -n 4).

    map(r, func, args=None, modules=None)
    args=dict(arg0=arg0, ...)
    modules='numpy, scipy'

    Examples:
    func = lambda x: numpy.random.rand()**2.
    z = parallel.map(r_[0:1000], func, modules='numpy, numpy.random')
    plot(z)

    A = ones((1000, 1000))
    l = range(0, 1000)
    func = lambda x: A[x, l]**2.
    z = parallel.map(r_[0:1000], func, dict(A=A, l=l))
    z = array(z)
    """
    mec = Client()
    mec.clear()                                  # reset the engines' namespaces
    lview = mec.load_balanced_view()
    for k in mec.ids:
        mec[k].activate()
        if args is not None:
            mec[k].push(args)                    # push shared variables to every engine
        if modules is not None:
            mec[k].execute('import ' + modules)  # import the listed modules on every engine
    z = lview.map(func, r)                       # distribute func over r across the engines
    out = z.get()                                # block until all results are back
    return out
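A minimal usage sketch for the helper above (not part of the original snippet): it assumes the function lives in a module named parallel, as the docstring suggests, and that an ipcluster with a few engines is already running.

# Hypothetical driver for the map() helper above; 'parallel' is the assumed
# module name from the docstring, and an ipcluster must already be running.
import numpy
from numpy import r_, ones, array

import parallel

# Stateless function: only needs numpy imported on the engines.
func = lambda x: numpy.random.rand() ** 2.
z = parallel.map(r_[0:1000], func, modules='numpy, numpy.random')

# Function that closes over data: push A and l to every engine via args.
A = ones((100, 100))
l = list(range(100))
func = lambda x: A[x, l] ** 2.
z = array(parallel.map(r_[0:100], func, dict(A=A, l=l)))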
Example #2
def map(r, func, args=None, modules=None):
    """
	Before you run parallel.map, start your cluster (e.g. ipcluster start -n 4)
	
	map(r,func, args=None, modules=None):
	args=dict(arg0=arg0,...)
	modules='numpy, scipy'    
	
	examples:
	func= lambda x: numpy.random.rand()**2.
	z=parallel.map(r_[0:1000], func, modules='numpy, numpy.random')
	plot(z)
	
	A=ones((1000,1000));
	l=range(0,1000)
	func=lambda x : A[x,l]**2.
	z=parallel.map(r_[0:1000], func, dict(A=A, l=l))
	z=array(z)
	
	"""
    mec = Client()
    mec.clear()
    lview = mec.load_balanced_view()
    for k in mec.ids:
        mec[k].activate()
        if args is not None:
            mec[k].push(args)
        if modules is not None:
            mec[k].execute('import ' + modules)
    z = lview.map(func, r)
    out = z.get()
    return out
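The modules argument above simply becomes an import statement executed on each engine. For reference, IPython.parallel also provides a sync_imports() context manager on a DirectView for the same job; a brief sketch, assuming a running cluster:

# Alternative to the 'modules' string: perform the engine-side imports with
# DirectView.sync_imports(); this sketch assumes an ipcluster is running.
from IPython.parallel import Client

rc = Client()
dview = rc[:]

with dview.sync_imports():
    import numpy   # imported both locally and on every engine

# Functions shipped to the engines can now rely on numpy being available.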
Example #3
    def __init__(self, mdl):
        """The constructor.

        mdl : model (of class Red3 or Red6)
        """
        IntGen.__init__(self, mdl)
        # find the engine processes
        rc = Client(profile='mpi')
        rc.clear()
        # create a view of the processes
        self.view = rc[:]

        # number of clients (engines)
        nCl = len(rc.ids)

        if mdl.Y.ndim > 2:
            # divisors of nCl
            div = [i for i in range(1, nCl + 1) if nCl % i == 0]
            ldiv = len(div)
            # the surface will be divided into nbx rows and nby columns
            if ldiv % 2 == 0:
                self.nbx = div[ldiv // 2]
                self.nby = div[ldiv // 2 - 1]
            else:
                self.nbx = self.nby = div[ldiv // 2]
        else:
            self.nbx = nCl
            self.nby = 0
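The divisor arithmetic above picks a near-square nbx x nby grid for the engines when mdl.Y has more than two dimensions. A standalone sketch of the same selection rule (hypothetical helper name grid_shape), shown for a few engine counts:

# Sketch of the grid-splitting rule used in __init__ above: choose the two
# middle divisors of the engine count so that nbx * nby == nCl and nbx >= nby.
def grid_shape(nCl):
    div = [i for i in range(1, nCl + 1) if nCl % i == 0]
    ldiv = len(div)
    if ldiv % 2 == 0:
        return div[ldiv // 2], div[ldiv // 2 - 1]
    return div[ldiv // 2], div[ldiv // 2]

for n in (1, 2, 4, 6, 8):
    print(n, grid_shape(n))   # (1, 1), (2, 1), (2, 2), (3, 2), (4, 2)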
Example #4
def init(profile="mpi"):
    """Initialize pyDive.

    :param str profile: The name of the cluster profile of *IPython.parallel*. Has to be an MPI-profile.\
        Defaults to 'mpi'.
    """
    # init direct view
    global view

    client = Client(profile=profile)
    client.clear()
    view = client[:]
    view.block = True
    view.execute(
        """\
        import numpy as np
        from mpi4py import MPI
        import h5py as h5
        import os, sys
        import psutil
        import math
        os.environ["onTarget"] = 'True'
        from pyDive import structured
        from pyDive import algorithm
        from pyDive.distribution import interengine
        try:
            import pyDive.arrays.local.h5_ndarray
        except ImportError:
            pass
        try:
            import pyDive.arrays.local.ad_ndarray
        except ImportError:
            pass
        try:
            import pyDive.arrays.local.gpu_ndarray
            import pycuda.autoinit
        except ImportError:
            pass
         """
    )

    # get number of processes per node (ppn)
    def hostname():
        import socket

        return socket.gethostname()

    hostnames = view.apply(interactive(hostname))
    global ppn
    ppn = max(Counter(hostnames).values())

    # mpi ranks
    get_rank = interactive(lambda: MPI.COMM_WORLD.Get_rank())
    all_ranks = view.apply(get_rank)
    view["target2rank"] = all_ranks
Example #5
def init(profile='mpi'):
    """Initialize pyDive.

    :param str profile: The name of the cluster profile of *IPython.parallel*. Has to be an MPI-profile.\
        Defaults to 'mpi'.
    """
    # init direct view
    global view

    client = Client(profile=profile)
    client.clear()
    view = client[:]
    view.block = True
    view.execute('''\
        import numpy as np
        from mpi4py import MPI
        import h5py as h5
        import os, sys
        import psutil
        import math
        os.environ["onTarget"] = 'True'
        from pyDive import structured
        from pyDive import algorithm
        from pyDive.distribution import interengine
        try:
            import pyDive.arrays.local.h5_ndarray
        except ImportError:
            pass
        try:
            import pyDive.arrays.local.ad_ndarray
        except ImportError:
            pass
        try:
            import pyDive.arrays.local.gpu_ndarray
            import pycuda.autoinit
        except ImportError:
            pass
         ''')

    # get number of processes per node (ppn)
    def hostname():
        import socket
        return socket.gethostname()
    hostnames = view.apply(interactive(hostname))
    global ppn
    ppn = max(Counter(hostnames).values())

    # mpi ranks
    get_rank = interactive(lambda: MPI.COMM_WORLD.Get_rank())
    all_ranks = view.apply(get_rank)
    view['target2rank'] = all_ranks
Example #6
def time_scaling(nengines=(1, 2, 4, 8), filename="data.npy", repeats=3):
    """Time parallel FFTs for various cluster sizes (best of `repeats` runs per size)."""
    client = Client()

    results = []
    for n in nengines:
        runs = []
        for i in range(repeats):
            client.clear()
            view = client[:n]
            tic = time.time()
            parallel_ffts(view, abspath(filename))
            toc = time.time()
            runs.append(toc - tic)
        results.append(min(runs))

    return results
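A hedged driver for time_scaling(): it assumes an ipcluster with at least eight engines is running and that parallel_ffts and data.npy exist elsewhere in the same script (neither is shown here).

# Hypothetical driver for time_scaling(); the engine counts and filename are
# the function's own defaults, parallel_ffts/data.npy come from the original script.
sizes = (1, 2, 4, 8)
times = time_scaling(nengines=sizes, filename='data.npy', repeats=3)

for n, t in zip(sizes, times):
    print('%d engine(s): best of 3 runs = %.3f s' % (n, t))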
Example #7
#-------------------------------------------------------------------------
# Imports
#-------------------------------------------------------------------------
from __future__ import print_function

import time

from IPython.parallel import Client

#-------------------------------------------------------------------------
# Setup
#-------------------------------------------------------------------------

mux = Client()[:]

mux.clear()

mux.block = False

ar1 = mux.apply(time.sleep, 5)
ar2 = mux.push(dict(a=10, b=30, c=range(20000), d='The dog went swimming.'))
ar3 = mux.pull(('a', 'b', 'd'), block=False)

print("Try a non-blocking get_result")
ar4 = mux.get_result()

print("Now wait for all the results")
mux.wait([ar1, ar2, ar3, ar4])
print("The last pull got:", ar4.r)
Example #9
    "-p",
    "--profile",
    dest="client_profile",
    default="unissh",
    action="store_const",
    help="the profile to use for ipython.parallel",
)
options, args = opt_parser.parse_args()

# START: create remote evaluators and one or a few special engines
# for generating new points
logger.info("init")
from IPython.parallel import Client, require

c = Client(profile=options.client_profile)
c.clear()  # clears remote engines
c.purge_results("all")  # the hub keeps all results in memory, so purge them

if len(c.ids) < 2:
    raise Exception("I need at least 2 clients.")
nbGens = min(1, len(c.ids) - 1)
generators = c.load_balanced_view(c.ids[:nbGens])
evaluators = c.load_balanced_view(c.ids[nbGens:])

# MAX number of tasks in total
MAX = 5000
# length of test data, sent over the wire
DIMSIZE = 10
# when adding machines, this is the number of additional tasks
# beyond the number of free machines
new_extra = DIMSIZE
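A hedged sketch of how the evaluators view set up above might be exercised; evaluate_point is a stand-in for the script's real objective function, which is not shown in this excerpt.

# Hypothetical use of the 'evaluators' load-balanced view created above.
import numpy as np

def evaluate_point(x):
    import numpy                      # make sure numpy is available on the engine
    return float(numpy.sum(numpy.asarray(x) ** 2))

points = [np.random.rand(DIMSIZE) for _ in range(10)]
async_results = [evaluators.apply_async(evaluate_point, p) for p in points]
values = [ar.get() for ar in async_results]
logger.info('evaluated %d test points', len(values))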