def __init__(self, *args, **kwds):
    """
NOTE: if number of nodes is not given, will default to 1.
If source is not given, will attempt to minimally use TemporaryFiles.
If workdir is not given, will default to scheduler's workdir or $WORKDIR.
If scheduler is not given, will default to only run on the current node.
If timeout is not given, will default to scheduler's timelimit or INF.

For more details, see the docstrings for the "map" method, or the man page
for the associated launcher (e.g mpirun, mpiexec).
    """
    AbstractWorkerPool.__init__(self, *args, **kwds)
    # configuration pulled from keywords; each falls back to a default below
    self.scheduler = kwds.get('scheduler', None)
    self.scatter = True #bool(kwds.get('scatter', True))
    self.source = bool(kwds.get('source', False))
    self.workdir = kwds.get('workdir', None)
    self.timeout = kwds.get('timeout', None)
    # FIX: use 'is None' (identity) rather than '== None' for None checks
    if self.timeout is None:
        if self.scheduler:
            # inherit the scheduler's timelimit, converted to seconds
            from pyina.tools import isoseconds
            self.timeout = isoseconds(self.scheduler.timelimit)
        else:
            from numpy import inf
            self.timeout = inf #XXX: better than defaults.timelimit ?
    elif isinstance(self.timeout, str):
        # a string timeout is an ISO-style duration; convert to seconds
        from pyina.tools import isoseconds
        self.timeout = isoseconds(self.timeout)
    if self.workdir is None:
        if self.scheduler:
            self.workdir = self.scheduler.workdir
        else:
            # fall back to $WORKDIR, then the current directory
            self.workdir = os.environ.get('WORKDIR', os.path.curdir)
    self.workdir = os.path.abspath(self.workdir)
    return
def amap(self, f, *args, **kwds):
    # Asynchronous map: submit all jobs to the pp server up front and
    # return a MapResult; the caller collects results later via 'get()'.
    # 'size' (kwd) pins the per-chunk element size and skips the
    # chunksize search loop below.
    AbstractWorkerPool._AbstractWorkerPool__map(self, f, *args, **kwds)
    def submit(*argz):
        """send a job to the server"""
        _pool = self._serve()
        #print("using %s local workers" % _pool.get_ncpus())
        try:
            return _pool.submit(f, argz, globals=globals())
        except pp.DestroyedServerError:
            self._is_alive(None)
    override = True if 'size' in kwds else False
    elem_size = kwds.pop('size', 2)
    # number of tasks is bounded by the shortest argument sequence
    length = min(len(task) for task in args)
    args = zip(*args) #XXX: zip iterator ok? or should be list?
    # submit all jobs, to be collected later with 'get()'
    tasks = [submit(*task) for task in args]
    tasks = [ApplyResult(task) for task in tasks]
    # build a correctly sized results object
    nodes = self.nodes
    if self.nodes in ['*','autodetect',None]:
        _pool = self._serve()
        nodes = _pool.get_ncpus() #XXX: local workers only?
    # try to quickly find a small chunksize that gives good results
    maxsize = 2**62 #XXX: HOPEFULLY, this will never be reached...
    chunksize = 1
    while chunksize < maxsize:
        # doubling elem_size each pass shrinks chunksize toward 0, so
        # 'extra >= length' eventually terminates the loop
        chunksize, extra = divmod(length, nodes * elem_size)
        if override: break # the user *wants* to override this loop
        if extra >= length: break # we found something that 'works'
        elem_size = elem_size * 2
    # round up so the final partial chunk is not lost
    if extra: chunksize += 1
    m = MapResult((chunksize,length))
    # queue the tasks
    m.queue(*tasks)
    return m
def uimap(self, f, *args, **kwds):
    """Unordered iterator map: submit every job up front, then yield each
    result as its job finishes, in completion (not submission) order."""
    AbstractWorkerPool._AbstractWorkerPool__imap(self, f, *args, **kwds)
    def submit(*argz):
        """send a job to the server"""
        server = self._serve()
        try:
            return server.submit(f, argz, globals=globals())
        except pp.DestroyedServerError:
            self._is_alive(None)
    def imap_unordered(jobs):
        """generator that busy-polls jobs, yielding each as it completes"""
        pending = list(jobs)
        while pending:
            for idx, job in enumerate(pending):
                if job.finished:
                    # remove the finished job and yield its result
                    yield pending.pop(idx)()
                    break
        return
    # submit all jobs, then collect results as they become available
    return imap_unordered(builtins.map(submit, *args))
def amap(self, f, *args, **kwds):
    """Asynchronous map: submit all jobs now, return a MapResult whose
    results are collected later via 'get()'.  The 'size' keyword pins the
    per-chunk element size and bypasses the chunksize search loop."""
    AbstractWorkerPool._AbstractWorkerPool__map(self, f, *args, **kwds)
    def submit(*argz):
        """send a job to the server"""
        #print "using", __STATE['server'].get_ncpus(), 'local workers'
        return __STATE['server'].submit(f, argz, globals=globals())
    # FIX: dict.has_key was removed in Python 3; 'in' works on 2 and 3
    override = True if 'size' in kwds else False
    elem_size = kwds.pop('size', 2)
    # FIX: materialize the zip -- on Python 3, zip returns a one-shot
    # iterator with no len(), breaking len(args) below
    args = list(zip(*args))
    # submit all jobs, to be collected later with 'get()'
    tasks = [submit(*task) for task in args]
    tasks = [ApplyResult(task) for task in tasks]
    # build a correctly sized results object
    length = len(args)
    nodes = self.nodes
    if self.nodes in ['*','autodetect',None]:
        nodes = __STATE['server'].get_ncpus() #XXX: local workers only?
    # try to quickly find a small chunksize that gives good results
    maxsize = 2**62 #XXX: HOPEFULLY, this will never be reached...
    chunksize = 1
    while chunksize < maxsize:
        chunksize, extra = divmod(length, nodes * elem_size)
        if override: break # the user *wants* to override this loop
        if extra >= length: break # we found something that 'works'
        elem_size = elem_size * 2
    # round up so the final partial chunk is not lost
    if extra: chunksize += 1
    m = MapResult((chunksize,length))
    # queue the tasks
    m.queue(*tasks)
    return m
def imap(self, f, *args, **kwds):
    """Iterator map: every job is submitted immediately; results are then
    collected lazily, in order, as the returned generator is consumed."""
    AbstractWorkerPool._AbstractWorkerPool__imap(self, f, *args, **kwds)
    def submit(*argz):
        """send a job to the server"""
        # dispatch a single task to the shared pp server
        return __STATE['server'].submit(f, argz, globals=globals())
    # submit all jobs, then collect results as they become available
    jobs = __builtin__.map(submit, *args)
    return (job() for job in jobs)
def imap(self, f, *args, **kwds):
    """Iterator map: every job is submitted immediately; results are then
    collected lazily, in order, as the returned generator is consumed."""
    AbstractWorkerPool._AbstractWorkerPool__imap(self, f, *args, **kwds)
    def submit(*argz):
        """send a job to the server"""
        server = self._serve()
        # dispatch a single task to the pp server
        return server.submit(f, argz, globals=globals())
    # submit all jobs, then collect results as they become available
    jobs = __builtin__.map(submit, *args)
    return (job() for job in jobs)
def imap(self, f, *args, **kwds):
    """Iterator map: all jobs are submitted eagerly (the map is forced
    with list), then results are yielded in order on demand."""
    AbstractWorkerPool._AbstractWorkerPool__imap(self, f, *args, **kwds)
    def submit(*argz):
        """send a job to the server"""
        server = self._serve()
        try:
            return server.submit(f, argz, globals=globals())
        except pp.DestroyedServerError:
            self._is_alive(None)
    # force submission of every job now; collect results lazily
    pending = list(builtins.map(submit, *args))
    return (job() for job in pending)
def imap(self, f, *args, **kwds):
    """Iterator map: every job is submitted immediately; results are then
    collected lazily, in order, as the returned generator is consumed."""
    AbstractWorkerPool._AbstractWorkerPool__imap(self, f, *args, **kwds)
    def submit(*argz):
        """send a job to the server"""
        server = self._serve()
        try:
            return server.submit(f, argz, globals=globals())
        except pp.DestroyedServerError:
            self._is_alive(None)
    # submit all jobs, then collect results as they become available
    jobs = __builtin__.map(submit, *args)
    return (job() for job in jobs)
def uimap(self, f, *args, **kwds):
    """Unordered iterator map: submit every job up front, then yield each
    result as its job finishes, in completion (not submission) order."""
    AbstractWorkerPool._AbstractWorkerPool__imap(self, f, *args, **kwds)
    def submit(*argz):
        """send a job to the server"""
        _pool = self._serve()
        try:
            return _pool.submit(f, argz, globals=globals())
        except pp.DestroyedServerError:
            self._is_alive(None)
    def imap_unordered(it):
        """build a unordered map iterator"""
        while len(it):
            for i,job in enumerate(it):
                if job.finished:
                    yield it.pop(i)()
                    break
            # yield it.pop(0).get() # wait for the first element?
            # *subprocess*  # alternately, loop in a subprocess
        # FIX: 'raise StopIteration' inside a generator is a RuntimeError
        # under PEP 479 (Python 3.7+); a bare return ends the generator
        return
    # submit all jobs, then collect results as they become available
    return imap_unordered(__builtin__.map(submit, *args))
def amap(self, f, *args, **kwds):
    """Asynchronous map: submit all jobs now, return a MapResult whose
    results are collected later via 'get()'.  The 'size' keyword sets the
    per-chunk element size used to compute the chunksize."""
    AbstractWorkerPool._AbstractWorkerPool__map(self, f, *args, **kwds)
    def submit(*argz):
        """send a job to the server"""
        # dispatch a single task to the shared pp server
        return __STATE['server'].submit(f, argz, globals=globals())
    elem_size = kwds.pop('size', 8) #FIXME: should be size of output type
    # FIX: materialize the zip -- on Python 3, zip returns a one-shot
    # iterator with no len(), breaking len(args) below
    args = list(zip(*args))
    # submit all jobs, to be collected later with 'get()'
    tasks = [submit(*task) for task in args]
    tasks = [ApplyResult(task) for task in tasks]
    # build a correctly sized results object
    length = len(args)
    nodes = self.nodes
    if self.nodes in ['*','autodetect',None]:
        nodes = __STATE['server'].get_ncpus() #XXX: local workers only?
    chunksize, extra = divmod(length, nodes * elem_size)
    # round up so the final partial chunk is not lost
    if extra: chunksize += 1
    m = MapResult((chunksize,length))
    # queue the tasks
    m.queue(*tasks)
    return m
def map(self, f, *args, **kwds):
    """Blocking map: apply f across the zipped argument sequences on the
    pool, waiting for and returning the full list of results."""
    AbstractWorkerPool._AbstractWorkerPool__map(self, f, *args, **kwds)
    server = self._serve()
    tasks = zip(*args) # chunksize
    return server.map(star(f), tasks)
def imap(self, f, *args, **kwds):
    """Iterator map: apply f across the zipped argument sequences on the
    shared pool, returning an ordered result iterator."""
    AbstractWorkerPool._AbstractWorkerPool__imap(self, f, *args, **kwds)
    tasks = zip(*args) # chunksize
    return __STATE['pool'].imap(star(f), tasks)
def map(self, f, *args, **kwds):
    """Blocking map built on imap: drain the result iterator into a list."""
    AbstractWorkerPool._AbstractWorkerPool__map(self, f, *args, **kwds)
    results = self.imap(f, *args)
    return list(results)
def map(self, f, *args, **kwds):
    """Blocking map: apply f across the zipped argument sequences on the
    shared thread pool, returning the full list of results."""
    AbstractWorkerPool._AbstractWorkerPool__map(self, f, *args, **kwds)
    tasks = zip(*args) # chunksize
    return __STATE['threads'].map(star(f), tasks)
def uimap(self, f, *args, **kwds):
    """Unordered iterator map: results arrive in completion order, via
    the shared thread pool's imap_unordered."""
    AbstractWorkerPool._AbstractWorkerPool__imap(self, f, *args, **kwds)
    tasks = zip(*args) # chunksize
    return __STATE['threads'].imap_unordered(star(f), tasks)
def amap(self, f, *args, **kwds): # register a callback ?
    """Asynchronous map: dispatch f over the zipped argument sequences and
    return the pool's async result handle immediately."""
    AbstractWorkerPool._AbstractWorkerPool__map(self, f, *args, **kwds)
    server = self._serve()
    tasks = zip(*args) # chunksize
    return server.map_async(star(f), tasks)
def uimap(self, f, *args, **kwds):
    """Unordered iterator map: results arrive in completion order, via
    the pool's imap_unordered."""
    AbstractWorkerPool._AbstractWorkerPool__imap(self, f, *args, **kwds)
    server = self._serve()
    tasks = zip(*args) # chunksize
    return server.imap_unordered(star(f), tasks)
def amap(self, f, *args, **kwds): # register a callback ?
    """Asynchronous map: dispatch f over the zipped argument sequences on
    the shared thread pool, returning the async result handle."""
    AbstractWorkerPool._AbstractWorkerPool__map(self, f, *args, **kwds)
    tasks = zip(*args) # chunksize
    return __STATE['threads'].map_async(star(f), tasks)