def init_mp_pool(reset=False):
    """Necessary because at import time, cfg might be uninitialized"""
    global _mp_manager, _mp_pool
    if _mp_pool and _mp_manager and not reset:
        return _mp_pool
    cfg.CONFIG_MODIFIED = False
    # On a reset (or with a stale pool), tear down the old pool and manager
    # before rebuilding them from the current configuration.
    if _mp_pool:
        _mp_pool.terminate()
        _mp_pool = None
    if _mp_manager:
        cfg.set_manager(None)
        _mp_manager.shutdown()
        _mp_manager = None
    # Optionally use the 'spawn' start method instead of the platform default.
    if cfg.PARAMS['use_mp_spawn']:
        mp = multiprocessing.get_context('spawn')
    else:
        mp = multiprocessing
    _mp_manager = mp.Manager()
    cfg.set_manager(_mp_manager)
    cfg_contents = cfg.pack_config()
    global_lock = _mp_manager.Lock()
    mpp = cfg.PARAMS['mp_processes']
    _mp_pool = mp.Pool(mpp, initializer=_init_pool_globals,
                       initargs=(cfg_contents, global_lock))
    return _mp_pool
def init_mp_pool(reset=False):
    """Necessary because at import time, cfg might be uninitialized"""
    global _mp_pool
    if _mp_pool and not reset:
        return _mp_pool
    cfg.CONFIG_MODIFIED = False
    if _mp_pool and reset:
        _mp_pool.terminate()
        _mp_pool = None
    cfg_contents = cfg.pack_config()
    global_lock = mp.Manager().Lock()
    mpp = cfg.PARAMS['mp_processes']
    if mpp == -1:
        try:
            mpp = int(os.environ['SLURM_JOB_CPUS_PER_NODE'])
            log.info('Multiprocessing: using slurm allocated '
                     'processors (N={})'.format(mpp))
        except KeyError:
            mpp = mp.cpu_count()
            log.info('Multiprocessing: using all available '
                     'processors (N={})'.format(mpp))
    else:
        log.info('Multiprocessing: using the requested number of '
                 'processors (N={})'.format(mpp))
    _mp_pool = mp.Pool(mpp, initializer=_init_pool_globals,
                       initargs=(cfg_contents, global_lock))
    return _mp_pool
def init_mp_pool(reset=False):
    """Necessary because at import time, cfg might be uninitialized"""
    global _mp_pool
    if _mp_pool and not reset:
        return _mp_pool
    cfg.CONFIG_MODIFIED = False
    if _mp_pool and reset:
        _mp_pool.terminate()
        _mp_pool = None
    if cfg.PARAMS['use_mp_spawn']:
        mp = multiprocessing.get_context('spawn')
    else:
        mp = multiprocessing
    cfg_contents = cfg.pack_config()
    global_lock = mp.Manager().Lock()
    mpp = cfg.PARAMS['mp_processes']
    log.workflow('Initializing multiprocessing pool with '
                 'N={} processes.'.format(mpp))
    _mp_pool = mp.Pool(mpp, initializer=_init_pool_globals,
                       initargs=(cfg_contents, global_lock))
    return _mp_pool
def init_mp_pool(reset=False):
    """Necessary because at import time, cfg might be uninitialized"""
    global _mp_pool
    if _mp_pool and not reset:
        return _mp_pool
    cfg.CONFIG_MODIFIED = False
    if _mp_pool and reset:
        _mp_pool.terminate()
        _mp_pool = None
    cfg_contents = cfg.pack_config()
    global_lock = mp.Manager().Lock()
    mpp = cfg.PARAMS['mp_processes']
    if mpp == -1:
        try:
            mpp = int(os.environ['SLURM_JOB_CPUS_PER_NODE'])
            log.workflow('Multiprocessing: using slurm allocated '
                         'processors (N={})'.format(mpp))
        except KeyError:
            mpp = mp.cpu_count()
            log.workflow('Multiprocessing: using all available '
                         'processors (N={})'.format(mpp))
    else:
        log.workflow('Multiprocessing: using the requested number of '
                     'processors (N={})'.format(mpp))
    _mp_pool = mp.Pool(mpp, initializer=_init_pool_globals,
                       initargs=(cfg_contents, global_lock))
    return _mp_pool
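For orientation, here is a minimal sketch of how the pool returned by init_mp_pool might be used to run a task over a collection of glacier directories. The _process_one helper and the run_over_gdirs wrapper are hypothetical names introduced only for illustration; they are not part of the module above.

def _process_one(gdir):
    # Hypothetical per-glacier task; a real task would do actual work
    # on the glacier directory it receives.
    return gdir

def run_over_gdirs(gdirs):
    # Reuse (or lazily create) the shared pool and map the task over
    # the glacier directories in parallel.
    pool = init_mp_pool()
    return pool.map(_process_one, gdirs, chunksize=1)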
def mpi_master_spin_tasks(task, gdirs):
    comm = OGGM_MPI_COMM
    cfg_store = cfg.pack_config()
    msg_list = ([gdir for gdir in gdirs if gdir is not None] +
                [None] * OGGM_MPI_SIZE)

    _imprint("Starting MPI task distribution...")

    comm.bcast((cfg_store, task), root=OGGM_MPI_ROOT)

    status = MPI.Status()
    for msg in msg_list:
        comm.recv(source=MPI.ANY_SOURCE, status=status)
        comm.send(obj=msg, dest=status.Get_source())

    _imprint("MPI task distribution done, collecting results...")

    comm.gather(sendobj=None, root=OGGM_MPI_ROOT)

    _imprint("MPI task results gotten!")
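The master-side routine above only distributes work; each worker rank has to mirror the same protocol. The following is a speculative sketch of such a worker loop, inferred purely from the bcast/recv/send/gather calls above; the name mpi_worker_loop and the cfg.unpack_config call are assumptions, not the actual worker implementation.

def mpi_worker_loop():
    # Hypothetical counterpart to mpi_master_spin_tasks, inferred from
    # the master-side protocol; not the real worker code.
    comm = OGGM_MPI_COMM
    # Receive the packed config and the task broadcast by the master.
    cfg_store, task = comm.bcast(None, root=OGGM_MPI_ROOT)
    cfg.unpack_config(cfg_store)  # assumed counterpart of cfg.pack_config
    while True:
        # Ask the master for work, then receive either a glacier
        # directory or the None sentinel that ends this rank's loop.
        comm.send(obj=None, dest=OGGM_MPI_ROOT)
        gdir = comm.recv(source=OGGM_MPI_ROOT)
        if gdir is None:
            break
        task(gdir)
    # Take part in the final gather so the master's collective completes.
    comm.gather(sendobj=None, root=OGGM_MPI_ROOT)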
def _init_pool():
    """Necessary because at import time, cfg might be uninitialized"""
    global _mp_pool
    if _mp_pool:
        return _mp_pool
    cfg_contents = cfg.pack_config()
    global_lock = mp.Manager().Lock()
    mpp = cfg.PARAMS['mp_processes']
    if mpp == -1:
        mpp = mp.cpu_count()
        log.info('Multiprocessing: using all available '
                 'processors (N={})'.format(mp.cpu_count()))
    else:
        log.info('Multiprocessing: using the requested number of '
                 'processors (N={})'.format(mpp))
    _mp_pool = mp.Pool(mpp, initializer=_init_pool_globals,
                       initargs=(cfg_contents, global_lock))
    return _mp_pool
def _init_pool():
    """Necessary because at import time, cfg might be uninitialized"""
    cfg_contents = cfg.pack_config()
    return mp.Pool(cfg.PARAMS['mp_processes'],
                   initializer=_init_pool_globals,
                   initargs=(cfg_contents,))
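All of the pool variants above hand the packed configuration (and, in the later versions, a shared lock) to an _init_pool_globals initializer that runs in every worker process. Below is a minimal sketch of what such an initializer could look like, assuming cfg exposes an unpack_config counterpart to pack_config and that the lock is simply stashed in a module-level global; both are assumptions, not confirmed by the code above.

def _init_pool_globals(_cfg_contents, global_lock=None):
    # Runs once in each worker process: restore the packed configuration
    # and keep a reference to the shared lock. The earliest variants pass
    # only the config, hence the default for global_lock.
    global _global_lock
    cfg.unpack_config(_cfg_contents)  # assumed counterpart of cfg.pack_config
    _global_lock = global_lock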