import os


def _init_mpi():
    """Provides a way to manually set the thread init mode for MPI if necessary.

    Needs to happen as early as possible, otherwise mpi4py might auto-init
    somewhere else.
    """
    try:
        import mpi4py
    except ImportError:
        return
    # only change finalize setting if unset
    finalize = (mpi4py.rc.finalize is None) or mpi4py.rc.finalize
    mpi4py.rc(initialize=False, finalize=finalize)
    from mpi4py import MPI
    if not MPI.Is_initialized():
        required_level = int(
            os.environ.get('PYMOR_MPI_INIT_THREAD', MPI.THREAD_MULTIPLE))
        supported_lvl = MPI.Init_thread(required_level)
        if supported_lvl < required_level:
            print(f'MPI does not support threading level {required_level}, '
                  f'running with {supported_lvl} instead', flush=True)
    try:
        # this solves sporadic mpi calls happening after finalize
        import petsc4py
        petsc4py.init()
    except ImportError:
        return
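# Usage sketch (added; assumes the _init_mpi() helper above): the requested MPI
# thread level can be picked through the PYMOR_MPI_INIT_THREAD environment
# variable before anything imports mpi4py. The value 2 corresponds to
# THREAD_SERIALIZED in MPICH and Open MPI; the MPI standard does not fix these
# numeric values, so treat this as an assumption.
import os

os.environ.setdefault('PYMOR_MPI_INIT_THREAD', '2')
# _init_mpi()  # must run before any other module auto-initializes MPI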
def test_mp4py_rc():
    import mpi4py.rc
    mpi4py.rc(
        initialize=True,
        threads=True,
        thread_level='multiple',
        finalize=None,
        fast_reduce=True,
        recv_mprobe=True,
        errors='exception',
    )
    try:
        mpi4py.rc(querty=False)
    except TypeError:
        pass
def test_mp4py_rc():
    import mpi4py.rc
    mpi4py.rc(
        initialize=True,
        threads=True,
        thread_level='multiple',
        finalize=None,
        fast_reduce=True,
        recv_mprobe=True,
        errors='exception',
    )
    try:
        mpi4py.rc(qwerty=False)
    except TypeError:
        pass
    else:
        raise RuntimeError
def testBadAttribute(self):
    error = lambda: mpi4py.rc(ABCXYZ=123456)
    self.assertRaises(TypeError, error)
    error = lambda: setattr(mpi4py.rc, 'ABCXYZ', 123456)
    self.assertRaises(TypeError, error)
    error = lambda: getattr(mpi4py.rc, 'ABCXYZ')
    self.assertRaises(AttributeError, error)
import multiprocessing

import mpi4py  # noqa
mpi4py.rc(initialize=False, finalize=False)  # noqa
from mpi4py import MPI  # noqa
from mpi4py.futures import MPICommExecutor  # noqa

import cpmg.data_handlers as h
import cpmg.generators as g
import cpmg.ranges as r
from cpmg.config import CAPACITY
import cpmg.config as config
from cpmg.parallelism import LEVEL_0, LEVEL_1, LEVEL_2
from cpmg.timer import GlobalTimer
from cpmg.exceptions import InvalidChunkSize


class ExecutionParameters:
    def __init__(self, *, record_type, parallelism, chunk_size, **kwargs):
        self.operation = record_type
        self.parallelism = parallelism
        self.chunk_size = chunk_size
        self.operation_parameters = self.__extract_operation_parameters(kwargs)

    def __extract_operation_parameters(self, command_line_args):
        if self.operation == g.SidechainModifier.STRING:
            operation_parameters = {
                'sidechain_key': r.Key(r.WholeRange()),
                'connection_key': r.Key(r.WholeRange())
            }
        elif self.operation == g.MonomerGenerator.STRING:
""" Command line interface for DANDI client TODO: - consider placing common option definitions into options.py submodule. pipenv is a nice example although common command definitions are somewhat too cubmersome. yoh thinks he saw a bit more lightweight somewhere. e.g. girder-client """ try: # A trick found on https://github.com/h5py/h5py/issues/1079#issuecomment-567081386 # to avoid some weird behavior on Yarik's laptop where MPI fails to initialize # and that takes h5py additional 5 seconds to import import mpi4py mpi4py.rc(initialize=False) except Exception: pass
def testRC3(self):
    error = lambda: rc(ABCXYZ=123456)
    self.assertRaises(TypeError, error)
def testRC1(self):
    rc(initialize=rc.initialize)
    rc(threads=rc.threads)
    rc(thread_level=rc.thread_level)
    rc(finalize=rc.finalize)
    rc(fast_reduce=rc.fast_reduce)
    rc(recv_mprobe=rc.recv_mprobe)
    :undoc-members:
    :members:
"""

from __future__ import print_function, division, absolute_import

__author__ = "W.R.Saunders"
__copyright__ = "Copyright 2016, W.R.Saunders"
__license__ = "GPL"

# system level
import mpi4py
mpi4py.rc(thread_level='serialized')
from mpi4py import MPI
import sys, atexit, traceback
import ctypes
import numpy as np
from functools import reduce
import os

if sys.version_info[0] >= 3:
    import queue as Queue
else:
    import Queue

if not MPI.Is_initialized():
    MPI.Init()
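# Sketch (added, not from the module above): once MPI is up, the thread support
# actually granted by the library can be checked against the 'serialized' level
# requested via mpi4py.rc above.
from mpi4py import MPI

if not MPI.Is_initialized():
    MPI.Init()

granted = MPI.Query_thread()
if granted < MPI.THREAD_SERIALIZED:
    print('MPI granted only thread level', granted)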
def testRC2(self):
    kwargs = rc.__dict__.copy()
    rc(**kwargs)
from awrams.calibration.support import *
from .logger import CalibrationLoggerProcess
from awrams.utils.messaging import Message
import numpy as np
from collections import OrderedDict
from time import sleep
import pickle
from awrams.utils.messaging.general import get_traceback
from .mpi_support import *
import signal

import mpi4py
mpi4py.rc(finalize=True)


def sigterm_handler(signal, frame):
    raise KeyboardInterrupt


def run_server(cal_spec):
    # (optimizer_spec, objective_spec, extent_map, node_alloc, catch_node_map,
    #  run_period, node_mapping, model, n_workers, logfile)
    prerun_action = cal_spec.get('prerun_action')
    if prerun_action is not None:
        prerun_action = funcspec_to_callable(prerun_action)
        prerun_action(cal_spec)
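# Sketch (assumed wiring, not shown in the snippet above): sigterm_handler only
# takes effect once it is installed, e.g. before the server loop starts, so a
# SIGTERM turns into KeyboardInterrupt and normal cleanup/MPI finalization runs.
import signal

signal.signal(signal.SIGTERM, sigterm_handler)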
import mpi4py

mpi4py.get_include()
mpi4py.get_config()

import mpi4py.rc
mpi4py.rc(
    initialize=True,
    threads=True,
    thread_level='multiple',
    finalize=None,
    fast_reduce=True,
    recv_mprobe=True,
    errors='exception',
)
try:
    mpi4py.rc(querty=False)
except TypeError:
    pass

import mpi4py
mpi4py.profile()
mpi4py.profile('mpe')
mpi4py.profile('mpe', path="/usr/lib")
mpi4py.profile('mpe', path=["/usr/lib"])
mpi4py.profile('mpe', logfile="mpi4py")
mpi4py.profile('mpe', logfile="mpi4py")
mpi4py.profile('vt')
mpi4py.profile('vt', path="/usr/lib")
mpi4py.profile('vt', path=["/usr/lib"])
mpi4py.profile('vt', logfile="mpi4py")
mpi4py.profile('vt', logfile="mpi4py")
import atexit

from cached_property import cached_property
import numpy as np
from cgen import Struct, Value

from devito.data import LEFT, CENTER, RIGHT, Decomposition
from devito.parameters import configuration
from devito.tools import EnrichedTuple, as_tuple, ctypes_to_cstr, is_integer
from devito.types import CompositeObject, Object

# Do not prematurely initialize MPI
# This allows launching a Devito program from within another Python program
# that has *already* initialized MPI
try:
    import mpi4py
    mpi4py.rc(initialize=False, finalize=False)
    from mpi4py import MPI  # noqa

    # From the `atexit` documentation: "At normal program termination [...]
    # all functions registered are in last in, first out order.". So, MPI.Finalize
    # will be called only at the very end, after all cloned communicators
    # will have been freed
    def cleanup():
        if MPI.Is_initialized():
            MPI.Finalize()
    atexit.register(cleanup)
except ImportError:
    # Dummy fallback in case mpi4py/MPI aren't available
    class NoneMetaclass(type):
        def __getattr__(self, name):
            return None
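# Sketch (added) of the atexit ordering the comment above relies on: handlers
# run last-in, first-out, so a finalizer registered early at import time (like
# cleanup() above) runs after handlers registered later by user code.
import atexit

atexit.register(lambda: print('registered first, runs last'))
atexit.register(lambda: print('registered second, runs first'))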
import mpi4py

mpi4py.get_include()
mpi4py.get_config()

import mpi4py.rc
mpi4py.rc(
    initialize=True,
    threads=True,
    thread_level='multiple',
    finalize=None,
    fast_reduce=True,
    recv_mprobe=True,
    errors='exception',
)
try:
    mpi4py.rc(querty=False)
except TypeError:
    pass

import mpi4py
mpi4py.profile()
mpi4py.profile('mpe')
mpi4py.profile('mpe', path="/usr/lib")
mpi4py.profile('mpe', path=["/usr/lib"])
mpi4py.profile('mpe', logfile="mpi4py")
mpi4py.profile('mpe', logfile="mpi4py")
mpi4py.profile('vt')
mpi4py.profile('vt', path="/usr/lib")
mpi4py.profile('vt', path=["/usr/lib"])
mpi4py.profile('vt', logfile="mpi4py")
mpi4py.profile('vt', logfile="mpi4py")

try:
    mpi4py.profile('@querty')
except ValueError:
    pass