Code Example #1
File: init.py Project: yomori/halo2fluxmap3
def initialize(parameterfile):

    params = getparameters(parameterfile)

    if params.serial: mpi4py.rc.initialize = False
    from mpi4py import MPI

    if MPI.Is_initialized():
        params.comm = MPI.COMM_WORLD
        params.rank = params.comm.Get_rank()
        params.size = params.comm.Get_size()
        params.parallel = True
    else:
        params.rank = 0
        params.size = 1
        params.parallel = False

    fmt = '%H:%M:%S on %m/%d/%Y'
    timestamp = datetime.datetime.now().strftime(fmt)

    if (params.rank == 0):
        print('')
        bar = 72 * '-'
        print(bar)
        print('Running on', params.size, 'processor(s)')
        print('Time:      ' + timestamp)
        print('Directory: ' + os.getcwd())
        print(bar)
        print('')
Code Example #2
File: pflee.py Project: sannedonker/CSS_Group2
 def __init__(self):
     if not MPI.Is_initialized():
         print("Manual MPI_Init performed.")
         MPI.Init()
     self.comm = MPI.COMM_WORLD
     self.rank = self.comm.Get_rank()
     self.size = self.comm.Get_size()
Code Example #3
File: __init__.py Project: tobiasleibner/pymor
def _init_mpi():
    """provides a way to manually set the thread init mode for MPI if necessary.
    Needs to happen as early as possible, otherwise mpi4py might auto-init somewhere else.
    """
    try:
        import mpi4py
    except ImportError:
        return
    # only change finalize setting if unset
    finalize = (mpi4py.rc.finalize is None) or mpi4py.rc.finalize
    mpi4py.rc(initialize=False, finalize=finalize)
    from mpi4py import MPI
    if not MPI.Is_initialized():
        required_level = int(
            os.environ.get('PYMOR_MPI_INIT_THREAD', MPI.THREAD_MULTIPLE))
        supported_lvl = MPI.Init_thread(required_level)
        if supported_lvl < required_level:
            print(
                f'MPI does not support threading level {required_level}, running with {supported_lvl} instead',
                flush=True)
    try:
        # this solves sporadic mpi calls happening after finalize
        import petsc4py
        petsc4py.init()
    except ImportError:
        return
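The docstring above describes forcing a specific MPI thread level before mpi4py can auto-initialize. For reference, a minimal standalone sketch of that pattern (independent of pyMOR; the requested level is illustrative):

# Sketch: request a thread level explicitly instead of relying on
# mpi4py's automatic MPI_Init_thread at import time.
import mpi4py
mpi4py.rc.initialize = False      # must be set before importing mpi4py.MPI

from mpi4py import MPI

if not MPI.Is_initialized():
    requested = MPI.THREAD_MULTIPLE
    provided = MPI.Init_thread(requested)
    if provided < requested:
        print(f'MPI provides thread level {provided}, requested {requested}',
              flush=True)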
Code Example #4
File: mpi_helper.py Project: vibhatha/deep500
    def __enter__(self):
        try:
            # Import MPI4Py without initializing it
            import mpi4py.rc
            mpi4py.rc.initialize = False
            from mpi4py import MPI
            if MPI.Is_initialized():
                return "child"
        except:
            pass

        n = self._n
        if n <= 1:
            return "child"
        if os.getenv('D500_IN_MPI') is None:
            env = os.environ.copy()
            env.update(D500_IN_MPI="1")
            # n = 2
            args = ['mpiexec', '-np', str(n)]
            args += [sys.executable] + sys.argv
            print(' '.join(args))
            subprocess.check_call(args, env=env)
            return "parent"
        else:
            return "child"
Code Example #5
def start_mpi(block_nonroot_stdout=True):
    """
    Initialize MPI if it has not already been initialized, then set the
    communicators, Npus, and rank variables.

    Parameters
    ----------

    block_nonroot_stdout : bool (True)
        Redirect stdout on nonzero ranks to /dev/null, for cleaner output.

    """
    global world_comm, node_comm, rank_comm, rank, Npus
    if not MPI.Is_initialized():
        MPI.Init_thread(MPI.THREAD_MULTIPLE)
        atexit.register(MPI.Finalize)
    world_comm = MPI.COMM_WORLD
    node_comm = world_comm.Split_type(MPI.COMM_TYPE_SHARED)
    rank_comm = world_comm.Split(color=node_comm.rank)

    Npus = world_comm.Get_size()
    rank = world_comm.Get_rank()
    set_mpi_excepthook(world_comm)

    world_comm.Barrier()

    if (not rank == 0) and block_nonroot_stdout:  # pragma: no cover
        # For non-root ranks, do not print to stdout.
        # (Uncovered until we have multi-rank tests)
        sys.stdout = open('/dev/null', 'w')
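For illustration, a hedged sketch of what the two communicator splits above produce: a shared-memory (node-local) communicator and a communicator grouping ranks that share the same node-local index (variable names here are illustrative, not from the project above):

# Sketch: node-local split and the complementary cross-node split.
from mpi4py import MPI

world = MPI.COMM_WORLD
node = world.Split_type(MPI.COMM_TYPE_SHARED)   # ranks on the same node
cross = world.Split(color=node.rank)            # ranks with the same node-local index

print(f'world rank {world.rank}: node-local rank {node.rank}, '
      f'cross-node rank {cross.rank}', flush=True)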
Code Example #6
def report_collective_grid_sizes(ug):
  from mpi4py import MPI
  num_cells = ug.GetNumberOfCells()
  mem_size = ug.GetActualMemorySize()
  if MPI.Is_initialized():
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()
    total_cell_count = comm.reduce(num_cells, op=MPI.SUM)
    max_cell_count = comm.reduce(num_cells, op=MPI.MAX)
    min_cell_count = comm.reduce(num_cells, op=MPI.MIN)
    total_mem_used = comm.reduce(mem_size, op=MPI.SUM)
    min_mem_used = comm.reduce(mem_size, op=MPI.MIN)
    max_mem_used = comm.reduce(mem_size, op=MPI.MAX)
    if rank == 0:
      print "Average grid cell count over MPI ranks: {:.1e}"\
        .format(total_cell_count/float(size))
      print "Minimum grid cell count over MPI ranks: {:.1e}"\
        .format(min_cell_count)
      print "Maximum grid cell count over MPI ranks: {:.1e}"\
        .format(max_cell_count)
      print "Average grid memory used over MPI ranks: {} MB"\
        .format((total_mem_used/float(size))/1000.0)
      print "Minimum grid memory used over MPI ranks: {} MB"\
        .format(min_mem_used/1000.0)
      print "Maximum grid memory used over MPI ranks: {} MB"\
        .format(max_mem_used/1000.0)
Code Example #7
File: ext_module.py Project: rahulgovind/pysph
    def __init__(self,
                 src,
                 extension='pyx',
                 root=None,
                 verbose=False,
                 depends=None,
                 extra_inc_dirs=None,
                 extra_compile_args=None,
                 extra_link_args=None):
        """Initialize ExtModule.

        Parameters
        -----------

        src : str : source code.

        extension : str : extension for source code file.
            Do not specify the '.' (defaults to 'pyx').

        root : str : root of directory to store code and modules in.
            If not set, it defaults to "~/.cpy/source/<platform-directory>",
            where <platform-directory> is platform specific.

        verbose : Bool : Print messages for convenience.

        depends : list : a list of modules that this extension depends on.
            If any of these have an m_time greater than the compiled extension
            module, the extension will be recompiled.

        extra_inc_dirs : list : a list of directories to look for .pxd, .h
            and other files.

        extra_compile_args: list : a list of extra compilation flags.

        extra_link_args: list : a list of extra link flags.
        """
        self._setup_root(root)
        self.code = src
        self.hash = get_md5(src)
        self.extension = extension
        self.name = 'm_{0}'.format(self.hash)
        self._setup_filenames()
        self.verbose = verbose
        self.depends = depends
        self.extra_inc_dirs = extra_inc_dirs if extra_inc_dirs else []
        self.extra_compile_args = (extra_compile_args
                                   if extra_compile_args else [])
        self.extra_link_args = extra_link_args if extra_link_args else []

        if MPI is not None and MPI.Is_initialized():
            self.comm = MPI.COMM_WORLD
            self.rank = self.comm.Get_rank()
            self.num_procs = self.comm.Get_size()
        else:
            self.rank = 0
            self.num_procs = 1

        self.shared_filesystem = False
        self._create_source()
Code Example #8
def using_horovod():
    """
    Returns true if the MPI environment is initialized, indicating that
    `hvd.init()` has been called.
    """
    if MPI is not None:
        return MPI.Is_initialized()
    return False
Code Example #9
def test_init():
    if not initialized():
        initialize()
    from mpi4py import MPI

    assert initialized() == True
    assert MPI.Is_initialized() == True
    assert finalized() == False
    assert MPI.Is_finalized() == False
Code Example #10
File: mpi_helper.py Project: vibhatha/deep500
def mpi_end_barrier():
    """ Invokes a barrier and finalization if MPI is running, or nothing 
        otherwise. """
    # Import MPI4Py without initializing it
    import mpi4py.rc
    mpi4py.rc.initialize = False
    from mpi4py import MPI
    if MPI.Is_initialized():
        MPI.COMM_WORLD.Barrier()
        MPI.Finalize()
Code Example #11
 def __init__(self):
   from mpi4py import MPI
   if MPI.Is_initialized():
     self.comm = MPI.COMM_WORLD
     self.rank = self.comm.Get_rank()
     self.size = self.comm.Get_size()
     self.start_time = time.time()
   else:
     self.size = 1
     self.rank = 0
Code Example #12
File: mpi_helper.py Project: vibhatha/deep500
    def __exit__(self, type, value, traceback):
        try:
            # Import MPI4Py without initializing it
            import mpi4py.rc
            mpi4py.rc.initialize = False
            from mpi4py import MPI

            mpi_init = MPI.Is_initialized()
        except:
            mpi_init = False
        if mpi_init or os.getenv('D500_IN_MPI') is not None:
            MPI.Finalize()
Code Example #13
    def __init__(self, shape, dimensions, input_comm=None, topology=None):
        super(Distributor, self).__init__(shape, dimensions)

        if configuration['mpi']:
            # First time we enter here, we make sure MPI is initialized
            if not MPI.Is_initialized():
                MPI.Init()
                global init_by_devito
                init_by_devito = True

            self._input_comm = (input_comm or MPI.COMM_WORLD).Clone()

            # Make sure the cloned communicator will be freed up upon exit
            def cleanup():
                if self._input_comm is not None:
                    self._input_comm.Free()

            atexit.register(cleanup)

            if topology is None:
                # `MPI.Compute_dims` sets the dimension sizes to be as close to each other
                # as possible, using an appropriate divisibility algorithm. Thus, in 3D:
                # * topology[0] >= topology[1] >= topology[2]
                # * topology[0] * topology[1] * topology[2] == self._input_comm.size
                # However, `MPI.Compute_dims` is distro-dependent, so we have to enforce
                # some properties through our own wrapper (e.g., OpenMPI v3 does not
                # guarantee that 9 ranks are arranged into a 3x3 grid when shape=(9, 9))
                self._topology = compute_dims(self._input_comm.size,
                                              len(shape))
            else:
                self._topology = topology

            if self._input_comm is not input_comm:
                # By default, Devito arranges processes into a cartesian topology.
                # MPI works with numbered dimensions and follows the C row-major
                # numbering of the ranks, i.e. in a 2x3 Cartesian topology (0,0)
                # maps to rank 0, (0,1) maps to rank 1, (0,2) maps to rank 2, (1,0)
                # maps to rank 3, and so on.
                self._comm = self._input_comm.Create_cart(self._topology)
            else:
                self._comm = input_comm
        else:
            self._input_comm = None
            self._comm = MPI.COMM_NULL
            self._topology = tuple(1 for _ in range(len(shape)))

        # The domain decomposition
        self._decomposition = [
            Decomposition(np.array_split(range(i), j), c)
            for i, j, c in zip(shape, self.topology, self.mycoords)
        ]
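The comments above refer to `MPI.Compute_dims` and a Cartesian process topology. A minimal sketch of those two calls on their own (a sketch under the assumptions noted in the comments, not Devito's wrapper):

# Sketch: factor the number of ranks into a 2-D grid and build a Cartesian
# communicator with C row-major rank ordering.
from mpi4py import MPI

comm = MPI.COMM_WORLD
dims = MPI.Compute_dims(comm.size, 2)        # e.g. 6 ranks -> [3, 2]
cart = comm.Create_cart(dims, periods=[False, False], reorder=False)
coords = cart.Get_coords(cart.rank)
print(f'rank {cart.rank} -> coordinates {coords}', flush=True)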
Code Example #14
File: ext_module.py Project: rahulgovind/pysph
    def build(self, force=False):
        """Build source into an extension module.  If force is False
        previously compiled module is returned.
        """
        if not self.shared_filesystem or self.rank == 0:
            with self._lock():
                if force or self.should_recompile():
                    self._message("Compiling code at:", self.src_path)
                    inc_dirs = [numpy.get_include()]
                    inc_dirs.extend(self.extra_inc_dirs)
                    extra_compile_args, extra_link_args = (
                        self._get_extra_args())

                    extension = Extension(
                        name=self.name,
                        sources=[self.src_path],
                        include_dirs=inc_dirs,
                        extra_compile_args=extra_compile_args,
                        extra_link_args=extra_link_args,
                        language="c++")

                    if not hasattr(sys.stdout, 'errors'):
                        # FIXME: This happens when nosetests replaces
                        # stdout with a Tee instance.  This Tee instance
                        # does not have an `errors` attribute, which breaks
                        # the tests, so we disable verbose reporting.
                        script_args = []
                    else:
                        script_args = ['--verbose']
                    try:
                        with CaptureMultipleStreams() as stream:
                            mod = pyxbuild.pyx_to_dll(
                                self.src_path,
                                extension,
                                pyxbuild_dir=self.build_dir,
                                force_rebuild=True,
                                setup_args={'script_args': script_args})
                    except (CompileError, LinkError):
                        hline = "*" * 80
                        print(hline + "\nERROR")
                        print(stream.get_output()[0])
                        print(stream.get_output()[1])
                        msg = "Compilation of code failed, please check "\
                              "error messages above."
                        print(hline + "\n" + msg)
                        sys.exit(1)
                    shutil.copy(mod, self.ext_path)
                else:
                    self._message("Precompiled code from:", self.src_path)
        if MPI is not None and MPI.Is_initialized():
            self.comm.barrier()
Code Example #15
File: mpiutil.py Project: kunal-puri/PyFR
    def onexit():
        if not MPI.Is_initialized() or MPI.Is_finalized():
            return

        # Get the current exception (if any)
        exc = excepthook.exception

        # If we are exiting normally then call MPI_Finalize
        if (MPI.COMM_WORLD.size == 1 or exc is None
                or isinstance(exc, KeyboardInterrupt)
                or (isinstance(exc, SystemExit) and exc.code == 0)):
            MPI.Finalize()
        # Otherwise forcefully abort
        else:
            MPI.COMM_WORLD.Abort(1)
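The handler above chooses between MPI_Finalize and MPI_Abort depending on how the program is exiting. A self-contained, hedged sketch of the same idea (the excepthook wiring below is illustrative; PyFR's own hook differs):

# Sketch: remember any uncaught exception, then decide at interpreter exit
# whether to finalize MPI cleanly or abort the whole job.
import atexit
import sys

from mpi4py import MPI

_uncaught = None

def _excepthook(exc_type, exc_value, exc_tb):
    global _uncaught
    _uncaught = exc_value
    sys.__excepthook__(exc_type, exc_value, exc_tb)

sys.excepthook = _excepthook

def _onexit():
    if not MPI.Is_initialized() or MPI.Is_finalized():
        return
    if _uncaught is None or isinstance(_uncaught, KeyboardInterrupt):
        MPI.Finalize()            # normal shutdown on this rank
    else:
        MPI.COMM_WORLD.Abort(1)   # an error on this rank tears down the job

atexit.register(_onexit)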
Code Example #16
    def __new__(cls):
        """Creates a Serial WorkManager if size is 1.  Otherwise creates a
        single Manager and size-1 Workers.
        """
        log.debug('MPIWorkManager.__new__()')
        assert MPI.Is_initialized()
        assert MPI.Is_thread_main()

        rank = MPI.COMM_WORLD.Get_rank()
        size = MPI.COMM_WORLD.Get_size()

        if size == 1:
            return super().__new__(Serial)
        elif rank == 0:
            return super().__new__(Manager)
        else:
            return super().__new__(Worker)
Code Example #17
 def __init__(
     self,
     run_function,
     num_workers: int = None,
     callbacks=None,
     run_function_kwargs=None,
     comm=None,
 ):
     super().__init__(run_function, num_workers, callbacks,
                      run_function_kwargs)
     if not MPI.Is_initialized():
         MPI.Init_thread()
     self.comm = comm if comm else MPI.COMM_WORLD
     self.num_workers = self.comm.Get_size() - 1  # 1 rank is the master
     self.sem = asyncio.Semaphore(self.num_workers)
     logging.info(
         f"Creating MPIPoolExecutor with {self.num_workers} max_workers...")
     self.executor = MPIPoolExecutor(max_workers=self.num_workers)
     logging.info("Creation of MPIPoolExecutor done")
Code Example #18
    def exit(self, status=0):
        """Exit the mpi4py processor with the given status.

        @keyword status:    The program exit status.
        @type status:       int
        """

        # Execution on the slave.
        if MPI.COMM_WORLD.rank != 0:
            # Catch sys.exit being called on an executing slave.
            if self.in_main_loop:
                raise Exception('sys.exit unexpectedly called on slave!')

            # Catch sys.exit
            else:
                sys.stderr.write('\n')
                sys.stderr.write('***********************************************\n')
                sys.stderr.write('\n')
                sys.stderr.write('warning sys.exit called before mpi4py main loop\n')
                sys.stderr.write('\n')
                sys.stderr.write('***********************************************\n')
                sys.stderr.write('\n')
                MPI.COMM_WORLD.Abort()

        # Execution on the master.
        else:
            # Slave clean up.
            if MPI.Is_initialized() and not MPI.Is_finalized() and MPI.COMM_WORLD.rank == 0:
                # Send the exit command to all slaves.
                self._broadcast_command(Exit_command())

                # Dump all results.
                self._ditch_all_results()

            # Exit the program with the given status.
            sys.exit(status)
Code Example #19
File: fancylogger.py Project: vsoch/vsc-base
                                                       'never')

APOCALYPTIC = 'APOCALYPTIC'
# register new loglevelname
logging.addLevelName(logging.CRITICAL * 2 + 1, APOCALYPTIC)
# register QUIET, EXCEPTION and FATAL alias
logging._levelNames['EXCEPTION'] = logging.ERROR
logging._levelNames['FATAL'] = logging.CRITICAL
logging._levelNames['QUIET'] = logging.WARNING

# mpi rank support
_MPIRANK = MPIRANK_NO_MPI
if not _env_to_boolean('FANCYLOGGER_IGNORE_MPI4PY'):
    try:
        from mpi4py import MPI
        if MPI.Is_initialized():
            _MPIRANK = str(MPI.COMM_WORLD.Get_rank())
            if MPI.COMM_WORLD.Get_size() > 1:
                # enable mpi rank when mpi is used
                FANCYLOG_FANCYRECORD = True
                DEFAULT_LOGGING_FORMAT = DEFAULT_LOGGING_FORMAT_MPI
    except ImportError:
        pass


class MissingLevelName(KeyError):
    pass


def getLevelInt(level_name):
    """Given a level name, return the int value"""
Code Example #20
import os
from mpi4py import rc

assert rc.initialize is True
assert rc.finalize is None
assert rc.thread_level == 'multiple'

os.environ['MPI4PY_RC_INITIALIZE'] = 'false'
os.environ['MPI4PY_RC_FINALIZE'] = 'off'
os.environ['MPI4PY_RC_THREAD_LEVEL'] = 'single'

from mpi4py import MPI
assert not MPI.Is_initialized()
assert not MPI.Is_finalized()

assert rc.initialize is False
assert rc.finalize is False
assert rc.thread_level == 'single'
Code Example #21
 def test_initialized_mpi4py(self):
     # Test MPI initialization (mpi4py initializes MPI automatically on import; see https://mpi4py.readthedocs.io/en/stable/mpi4py.run.html)
     self.assertTrue(mpi.Is_initialized())
Code Example #22
 def testIsInitialized(self):
     flag = MPI.Is_initialized()
     self.assertTrue(type(flag) is bool)
     self.assertTrue(flag)
Code Example #23
File: test_workflow.py Project: wapsyed/gmxapi
import os
import gmx.util
from gmx.util import to_string
from gmx.util import to_utf8

# # Get a test tpr filename
# from gmx.data import tpr_filename

# These tests will need to be updated when the workspec schema changes. These features appear in
# release 0.0.4, and schema changes that break these tests warrant a bump in workspec_version.
workspec_version = "gmxapi_workspec_0_1"

try:
    from mpi4py import MPI
    withmpi_only = pytest.mark.skipif(
        not MPI.Is_initialized() or MPI.COMM_WORLD.Get_size() < 2,
        reason=
        "Test requires at least 2 MPI ranks, but MPI is not initialized or too small."
    )
except ImportError:
    withmpi_only = pytest.mark.skip(
        reason=
        "Test requires at least 2 MPI ranks, but mpi4py is not available.")

# Some constants for this test module
file1 = "a.tpr"
file2 = "b.tpr"


class WorkSpecApiLevelTestCase(unittest.TestCase):
    """Make sure the tests match the module."""
Code Example #24
File: testsupport.py Project: yingli2009/gromacs
import os
import shutil
import tempfile
import warnings
from contextlib import contextmanager
from enum import Enum
from typing import Union

import pytest

mpi_status = 'Test requires mpi4py managing 2 MPI ranks.'
skip_mpi = False
try:
    from mpi4py import MPI

    if not MPI.Is_initialized():
        skip_mpi = True
        mpi_status += ' MPI is not initialized'
    elif MPI.COMM_WORLD.Get_size() < 2:
        skip_mpi = True
        mpi_status += ' MPI context is too small.'
except ImportError:
    skip_mpi = True
    mpi_status += ' mpi4py is not available.'


def pytest_configure(config):
    config.addinivalue_line(
        "markers", "withmpi_only: test requires mpi4py managing 2 MPI ranks.")

Code Example #25
try:
    import gmxapi as gmx
    from gmxapi.simulation.context import Context as _context
    from gmxapi.simulation.workflow import WorkElement, from_tpr
    from gmxapi.version import api_is_at_least
except (ImportError, ModuleNotFoundError):
    import gmx
    from gmx import get_context as _context
    from gmx.version import api_is_at_least
    from gmx.workflow import from_tpr, WorkElement

nompi = lambda f: f
try:
    from mpi4py import MPI
    if MPI.Is_initialized():
        rank = MPI.COMM_WORLD.Get_rank()
        if MPI.COMM_WORLD.Get_size() > 1:
            nompi = pytest.mark.skip(
                reason='Test cannot run in a multirank MPI environment.')
    else:
        rank = 0

    # Get a fixture for tests that should only run with 2 MPI ranks.
    withmpi_only = pytest.mark.skipif(
        not MPI.Is_initialized() or MPI.COMM_WORLD.Get_size() < 2,
        reason=
        "Test requires at least 2 MPI ranks, but MPI is not initialized or too small."
    )
except (ImportError, ModuleNotFoundError):
    withmpi_only = pytest.mark.skip(
        reason="Test requires at least 2 MPI ranks, but mpi4py is not available.")
Code Example #26
 def cleanup():
     if MPI.Is_initialized():
         MPI.Finalize()
Code Example #27
 def cleanup():
     global init_by_devito
     if init_by_devito and MPI.Is_initialized() and not MPI.Is_finalized():
         MPI.Finalize()
Code Example #28
File: mdi.py Project: MolSSI-MDI/MDI_Library
def MDI_Init(arg1, arg2 = None):
    global world_comm
    global intra_code_comm
    global use_mpi4py
    global MPI

    # attempt to import mpi4py
    try:
        import mpi4py
        mpi4py.rc.initialize = False
        from mpi4py import MPI
        mpi4py.rc.initialize = True
        if MPI.Is_initialized():
            use_mpi4py = True
    except ImportError:
        pass

    comm = None
    if use_mpi4py:
        comm = MPI.COMM_WORLD

    # if this is a plugin code, get the plugin's MPI communicator
    plugin_mode = MDI_Get_plugin_mode()
    if ( plugin_mode == 1  and use_mpi4py ):
        # Get a pointer to the C MPI communicator
        python_plugin_mpi_world_ptr = MDI_Get_python_plugin_mpi_world_ptr()

        # Convert the C MPI communicator to an MPI4Py communicator
        c_mpi_communicator = ctypes.cast(python_plugin_mpi_world_ptr, ctypes.POINTER(ctypes.c_void_p)).contents.value
        handle_t = ctypes.c_void_p
        newobj = type(MPI.COMM_WORLD)()
        handle_new = handle_t.from_address(MPI._addressof(newobj))
        handle_new.value = c_mpi_communicator
        __mdi_plugin_mpi_intra_comm__ = newobj

        # Confirm that the new MPI communicator works
        __mdi_plugin_mpi_intra_comm__.Get_size()

        comm = __mdi_plugin_mpi_intra_comm__


    # prepend the _language option so that MDI knows this is Python code
    arg1 = "_language Python " + arg1

    # determine the communication method
    args = arg1.split()
    mdi_method = None
    for i in range(len(args)):
        if args[i] == "-method" and i < len(args) - 1:
            mdi_method = args[i+1]
    if not mdi_method:
        raise Exception("MDI Error: Unable to find -method option")

    if mdi_method == "MPI":
        # ensure that mpi4py is available
        #if not use_mpi4py:
        #    raise Exception("MDI Error: When using the MPI communication method, mpi4py must be available")

        # ensure that numpy is available
        if use_mpi4py:
            if not found_numpy:
                raise Exception("MDI Error: When using the MPI communication method, numpy must be available")

    if use_mpi4py:
        world_comm = comm
        intra_code_comm = comm

        # send basic information about the MPI communicator to the MDI library
        mpi_rank = comm.Get_rank()
        mpi_world_size = comm.Get_size()
        mdi.MDI_Set_World_Rank(mpi_rank)
        mdi.MDI_Set_World_Size(mpi_world_size)

    # set the MPI4Py callback functions
    set_mpi4py_recv_callback()
    set_mpi4py_send_callback()
    set_mpi4py_size_callback()
    set_mpi4py_rank_callback()
    set_mpi4py_gather_names_callback()
    set_mpi4py_barrier_callback()
    set_mpi4py_split_callback()

    # call MDI_Init
    command = arg1.encode('utf-8')
    ret = mdi.MDI_Init_with_options(ctypes.c_char_p(command) )
    if ret != 0:
        raise Exception("MDI Error: MDI_Init failed")

    return ret
Code Example #29
    def spawn(self, **kwargs):
        """
        Spawn MPI processes for each of the managed targets and execute them.

        Parameters
        ----------
        kwargs: dict
                Options for the `info` argument of the MPI spawn call; see
                https://www.open-mpi.org/doc/v4.0/man3/MPI_Comm_spawn.3.php
        """

        # Typically, MPI must have been initialized before spawning.
        if not MPI.Is_initialized():
            MPI.Init()

        if self._is_parent:
            # Find the path to the mpi_backend.py script (which should be in the
            # same directory as this module:
            parent_dir = os.path.dirname(__file__)
            mpi_backend_path = os.path.join(parent_dir, 'mpi_backend.py')

            # Set spawn options. Due to --oversubscribe, we do not bind processes ('bind_to' = 'none').
            info = Info.Create()
            info.Set('bind_to', "none")

            for k, v in kwargs.items():
                info.Set(k, v)

            # Spawn processes:
            self._intercomm = MPI.COMM_SELF.Spawn(sys.executable,
                                                  args=[mpi_backend_path],
                                                  maxprocs=len(self),
                                                  info=info)

            # First, transmit twiggy logging emitters to spawned processes so
            # that they can configure their logging facilities:
            for i in self._targets:
                self._intercomm.send(twiggy.emitters, i)

            # Next, serialize the routing table ONCE and then transmit it to all
            # of the child nodes:
            try:
                routing_table = self.routing_table
            except:
                routing_table = RoutingTable()
                self.log_warning(
                    'Routing Table is null, using empty routing table.')

            self._intercomm.bcast(routing_table, root=MPI.ROOT)

            # Transmit class to instantiate, globals required by the class, and
            # the constructor arguments; the backend will wait to receive
            # them and then start running the targets on the appropriate nodes.
            req = MPI.Request()
            r_list = []
            for i in self._targets:
                target_globals = all_global_vars(self._targets[i])

                # Serializing atexit with dill appears to fail in virtualenvs
                # sometimes if atexit._exithandlers contains an unserializable function:
                if 'atexit' in target_globals:
                    del target_globals['atexit']
                data = (self._targets[i], target_globals, self._kwargs[i])
                r_list.append(self._intercomm.isend(data, i))

                # Need to clobber data to prevent all_global_vars from
                # including it in its output:
                del data
            req.Waitall(r_list)
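As a compact illustration of the `MPI.COMM_SELF.Spawn` / intercommunicator pattern used above, here is a hedged parent/child sketch; the script name `worker.py` and the payload are illustrative:

# parent.py -- sketch: spawn two workers and broadcast a payload to them.
import sys
from mpi4py import MPI

intercomm = MPI.COMM_SELF.Spawn(sys.executable, args=['worker.py'], maxprocs=2)
intercomm.bcast({'job': 'demo'}, root=MPI.ROOT)   # parent is the broadcast root
intercomm.Disconnect()

# worker.py -- sketch: receive the payload from the spawning parent.
# from mpi4py import MPI
# parent = MPI.Comm.Get_parent()
# payload = parent.bcast(None, root=0)
# print(f'worker {MPI.COMM_WORLD.rank} got {payload}', flush=True)
# parent.Disconnect()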
Code Example #30
File: test_4.py Project: mpi4py/mpi4py
from mpi4py import rc
rc.finalize = False

from mpi4py import MPI
assert MPI.Is_initialized()
assert not MPI.Is_finalized()

MPI.Finalize()
assert MPI.Is_initialized()
assert MPI.Is_finalized()