def __init__(self, mpi_comm=None, ranks_per_partition=None):

        # check ranks_per_partition
        if ranks_per_partition is not None:
            if not hoomd.version.mpi_enabled:
                raise RuntimeError(
                    "The ranks_per_partition option is only available in MPI.\n"
                )

        mpi_available = hoomd.version.mpi_enabled

        self.cpp_mpi_conf = None

        # create the specified configuration
        if mpi_comm is None:
            self.cpp_mpi_conf = _hoomd.MPIConfiguration()
        else:
            if not mpi_available:
                raise RuntimeError(
                    "mpi_comm is not supported in serial builds")

            handled = False

            # pass in pointer to MPI_Comm object provided by mpi4py
            try:
                import mpi4py
                if isinstance(mpi_comm, mpi4py.MPI.Comm):
                    addr = mpi4py.MPI._addressof(mpi_comm)
                    self.cpp_mpi_conf = \
                        _hoomd.MPIConfiguration._make_mpi_conf_mpi_comm(addr)
                    handled = True
            except ImportError:
                # silently ignore when mpi4py is missing
                pass

            # undocumented case: handle plain integers as pointers to MPI_Comm
            # objects
            if not handled and isinstance(mpi_comm, int):
                self.cpp_mpi_conf = \
                    _hoomd.MPIConfiguration._make_mpi_conf_mpi_comm(mpi_comm)
                handled = True

            if not handled:
                raise RuntimeError(
                    "Invalid mpi_comm object: {}".format(mpi_comm))

        if ranks_per_partition is not None:
            # check validity
            if (self.cpp_mpi_conf.getNRanksGlobal() % ranks_per_partition):
                raise RuntimeError(
                    'Total number of ranks is not a multiple of '
                    'ranks_per_partition.')

            # split the communicator into partitions
            self.cpp_mpi_conf.splitPartitions(ranks_per_partition)
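# Usage sketch (not part of the original example): one way this
# constructor might be invoked when mpi4py is available. The class is
# assumed to be hoomd.communicator.Communicator, and hoomd.device.CPU is
# shown only for illustration; run under MPI (e.g. `mpirun -n 4`) so the
# total rank count is a multiple of ranks_per_partition.
from mpi4py import MPI
import hoomd

# Wrap the mpi4py world communicator and split it into partitions of
# two ranks each.
communicator = hoomd.communicator.Communicator(mpi_comm=MPI.COMM_WORLD,
                                               ranks_per_partition=2)
device = hoomd.device.CPU(communicator=communicator)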
Example #2
def _create_mpi_conf(mpi_comm, options):
    global mpi_conf

    # use a cached MPI configuration if available
    if mpi_conf is not None:
        return mpi_conf

    mpi_available = _hoomd.is_MPI_available()

    # create the specified configuration
    if mpi_comm is None:
        mpi_conf = _hoomd.MPIConfiguration()
    else:
        if not mpi_available:
            raise RuntimeError("mpi_comm is not supported in serial builds")

        handled = False

        # pass in pointer to MPI_Comm object provided by mpi4py
        try:
            import mpi4py
            if isinstance(mpi_comm, mpi4py.MPI.Comm):
                addr = mpi4py.MPI._addressof(mpi_comm)
                mpi_conf = _hoomd.MPIConfiguration._make_mpi_conf_mpi_comm(addr)
                handled = True
        except ImportError:
            # silently ignore when mpi4py is missing
            pass

        # undocumented case: handle plain integers as pointers to MPI_Comm objects
        if not handled and isinstance(mpi_comm, int):
            mpi_conf = _hoomd.MPIConfiguration._make_mpi_conf_mpi_comm(mpi_comm)
            handled = True

        if not handled:
            raise RuntimeError("Invalid mpi_comm object: {}".format(mpi_comm))

    if options.nrank is not None:
        # check validity
        nrank = options.nrank
        if (mpi_conf.getNRanksGlobal() % nrank):
            raise RuntimeError('Total number of ranks is not a multiple of --nrank')

        # split the communicator into partitions
        mpi_conf.splitPartitions(nrank)

    return mpi_conf
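# Usage sketch (not part of the original example): calling the helper
# with a minimal stand-in for the parsed command-line options. The
# `options` object only needs an `nrank` attribute here; SimpleNamespace
# is used purely for illustration.
from types import SimpleNamespace

options = SimpleNamespace(nrank=1)  # one rank per partition; any rank count is valid
mpi_conf = _create_mpi_conf(mpi_comm=None, options=options)
print(mpi_conf.getNRanksGlobal())   # total number of ranks across all partitions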