Example No. 1
    def __init__(self,
                 n=1,
                 mpirun="mpiexec",
                 working_directory=None,
                 options=None):
        """
        `n` - the number of hosts to run on.
        `mpirun` - the path to the mpirun or mpiexec executable. If a full path
                   is not given, the user's PATH will be searched.
        `options` - extra options for SLURM
        `working_directory` - directory in which to run on the hosts
        """
        LaunchMode.__init__(self, working_directory, options)

        class MPI(Executable):
            name = mpirun
            default_executable_name = mpirun

        if os.path.exists(mpirun):  # mpirun is a full path
            mpi_cmd = MPI(path=mpirun)
        else:
            mpi_cmd = MPI(path=None)
        self.mpirun = mpi_cmd.path
        # should warn if mpirun not found
        assert n > 0
        self.n = int(n)
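
The constructor above accepts either an explicit path to mpirun/mpiexec or a bare name that the Executable class resolves against the user's PATH, and the comment notes that it "should warn if mpirun not found". The standalone sketch below (the resolve_mpirun helper and its warning text are illustrative, not part of this class or of Sumatra's API) shows one way that lookup-and-warn step could be done with the standard library:

import os
import shutil
import warnings


def resolve_mpirun(mpirun="mpiexec"):
    """Return an absolute path to the MPI launcher, or None after warning."""
    if os.path.exists(mpirun):        # an explicit path was given
        return os.path.abspath(mpirun)
    found = shutil.which(mpirun)      # otherwise search the user's PATH
    if found is None:
        warnings.warn("MPI launcher %r was not found on the PATH" % mpirun)
    return found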
Example No. 2
    def __init__(self,
                 n=1,
                 mpirun="mpiexec",
                 hosts=None,
                 options=None,
                 pfi_path="/usr/local/bin/pfi.py",
                 working_directory=None):
        """
        `n` - the number of hosts to run on.
        `mpirun` - the path to the mpirun or mpiexec executable. If a full path
                   is not given, the user's PATH will be searched.
        `hosts` - a list of host names to run on. **Currently not used.**
        `options` - extra command line options for mpirun/mpiexec
        `pfi_path` - the path to the pfi.py script provided with Sumatra, which
                     should be installed on every node and is used to obtain
                     platform information.
        `working_directory` - directory in which to run on the hosts
        """
        LaunchMode.__init__(self, working_directory, options)

        class MPI(Executable):
            name = mpirun
            default_executable_name = mpirun

        if os.path.exists(mpirun):  # mpirun is a full path
            mpi_cmd = MPI(path=mpirun)
        else:
            mpi_cmd = MPI(path=None)
        self.mpirun = mpi_cmd.path
        # should warn if mpirun not found
        self.hosts = hosts or []
        self.n = n
        self.mpi_info = {}
        self.pfi_path = pfi_path
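
Once the launcher path, host list and process count are stored, a launch mode like this ultimately has to turn them into a single mpiexec invocation. The sketch below is only illustrative (the build_mpi_command helper and the exact flag spellings are assumptions, not Sumatra's actual implementation), but it shows how such a command line could be assembled from these attributes:

def build_mpi_command(mpirun, n, hosts=None, options=None,
                      executable="python", main_file="main.py"):
    """Assemble an mpiexec/mpirun command line from the stored attributes."""
    cmd = [mpirun, "-n", str(n)]
    if hosts:                              # e.g. ["node1", "node2"]
        cmd += ["-host", ",".join(hosts)]  # flag spelling varies between MPI implementations
    if options:
        cmd += options.split()
    cmd += [executable, main_file]
    return " ".join(cmd)

# build_mpi_command("mpiexec", 4, hosts=["node1", "node2"])
# returns "mpiexec -n 4 -host node1,node2 python main.py"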
Example No. 3
import sys
import pickle

import numpy as np

# Choose a communicator object from whichever MPI-aware extension module has
# already been imported; fall back to a serial dummy below if none is present.
world = None

if '_gpaw' in sys.modules:
    import _gpaw
    try:
        world = _gpaw.Communicator()
    except AttributeError:
        pass
elif '_asap' in sys.modules:
    import _asap
    try:
        world = _asap.Communicator()
    except AttributeError:
        pass
elif 'mpi4py' in sys.modules:
    world = MPI4PY()  # wrapper communicator class defined earlier in the original file (not shown)

if world is None:
    world = MPI()  # serial single-process fallback, also defined earlier in the original file


def barrier():
    world.barrier()


def broadcast(obj, root=0, comm=world):
    """Broadcast a Python object across an MPI communicator and return it."""
    if comm.rank == root:
        string = pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
        n = np.array([len(string)], int)
    else:
        string = None
        n = np.empty(1, int)
    comm.broadcast(n, root)
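
The excerpt stops after the length of the pickled object has been broadcast. The usual continuation of this pattern is to broadcast the byte buffer itself and unpickle it on every non-root rank. The standalone mpi4py sketch below reproduces that two-phase idea (the variable names and the example payload are illustrative, and it uses mpi4py's COMM_WORLD directly rather than the module's 'world' object):

import pickle

import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
root = 0

if comm.rank == root:
    payload = pickle.dumps({"step": 42, "label": "demo"}, pickle.HIGHEST_PROTOCOL)
    length = np.array([len(payload)], dtype=np.int64)
else:
    payload = None
    length = np.empty(1, dtype=np.int64)

comm.Bcast(length, root=root)                  # phase 1: broadcast the pickle's size

buf = np.empty(length[0], dtype=np.uint8)      # phase 2: broadcast the pickle itself
if comm.rank == root:
    buf[:] = np.frombuffer(payload, dtype=np.uint8)
comm.Bcast(buf, root=root)

obj = pickle.loads(buf.tobytes())              # every rank now holds an equal copy

In practice, mpi4py's lower-case comm.bcast(obj, root=0) performs this pickling and buffer exchange internally, which is why arbitrary Python objects can be broadcast with a single call.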
Example No. 4
"""
This tutorial provides an introduction to MPI in python.

MPI (Message Passing Interface) is a protocol used for parallel computation. 
It is designed to allow multiple CPUs with isolated memory to perform tasks in 
parallel, communicating with each other through 'message passing'. These CPUs 
could be a network of desktop computers or a supercomputer cluster (like Magnus). 
MPI is highly portable and scalable, making it one of the most commonly used 
forms of parallelisation in scientific computing.

You will not need to do much MPI programming to use QuOp_MPI, but understanding 
how QuOp_MPI achieves parallelisation will assist you in defining your quality functions.

MPI has implementations in the C, Fortran, Java and Python programming languages.
Here we will be using the Python implementation of MPI, 'mpi4py'.
"""

from mpi4py import MPI

"""
MPI programs work by running multiple copies of the same program simultaneously.
These copies run independently, except when they send messages to each other
using calls to MPI functions. Each copy of the program is referred to as an MPI 'node'
(more formally, an MPI process), and an 'MPI communicator' groups these nodes.
Within an MPI communicator, each node is identified by a 'rank' number.

The number of nodes is chosen when the MPI program is run. For example, the terminal command:

    mpiexec -N 1 python3 mpi_tutorial.py
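
To make the rank and communicator ideas above concrete, here is a minimal, self-contained mpi4py example (not part of the original tutorial; the print format is illustrative). Launching it with mpiexec and several copies prints one line per rank:

from mpi4py import MPI

comm = MPI.COMM_WORLD     # the default communicator containing every copy of the program
rank = comm.Get_rank()    # this copy's identifier within the communicator
size = comm.Get_size()    # total number of copies started by mpiexec

print("Hello from rank {} of {}".format(rank, size))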