def barrier(tag=0, sleep=0.01):
    """Block until every MPI process reaches this point.

    Unlike the stock MPI.comm.barrier(), which may keep idle processes
    spinning on the CPU, this barrier polls with Iprobe and sleeps
    between polls so waiting ranks stay idle.

    The exchange doubles the partner distance each round (send to
    rank+d, receive from rank-d, d = 1, 2, 4, ...), which completes in
    O(log SIZE) rounds.

    Args:
        tag: MPI message tag used for the barrier traffic.
        sleep: seconds to sleep between successive Iprobe polls.
    """
    if SIZE == 1:
        return
    distance = 1
    while distance < SIZE:
        send_to = (RANK + distance) % SIZE
        recv_from = (RANK - distance + SIZE) % SIZE
        pending = COMM.isend(None, send_to, tag)
        # Poll instead of issuing a blocking recv so this rank can
        # sleep while it waits.
        while not COMM.Iprobe(recv_from, tag):
            time.sleep(sleep)
        COMM.recv(None, recv_from, tag)
        pending.Wait()
        distance *= 2
def elect():
    """Randomly choose one node among all nodes as the president.

    Every rank draws a candidate, but only the broadcast root's draw
    (root 0 by default) is distributed, so all ranks return the same
    value.

    Returns:
        The rank of the elected president.
    """
    candidate = np.random.randint(SIZE)
    president = COMM.bcast(candidate)
    return president
def root_decide(decision):
    """Broadcast the root's decision so every rank returns the same value."""
    agreed = COMM.bcast(decision)
    return agreed
def mpi_all(decision):
    """Logical AND of ``decision`` across all MPI instances.

    Gathers every rank's decision and returns True only when all of
    them are truthy.
    """
    gathered = COMM.allgather(decision)
    return all(gathered)
# Module-level MPI setup: bind COMM to the real MPI world communicator,
# or fall back to a single-node dummy when mpi4py is unavailable.
# NOTE(review): the original mangled source starts with the body of a
# try-block whose `try:` keyword was lost; it is restored here, as the
# `except ImportError` that follows requires it.
try:
    from mpi4py import MPI
    COMM = MPI.COMM_WORLD
    _IS_DUMMY = False
except ImportError as error:
    logging.warning(
        "Warning: I cannot import mpi4py. Using a dummy single noded "
        "implementation instead. The program will run in single node mode "
        "even if you executed me with mpirun or mpiexec.\n"
        "\n"
        "We STRONGLY recommend you to try to install mpi and "
        "mpi4py.\n")
    logging.warning("mpi4py exception message is: %s", error)
    from decaf.util._mpi_dummy import COMM
    _IS_DUMMY = True

# Cached per-process MPI identity.
RANK = COMM.Get_rank()
SIZE = COMM.Get_size()
HOST = socket.gethostname()

_MPI_PRINT_MESSAGE_TAG = 560710
_MPI_BUFFER_LIMIT = 2**30

# We need a different random seed for each MPI instance. Use addition
# rather than multiplication: time.time() * RANK would always seed
# rank 0 with the constant 0.0, making its "random" stream identical
# on every run.
# NOTE(review): only the stdlib `random` module is seeded here; numpy's
# RNG (used by elect()) is not — confirm whether that is intended.
logging.info('blop.util.mpi: setting different random seeds for each node.')
random.seed(time.time() + RANK)


def is_dummy():
    '''Returns True if this is a dummy version of MPI.'''
    return _IS_DUMMY