Example #1
    def __halo_end_exchange(self, dim):
        """End a halo exchange along a given Dimension."""
        for d, i, payload, req in list(self._in_flight):
            if d == dim:
                status = MPI.Status()
                req.Wait(status=status)
                if payload is not None and status.source != MPI.PROC_NULL:
                    # The MPI.Request `req` originated from a `comm.Irecv`
                    # Now need to scatter the data to the right place
                    self._data_in_region(HALO, d, i)[:] = payload
            self._in_flight.remove((d, i, payload, req))
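For context, here is a hedged reconstruction of the matching `__halo_begin_exchange`, which posts the non-blocking send/recv pairs that the method above completes. The distributor attributes `neighbours` and `comm`, the `LEFT`/`RIGHT`/`OWNED` markers, `self.dtype`, and the use of `numpy` (as `np`) are assumptions, not taken from the snippet.

    # Sketch only: assumes `self._distributor` exposes `neighbours[dim][side]`
    # and `comm`, and that `numpy` is available as `np`.
    def __halo_begin_exchange(self, dim):
        """Begin a halo exchange along a given Dimension."""
        neighbours = self._distributor.neighbours
        comm = self._distributor.comm
        for i in [LEFT, RIGHT]:
            neighbour = neighbours[dim][i]
            owned_region = self._data_in_region(OWNED, dim, i)
            halo_region = self._data_in_region(HALO, dim, i)
            # Contiguous staging buffers for the non-blocking calls
            sendbuf = np.ascontiguousarray(owned_region)
            recvbuf = np.ndarray(shape=halo_region.shape, dtype=self.dtype)
            # The recv entry carries its payload buffer, the send entry carries
            # None, matching what `__halo_end_exchange` expects in `_in_flight`
            self._in_flight.append((dim, i, recvbuf, comm.Irecv(recvbuf, neighbour)))
            self._in_flight.append((dim, i, None, comm.Isend(sendbuf, neighbour)))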
Example #2
    @contextmanager  # requires `from contextlib import contextmanager`
    def timer_on(self, name, comm=None):
        """
        Measure the execution time of a Python-level code region.

        Parameters
        ----------
        name : str
            A representative string for the timed region.
        comm : MPI communicator, optional
            If provided, the region is timed collectively: all ranks synchronise
            at an MPI barrier immediately before the timer starts and after it
            stops, so a single rank's reading reflects the global execution time.
        """
        if comm and comm is not MPI.COMM_NULL:
            comm.Barrier()
            tic = MPI.Wtime()
            yield
            comm.Barrier()
            toc = MPI.Wtime()
        else:
            tic = seq_time()
            yield
            toc = seq_time()
        self.py_timers[name] = toc - tic
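A brief usage sketch follows. It is illustrative only: `profiler` stands for whatever object defines the method above, and `op.apply(...)` is a placeholder for the timed workload.

# Hypothetical usage; `profiler` and `op` are placeholders.
with profiler.timer_on('solve', comm=MPI.COMM_WORLD):
    op.apply(time=100)
print(profiler.py_timers['solve'])  # elapsed seconds for the timed region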
Example #3
    def _halo_exchange(self):
        """Perform the halo exchange with the neighboring processes."""
        if not MPI.Is_initialized() or MPI.COMM_WORLD.size == 1:
            # Nothing to do
            return
        if MPI.COMM_WORLD.size > 1 and self._distributor is None:
            raise RuntimeError("`%s` cannot perform a halo exchange as it has "
                               "no Grid attached" % self.name)
        if self._in_flight:
            raise RuntimeError("`%s` cannot initiate a halo exchange as previous "
                               "exchanges are still in flight" % self.name)
        for i in self.space_dimensions:
            self.__halo_begin_exchange(i)
            self.__halo_end_exchange(i)
        self._is_halo_dirty = False
        assert not self._in_flight
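As a hypothetical illustration of when the method above is invoked, Python-level code would typically trigger it right before reading data that straddles the halo, guarded by the dirty flag used in the snippet (`f` is a placeholder for a Function-like object).

# Illustrative only; `f` stands for an object exposing the method above.
if f._is_halo_dirty:
    f._halo_exchange()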
Example #4
class MPIMsg(CompositeObject):

    _C_field_bufs = 'bufs'
    _C_field_bufg = 'bufg'
    _C_field_sizes = 'sizes'
    _C_field_rrecv = 'rrecv'
    _C_field_rsend = 'rsend'

    if MPI._sizeof(MPI.Request) == sizeof(c_int):
        c_mpirequest_p = type('MPI_Request', (c_int, ), {})
    else:
        c_mpirequest_p = type('MPI_Request', (c_void_p, ), {})

    def __init__(self, name, function, halos, fields=None):
        self._function = function
        self._halos = halos
        fields = (fields or []) + [
            (MPIMsg._C_field_bufs, c_void_p),
            (MPIMsg._C_field_bufg, c_void_p),
            (MPIMsg._C_field_sizes, POINTER(c_int)),
            (MPIMsg._C_field_rrecv, MPIMsg.c_mpirequest_p),
            (MPIMsg._C_field_rsend, MPIMsg.c_mpirequest_p),
        ]
        super(MPIMsg, self).__init__(name, 'msg', fields)

        # Required to allocate the MPI buffers before jumping to C-land and to
        # free them after returning to Python-land
        self._allocator = default_allocator()
        self._memfree_args = []

    def __del__(self):
        self._C_memfree()

    def _C_memfree(self):
        # Deallocate the MPI buffers
        for i in self._memfree_args:
            self._allocator.free(*i)
        self._memfree_args[:] = []

    def __value_setup__(self, dtype, value):
        # We eventually produce an array of `struct msg` that is as big as
        # the number of peers we have to communicate with
        return (dtype._type_ * self.npeers)()

    @property
    def function(self):
        return self._function

    @property
    def halos(self):
        return self._halos

    @property
    def npeers(self):
        return len(self._halos)

    def _arg_defaults(self, alias=None):
        function = alias or self.function
        for i, halo in enumerate(self.halos):
            entry = self.value[i]
            # Buffer size for this peer
            shape = []
            for dim, side in zip(*halo):
                try:
                    shape.append(getattr(function._size_owned[dim], side.name))
                except AttributeError:
                    assert side is CENTER
                    shape.append(function._size_domain[dim])
            entry.sizes = (c_int * len(shape))(*shape)
            # Allocate the send/recv buffers
            size = reduce(mul, shape)
            ctype = dtype_to_ctype(function.dtype)
            entry.bufg, bufg_memfree_args = self._allocator._alloc_C_libcall(
                size, ctype)
            entry.bufs, bufs_memfree_args = self._allocator._alloc_C_libcall(
                size, ctype)
            # The `memfree_args` will be used to deallocate the buffers upon
            # returning from C-land
            self._memfree_args.extend([bufg_memfree_args, bufs_memfree_args])

        return {self.name: self.value}

    def _arg_values(self, args=None, **kwargs):
        return self._arg_defaults(
            alias=kwargs.get(self.function.name, self.function))

    def _arg_apply(self, *args, **kwargs):
        self._C_memfree()

    # Pickling support
    _pickle_args = ['name', 'function', 'halos']
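The class above relies on names imported elsewhere in its module. Below is a hedged reconstruction of the standard-library and mpi4py imports it needs, followed by a purely illustrative instantiation; the Devito-internal names and the `f`/`halos` arguments are placeholders.

# Imports used by MPIMsg; the Devito-internal names (CompositeObject,
# default_allocator, dtype_to_ctype, CENTER) come from Devito modules whose
# exact paths are not shown in the snippet.
from ctypes import POINTER, c_int, c_void_p, sizeof
from functools import reduce
from operator import mul
from mpi4py import MPI

# Illustrative lifecycle: one `struct msg` entry per peer, with buffers
# allocated when Operator arguments are processed and freed on return from C.
msg = MPIMsg('msg_f', f, halos)   # `f` (a Function) and `halos` are placeholders
args = msg._arg_defaults()        # allocates the send/recv buffers per peer
# ... the generated C code performs the exchanges through `args[msg.name]` ...
msg._arg_apply()                  # frees the buffers once back in Python-land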
Example #5
            clear_cache()

            gflopss, oi, timings, _ = self.func(*args, **kwargs)

            for key in timings.keys():
                self.register(gflopss[key], measure="gflopss", event=key.name)
                self.register(oi[key], measure="oi", event=key.name)
                self.register(timings[key], measure="timings", event=key.name)

    return DevitoExecutor(func)


if __name__ == "__main__":
    # If running with MPI, we emit logging messages from rank0 only
    try:
        MPI.Init()  # Devito starts off with MPI disabled!
        set_log_level('DEBUG', comm=MPI.COMM_WORLD)

        if MPI.COMM_WORLD.size > 1 and not configuration['mpi']:
            warning(
                "It seems that you're running over MPI with %d processes, but "
                "DEVITO_MPI is unset. Setting `DEVITO_MPI=basic`..." %
                MPI.COMM_WORLD.size)
            configuration['mpi'] = 'basic'
    except TypeError:
        # MPI not available
        pass

    # Profiling at max level
    configuration['profiling'] = 'advanced'
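For completeness, a hedged note on how the guard above is exercised at launch time: DEVITO_MPI is the environment variable referenced by the warning, while the script name and rank count below are illustrative.

# Illustrative launch lines (any MPI launcher works):
#   $ mpirun -n 4 python benchmark.py ...
#     -> fires the warning above, then falls back to configuration['mpi'] = 'basic'
#   $ DEVITO_MPI=basic mpirun -n 4 python benchmark.py ...
#     -> MPI support enabled explicitly, no warning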
Example #6
    last_res = None
    for params in sweep(kwargs, keys=sweep_options):
        kwargs.update(params)
        _, _, _, res = run(**kwargs)

        if last_res is None:
            last_res = res
        else:
            for i in range(len(res)):
                assert np.isclose(res[i], last_res[i])


if __name__ == "__main__":
    # If running with MPI, we emit logging messages from rank0 only
    try:
        MPI.Init()  # Devito starts off with MPI disabled!
        set_log_level('DEBUG', comm=MPI.COMM_WORLD)

        if MPI.COMM_WORLD.size > 1 and not configuration['mpi']:
            warning(
                "It seems that you're running over MPI with %d processes, but "
                "DEVITO_MPI is unset. Setting `DEVITO_MPI=basic`..." %
                MPI.COMM_WORLD.size)
            configuration['mpi'] = 'basic'
    except TypeError:
        # MPI not available
        pass

    # Benchmarking cannot be done at basic level
    if configuration['profiling'] == 'basic':
        configuration['profiling'] = 'advanced'
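To make the sweep loop in the snippet above easier to follow, here is a minimal, purely illustrative sketch of what a `sweep`-style helper could look like. It is not the real benchmarking utility; it assumes the swept keys map to lists of candidate values in the defaults dict.

from itertools import product

def sweep_sketch(defaults, keys=None):
    """Yield one parameter dict per point of the cartesian product of the
    candidate values held by the swept keys (illustrative only)."""
    keys = keys or []
    candidates = {k: defaults[k] for k in keys}
    for values in product(*candidates.values()):
        yield dict(zip(candidates, values))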