Code example #1
File: test_ctypes.py  Project: benkirk/mpi_playground
 def testHandleValue(self):
     typemap = {ctypes.sizeof(ctypes.c_uint32): ctypes.c_uint32,
                ctypes.sizeof(ctypes.c_uint64): ctypes.c_uint64}
     for obj in self.objects:
         uintptr_t = typemap[MPI._sizeof(obj)]
         handle = uintptr_t.from_address(MPI._addressof(obj))
         self.assertEqual(handle.value, MPI._handleof(obj))
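Note: all of the examples on this page revolve around the same low-level pattern. The sketch below (an editorial addition, assuming only that mpi4py is installed) shows it in isolation: MPI._sizeof picks a ctypes type as wide as the C MPI_Comm handle (an int on MPICH-style builds, a pointer on Open MPI), MPI._addressof gives the address where the mpi4py object stores that handle, and from_address reads the raw handle out.

import ctypes
from mpi4py import MPI

# Pick a ctypes type with the same width as the C MPI_Comm handle.
if MPI._sizeof(MPI.Comm) == ctypes.sizeof(ctypes.c_int):
    MPI_Comm = ctypes.c_int      # MPICH and derivatives
else:
    MPI_Comm = ctypes.c_void_p   # Open MPI

# Read the raw handle stored inside the Python communicator object.
comm_val = MPI_Comm.from_address(MPI._addressof(MPI.COMM_WORLD))
print(comm_val.value)  # the C-level MPI_Comm handle as a Python integer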
Code example #2
def ncmpi_open(name):
    comm_ptr = MPI._addressof(MPI.COMM_WORLD)
    comm_val = MPI_Comm.from_address(comm_ptr)
    info_ptr = MPI._addressof(MPI.INFO_NULL)
    info_val = MPI_Info.from_address(info_ptr)
    ncid = c_int()
    retval = _ncmpi_open(comm_val, name, NC_NOWRITE, info_val, byref(ncid))
    errcheck(retval)
    return ncid.value
Code example #3
def main(forest_path, io_size, chunk_size, value_chunk_size):

    comm = MPI.COMM_WORLD
    rank = comm.rank

    if io_size == -1:
        io_size = comm.size
    if rank == 0:
        print('%i ranks have been allocated' % comm.size)
    sys.stdout.flush()

    population = 'GC'
    count = 0
    start_time = time.time()
    for gid, morph_dict in NeurotreeGen(MPI._addressof(comm),
                                        forest_path,
                                        population,
                                        io_size=io_size):
        local_time = time.time()
        # mismatched_section_dict = {}
        synapse_dict = {}
        if gid is not None:
            print('Rank %i gid: %i' % (rank, gid))
            cell = DG_GC(neurotree_dict=morph_dict, gid=gid, full_spines=False)
            # this_mismatched_sections = cell.get_mismatched_neurotree_sections()
            # if this_mismatched_sections is not None:
            #    mismatched_section_dict[gid] = this_mismatched_sections
            synapse_dict[gid] = cell.export_neurotree_synapse_attributes()
            del cell
            print('Rank %i took %i s to compute syn_locs for %s gid: %i' %
                  (rank, time.time() - local_time, population, gid))
            count += 1
        else:
            print('Rank %i gid is None' % rank)
        # print 'Rank %i before append_cell_attributes' % rank
        append_cell_attributes(MPI._addressof(comm),
                               forest_path,
                               population,
                               synapse_dict,
                               namespace='Synapse_Attributes',
                               io_size=io_size,
                               chunk_size=chunk_size,
                               value_chunk_size=value_chunk_size)
        sys.stdout.flush()
        del synapse_dict
        gc.collect()
    # print 'Rank %i completed iterator' % rank

    # len_mismatched_section_dict_fragments = comm.gather(len(mismatched_section_dict), root=0)
    global_count = comm.gather(count, root=0)
    if rank == 0:
        print(
            'target: %s, %i ranks took %i s to compute syn_locs for %i cells' %
            (population, comm.size, time.time() - start_time,
             np.sum(global_count)))
Code example #4
File: test_ctypes.py  Project: benkirk/mpi_playground
 def testHandleAdress(self):
     typemap = {ctypes.sizeof(ctypes.c_int): ctypes.c_int,
                ctypes.sizeof(ctypes.c_void_p): ctypes.c_void_p}
     for obj in self.objects:
         handle_t = typemap[MPI._sizeof(obj)]
         oldobj = obj
         newobj = type(obj)()
         handle_old = handle_t.from_address(MPI._addressof(oldobj))
         handle_new = handle_t.from_address(MPI._addressof(newobj))
         handle_new.value = handle_old.value
         self.assertEqual(obj, newobj)
Code example #5
File: test_ctypes.py  Project: mpi4py/mpi4py
 def testHandleAdress(self):
     typemap = {ctypes.sizeof(ctypes.c_int): ctypes.c_int,
                ctypes.sizeof(ctypes.c_void_p): ctypes.c_void_p}
     for obj in self.objects:
         handle_t = typemap[MPI._sizeof(obj)]
         oldobj = obj
         newobj = type(obj)()
         handle_old = handle_t.from_address(MPI._addressof(oldobj))
         handle_new = handle_t.from_address(MPI._addressof(newobj))
         handle_new.value = handle_old.value
         self.assertEqual(obj, newobj)
Code example #6
def ncmpi_open(name):
    if sys.version_info >= (3,0,0):
        name = bytes(name, 'utf-8')
    comm_ptr = MPI._addressof(MPI.COMM_WORLD)
    comm_val = MPI_Comm.from_address(comm_ptr)
    info_ptr = MPI._addressof(MPI.INFO_NULL)
    info_val = MPI_Info.from_address(info_ptr)
    ncid = c_int()
    retval = _ncmpi_open(comm_val, name, NC_NOWRITE, info_val, byref(ncid))
    errcheck(retval)
    return ncid.value
Code example #7
File: simpli_cpu.py  Project: vigji/fastpli
    def __init__(self, mpi_comm=None):

        super().__init__()

        # LIBRARIES
        self.__gen = _Generator()
        self.__sim = _Simulator()
        self.mpi = None
        if mpi_comm:
            from mpi4py import MPI
            self.__gen.set_mpi_comm(MPI._addressof(mpi_comm))
            self.__sim.set_mpi_comm(MPI._addressof(mpi_comm))
            self.mpi = _mpi._MPI(mpi_comm)

        # freeze class
        self.__freeze()
Code example #8
  def init(self, comm=None):
    """A function that initializes Horovod.

    Args:
      comm: List specifying ranks for the communicator, relative to the
        MPI_COMM_WORLD communicator OR the MPI communicator to use. Given
        communicator will be duplicated. If None, Horovod will use
        MPI_COMM_WORLD Communicator.
    """
    if comm is None:
      comm = []

    atexit.register(self.shutdown)

    if not isinstance(comm, list):
      from mpi4py import MPI  # pylint: disable=import-outside-toplevel
      if MPI._sizeof(MPI.Comm) == ctypes.sizeof(ctypes.c_int):  # pylint: disable=protected-access
        MPI_Comm = ctypes.c_int
      else:
        MPI_Comm = ctypes.c_void_p
        self.MPI_LIB_CTYPES.horovod_init_comm.argtypes = [MPI_Comm]

      comm_obj = MPI_Comm.from_address(MPI._addressof(comm))  # pylint: disable=protected-access
      self.MPI_LIB_CTYPES.horovod_init_comm(comm_obj)
    else:
      comm_size = len(comm)
      self.MPI_LIB_CTYPES.horovod_init((ctypes.c_int * comm_size)(*comm),
                                       ctypes.c_int(comm_size))
Code example #9
def init(comm=None):
    """A function that initializes Horovod.

    Args:
      comm: List specifying ranks for the communicator, relative to the MPI_COMM_WORLD
        communicator OR the MPI communicator to use. Given communicator will be duplicated.
        If None, Horovod will use MPI_COMM_WORLD Communicator.

    """
    if comm is None:
        comm = []

    atexit.register(shutdown)

    if not isinstance(comm, list):
        from mpi4py import MPI
        if MPI._sizeof(MPI.Comm) == ctypes.sizeof(ctypes.c_int):
            MPI_Comm = ctypes.c_int
        else:
            MPI_Comm = ctypes.c_void_p
            MPI_COMMON_LIB_CTYPES.horovod_init_comm.argtypes = [MPI_Comm]

        comm_obj = MPI_Comm.from_address(MPI._addressof(comm))
        return MPI_COMMON_LIB_CTYPES.horovod_init_comm(comm_obj)
    else:
        comm_size = len(comm)
        return MPI_COMMON_LIB_CTYPES.horovod_init(
            (ctypes.c_int * comm_size)(*comm), ctypes.c_int(comm_size))
Code example #10
def init(args=None, intracomm=None):
    """Initialize OpenMC

    Parameters
    ----------
    args : list of str
        Command-line arguments
    intracomm : mpi4py.MPI.Intracomm or None
        MPI intracommunicator

    """
    if args is not None:
        args = ['openmc'] + list(args)
    else:
        args = ['openmc']

    argc = len(args)
    # Create the argv array. Note that it is actually expected to be of
    # length argc + 1 with the final item being a null pointer.
    argv = (POINTER(c_char) * (argc + 1))()
    for i, arg in enumerate(args):
        argv[i] = create_string_buffer(arg.encode())

    if intracomm is not None:
        # If an mpi4py communicator was passed, convert it to void* to be passed
        # to openmc_init
        try:
            from mpi4py import MPI
        except ImportError:
            intracomm = None
        else:
            address = MPI._addressof(intracomm)
            intracomm = c_void_p(address)

    _dll.openmc_init(argc, argv, intracomm)
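Two calling conventions appear across these examples, depending on what the C side expects. When the C function takes MPI_Comm by value (LAMMPS-style), the handle is dereferenced with from_address; when it takes a pointer (MPI_Comm* or void*, as openmc_init does above), the raw address is passed instead. A hedged sketch of both, with hypothetical function names standing in for a real library:

import ctypes
from mpi4py import MPI

comm = MPI.COMM_WORLD
addr = MPI._addressof(comm)  # address of the handle inside the Python object

if MPI._sizeof(MPI.Comm) == ctypes.sizeof(ctypes.c_int):
    MPI_Comm = ctypes.c_int
else:
    MPI_Comm = ctypes.c_void_p

by_value = MPI_Comm.from_address(addr)  # pass as C MPI_Comm
by_pointer = ctypes.c_void_p(addr)      # pass as C MPI_Comm* (or void*)
# lib.takes_comm(by_value)         # hypothetical: void f(MPI_Comm)
# lib.takes_comm_ptr(by_pointer)   # hypothetical: void f(MPI_Comm*)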
Code example #11
 def __init__(self, comm=None):
     if comm is None:
         # Should only end up here upon unpickling
         comm = MPI.COMM_WORLD
     comm_ptr = MPI._addressof(comm)
     comm_val = self.dtype.from_address(comm_ptr)
     self.value = comm_val
Code example #12
File: mdi.py  Project: MolSSI-MDI/MDI_Library
def MDI_Launch_plugin(plugin_name, options, mpi_comm, driver_callback_func, driver_callback_obj):
    global driver_node_callback

    driver_node_callback = ( driver_callback_func, driver_callback_obj, mpi_comm )

    plugin_name_c = plugin_name.encode('utf-8')
    options_c = options.encode('utf-8')

    # this is just a dummy pointer; the actual object is stored in execute_command_dict
    class_obj_pointer = ctypes.c_void_p()

    # if this is a plugin code, get the plugin's MPI communicator
    c_mpi_communicator_ptr = ctypes.c_void_p()
    if ( use_mpi4py ):

        handle_t = ctypes.c_void_p
        c_mpi_communicator_ptr = handle_t.from_address(MPI._addressof(mpi_comm))

    ret = mdi.MDI_Launch_plugin( ctypes.c_char_p(plugin_name_c),
                                 ctypes.c_char_p(options_c),
                                 c_mpi_communicator_ptr,
                                 MDI_plugin_driver_callback_c,
                                 class_obj_pointer )
    if ret != 0:
        raise Exception("MDI Error: MDI_Launch_plugin failed")

    return ret
Code example #13
File: distributed.py  Project: opesci/devito
 def __init__(self, comm=None):
     if comm is None:
         # Should only end up here upon unpickling
         comm = MPI.COMM_WORLD
     comm_ptr = MPI._addressof(comm)
     comm_val = self.dtype.from_address(comm_ptr)
     self.value = comm_val
Code example #14
def MDI_Init(arg1, comm):
    global intra_code_comm

    # append the _language option, so that MDI knows this is a Python code
    arg1 = arg1 + " _language Python"

    # call MDI_Init
    command = arg1.encode('utf-8')
    if comm is None:
        mpi_communicator_ptr = None
        do_mpi_split = False
    else:
        if use_mpi4py:
            mpi_communicator = MPI._addressof(comm)
            mpi_communicator_ptr = ctypes.c_void_p(mpi_communicator)
            do_mpi_split = True
        else:
            raise Exception("MDI Error: An MPI communicator was passed to MDI_Init, but mpi4py is not found")

    ret = mdi.MDI_Init(ctypes.c_char_p(command), mpi_communicator_ptr )

    # split the intra-code communicator
    if do_mpi_split:
        mpi_color = mdi.MDI_Get_MPI_Code_Rank()
        intra_code_comm = comm.Split(mpi_color, comm.Get_rank())
        mdi.MDI_Set_MPI_Intra_Rank( intra_code_comm.Get_rank() )
        comm.Barrier()

    return ret
Code example #15
    def init(self, comm=None):
        """A function that initializes Horovod.

        Args:
          comm: List specifying ranks for the communicator, relative to the MPI_COMM_WORLD
            communicator OR the MPI communicator to use. Given communicator will be duplicated.
            If None, Horovod will use MPI_COMM_WORLD Communicator.
        """
        if comm is None:
            comm = []

        atexit.register(self.shutdown)

        if not isinstance(comm, list):
            mpi_built = self.MPI_LIB_CTYPES.horovod_mpi_built()
            if not bool(mpi_built):
                raise ValueError(
                    "Horovod has not been built with MPI support. Ensure MPI is installed and "
                    "reinstall Horovod with HOROVOD_WITH_MPI=1 to debug the build error.")

            from mpi4py import MPI
            if MPI._sizeof(MPI.Comm) == ctypes.sizeof(ctypes.c_int):
                MPI_Comm = ctypes.c_int
            else:
                MPI_Comm = ctypes.c_void_p
                self.MPI_LIB_CTYPES.horovod_init_comm.argtypes = [MPI_Comm]

            comm_obj = MPI_Comm.from_address(MPI._addressof(comm))
            self.MPI_LIB_CTYPES.horovod_init_comm(comm_obj)
        else:
            comm_size = len(comm)
            self.MPI_LIB_CTYPES.horovod_init(
                (ctypes.c_int * comm_size)(*comm), ctypes.c_int(comm_size))
Code example #16
File: basics.py  Project: rongou/horovod
    def _comm_process_set_id(self, comm: MPI.Comm) -> int:
        """ Returns the (previously registered) process set id corresponding to the MPI communicator comm. """
        if not self.mpi_built():
            raise ValueError(
                "Horovod has not been built with MPI support. Ensure MPI is installed and "
                "reinstall Horovod with HOROVOD_WITH_MPI=1 to debug the build error."
            )

        from mpi4py import MPI
        if MPI._sizeof(MPI.Comm) == ctypes.sizeof(ctypes.c_int):
            MPI_Comm = ctypes.c_int
        else:
            MPI_Comm = ctypes.c_void_p

        self.MPI_LIB_CTYPES.horovod_comm_process_set.argtypes = [MPI_Comm]
        comm_obj = MPI_Comm.from_address(MPI._addressof(comm))
        result = int(self.MPI_LIB_CTYPES.horovod_comm_process_set(comm_obj))
        if result == self.HOROVOD_PROCESS_SET_ERROR_INIT:
            raise ValueError(
                'Horovod has not been initialized or MPI has not been enabled; use hvd.init().'
            )
        elif result == self.HOROVOD_PROCESS_SET_ERROR_UNKNOWN_SET:
            raise ValueError(
                'MPI communicator does not correspond to any registered process set.'
            )
        return result
Code example #17
File: test_ctypes.py  Project: mpi4py/mpi4py
 def testHandleValue(self):
     typemap = {ctypes.sizeof(ctypes.c_uint32): ctypes.c_uint32,
                ctypes.sizeof(ctypes.c_uint64): ctypes.c_uint64}
     for obj in self.objects:
         uintptr_t = typemap[MPI._sizeof(obj)]
         handle = uintptr_t.from_address(MPI._addressof(obj))
         self.assertEqual(handle.value, MPI._handleof(obj))
Code example #18
    def from_global_points(cls, points, balanced, comm):
        """
        Construct a distributed tree from a set of globally distributed points.

        Parameters
        ----------
        points : np.array(shape=(n_points, 3), dtype=np.float64)
            Cartesian points at this processor.
        balanced : bool
            If 'True' constructs a balanced tree, if 'False' constructs an unbalanced tree.
        comm : Intracomm
           An mpi4py Intracommunicator.
        """
        points = np.asarray(points, dtype=np.float64, order="C")
        npoints, _ = points.shape
        points_data = ffi.from_buffer("double(*)[3]", points)
        balanced_data = ffi.cast("bool", bool(balanced))
        npoints_data = ffi.cast("size_t", npoints)
        p_comm = MPI._addressof(comm)
        raw_comm = ffi.cast("uintptr_t*", p_comm)

        return cls(
            lib.distributed_tree_from_points(points_data, npoints_data,
                                             balanced_data, raw_comm),
            comm,
            p_comm,
            raw_comm,
        )
Code example #19
File: heffte.py  Project: af-ayala/heffte
def fft3d(backend_tag, inbox, outbox, comm):
    '''
    Initialize an fft3d operation; the syntax is nearly identical to C++.

    backend_tag replaces the template type-tag; use one of the heffte.backend
                constants, e.g., heffte.backend.fftw
    '''
    if backend_tag not in backend.valid:
        raise heffte_input_error(
            "Invalid backend, use one of the entries in heffte.backend")

    plan = heffte_fft_plan()
    plan.use_r2c = False

    # Convert the mpi4py communicator to a C MPI_Comm value
    comm_value = MPI_Comm.from_address(mpi._addressof(comm))

    # Initialize
    plan.fft_comm = comm
    plan.plan = LP_plan()
    options = plan_options(0, 1, 1)

    herr = libheffte.heffte_plan_create(backend_tag, inbox.low, inbox.high,
                                        inbox.order, outbox.low, outbox.high,
                                        outbox.order, comm_value, options,
                                        plan.plan)

    if herr != 0:
        raise heffte_input_error(
            "heFFTe encountered internal error with code: {0:1d}".format(herr))
    return plan
Code example #20
File: basics.py  Project: yushinliu/horovod
    def init(self, comm=None):
        """A function that initializes Horovod.

        Args:
          comm: List specifying ranks for the communicator, relative to the MPI_COMM_WORLD
            communicator OR the MPI communicator to use. Given communicator will be duplicated.
            If None, Horovod will use MPI_COMM_WORLD Communicator.
        """
        if comm is None:
            comm = []

        atexit.register(self.shutdown)

        if not isinstance(comm, list):
            mpi_enabled = self.MPI_LIB_CTYPES.horovod_mpi_enabled()
            if not bool(mpi_enabled):
                raise ValueError(
                    'Horovod MPI is not enabled; Please make sure it\'s installed and enabled.'
                )

            from mpi4py import MPI
            if MPI._sizeof(MPI.Comm) == ctypes.sizeof(ctypes.c_int):
                MPI_Comm = ctypes.c_int
            else:
                MPI_Comm = ctypes.c_void_p
                self.MPI_LIB_CTYPES.horovod_init_comm.argtypes = [MPI_Comm]

            comm_obj = MPI_Comm.from_address(MPI._addressof(comm))
            self.MPI_LIB_CTYPES.horovod_init_comm(comm_obj)
        else:
            comm_size = len(comm)
            self.MPI_LIB_CTYPES.horovod_init((ctypes.c_int * comm_size)(*comm),
                                             ctypes.c_int(comm_size))
Code example #21
    def init(self, calling_realm):

        self.realm = calling_realm
        # Build a communicator mpi4py python object from the
        # handle returned by the CPL_init function.
        try:
            if MPI._sizeof(MPI.Comm) == ctypes.sizeof(c_int):
                MPI_Comm = c_int
            else:
                MPI_Comm = c_void_p
        # Some versions of mpi4py have no _sizeof method.
        except AttributeError:
            MPI_Comm = c_int

        # Call create comm
        returned_realm_comm = c_int()
        self._py_init(calling_realm, byref(returned_realm_comm))

        # Use an intracomm object as the template and override value
        newcomm = MPI.Intracomm()
        newcomm_ptr = MPI._addressof(newcomm)
        comm_val = MPI_Comm.from_address(newcomm_ptr)
        comm_val.value = returned_realm_comm.value
        self.COMM = newcomm

        return newcomm
Code example #22
 def testHandleValue(self):
     ffi = cffi.FFI()
     typemap = {ffi.sizeof('uint32_t'): 'uint32_t',
                ffi.sizeof('uint64_t'): 'uint64_t',}
     for obj in self.objects:
         uintptr_t = typemap[MPI._sizeof(obj)]
         handle = ffi.cast(uintptr_t+'*', MPI._addressof(obj))[0]
         self.assertEqual(handle, MPI._handleof(obj))
Code example #23
def MDI_Init(arg1, comm):
    global world_comm
    global intra_code_comm

    # prepend the _language option, so that MDI knows this is a Python code
    arg1 = "_language Python " + arg1

    command = arg1.encode('utf-8')
    if comm is None:
        mpi_communicator_ptr = None
    else:
        if use_mpi4py:
            world_comm = comm
            intra_code_comm = comm
            mpi_communicator = MPI._addressof(comm)
            mpi_communicator_ptr = ctypes.c_void_p(mpi_communicator)

            # send basic information about the MPI communicator to the MDI library
            mpi_rank = comm.Get_rank()
            mpi_world_size = comm.Get_size()
            mdi.MDI_Set_World_Rank(mpi_rank)
            mdi.MDI_Set_World_Size(mpi_world_size)
        else:
            raise Exception(
                "MDI Error: An MPI communicator was passed to MPI_Init, but MPI4Py is not found"
            )

    # determine if the communication method is MPI
    args = arg1.split()
    mdi_method = None
    for i in range(len(args)):
        if args[i] == "-method" and i < len(args) - 1:
            mdi_method = args[i + 1]
    if not mdi_method:
        raise Exception("MDI Error: Unable to find -method option")

    # set the MPI4Py callback functions
    set_mpi4py_recv_callback()
    set_mpi4py_send_callback()
    set_mpi4py_size_callback()
    set_mpi4py_rank_callback()
    set_mpi4py_gather_names_callback()
    set_mpi4py_barrier_callback()
    set_mpi4py_split_callback()

    # if using MPI, ensure that numpy is available
    if mdi_method == "MPI":
        if not found_numpy:
            raise Exception(
                "MDI Error: When using the MPI communication method, numpy must be available"
            )

    # call MDI_Init
    ret = mdi.MDI_Init(ctypes.c_char_p(command), mpi_communicator_ptr)
    if ret != 0:
        raise Exception("MDI Error: MDI_Init failed")

    return ret
Code example #24
    def __init__(self, name="", cmdargs=None, comm=None):

        # load liblammps.so by default
        # if name = "g++", load liblammps_g++.so

        try:
            if not name: self.lib = CDLL("liblammps.so", RTLD_GLOBAL)
            else: self.lib = CDLL("liblammps_%s.so" % name, RTLD_GLOBAL)
        except Exception:
            etype, value, tb = sys.exc_info()
            traceback.print_exception(etype, value, tb)
            raise OSError("Could not load LAMMPS dynamic library")

        # create an instance of LAMMPS
        # don't know how to pass an MPI communicator from PyPar
        # no_mpi call lets LAMMPS use MPI_COMM_WORLD
        # cargs = array of C strings from args

        if cmdargs:
            cmdargs.insert(0, "lammps.py")
            narg = len(cmdargs)
            cargs = (c_char_p * narg)(*cmdargs)
            self.lmp = c_void_p()
            if comm is None:
                self.lib.lammps_open_no_mpi(narg, cargs, byref(self.lmp))
            else:
                if MPI._sizeof(MPI.Comm) == sizeof(c_int):
                    MPI_Comm = c_int
                else:
                    MPI_Comm = c_void_p
                comm_ptr = MPI._addressof(comm)
                comm_val = MPI_Comm.from_address(comm_ptr)
                self.lib.lammps_open(narg, cargs, comm_val, byref(self.lmp))
        else:
            self.lmp = c_void_p()
            if comm is None:
                self.lib.lammps_open_no_mpi(0, None, byref(self.lmp))
            else:
                if MPI._sizeof(MPI.Comm) == sizeof(c_int):
                    MPI_Comm = c_int
                else:
                    MPI_Comm = c_void_p
                comm_ptr = MPI._addressof(comm)
                comm_val = MPI_Comm.from_address(comm_ptr)
                self.lib.lammps_open(0, None, comm_val, byref(self.lmp))
Code example #25
File: gcomm.py  Project: andokazu1979/gcpy
def scatter_c(comm, sendbuf, recvbuf, root):
    comm_ptr = MPI._addressof(comm)
    comm_val = MPI_Comm.from_address(comm_ptr)
    if sendbuf is None:
        sendbuf = np.zeros([recvbuf.size], dtype='f4')
    size_ = sendbuf.size
    _libc.scatter(comm_val, sendbuf, size_, recvbuf, recvbuf.size, root)
Code example #26
File: sendrecv.py  Project: dionhaefner/mpi4jax
def mpi_sendrecv_xla_encode(c, sendbuf, recvbuf, token, source, dest, sendtag,
                            recvtag, comm, status):
    from ..cython.mpi_xla_bridge import MPI_STATUS_IGNORE_ADDR

    warn_missing_omnistaging()

    c = _unpack_builder(c)

    recv_shape = c.GetShape(recvbuf)
    recv_dtype = recv_shape.element_type()
    recv_dims = recv_shape.dimensions()

    # compute total number of elements in array
    _recv_nitems = _constant_s32_scalar(c, _np.prod(recv_dims, dtype=int))
    _recv_dtype_ptr = dtype_ptr(recv_dtype)

    send_shape = c.GetShape(sendbuf)
    send_dtype = send_shape.element_type()
    send_dims = send_shape.dimensions()

    # compute total number of elements in array
    _send_nitems = _constant_s32_scalar(c, _np.prod(send_dims, dtype=int))
    _send_dtype_ptr = dtype_ptr(send_dtype)

    sh = xla_client.Shape.tuple_shape([
        xla_client.Shape.array_shape(recv_dtype, recv_dims),
        xla_client.Shape.token_shape(),
    ])

    if status is None:
        _status = MPI_STATUS_IGNORE_ADDR
    else:
        _status = _MPI._addressof(status)

    operands = (
        _send_nitems,
        sendbuf,
        _constant_s32_scalar(c, dest),
        _constant_s32_scalar(c, sendtag),
        _constant_u64_scalar(c, _send_dtype_ptr),
        _recv_nitems,
        _constant_s32_scalar(c, source),
        _constant_s32_scalar(c, recvtag),
        _constant_u64_scalar(c, _recv_dtype_ptr),
        _constant_u64_scalar(c, to_mpi_ptr(comm)),
        _constant_u64_scalar(c, _status),
        token,
    )

    return _ops.CustomCall(
        c,
        b"mpi_sendrecv",
        operands=operands,
        shape=sh,
        has_side_effect=True,
    )
Code example #27
 def testHandleAddress(self):
     ffi = cffi.FFI()
     typemap = {ffi.sizeof('int'): 'int', ffi.sizeof('void*'): 'void*'}
     typename = lambda t: t.__name__.rsplit('.', 1)[-1]
     for tp in self.mpitypes:
         handle_t = typemap[MPI._sizeof(tp)]
         mpi_t = 'MPI_' + typename(tp)
         ffi.cdef("typedef %s %s;" % (handle_t, mpi_t))
     for obj in self.objects:
         if isinstance(obj, MPI.Comm):
             mpi_t = 'MPI_Comm'
         else:
             mpi_t = 'MPI_' + typename(type(obj))
         oldobj = obj
         newobj = type(obj)()
         handle_old = ffi.cast(mpi_t + '*', MPI._addressof(oldobj))
         handle_new = ffi.cast(mpi_t + '*', MPI._addressof(newobj))
         handle_new[0] = handle_old[0]
         self.assertEqual(oldobj, newobj)
Code example #28
File: utils.py  Project: kiminh/mpi4jax
def MPIComm_from_ptr(ptr):
    """
    MPIComm_from_ptr(ptr)

    Constructs an MPI Comm object from a pointer.
    """
    comm = _MPI.Comm()
    comm_ptr = ctypes.c_void_p.from_address(_MPI._addressof(comm))
    comm_ptr.value = int(ptr)
    return comm
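The same trick works in reverse, as MPIComm_from_ptr above shows: construct an empty mpi4py object, then overwrite its internal handle. A self-contained round-trip sketch (an editorial addition; it only assumes mpi4py) that copies COMM_WORLD's handle into a fresh Comm and checks that the two compare equal:

import ctypes
from mpi4py import MPI

# Handle-sized ctypes type, as in the other examples on this page.
handle_t = (ctypes.c_int
            if MPI._sizeof(MPI.Comm) == ctypes.sizeof(ctypes.c_int)
            else ctypes.c_void_p)

src = handle_t.from_address(MPI._addressof(MPI.COMM_WORLD))

comm = MPI.Comm()  # starts out as a null communicator
dst = handle_t.from_address(MPI._addressof(comm))
dst.value = src.value  # overwrite the internal handle

assert comm == MPI.COMM_WORLD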
Code example #29
File: utils.py  Project: kiminh/mpi4jax
def MPIOp_from_ptr(ptr):
    """
    MPIOp_from_ptr(ptr)

    Constructs an MPI Op object from a pointer.
    """
    op = _MPI.Op()
    op_ptr = ctypes.c_void_p.from_address(_MPI._addressof(op))
    op_ptr.value = int(ptr)
    return op
Code example #30
File: test_cffi.py  Project: benkirk/mpi_playground
 def testHandleAddress(self):
     ffi = cffi.FFI()
     typemap = {ffi.sizeof('int'): 'int',
                ffi.sizeof('void*'): 'void*'}
     typename = lambda t: t.__name__.rsplit('.', 1)[-1]
     for tp in self.mpitypes:
         handle_t = typemap[MPI._sizeof(tp)]
         mpi_t = 'MPI_' + typename(tp)
         ffi.cdef("typedef %s %s;" % (handle_t, mpi_t))
     for obj in self.objects:
         if isinstance(obj, MPI.Comm):
             mpi_t = 'MPI_Comm'
         else:
             mpi_t = 'MPI_' + typename(type(obj))
         oldobj = obj
         newobj = type(obj)()
         handle_old = ffi.cast(mpi_t+'*', MPI._addressof(oldobj))
         handle_new = ffi.cast(mpi_t+'*', MPI._addressof(newobj))
         handle_new[0] = handle_old[0]
         self.assertEqual(oldobj, newobj)
Code example #31
File: __init__.py  Project: tpeterka/mfa
def convert_mpi_comm(self, *args, **kwargs):
    if len(args) == 0:
        init(self, *args, **kwargs)
    else:
        comm = args[0]

        if not isinstance(comm, mpi.MPIComm):
            from mpi4py import MPI
            comm = MPI._addressof(comm)

        init(self, comm, *args[1:], **kwargs)
Code example #32
def get_task_comm():
    from mpi4py import MPI
    import ctypes

    # print("turbine_helpers.task_comm: %i" % task_comm)
    # sys.stdout.flush()

    mpi4py_comm = MPI.Intracomm()
    if MPI._sizeof(MPI.Comm) == ctypes.sizeof(ctypes.c_int):
        # MPICH
        comm_int = ctypes.c_int
        mpi4py_comm_ptr = comm_int.from_address(MPI._addressof(mpi4py_comm))
        mpi4py_comm_ptr.value = task_comm
    elif MPI._sizeof(MPI.Comm) == ctypes.sizeof(ctypes.c_void_p):
        # OpenMPI
        comm_pointer = ctypes.c_void_p
        handle = comm_pointer.from_address(MPI._addressof(mpi4py_comm))
        handle.value = task_comm

    return mpi4py_comm
Code example #33
def to_mpi_ptr(mpi_obj):
    """
    to_mpi_ptr(mpi_obj)

    Returns the ptr to the underlying C mpi object
    """
    try:
        addr = _MPI._handleof(mpi_obj)
    except NotImplementedError:
        # some objects like Status only work with addressof
        addr = _MPI._addressof(mpi_obj)

    return _np.uint64(addr)
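Usage sketch for the helper above (an editorial addition; it relies on the to_mpi_ptr defined in code example #33): for most handle types _handleof returns the raw C handle directly, which is also what code example #1 verifies against ctypes.

import numpy as _np
from mpi4py import MPI as _MPI

# to_mpi_ptr forwards the raw handle as a 64-bit integer.
ptr = to_mpi_ptr(_MPI.COMM_WORLD)
assert ptr == _np.uint64(_MPI._handleof(_MPI.COMM_WORLD))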
Code example #34
File: par_utils.py  Project: pbouda95/mgmetis
def comm_ptr(comm):
    """Get the pointer to the communicator
    """
    # see https://github.com/mpi4py/mpi4py/blob/master/demo/wrap-ctypes/helloworld.py
    comm = get_comm(comm)
    from mpi4py import MPI

    if MPI._sizeof(MPI.Comm) == c.sizeof(c.c_int):
        MPI_Comm = c.c_int
    else:
        # must be pointer
        MPI_Comm = c.c_void_p
    return c.byref(MPI_Comm.from_address(MPI._addressof(comm)))
Code example #35
File: test_6.py  Project: swift-lang/swift-work
def get_task_comm():
    import os,sys
    print("gtc");sys.stdout.flush()
    from mpi4py import MPI
    print("gtc2");sys.stdout.flush()
    import ctypes
    task_comm_string = os.getenv("task_comm")
    print("task_comm_string: " + task_comm_string)
    task_comm_int = int(task_comm_string)
    MPI_Comm = ctypes.c_int
    MPI_Comm.from_address(task_comm_int)
    newcomm = MPI.Intracomm()
    newcomm_ptr = MPI._addressof(newcomm)
    comm_val = MPI_Comm.from_address(newcomm_ptr)
    # comm_val.value = task_comm_int
    print("gtc3");sys.stdout.flush()
    # newcomm.barrier()
    print("gtc4");sys.stdout.flush()
    return newcomm
Code example #36
File: cplpy.py  Project: Crompulence/cpl-library
    def init(self, calling_realm):

        # Build a communicator mpi4py python object from the
        # handle returned by the CPL_init function.
        if MPI._sizeof(MPI.Comm) == ctypes.sizeof(c_int):
            MPI_Comm = c_int
        else:
            MPI_Comm = c_void_p

        # Call create comm
        returned_realm_comm = c_int()
        self._py_init(calling_realm, byref(returned_realm_comm))

        # Use an intracomm object as the template and override value
        newcomm = MPI.Intracomm()
        newcomm_ptr = MPI._addressof(newcomm)
        comm_val = MPI_Comm.from_address(newcomm_ptr)
        comm_val.value = returned_realm_comm.value

        return newcomm
Code example #37
File: hello.py  Project: AbhishekKapoor/lectures
def say_hello(comm):
    r"""Given a communicator, have each process in the communicator call
    say_hello().

    This Python function will be executed by each MPI process. Each will create
    its own copy of the data array `a` and send that array to the C function
    `say_hello()`.

    """
    comm_ptr = MPI._addressof(comm)
    comm_val = MPI_Comm.from_address(comm_ptr)

    N = 8
    a = numpy.ascontiguousarray(numpy.zeros(N, dtype=numpy.double))
    libhello.say_hello(a.ctypes.data, N, comm_val)

    print('%d ---- test: %s' % (comm.rank, a))
    if (comm.rank == 0):
        return a
    else:
        return None
Code example #38
File: test_3.py  Project: swift-lang/swift-work
def go(comm_int):
    print("go(%i) ..." % comm_int)
    comm = MPI.COMM_WORLD
    print("size: %i" % comm.Get_size())
    comm.barrier()

    # MPICH mode:
    # MPI_Comm = ctypes.c_int
    # MPI_Comm.from_address(comm_int)
    # newcomm = MPI.Intracomm()
    # newcomm_ptr = MPI._addressof(newcomm)
    # comm_val = MPI_Comm.from_address(newcomm_ptr)
    # comm_val.value = comm_int
    # newcomm.barrier()
    # sys.stdout.flush()

    # OpenMPI mode (from Zaki)
    comm_pointer = ctypes.c_void_p
    mpi4py_comm = MPI.Intracomm()
    handle = comm_pointer.from_address(MPI._addressof(mpi4py_comm))
    handle.value = comm_int
    mpi4py_comm.barrier()
Code example #39
File: wrappers.py  Project: boruil/homework4
def heat_parallel(uk, dx, Nx, dt, num_steps, comm):
    r"""Solve the heat equation in paralllel. This Python function is executed by
    each spawned process.

    Parameters
    ----------
    uk : array
        Function values for process k.

    Returns
    -------
    uk : array
        The updated function values after heat_parallel()
    """
    if (len(uk) != Nx):
        raise ValueError("Nx should equal the number of grid points.")

    # note that the code below inherently returns a copy of the original input
    uk = numpy.ascontiguousarray(
        numpy.array(uk, dtype=numpy.double)).astype(numpy.double)

    # mpi comm setup
    comm_ptr = MPI._addressof(comm)
    comm_val = c_mpi_comm.from_address(comm_ptr)

    # set function types and evaluate
    try:
        f = homework4library.heat_parallel
        f.restype = None
        f.argtypes = [c_void_p, c_double, c_size_t, c_double,
                      c_size_t, c_mpi_comm]
        f(uk.ctypes.data, dx, Nx, dt, num_steps, comm_val)
    except AttributeError:
        raise AttributeError("Something wrong happened when calling the C "
                             "library function.")
    return uk
Code example #40
def sayhello(comm):
    comm_ptr = MPI._addressof(comm)
    comm_val = MPI_Comm.from_address(comm_ptr)
    _lib.sayhello(comm_val)
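The snippet above assumes MPI_Comm and _lib are defined at module level. A hedged reconstruction of that setup, modeled on mpi4py's demo/wrap-ctypes example (the library name and C signature are assumptions, not part of the original snippet):

import ctypes
from mpi4py import MPI

# Handle-sized ctypes type, as elsewhere on this page.
if MPI._sizeof(MPI.Comm) == ctypes.sizeof(ctypes.c_int):
    MPI_Comm = ctypes.c_int
else:
    MPI_Comm = ctypes.c_void_p

_lib = ctypes.CDLL("./libhelloworld.so")  # assumed library name
_lib.sayhello.argtypes = [MPI_Comm]       # C side: void sayhello(MPI_Comm)
_lib.sayhello.restype = None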
Code example #41
File: test.py  Project: mrzv/henson
from __future__ import print_function

import pyhenson as h
import numpy as np
from mpi4py import MPI

w = MPI.COMM_WORLD.Dup()
print(w.rank, w.size)

a = MPI._addressof(w)       # required to interface with mpi4py
pm = h.ProcMap(a, [('world', w.size)])
nm = h.NameMap()

sim = h.Puppet('../../examples/simple/simulation', ['1250'], pm, nm)
ana = h.Puppet('../../examples/simple/analysis',   [],       pm, nm)

sim.proceed()
ana.proceed()
while sim.running():
    a = nm.get("data")
    t = nm.get('t')
    print("[%d]: From Python: %d -> %f" % (w.rank, t, np.sum(a)))
    sim.proceed()
    ana.proceed()

t = sim.total_time()
print("Total time:", h.clock_to_string(t))
Code example #42
File: rest.py  Project: dimaleks/uDeviceX
    
    pv = ymr.ParticleVectors.ParticleVector('pv', mass = 1)
    ic = ymr.InitialConditions.Uniform(density=2)
    u.registerParticleVector(pv=pv, ic=ic)

    dpd = ymr.Interactions.DPD('dpd', 1.0, a=10.0, gamma=10.0, kbt=1.0, power=0.5)
    u.registerInteraction(dpd)
    u.setInteraction(dpd, pv, pv)
    
    vv = ymr.Integrators.VelocityVerlet('vv')
    u.registerIntegrator(vv)
    u.setIntegrator(vv, pv)
    
    stats = ymr.Plugins.createStats('stats', statsFname, 1000)
    u.registerPlugins(stats)
    
    u.run(niter)

comm = MPI.COMM_WORLD
run(2002, "stats1.txt", MPI._addressof(comm))
run(2002, "stats2.txt", MPI._addressof(comm))


# nTEST: mpi.rest.consecutive
# cd mpi
# rm -rf stats*.txt
# ymr.run --runargs "-n 4" ./rest.py > /dev/null
# cat stats1.txt | awk '{print $1, $2, $3, $4, $5}' >  stats.out.txt
# cat stats2.txt | awk '{print $1, $2, $3, $4, $5}' >> stats.out.txt

Code example #43
File: objectVector.py  Project: dimaleks/uDeviceX
import trimesh

from mpi4py import MPI

parser = argparse.ArgumentParser()
parser.add_argument("--restart", action='store_true', default=False)
parser.add_argument("--ranks", type=int, nargs=3)
args = parser.parse_args()

comm   = MPI.COMM_WORLD
ranks  = args.ranks
domain = (16, 16, 16)
dt = 0

if args.restart:
    u = ymr.ymero(ranks, domain, dt, comm_ptr=MPI._addressof(comm), debug_level=8, log_filename='log', checkpoint_every=0)
else:
    u = ymr.ymero(ranks, domain, dt, comm_ptr=MPI._addressof(comm), debug_level=8, log_filename='log', checkpoint_every=5)

    
mesh = trimesh.creation.icosphere(subdivisions=1, radius = 0.1)
    
udx_mesh = ymr.ParticleVectors.MembraneMesh(mesh.vertices.tolist(), mesh.faces.tolist())
pv       = ymr.ParticleVectors.MembraneVector("pv", mass=1.0, mesh=udx_mesh)

if args.restart:
    ic   = ymr.InitialConditions.Restart("restart/")
else:
    nobjs = 10
    pos = [ np.array(domain) * t for t in np.linspace(0, 1.0, nobjs) ]
    Q = [ np.array([1.0, 0., 0., 0.])  for i in range(nobjs) ]
Code example #44
 def testAddressOf(self):
     for obj in self.objects:
         addr = MPI._addressof(obj)
Code example #45
def sayhello(comm):
    comm_ptr = MPI._addressof(comm)
    comm_val = ffi.cast('MPI_Comm*', comm_ptr)[0]
    lib.sayhello(comm_val)
Code example #46
File: schwarz.py  Project: feelpp/hpddm
     mu = 1
 if hpddm.optionSet(opt, b'schwarz_coarse_correction'):
     nu = ctypes.c_ushort(int(hpddm.optionVal(opt, b'geneo_nu')))
     if nu.value > 0:
         if hpddm.optionApp(opt, b'nonuniform'):
             nu.value += max(int(-hpddm.optionVal(opt, b'geneo_nu') + 1), (-1)**rankWorld * rankWorld)
         threshold = hpddm.underlying(max(0, hpddm.optionVal(opt, b'geneo_threshold')))
         hpddm.schwarzSolveGEVP(A, MatNeumann, ctypes.byref(nu), threshold)
         addr = hpddm.optionAddr(opt, b'geneo_nu')
         addr.contents.value = nu.value
     else:
         nu = 1
         deflation = numpy.ones((dof, nu), order = 'F', dtype = hpddm.scalar)
         hpddm.setVectors(hpddm.schwarzPreconditioner(A), nu, deflation)
     hpddm.initializeCoarseOperator(hpddm.schwarzPreconditioner(A), nu)
     hpddm.schwarzBuildCoarseOperator(A, hpddm.MPI_Comm.from_address(MPI._addressof(MPI.COMM_WORLD)))
 hpddm.schwarzCallNumfact(A)
 if rankWorld != 0:
     hpddm.optionRemove(opt, b'verbosity')
 comm = hpddm.getCommunicator(hpddm.schwarzPreconditioner(A))
 it = hpddm.solve(A, f, sol, comm)
 storage = numpy.empty(2 * mu, order = 'F', dtype = hpddm.underlying)
 hpddm.schwarzComputeError(A, sol, f, storage)
 if rankWorld == 0:
     for nu in range(mu):
         if nu == 0:
             print(' --- error = ', end = '')
         else:
             print('             ', end = '')
         print('{:e} / {:e}'.format(storage[1 + 2 * nu], storage[2 * nu]), end = '')
         if mu > 1:
Code example #47
  def __init__(self, library=None, style='spherical', dim=3, units='si', path=None, cmdargs=[], comm=None, ptr=None):
    # What is ptr used for?

    if not comm:
      comm = MPI.COMM_WORLD

    if library:
      if not comm.Get_rank():
        print("Using " + library + " as a shared library for DEM computations")
    else:
      if not comm.Get_rank():
        print('No library supplied.')

      sys.exit()

    try:
      self.lib = ctypes.CDLL(library, ctypes.RTLD_GLOBAL)
    except:
      etype,value,tb = sys.exc_info()
      traceback.print_exception(etype,value,tb)
      raise RuntimeError("Could not load LIGGGHTS dynamic library")

    # if no ptr provided, create an instance of LIGGGHTS
    #   don't know how to pass an MPI communicator from PyPar
    #   but we can pass an MPI communicator from mpi4py v2.0.0 and later
    #   no_mpi call lets LIGGGHTS use MPI_COMM_WORLD
    #   cargs = array of C strings from args
    # if ptr, then are embedding Python in LIGGGHTS input script
    #   ptr is the desired instance of LIGGGHTS
    #   just convert it to ctypes ptr and store in self.lmp

    if MPI._sizeof(MPI.Comm) == ctypes.sizeof(ctypes.c_int):
      MPI_Comm = ctypes.c_int
    else:
      MPI_Comm = ctypes.c_void_p

    if not ptr:
      # with mpi4py v2, can pass MPI communicator to LIGGGHTS
      # need to adjust for type of MPI communicator object
      # allow for int (like MPICH) or void* (like OpenMPI)

        narg = 0
        cargs = 0

        if cmdargs:
          cmdargs.insert(0, "liggghts.py")
          narg = len(cmdargs)
          for i in range(narg):
            if isinstance(cmdargs[i], str):
              cmdargs[i] = cmdargs[i].encode()

          cargs = (ctypes.c_char_p*narg)(*cmdargs)
          self.lib.lammps_open.argtypes = [ctypes.c_int, ctypes.c_char_p*narg, \
                                           MPI_Comm, ctypes.c_void_p]
        else:
          self.lib.lammps_open.argtypes = [ctypes.c_int, ctypes.c_int, \
                                           MPI_Comm, ctypes.c_void_p]

        self.lib.lammps_open.restype = None
        self.lmp = ctypes.c_void_p()
        comm_ptr = MPI._addressof(comm)
        comm_val = MPI_Comm.from_address(comm_ptr)
        self.lib.lammps_open(narg,cargs,comm_val,ctypes.byref(self.lmp))

        self.opened = True
    else:
      self.opened = False

      if sys.version_info >= (3, 0):
        # Python 3 (uses PyCapsule API)
        ctypes.pythonapi.PyCapsule_GetPointer.restype = ctypes.c_void_p
        ctypes.pythonapi.PyCapsule_GetPointer.argtypes = [ctypes.py_object, ctypes.c_char_p]
        self.lmp = ctypes.c_void_p(ctypes.pythonapi.PyCapsule_GetPointer(ptr, None))
      else:
        # Python 2 (uses PyCObject API)
        ctypes.pythonapi.PyCObject_AsVoidPtr.restype = ctypes.c_void_p
        ctypes.pythonapi.PyCObject_AsVoidPtr.argtypes = [ctypes.py_object]
        self.lmp = ctypes.c_void_p(ctypes.pythonapi.PyCObject_AsVoidPtr(ptr))