Code example #1
File: test_ctypes.py Project: benkirk/mpi_playground
 def testHandleValue(self):
     typemap = {ctypes.sizeof(ctypes.c_uint32): ctypes.c_uint32,
                ctypes.sizeof(ctypes.c_uint64): ctypes.c_uint64}
     for obj in self.objects:
         uintptr_t = typemap[MPI._sizeof(obj)]
         handle = uintptr_t.from_address(MPI._addressof(obj))
         self.assertEqual(handle.value, MPI._handleof(obj))
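Note: this test (and most examples below) relies on the same private-but-stable mpi4py helpers: MPI._sizeof() gives the byte size of the underlying MPI handle, MPI._addressof() the address of the handle inside the Python object, and MPI._handleof() the handle value itself. Below is a minimal standalone sketch of the idiom, assuming only that mpi4py is installed; the function name show_handle is ours, not from any project listed here.

import ctypes
from mpi4py import MPI

def show_handle(obj):
    # Choose an unsigned integer type whose size matches the MPI handle:
    # int-sized on MPICH-like builds, pointer-sized on Open MPI-like builds.
    typemap = {ctypes.sizeof(ctypes.c_uint32): ctypes.c_uint32,
               ctypes.sizeof(ctypes.c_uint64): ctypes.c_uint64}
    uintptr_t = typemap[MPI._sizeof(obj)]
    # Reinterpret the memory at the object's address as that integer type.
    handle = uintptr_t.from_address(MPI._addressof(obj))
    print(hex(handle.value))

show_handle(MPI.COMM_WORLD)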
Code example #2
def init(comm=None):
    """A function that initializes Horovod.

    Args:
      comm: List specifying ranks for the communicator, relative to the MPI_COMM_WORLD
        communicator OR the MPI communicator to use. Given communicator will be duplicated.
        If None, Horovod will use MPI_COMM_WORLD Communicator.

    """
    if comm is None:
        comm = []

    atexit.register(shutdown)

    if not isinstance(comm, list):
        from mpi4py import MPI
        if MPI._sizeof(MPI.Comm) == ctypes.sizeof(ctypes.c_int):
            MPI_Comm = ctypes.c_int
        else:
            MPI_Comm = ctypes.c_void_p
            MPI_COMMON_LIB_CTYPES.horovod_init_comm.argtypes = [MPI_Comm]

        comm_obj = MPI_Comm.from_address(MPI._addressof(comm))
        return MPI_COMMON_LIB_CTYPES.horovod_init_comm(comm_obj)
    else:
        comm_size = len(comm)
        return MPI_COMMON_LIB_CTYPES.horovod_init(
            (ctypes.c_int * comm_size)(*comm), ctypes.c_int(comm_size))
Code example #3
    def init(self, comm=None):
        """A function that initializes Horovod.

        Args:
          comm: List specifying ranks for the communicator, relative to the MPI_COMM_WORLD
            communicator OR the MPI communicator to use. Given communicator will be duplicated.
            If None, Horovod will use MPI_COMM_WORLD Communicator.
        """
        if comm is None:
            comm = []

        atexit.register(self.shutdown)

        if not isinstance(comm, list):
            mpi_built = self.MPI_LIB_CTYPES.horovod_mpi_built()
            if not bool(mpi_built):
                raise ValueError(
                    "Horovod has not been built with MPI support. Ensure MPI is installed and "
                    "reinstall Horovod with HOROVOD_WITH_MPI=1 to debug the build error.")

            from mpi4py import MPI
            if MPI._sizeof(MPI.Comm) == ctypes.sizeof(ctypes.c_int):
                MPI_Comm = ctypes.c_int
            else:
                MPI_Comm = ctypes.c_void_p
                self.MPI_LIB_CTYPES.horovod_init_comm.argtypes = [MPI_Comm]

            comm_obj = MPI_Comm.from_address(MPI._addressof(comm))
            self.MPI_LIB_CTYPES.horovod_init_comm(comm_obj)
        else:
            comm_size = len(comm)
            self.MPI_LIB_CTYPES.horovod_init(
                (ctypes.c_int * comm_size)(*comm), ctypes.c_int(comm_size))
Code example #4
File: basics.py Project: rongou/horovod
    def _comm_process_set_id(self, comm: MPI.Comm) -> int:
        """ Returns the (previously registered) process set id corresponding to the MPI communicator comm. """
        if not self.mpi_built():
            raise ValueError(
                "Horovod has not been built with MPI support. Ensure MPI is installed and "
                "reinstall Horovod with HOROVOD_WITH_MPI=1 to debug the build error."
            )

        from mpi4py import MPI
        if MPI._sizeof(MPI.Comm) == ctypes.sizeof(ctypes.c_int):
            MPI_Comm = ctypes.c_int
        else:
            MPI_Comm = ctypes.c_void_p

        self.MPI_LIB_CTYPES.horovod_comm_process_set.argtypes = [MPI_Comm]
        comm_obj = MPI_Comm.from_address(MPI._addressof(comm))
        result = int(self.MPI_LIB_CTYPES.horovod_comm_process_set(comm_obj))
        if result == self.HOROVOD_PROCESS_SET_ERROR_INIT:
            raise ValueError(
                'Horovod has not been initialized or MPI has not been enabled; use hvd.init().'
            )
        elif result == self.HOROVOD_PROCESS_SET_ERROR_UNKNOWN_SET:
            raise ValueError(
                'MPI communicator does not correspond to any registered process set.'
            )
        return result
Code example #5
File: test_ctypes.py Project: mpi4py/mpi4py
 def testHandleValue(self):
     typemap = {ctypes.sizeof(ctypes.c_uint32): ctypes.c_uint32,
                ctypes.sizeof(ctypes.c_uint64): ctypes.c_uint64}
     for obj in self.objects:
         uintptr_t = typemap[MPI._sizeof(obj)]
         handle = uintptr_t.from_address(MPI._addressof(obj))
         self.assertEqual(handle.value, MPI._handleof(obj))
Code example #6
class MPICommObject(Object):

    name = 'comm'

    # See https://github.com/mpi4py/mpi4py/blob/master/demo/wrap-ctypes/helloworld.py
    if MPI._sizeof(MPI.Comm) == sizeof(c_int):
        dtype = type('MPI_Comm', (c_int, ), {})
    else:
        dtype = type('MPI_Comm', (c_void_p, ), {})

    def __init__(self, comm=None):
        if comm is None:
            # Should only end up here upon unpickling
            comm = MPI.COMM_WORLD
        comm_ptr = MPI._addressof(comm)
        comm_val = self.dtype.from_address(comm_ptr)
        self.value = comm_val
        self.comm = comm

    def _arg_values(self, *args, **kwargs):
        grid = kwargs.get('grid', None)
        # Update `comm` based on object attached to `grid`
        if grid is not None:
            return grid.distributor._obj_comm._arg_defaults()
        else:
            return self._arg_defaults()

    # Pickling support
    _pickle_args = []
Code example #7
File: basics.py Project: yushinliu/horovod
    def init(self, comm=None):
        """A function that initializes Horovod.

        Args:
          comm: List specifying ranks for the communicator, relative to the MPI_COMM_WORLD
            communicator OR the MPI communicator to use. Given communicator will be duplicated.
            If None, Horovod will use MPI_COMM_WORLD Communicator.
        """
        if comm is None:
            comm = []

        atexit.register(self.shutdown)

        if not isinstance(comm, list):
            mpi_enabled = self.MPI_LIB_CTYPES.horovod_mpi_enabled()
            if not bool(mpi_enabled):
                raise ValueError(
                    'Horovod MPI is not enabled; Please make sure it\'s installed and enabled.'
                )

            from mpi4py import MPI
            if MPI._sizeof(MPI.Comm) == ctypes.sizeof(ctypes.c_int):
                MPI_Comm = ctypes.c_int
            else:
                MPI_Comm = ctypes.c_void_p
                self.MPI_LIB_CTYPES.horovod_init_comm.argtypes = [MPI_Comm]

            comm_obj = MPI_Comm.from_address(MPI._addressof(comm))
            self.MPI_LIB_CTYPES.horovod_init_comm(comm_obj)
        else:
            comm_size = len(comm)
            self.MPI_LIB_CTYPES.horovod_init((ctypes.c_int * comm_size)(*comm),
                                             ctypes.c_int(comm_size))
Code example #8
    def init(self, calling_realm):

        self.realm = calling_realm
        # Build a communicator mpi4py python object from the
        # handle returned by the CPL_init function.
        try:
            if MPI._sizeof(MPI.Comm) == ctypes.sizeof(c_int):
                MPI_Comm = c_int
            else:
                MPI_Comm = c_void_p
        #Some versions of MPI4py have no _sizeof method.
        except AttributeError:
            MPI_Comm = c_int

        # Call create comm
        returned_realm_comm = c_int()
        self._py_init(calling_realm, byref(returned_realm_comm))

        # Use an intracomm object as the template and override value
        newcomm = MPI.Intracomm()
        newcomm_ptr = MPI._addressof(newcomm)
        comm_val = MPI_Comm.from_address(newcomm_ptr)
        comm_val.value = returned_realm_comm.value
        self.COMM = newcomm

        return newcomm
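Example #8 goes in the opposite direction from most snippets here: instead of extracting a handle to pass into C, it takes a raw handle returned by a C routine and grafts it onto a fresh mpi4py communicator. A minimal sketch of just that step follows, under the same assumptions; wrap_handle is an illustrative name of ours, not part of any project shown.

import ctypes
from mpi4py import MPI

def wrap_handle(raw_handle):
    # Pick the ctypes type matching MPI_Comm on this MPI implementation.
    if MPI._sizeof(MPI.Comm) == ctypes.sizeof(ctypes.c_int):
        MPI_Comm = ctypes.c_int
    else:
        MPI_Comm = ctypes.c_void_p
    comm = MPI.Intracomm()  # empty template object (handle is MPI_COMM_NULL)
    # Overwrite the template's internal handle with the raw value.
    MPI_Comm.from_address(MPI._addressof(comm)).value = raw_handle
    return comm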
Code example #9
  def init(self, comm=None):
    """A function that initializes Horovod.

    Args:
      comm: List specifying ranks for the communicator, relative to the
        MPI_COMM_WORLD communicator OR the MPI communicator to use. Given
        communicator will be duplicated. If None, Horovod will use
        MPI_COMM_WORLD Communicator.
    """
    if comm is None:
      comm = []

    atexit.register(self.shutdown)

    if not isinstance(comm, list):
      from mpi4py import MPI  # pylint: disable=import-outside-toplevel
      if MPI._sizeof(MPI.Comm) == ctypes.sizeof(ctypes.c_int):  # pylint: disable=protected-access
        MPI_Comm = ctypes.c_int
      else:
        MPI_Comm = ctypes.c_void_p
        self.MPI_LIB_CTYPES.horovod_init_comm.argtypes = [MPI_Comm]

      comm_obj = MPI_Comm.from_address(MPI._addressof(comm))  # pylint: disable=protected-access
      self.MPI_LIB_CTYPES.horovod_init_comm(comm_obj)
    else:
      comm_size = len(comm)
      self.MPI_LIB_CTYPES.horovod_init((ctypes.c_int * comm_size)(*comm),
                                       ctypes.c_int(comm_size))
Code example #10
 def testHandleValue(self):
     ffi = cffi.FFI()
     typemap = {ffi.sizeof('uint32_t'): 'uint32_t',
                ffi.sizeof('uint64_t'): 'uint64_t',}
     for obj in self.objects:
         uintptr_t = typemap[MPI._sizeof(obj)]
         handle = ffi.cast(uintptr_t+'*', MPI._addressof(obj))[0]
         self.assertEqual(handle, MPI._handleof(obj))
Code example #11
    def __init__(self, name="", cmdargs=None, comm=None):

        # load liblammps.so by default
        # if name = "g++", load liblammps_g++.so

        try:
            if not name: self.lib = CDLL("liblammps.so", RTLD_GLOBAL)
            else: self.lib = CDLL("liblammps_%s.so" % name, RTLD_GLOBAL)
        except OSError:
            etype, value, tb = sys.exc_info()
            traceback.print_exception(etype, value, tb)
            raise OSError("Could not load LAMMPS dynamic library")

        # create an instance of LAMMPS
        # don't know how to pass an MPI communicator from PyPar
        # no_mpi call lets LAMMPS use MPI_COMM_WORLD
        # cargs = array of C strings from args

        if cmdargs:
            cmdargs.insert(0, "lammps.py")
            narg = len(cmdargs)
            cargs = (c_char_p * narg)(*cmdargs)
            self.lmp = c_void_p()
            if comm is None:
                self.lib.lammps_open_no_mpi(narg, cargs, byref(self.lmp))
            else:
                if MPI._sizeof(MPI.Comm) == sizeof(c_int):
                    MPI_Comm = c_int
                else:
                    MPI_Comm = c_void_p
                comm_ptr = MPI._addressof(comm)
                comm_val = MPI_Comm.from_address(comm_ptr)
                self.lib.lammps_open(narg, cargs, comm_val, byref(self.lmp))
        else:
            self.lmp = c_void_p()
            if comm is None:
                self.lib.lammps_open_no_mpi(0, None, byref(self.lmp))
            else:
                if MPI._sizeof(MPI.Comm) == sizeof(c_int):
                    MPI_Comm = c_int
                else:
                    MPI_Comm = c_void_p
                comm_ptr = MPI._addressof(comm)
                comm_val = MPI_Comm.from_address(comm_ptr)
                self.lib.lammps_open(0, None, comm_val, byref(self.lmp))
Code example #12
File: test_ctypes.py Project: mpi4py/mpi4py
 def testHandleAddress(self):
     typemap = {ctypes.sizeof(ctypes.c_int): ctypes.c_int,
                ctypes.sizeof(ctypes.c_void_p): ctypes.c_void_p}
     for obj in self.objects:
         handle_t = typemap[MPI._sizeof(obj)]
         oldobj = obj
         newobj = type(obj)()
         handle_old = handle_t.from_address(MPI._addressof(oldobj))
         handle_new = handle_t.from_address(MPI._addressof(newobj))
         handle_new.value = handle_old.value
         self.assertEqual(obj, newobj)
Code example #13
File: test_ctypes.py Project: benkirk/mpi_playground
 def testHandleAddress(self):
     typemap = {ctypes.sizeof(ctypes.c_int): ctypes.c_int,
                ctypes.sizeof(ctypes.c_void_p): ctypes.c_void_p}
     for obj in self.objects:
         handle_t = typemap[MPI._sizeof(obj)]
         oldobj = obj
         newobj = type(obj)()
         handle_old = handle_t.from_address(MPI._addressof(oldobj))
         handle_new = handle_t.from_address(MPI._addressof(newobj))
         handle_new.value = handle_old.value
         self.assertEqual(obj, newobj)
Code example #14
def get_task_comm():
    from mpi4py import MPI
    import ctypes

    # print("turbine_helpers.task_comm: %i" % task_comm)
    # sys.stdout.flush()

    mpi4py_comm = MPI.Intracomm()
    if MPI._sizeof(MPI.Comm) == ctypes.sizeof(ctypes.c_int):
        # MPICH
        comm_int = ctypes.c_int
        mpi4py_comm_ptr = comm_int.from_address(MPI._addressof(mpi4py_comm))
        mpi4py_comm_ptr.value = task_comm
    elif MPI._sizeof(MPI.Comm) == ctypes.sizeof(ctypes.c_void_p):
        # OpenMPI
        comm_pointer = ctypes.c_void_p
        handle = comm_pointer.from_address(MPI._addressof(mpi4py_comm))
        handle.value = task_comm
        handle.value = task_comm

    return mpi4py_comm
Code example #15
File: par_utils.py Project: pbouda95/mgmetis
def comm_ptr(comm):
    """Get the pointer to the communicator
    """
    # see https://github.com/mpi4py/mpi4py/blob/master/demo/wrap-ctypes/helloworld.py
    comm = get_comm(comm)
    from mpi4py import MPI

    if MPI._sizeof(MPI.Comm) == c.sizeof(c.c_int):
        MPI_Comm = c.c_int
    else:
        # must be pointer
        MPI_Comm = c.c_void_p
    return c.byref(MPI_Comm.from_address(MPI._addressof(comm)))
Code example #16
File: distributed.py Project: tccw/devito
    def _C_comm(self):
        """
        A :class:`Object` wrapping an MPI communicator.

        Extracted from: ::

            https://github.com/mpi4py/mpi4py/blob/master/demo/wrap-ctypes/helloworld.py
        """
        from devito.types import CompositeObject
        ptype = c_int if MPI._sizeof(self._comm) == sizeof(c_int) else c_void_p
        obj = CompositeObject('comm', 'MPI_Comm', ptype, [])
        comm_ptr = MPI._addressof(self._comm)
        comm_val = obj.dtype.from_address(comm_ptr)
        obj.value = comm_val
        return obj
Code example #17
File: distributed.py Project: RajatRasal/devito
    def _C_comm(self):
        """
        A :class:`Object` wrapping an MPI communicator.

        Extracted from: ::

            https://github.com/mpi4py/mpi4py/blob/master/demo/wrap-ctypes/helloworld.py
        """
        from devito.types import Object
        if MPI._sizeof(self._comm) == sizeof(c_int):
            ctype = type('MPI_Comm', (c_int, ), {})
        else:
            ctype = type('MPI_Comm', (c_void_p, ), {})
        comm_ptr = MPI._addressof(self._comm)
        comm_val = ctype.from_address(comm_ptr)
        return Object(name='comm', dtype=ctype, value=comm_val)
Code example #18
 def testHandleAddress(self):
     ffi = cffi.FFI()
     typemap = {ffi.sizeof('int'): 'int', ffi.sizeof('void*'): 'void*'}
     typename = lambda t: t.__name__.rsplit('.', 1)[-1]
     for tp in self.mpitypes:
         handle_t = typemap[MPI._sizeof(tp)]
         mpi_t = 'MPI_' + typename(tp)
         ffi.cdef("typedef %s %s;" % (handle_t, mpi_t))
     for obj in self.objects:
         if isinstance(obj, MPI.Comm):
             mpi_t = 'MPI_Comm'
         else:
             mpi_t = 'MPI_' + typename(type(obj))
         oldobj = obj
         newobj = type(obj)()
         handle_old = ffi.cast(mpi_t + '*', MPI._addressof(oldobj))
         handle_new = ffi.cast(mpi_t + '*', MPI._addressof(newobj))
         handle_new[0] = handle_old[0]
         self.assertEqual(oldobj, newobj)
Code example #19
class MPICommObject(Object):

    name = 'comm'

    # See https://github.com/mpi4py/mpi4py/blob/master/demo/wrap-ctypes/helloworld.py
    if MPI._sizeof(MPI.Comm) == sizeof(c_int):
        dtype = type('MPI_Comm', (c_int, ), {})
    else:
        dtype = type('MPI_Comm', (c_void_p, ), {})

    def __init__(self, comm=None):
        if comm is None:
            # Should only end up here upon unpickling
            comm = MPI.COMM_WORLD
        comm_ptr = MPI._addressof(comm)
        comm_val = self.dtype.from_address(comm_ptr)
        self.value = comm_val

    # Pickling support
    _pickle_args = []
Code example #20
File: pbt.py Project: samadejacobs/CANDLESup
    def __init__(self, comm, dest, outdir):
        # For python 2 compatibility
        super(DataSpacesPBTClient, self).__init__(comm, dest, outdir)
        path = os.path.dirname(os.path.abspath(__file__))
        self.lib = ctypes.cdll.LoadLibrary("{}/libpbt_ds.so".format(path))
        # different mpi implementation use different types for
        # MPI_Comm, this determines which type to use
        if MPI._sizeof(MPI.Comm) == ctypes.sizeof(ctypes.c_int):
            self.mpi_comm_type = ctypes.c_int
        else:
            self.mpi_comm_type = ctypes.c_void_p

        group = comm.Get_group()
        newgroup = group.Excl([dest])
        ds_comm = comm.Create(newgroup)

        self.mpi_comm_self = self.make_comm_arg(MPI.COMM_SELF)
        mpi_comm_ds = self.make_comm_arg(ds_comm)
        world_size = ds_comm.Get_size()
        self.lib.pbt_ds_init(ctypes.c_int(world_size), mpi_comm_ds)
Code example #21
File: cplpy.py Project: Crompulence/cpl-library
    def init(self, calling_realm):

        # Build a communicator mpi4py python object from the
        # handle returned by the CPL_init function.
        if MPI._sizeof(MPI.Comm) == ctypes.sizeof(c_int):
            MPI_Comm = c_int
        else:
            MPI_Comm = c_void_p

        # Call create comm
        returned_realm_comm = c_int()
        self._py_init(calling_realm, byref(returned_realm_comm))

        # Use an intracomm object as the template and override value
        newcomm = MPI.Intracomm()
        newcomm_ptr = MPI._addressof(newcomm)
        comm_val = MPI_Comm.from_address(newcomm_ptr)
        comm_val.value = returned_realm_comm.value

        return newcomm
Code example #22
File: test_cffi.py Project: benkirk/mpi_playground
 def testHandleAddress(self):
     ffi = cffi.FFI()
     typemap = {ffi.sizeof('int'): 'int',
                ffi.sizeof('void*'): 'void*'}
     typename = lambda t: t.__name__.rsplit('.', 1)[-1]
     for tp in self.mpitypes:
         handle_t = typemap[MPI._sizeof(tp)]
         mpi_t = 'MPI_' + typename(tp)
         ffi.cdef("typedef %s %s;" % (handle_t, mpi_t))
     for obj in self.objects:
         if isinstance(obj, MPI.Comm):
             mpi_t = 'MPI_Comm'
         else:
             mpi_t = 'MPI_' + typename(type(obj))
         oldobj = obj
         newobj = type(obj)()
         handle_old = ffi.cast(mpi_t+'*', MPI._addressof(oldobj))
         handle_new = ffi.cast(mpi_t+'*', MPI._addressof(newobj))
         handle_new[0] = handle_old[0]
         self.assertEqual(oldobj, newobj)
Code example #23
File: common_mpi.py Project: KGHustad/FYS3150
def diffusion_2d_mpi(v, iterations, kappa):
    #libdiffuse = load_lib()
    libdiffuse = load_lib_alt()

    # type stuff
    from ctypes import c_double, c_int
    if MPI._sizeof(MPI.Comm) == ctypes.sizeof(ctypes.c_int):
        MPI_Comm = ctypes.c_int
        print "MPI_COMM is an int"
    else:
        MPI_Comm = ctypes.c_void_p
        print "MPI_COMM is a void pointer"

    float64_array_2d = np.ctypeslib.ndpointer(dtype=c_double, ndim=2,
                                          flags="contiguous")

    # MPI_Comm
    comm = MPI.COMM_WORLD
    comm_ptr = MPI._addressof(comm)
    comm_val = MPI_Comm.from_address(comm_ptr)

    height, width = v.shape

    libdiffuse.solve_2d_mpi.restype = None
    libdiffuse.solve_2d_mpi.argtypes = [float64_array_2d,
                                        c_int,
                                        c_int,
                                        c_double,
                                        c_int,
                                        MPI_Comm]
    libdiffuse.solve_2d_mpi(v,
                            c_int(height),
                            c_int(width),
                            c_double(kappa),
                            c_int(iterations),
                            comm_val
                            )
Code example #24
"""
Python wrapper for LJMD C code

Author: Krister Jazz Urog
Date: 17 February, 2020
"""
from ctypes import *
import sys
import os
from mpi4py import MPI

if MPI._sizeof(MPI.Comm) == sizeof(c_int):
    MPI_Comm = c_int
else:
    MPI_Comm = c_void_p


class MDSYS_T(Structure):
    """
    Molecular dynamics system structure object wrapper for a similar C struct.

    -----------------------------------------------
    Parameters:
            natoms = number of atoms
            nfi = current iteration step
            nsteps = total number of iteration steps
            dt = time step
            mass = mass of atom
            epsilon = constant
            sigma =  constant
            box = box size
Code example #25
File: mpi.py Project: Michaeldz36/PyMPDATA
import numba
import ctypes
import numpy as np
import platform
from mpi4py import MPI

if MPI._sizeof(MPI.Comm) == ctypes.sizeof(ctypes.c_int):
    _MPI_Comm_t = ctypes.c_int
else:
    _MPI_Comm_t = ctypes.c_void_p

if MPI._sizeof(MPI.Datatype) == ctypes.sizeof(ctypes.c_int):
    _MPI_Datatype_t = ctypes.c_int
else:
    _MPI_Datatype_t = ctypes.c_void_p

_MPI_Status_ptr_t = ctypes.c_void_p

if platform.system() == 'Linux':
    lib = 'libmpi.so'
elif platform.system() == 'Windows':
    lib = 'msmpi.dll'
elif platform.system() == 'Darwin':
    lib = 'libmpi.dylib'
else:
    raise NotImplementedError()
libmpi = ctypes.CDLL(lib)

_MPI_Initialized = libmpi.MPI_Initialized
_MPI_Initialized.restype = ctypes.c_int
_MPI_Initialized.argtypes = [ctypes.c_void_p]
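The excerpt above stops right after declaring _MPI_Initialized, whose C signature is int MPI_Initialized(int *flag). A sketch of how that declaration could be exercised, under the snippet's own assumptions (the initialized helper is our name, not part of PyMPDATA):

def initialized():
    flag = ctypes.c_int(0)
    # MPI_Initialized writes into the int* out-parameter and returns
    # MPI_SUCCESS (0) on success.
    status = _MPI_Initialized(ctypes.byref(flag))
    assert status == 0
    return bool(flag.value)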
Code example #26
class CPL:
    # Shared attribute containing the library
    CFD_REALM = 1
    MD_REALM = 2
    GATHER_SCATTER = 1
    SEND_RECEIVE = 2
    NULL_REALM = 0
    _libname = "libcpl"
    try:
        _lib_path = os.environ["CPL_LIBRARY_PATH"]
        if os.path.exists(_lib_path + "/" + _libname + ".so"):
            _cpl_lib = load_library(_libname, _lib_path)
        else:
            raise CPLLibraryNotFound(
                "Compiled CPL library libcpl.so not found at " + _lib_path +
                "/" + _libname + ".so")
    except KeyError as e:
        print(
            "CPL info: ",
            "CPL_LIBRARY_PATH not defined. Looking in system directories...")
        try:
            _cpl_lib = cdll.LoadLibrary(_libname + ".so")
            print("CPL info: ", "Success!")
        except OSError as e:
            raise CPLLibraryNotFound("Library libcpl.so not found!")
            #TODO: Check this
            #time.sleep(2)
            #MPI.COMM_WORLD.Abort(errorcode=1)

    # Check for JSON support by checking if the load_param_file symbol exists
    JSON_SUPPORT = True
    try:
        _cpl_lib.CPLC_load_param_file
    except AttributeError:
        JSON_SUPPORT = False

    def __init__(self):
        self._var = POINTER(POINTER(c_char_p))
        self.realm = None

    # py_test_python function
    py_test_python = _cpl_lib.CPLC_test_python
    py_test_python.argtypes = \
        [c_int,
         c_double,
         c_bool,
         ndpointer(np.int32, ndim=2, flags='aligned, f_contiguous'),
         ndpointer(np.float64, ndim=2,  flags='aligned, f_contiguous'),
         ndpointer(np.int32, shape=(2,), flags='aligned, f_contiguous'),
         ndpointer(np.int32, shape=(2,), flags='aligned, f_contiguous')]

    @abortMPI
    def test_python(self, int_p, doub_p, bool_p, int_pptr, doub_pptr):
        int_pptr_dims = np.array(int_pptr.shape, order='F', dtype=np.int32)
        doub_pptr_dims = np.array(doub_pptr.shape, order='F', dtype=np.int32)
        self.py_test_python(int_p, doub_p, bool_p, int_pptr, doub_pptr,
                            int_pptr_dims, doub_pptr_dims)

    _py_init = _cpl_lib.CPLC_init

    #OpenMPI comm greater than c_int
    try:
        if MPI._sizeof(MPI.Comm) == ctypes.sizeof(c_int):
            _py_init.argtypes = [c_int, POINTER(c_int)]
        else:
            excptstr = "Problem is in create_comm wrapper, as the OpenMPI COMM handle is not "
            excptstr += "an integer, c_void_p should be used so C bindings needs something like **void"
            excptstr += "(No idea what to do in the Fortran code, maybe MPI_COMM_f2C required)"
            raise OpenMPI_Not_Supported(excptstr)
            _py_init.argtypes = [c_int, POINTER(c_void_p)]
    except AttributeError:
        _py_init.argtypes = [c_int, POINTER(c_int)]

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    @abortMPI
    def init(self, calling_realm):

        self.realm = calling_realm
        # Build a communicator mpi4py python object from the
        # handle returned by the CPL_init function.
        try:
            if MPI._sizeof(MPI.Comm) == ctypes.sizeof(c_int):
                MPI_Comm = c_int
            else:
                MPI_Comm = c_void_p
        #Some versions of MPI4py have no _sizeof method.
        except AttributeError:
            MPI_Comm = c_int

        # Call create comm
        returned_realm_comm = c_int()
        self._py_init(calling_realm, byref(returned_realm_comm))

        # Use an intracomm object as the template and override value
        newcomm = MPI.Intracomm()
        newcomm_ptr = MPI._addressof(newcomm)
        comm_val = MPI_Comm.from_address(newcomm_ptr)
        comm_val.value = returned_realm_comm.value
        self.COMM = newcomm

        return newcomm

    if JSON_SUPPORT:
        _py_load_param_file = _cpl_lib.CPLC_load_param_file
        _py_load_param_file.argtypes = [c_char_p]

    @abortMPI
    def load_param_file(self, fname):
        self._py_load_param_file(c_char_p(fname), c_int(len(fname)))

    if JSON_SUPPORT:
        _py_close_param_file = _cpl_lib.CPLC_close_param_file

    @abortMPI
    def close_param_file(self):
        self._py_close_param_file()

    @abortMPI
    def get_file_var(self, section, var_name, var_type):
        try:
            fun_name = _CPL_GET_FILE_VARS[var_type][0]
            var_ctype = _CPL_GET_FILE_VARS[var_type][1]

            fun = getattr(self._cpl_lib, "CPLC_" + fun_name)
            fun.argtypes = [c_char_p, c_char_p, POINTER(var_ctype)]

        except KeyError:
            print("CPL-ERROR: CPL Library function '" + str(fun_name) +
                  "' not found!")
            raise KeyError
        else:
            self._var = var_ctype()

            if ("array" in fun_name):
                print("ENTRO")
                var_len = c_int()
                fun.argtypes.append(POINTER(c_int))
                print("EY")
                fun(c_char_p(section), c_char_p(var_name), byref(self._var),
                    byref(var_len))
                print("len:", var_len.value)
                #print (self._var[0])
                #print (byref(var[0]))
                a = ([self._var[i] for i in xrange(var_len.value)])
                return a
            else:
                fun(c_char_p(section), c_char_p(var_name), byref(self._var))
                return self._var.value

    _py_finalize = _cpl_lib.CPLC_finalize

    @abortMPI
    def finalize(self):
        self._py_finalize()

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    py_setup_cfd = _cpl_lib.CPLC_setup_cfd

    py_setup_cfd.argtypes = \
        [c_int,
         ndpointer(np.float64, shape=(3,), flags='aligned, f_contiguous'),
         ndpointer(np.float64, shape=(3,), flags='aligned, f_contiguous'),
         ndpointer(np.int32, shape=(3,), flags='aligned, f_contiguous')]

    @abortMPI
    def setup_cfd(self, icomm_grid, xyzL, xyz_orig, ncxyz):
        """
            setup_cfd(icomm_grid, xyzL, xyz_orig, ncxyz):
        """

        if ((isinstance(icomm_grid, list) and len(icomm_grid) == 3) or
                (isinstance(icomm_grid, np.ndarray) and icomm_grid.shape[0] == 3)):
            icomm_grid = self.COMM.Create_cart(
                [icomm_grid[0], icomm_grid[1], icomm_grid[2]])

        if ((type(xyzL) is list) or (xyzL.dtype != np.float64)
                or (not xyzL.flags["F_CONTIGUOUS"])):
            xyzL = np.array(xyzL, order='F', dtype=np.float64)

        if ((type(xyz_orig) is list) or (xyz_orig.dtype != np.float64)
                or (not xyz_orig.flags["F_CONTIGUOUS"])):
            xyz_orig = np.array(xyz_orig, order='F', dtype=np.float64)

        if ((type(ncxyz) is list) or (ncxyz.dtype != np.int32)
                or (not ncxyz.flags["F_CONTIGUOUS"])):
            ncxyz = np.array(ncxyz, order='F', dtype=np.int32)

        self.py_setup_cfd(MPI._handleof(icomm_grid), xyzL, xyz_orig, ncxyz)

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    py_setup_md = _cpl_lib.CPLC_setup_md

    py_setup_md.argtypes = \
        [c_int,
         ndpointer(np.float64, shape=(3,), flags='aligned, f_contiguous'),
         ndpointer(np.float64, shape=(3,), flags='aligned, f_contiguous')]

    @abortMPI
    def setup_md(self, icomm_grid, xyzL, xyz_orig):
        """
        setup_md(icomm_grid, xyzL, xyz_orig)

        """
        if ((isinstance(icomm_grid, list) and len(icomm_grid) == 3) or
                (isinstance(icomm_grid, np.ndarray) and icomm_grid.shape[0] == 3)):
            icomm_grid = self.COMM.Create_cart(
                [icomm_grid[0], icomm_grid[1], icomm_grid[2]])

        if ((type(xyzL) is list) or (xyzL.dtype != np.float64)
                or (not xyzL.flags["F_CONTIGUOUS"])):
            xyzL = np.array(xyzL, order='F', dtype=np.float64)

        if ((type(xyz_orig) is list) or (xyz_orig.dtype != np.float64)
                or (not xyz_orig.flags["F_CONTIGUOUS"])):
            xyz_orig = np.array(xyz_orig, order='F', dtype=np.float64)

        self.py_setup_md(MPI._handleof(icomm_grid), xyzL, xyz_orig)

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    py_proc_extents = _cpl_lib.CPLC_proc_extents
    py_proc_extents.argtypes = \
        [ndpointer(np.int32, shape=(3,), flags='aligned, f_contiguous'),
         c_int,
         ndpointer(np.int32, shape=(6,), flags='aligned, f_contiguous')]

    @abortMPI
    def proc_extents(self, coord, realm):
        coord = self._type_check(coord)
        extents = np.zeros(6, order='F', dtype=np.int32)
        self.py_proc_extents(coord, realm, extents)
        return extents

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    py_my_proc_extents = _cpl_lib.CPLC_my_proc_extents
    py_my_proc_extents.argtypes = \
        [ndpointer(np.int32, shape=(6,), flags='aligned, f_contiguous')]

    @abortMPI
    def my_proc_extents(self):
        extents = np.zeros(6, order='F', dtype=np.int32)
        self.py_my_proc_extents(extents)
        return extents

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    py_proc_portion = _cpl_lib.CPLC_proc_portion
    py_proc_portion.argtypes = \
        [ndpointer(np.int32, shape=(3,), flags='aligned, f_contiguous'),
         c_int,
         ndpointer(np.int32, shape=(6,), flags='aligned, f_contiguous'),
         ndpointer(np.int32, shape=(6,), flags='aligned, f_contiguous')]

    @abortMPI
    def proc_portion(self, coord, realm, limits):
        coord = self._type_check(coord)
        limits = self._type_check(limits)
        portion = np.zeros(6, order='F', dtype=np.int32)
        self.py_proc_portion(coord, realm, limits, portion)
        return portion

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    py_my_proc_portion = _cpl_lib.CPLC_my_proc_portion
    py_my_proc_portion.argtypes = \
        [ndpointer(np.int32, shape=(6,), flags='aligned, f_contiguous'),
         ndpointer(np.int32, shape=(6,), flags='aligned, f_contiguous')]

    @abortMPI
    def my_proc_portion(self, limits):
        limits = self._type_check(limits)
        portion = np.zeros(6, order='F', dtype=np.int32)
        self.py_my_proc_portion(limits, portion)
        return portion

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    py_map_cfd2md_coord = _cpl_lib.CPLC_map_cfd2md_coord
    py_map_cfd2md_coord.argtypes = \
        [ndpointer(np.float64, shape=(3,), flags='aligned, f_contiguous'),
         ndpointer(np.float64, shape=(3,), flags='aligned, f_contiguous')]

    @abortMPI
    def map_cfd2md_coord(self, coord_cfd):
        coord_cfd = self._type_check(coord_cfd)
        coord_md = np.zeros(3, order='F', dtype=np.float64)
        self.py_map_cfd2md_coord(coord_cfd, coord_md)
        return coord_md

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    py_map_md2cfd_coord = _cpl_lib.CPLC_map_md2cfd_coord
    py_map_md2cfd_coord.argtypes = \
        [ndpointer(np.float64, shape=(3,), flags='aligned, f_contiguous'),
         ndpointer(np.float64, shape=(3,), flags='aligned, f_contiguous')]

    @abortMPI
    def map_md2cfd_coord(self, coord_md):
        coord_md = self._type_check(coord_md)
        coord_cfd = np.zeros(3, order='F', dtype=np.float64)
        self.py_map_md2cfd_coord(coord_md, coord_cfd)
        return coord_cfd

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    py_map_glob2loc_cell = _cpl_lib.CPLC_map_glob2loc_cell
    py_map_glob2loc_cell.argtypes = \
        [ndpointer(np.int32, shape=(6,), flags='aligned, f_contiguous'),
         ndpointer(np.int32, shape=(3,), flags='aligned, f_contiguous'),
         ndpointer(np.int32, shape=(3,), flags='aligned, f_contiguous')]

    @abortMPI
    def map_glob2loc_cell(self, limits, glob_cell):
        limits = self._type_check(limits)
        glob_cell = self._type_check(glob_cell)
        loc_cell = np.zeros(3, order='F', dtype=np.int32)
        self.py_map_glob2loc_cell(limits, glob_cell, loc_cell)
        return loc_cell

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    py_map_cell2coord = _cpl_lib.CPLC_map_cell2coord
    py_map_cell2coord.argtypes = \
        [c_int, c_int, c_int,
         ndpointer(np.float64, shape=(3,), flags='aligned, f_contiguous')]

    @abortMPI
    def map_cell2coord(self, i, j, k):
        coord = np.zeros(3, order='F', dtype=np.float64)
        self.py_map_cell2coord(i, j, k, coord)
        return coord

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    py_map_coord2cell = _cpl_lib.CPLC_map_coord2cell
    py_map_coord2cell.argtypes = \
        [c_double, c_double, c_double,
         ndpointer(np.int32, shape=(3,), flags='aligned, f_contiguous')]

    @abortMPI
    def map_coord2cell(self, x, y, z):
        cell = np.zeros(3, order='F', dtype=np.int32)
        self.py_map_coord2cell(x, y, z, cell)
        return cell

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    py_get_no_cells = _cpl_lib.CPLC_get_no_cells
    py_get_no_cells.argtypes = \
        [ndpointer(np.int32, shape=(6,), flags='aligned, f_contiguous'),
         ndpointer(np.int32, shape=(3,), flags='aligned, f_contiguous')]

    @abortMPI
    def get_no_cells(self, limits):
        limits = self._type_check(limits)
        no_cells = np.zeros(3, order='F', dtype=np.int32)
        self.py_get_no_cells(limits, no_cells)
        return no_cells

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    #Limits of overlap region
    py_get_olap_limits = _cpl_lib.CPLC_get_olap_limits
    py_get_olap_limits.argtypes = \
        [ndpointer(np.int32, shape=(6,), flags='aligned, f_contiguous')]

    @abortMPI
    def get_olap_limits(self):
        limits = np.zeros(6, order='F', dtype=np.int32)
        self.py_get_olap_limits(limits)
        return limits

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    #Limits of constraint region
    py_get_cnst_limits = _cpl_lib.CPLC_get_cnst_limits
    py_get_cnst_limits.argtypes = \
        [ndpointer(np.int32, shape=(6,), flags='aligned, f_contiguous')]

    @abortMPI
    def get_cnst_limits(self):
        limits = np.zeros(6, order='F', dtype=np.int32)
        self.py_get_cnst_limits(limits)
        return limits

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    #Limits of boundary region
    py_get_bnry_limits = _cpl_lib.CPLC_get_bnry_limits
    py_get_bnry_limits.argtypes = \
        [ndpointer(np.int32, shape=(6,), flags='aligned, f_contiguous')]

    @abortMPI
    def get_bnry_limits(self):
        limits = np.zeros(6, order='F', dtype=np.int32)
        self.py_get_bnry_limits(limits)
        return limits

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    py_set_timing = _cpl_lib.CPLC_set_timing
    py_set_timing.argtypes = \
        [c_int, c_int, c_double]

    #Don't call abortMPI so it can be handled nicely in Python.
    #@abortMPI
    def set_timing(self, initialstep, nsteps, dt):
        class DeprecatedException(Exception):
            """Raise Error as function should not be used"""

        raise DeprecatedException(
            "CPL set_timing is deprecated and should not be used")
        self.py_set_timing(initialstep, nsteps, dt)

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    py_send = _cpl_lib.CPLC_send
    py_send.argtypes = \
        [ndpointer(np.float64, flags='aligned, f_contiguous'),
         ndpointer(np.int32, ndim=1, flags='aligned, f_contiguous'),
         ndpointer(np.int32, ndim=1, flags='aligned, f_contiguous'),
         POINTER(c_bool)]

    @abortMPI
    def send(self, asend, limits=None):
        #Attempt to guess required size
        if limits is None:
            if self.realm == self.CFD_REALM:
                limits = self.my_proc_portion(self.get_cnst_limits())
            elif self.realm == self.MD_REALM:
                limits = self.my_proc_portion(self.get_bnry_limits())

        asend = self._type_check(asend)
        asend_shape = np.array(asend.shape, order='F', dtype=np.int32)
        send_flag = c_bool()

        self.py_send(asend, asend_shape, limits, byref(send_flag))
        return send_flag.value

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    py_recv = _cpl_lib.CPLC_recv
    py_recv.argtypes = \
        [ndpointer(np.float64, flags='aligned, f_contiguous'),
         ndpointer(np.int32, ndim=1, flags='aligned, f_contiguous'),
         ndpointer(np.int32, ndim=1, flags='aligned, f_contiguous'),
         POINTER(c_bool)]

    @abortMPI
    def recv(self, arecv, limits=None):
        #Attempt to guess required size
        if limits is None:
            if self.realm == self.CFD_REALM:
                limits = self.my_proc_portion(self.get_bnry_limits())
            elif self.realm == self.MD_REALM:
                limits = self.my_proc_portion(self.get_cnst_limits())

        arecv = self._type_check(arecv)
        arecv_shape = np.array(arecv.shape, order='F', dtype=np.int32)
        recv_flag = c_bool()
        self.py_recv(arecv, arecv_shape, limits, byref(recv_flag))
        return arecv, recv_flag.value

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    py_gather = _cpl_lib.CPLC_gather
    py_gather.argtypes = \
        [ndpointer(np.float64, flags='aligned, f_contiguous'),
         ndpointer(np.int32, ndim=1, flags='aligned, f_contiguous'),
         ndpointer(np.int32, shape=(6,), flags='aligned, f_contiguous'),
         ndpointer(np.float64, flags='aligned, f_contiguous'),
         ndpointer(np.int32, ndim=1, flags='aligned, f_contiguous')]

    @abortMPI
    def gather(self, gather_array, limits, recv_array):
        gather_array = self._type_check(gather_array)
        recv_array = self._type_check(recv_array)
        gather_shape = np.array(gather_array.shape, order='F', dtype=np.int32)
        recv_shape = np.array(recv_array.shape, order='F', dtype=np.int32)
        self.py_gather(gather_array, gather_shape, limits, recv_array,
                       recv_shape)

        return recv_array

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    py_scatter = _cpl_lib.CPLC_scatter
    py_scatter.argtypes = \
        [ndpointer(np.float64, flags='aligned, f_contiguous'),
         ndpointer(np.int32, ndim=1, flags='aligned, f_contiguous'),
         ndpointer(np.int32, shape=(6,), flags='aligned, f_contiguous'),
         ndpointer(np.float64, flags='aligned, f_contiguous'),
         ndpointer(np.int32, ndim=1, flags='aligned, f_contiguous')]

    @abortMPI
    def scatter(self, scatter_array, limits, recv_array):
        scatter_array = self._type_check(scatter_array)
        recv_array = self._type_check(recv_array)
        scatter_shape = np.array(scatter_array.shape,
                                 order='F',
                                 dtype=np.int32)
        recv_shape = np.array(recv_array.shape, order='F', dtype=np.int32)
        self.py_scatter(scatter_array, scatter_shape, limits, recv_array,
                        recv_shape)
        return recv_array

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    py_swaphalos = _cpl_lib.CPLC_swaphalos
    py_swaphalos.argtypes = \
        [ndpointer(np.float64, flags='aligned, f_contiguous'),
         ndpointer(np.int32, ndim=1, flags='aligned, f_contiguous')]

    @abortMPI
    def swaphalos(self, A):
        A = self._type_check(A)
        A_shape = np.array(A.shape, order='F', dtype=np.int32)
        self.py_swaphalos(A, A_shape)
        return A

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    py_overlap = _cpl_lib.CPLC_overlap
    py_overlap.argtypes = []

    @abortMPI
    def overlap(self):
        self.py_overlap.restype = c_bool
        return self.py_overlap()

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    py_is_proc_inside = _cpl_lib.CPLC_is_proc_inside
    py_is_proc_inside.argtypes = \
        [ndpointer(np.int32, shape=(6,), flags='aligned, f_contiguous')]

    @abortMPI
    def is_proc_inside(self, region):
        self.py_is_proc_inside.restype = c_bool
        region = self._type_check(region)
        return self.py_is_proc_inside(region)

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    @abortMPI
    def get(self, var_name):
        try:
            var_type = _CPL_GET_VARS[var_name]
            fun = getattr(self._cpl_lib, "CPLC_" + var_name)
        except KeyError:
            print("CPL-ERROR: CPL Library function '" + str(var_name) +
                  "' not found!")
            print("Available options include: ")
            for var in _CPL_GET_VARS:
                print(var)
            raise KeyError
        else:
            fun.restype = var_type
            return fun()

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    @abortMPI
    def set(self, var_name, value):
        try:
            var_type = _CPL_SET_VARS[var_name]
            fun = getattr(self._cpl_lib, "CPLC_set_" + var_name)
        except KeyError:
            print("CPL-ERROR: CPL Library function '" + str(var_name) +
                  "' not found!")
            raise KeyError
        else:
            fun.argtypes = [var_type]
            return fun(var_type(value))

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    @abortMPI
    def _type_check(self, A):
        if type(A) is list:
            ndtype = type(A[0])
            if ndtype == float:
                ndtype = np.float64
            elif ndtype == int:
                ndtype = np.int32
            A = np.asfortranarray(A, dtype=ndtype)
        if not A.flags["F_CONTIGUOUS"]:
            A = np.require(A, requirements=['F'])
        if not A.flags["ALIGNED"]:
            A = np.require(A, requirements=['A'])
        return A

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    @abortMPI
    def get_arrays(self, recv_size, send_size):
        """
          Return recv array and send array based
          on constraint/boundary sizes
        """
        #Get constraint region
        cnst_limits = self.get_cnst_limits()
        cnst_portion = self.my_proc_portion(cnst_limits)
        cnst_ncxl, cnst_ncyl, cnst_nczl = self.get_no_cells(cnst_portion)

        #Get overlap region
        BC_limits = self.get_bnry_limits()
        BC_portion = self.my_proc_portion(BC_limits)
        BC_ncxl, BC_ncyl, BC_nczl = self.get_no_cells(BC_portion)

        #Allocate send and recv arrays
        recv_array = np.zeros((recv_size, BC_ncxl, BC_ncyl, BC_nczl),
                              order='F',
                              dtype=np.float64)
        send_array = np.zeros((send_size, cnst_ncxl, cnst_ncyl, cnst_nczl),
                              order='F',
                              dtype=np.float64)

        return recv_array, send_array

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    @abortMPI
    def dump_region(self,
                    region,
                    array,
                    fname,
                    comm,
                    components=None,
                    coords="mine"):
        lines = ""
        portion = self.my_proc_portion(region)
        cell_coords = np.array(3)
        dx = self.get("dx")
        dy = self.get("dy")
        dz = self.get("dz")
        def_func = lambda x: x
        if callable(components):
            def_func = components
        # Avoid a shared mutable default argument: build the dict per call
        components_dic = components if isinstance(components, dict) else {}
        if not components_dic:
            components_idx = list(range(0, array.shape[0]))
            for c_idx in components_idx:
                components_dic[c_idx] = def_func
        for k, v in components_dic.items():
            if v is None:
                components_dic[k] = def_func
        #if self.overlap():
        if self.is_proc_inside(portion):
            ncx, ncy, ncz = self.get_no_cells(portion)
            if (ncx, ncy, ncz) != array.shape[1:]:
                print(
                    "self-Error in dump_region(): array and processor portion of different size."
                )
                MPI.COMM_WORLD.Abort(errorcode=1)

            for i in range(portion[0], portion[1] + 1):
                for j in range(portion[2], portion[3] + 1):
                    for k in range(portion[4], portion[5] + 1):
                        cell_coords = self.map_cell2coord(i, j, k)
                        if coords != "mine":
                            if self.realm == CPL.CFD_REALM:
                                cell_coords = self.map_cfd2md_coord(
                                    cell_coords)
                            else:
                                cell_coords = self.map_md2cfd_coord(
                                    cell_coords)
                        [i_loc, j_loc,
                         k_loc] = self.map_glob2loc_cell(portion, [i, j, k])
                        lines += str(cell_coords[0] + dx/2.0) + " "\
                               + str(cell_coords[1] + dy/2.0) + " "\
                               + str(cell_coords[2] + dz/2.0)

                        # use a distinct name so the cell index k is not shadowed
                        for comp, f in components_dic.items():
                            lines += " " + str(f(array[comp, i_loc, j_loc,
                                                       k_loc]))
                        lines += "\n"

        # Gather all the forces from every processor and dump them to a file at the root
        lines = comm.gather(lines, root=0)

        myrank = comm.Get_rank()
        if myrank == 0:
            with open(fname, "w") as file_out:
                file_out.writelines(lines)
Code example #27
from mpi4py import MPI
import cffi
import os

_libdir = os.path.dirname(__file__)

ffi = cffi.FFI()
if MPI._sizeof(MPI.Comm) == ffi.sizeof('int'):
    _mpi_comm_t = 'int'
else:
    _mpi_comm_t = 'void*'
ffi.cdef("""
typedef %(_mpi_comm_t)s MPI_Comm;
void sayhello(MPI_Comm);
""" % vars())
lib = ffi.dlopen(os.path.join(_libdir, "libhelloworld.so"))

def sayhello(comm):
    comm_ptr = MPI._addressof(comm)
    comm_val = ffi.cast('MPI_Comm*', comm_ptr)[0]
    lib.sayhello(comm_val)
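For reference, a call site for this cffi wrapper just passes any mpi4py communicator; a usage sketch, assuming the module above is importable and libhelloworld.so was built as in the mpi4py wrap-cffi demo:

from mpi4py import MPI
sayhello(MPI.COMM_WORLD)   # each rank greets via the C library
sayhello(MPI.COMM_SELF)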
Code example #28
File: wrappers.py Project: boruil/homework4
import ctypes
import numpy
import os

from ctypes import c_void_p, c_int, c_long, c_size_t, c_double
from mpi4py import MPI

# try to import the library
try:
    path_to_library = os.path.join('lib','libhomework4.so')
    homework4library = ctypes.cdll.LoadLibrary(path_to_library)
except OSError:
    raise OSError("You need to compile your homework library using 'make'.")

# determine the c-type of an MPI_Comm type
if MPI._sizeof(MPI.Comm) == ctypes.sizeof(c_int):
    c_mpi_comm = ctypes.c_int
else:
    c_mpi_comm = ctypes.c_void_p

###############################################################################
# function wrappers
###############################################################################
def heat_serial(u, dx, Nx, dt, num_steps):
    r"""Solve the heat equation using basic finite difference scheme.

    Parameters
    ----------
    u : array
        Function values.
Code example #29
from mpi4py import MPI
import ctypes
import os

_libdir = os.path.dirname(__file__)

if MPI._sizeof(MPI.Comm) == ctypes.sizeof(ctypes.c_int):
    MPI_Comm = ctypes.c_int
else:
    MPI_Comm = ctypes.c_void_p
_lib = ctypes.CDLL(os.path.join(_libdir, "libhelloworld.so"))
_lib.sayhello.restype = None
_lib.sayhello.argtypes = [MPI_Comm]


def sayhello(comm):
    comm_ptr = MPI._addressof(comm)
    comm_val = MPI_Comm.from_address(comm_ptr)
    _lib.sayhello(comm_val)
Code example #30
from mpi4py import MPI
import cffi
import os

_libdir = os.path.dirname(__file__)

ffi = cffi.FFI()
if MPI._sizeof(MPI.Comm) == ffi.sizeof('int'):
    _mpi_comm_t = 'int'
else:
    _mpi_comm_t = 'void*'
ffi.cdef("""
typedef %(_mpi_comm_t)s MPI_Comm;
void sayhello(MPI_Comm);
""" % vars())
lib = ffi.dlopen(os.path.join(_libdir, "libhelloworld.so"))


def sayhello(comm):
    comm_ptr = MPI._addressof(comm)
    comm_val = ffi.cast('MPI_Comm*', comm_ptr)[0]
    lib.sayhello(comm_val)
Code example #31
 def testSizeOf(self):
     for obj in self.objects:
         n1 = MPI._sizeof(obj)
         n2 = MPI._sizeof(type(obj))
         self.assertEqual(n1, n2)
Code example #32
    def init(self, comm: Optional[Union[Sequence[int], MPI.Comm]] = None,
             process_sets: Optional[Sequence[ProcessSet]] = None):
        """A function that initializes Horovod.

        Args:

          comm: One of these possibilities:

            1) List specifying ranks for the communicator, relative to the MPI_COMM_WORLD
               communicator.
            2) None: Use all ranks of MPI_COMM_WORLD.
            3) MPI communicator to use. Given communicator will be duplicated and used as
               the global Horovod communicator.

          process_sets: One of these possibilities:

            1) None -- Do not initialize any process sets.
            2) List[hvd.ProcessSet] -- Initialize process set objects given in list (in addition to
               hvd.global_process_set that will always be initialized). Users should hold on to these objects to pass
               them to any Horovod collective communication ops. Duplicate process sets are not allowed.
            3) "dynamic": do not initialize any process sets now, but set the environment variable
               HOROVOD_DYNAMIC_PROCESS_SETS=1 so we can call `hvd.add_process_set(...)` later.
        """

        if comm is None:
            comm = []
        if process_sets is None:
            process_sets = []
        elif isinstance(process_sets, str) and process_sets.lower() == "dynamic":
            process_sets = []
            os.environ["HOROVOD_DYNAMIC_PROCESS_SETS"] = "1"

        process_sets = list(process_sets)
        process_sets_via_ranks = [ps for ps in process_sets if ps.ranks is not None]
        process_sets_via_comm = [ps for ps in process_sets if ps.mpi_comm is not None and ps.ranks is None]

        process_set_sizes_via_ranks = [len(ps.ranks) for ps in process_sets_via_ranks]
        process_set_ranks_via_ranks = [rank for process_set in process_sets_via_ranks for rank in process_set.ranks]
        process_set_args_via_ranks = [
            (ctypes.c_int * len(process_set_ranks_via_ranks))(*process_set_ranks_via_ranks),
            (ctypes.c_int * len(process_set_sizes_via_ranks))(*process_set_sizes_via_ranks),
            ctypes.c_int(len(process_set_sizes_via_ranks))
        ]

        atexit.register(self.shutdown)

        initialization_ok = True
        if util.is_iterable(comm):
            # comm is a list of ranks relative to the global communicator
            if len(process_sets_via_comm) > 0:
                raise NotImplementedError(
                    "At this time process sets defined via MPI communicators are only supported when calling hvd.init() "
                    "with comm set to a global MPI communicator.")
            comm_size = len(comm)
            initialization_ok = self.MPI_LIB_CTYPES.horovod_init(
                (ctypes.c_int * comm_size)(*comm), ctypes.c_int(comm_size),
                *process_set_args_via_ranks)
        else:
            if not self.mpi_built():
                raise ValueError(
                    "Horovod has not been built with MPI support. Ensure MPI is installed and "
                    "reinstall Horovod with HOROVOD_WITH_MPI=1 to debug the build error.")

            from mpi4py import MPI
            if not isinstance(comm, MPI.Comm):
                raise ValueError(
                    "Invalid type of argument comm. Expected list of rank integers or mpi4py.MPI.Comm object.")
            global_process_set.mpi_comm = comm
            if MPI._sizeof(MPI.Comm) == ctypes.sizeof(ctypes.c_int):
                MPI_Comm = ctypes.c_int
            else:
                MPI_Comm = ctypes.c_void_p

            comm_list = [comm] + [ps.mpi_comm for ps in process_sets_via_comm]
            comm_objs = [MPI_Comm.from_address(MPI._addressof(c)) for c in comm_list]
            num_comms = len(comm_list)
            self.MPI_LIB_CTYPES.horovod_init_multi_comm.argtypes = [MPI_Comm * num_comms, ctypes.c_int]
            initialization_ok = self.MPI_LIB_CTYPES.horovod_init_multi_comm((MPI_Comm * num_comms)(*comm_objs),
                                                                            ctypes.c_int(num_comms),
                                                                            *process_set_args_via_ranks)
        if not initialization_ok:
            raise ValueError(
                "Horovod initialization failed. Please check log messages above for a more descriptive error.")

        try:
            _init_process_sets(process_sets)
        except ValueError as e:
            if (len(e.args) > 0 and isinstance(e.args[0], str) and
                "Horovod has not been initialized properly" in e.args[0]):
                # Horovod is already shutting down
                return
            else:
                raise e

        for ps_idx, ps in enumerate(process_sets):
            if ps.process_set_id is None:
                raise ValueError(
                    f"Horovod could not be initialized because process_sets entry number {ps_idx} is a duplicate: {ps}")
Code example #33
  def __init__(self, library=None, style='spherical', dim=3, units='si', path=None, cmdargs=[], comm=None, ptr=None):
    # What is ptr used for?

    if not comm:
      comm = MPI.COMM_WORLD

    if library:
      if not comm.Get_rank():
        print("Using " + library + " as a shared library for DEM computations")
    else:
      if not comm.Get_rank():
        print('No library supplied.')

      sys.exit()

    try:
      self.lib = ctypes.CDLL(library, ctypes.RTLD_GLOBAL)
    except:
      etype,value,tb = sys.exc_info()
      traceback.print_exception(etype,value,tb)
      raise RuntimeError("Could not load LIGGGHTS dynamic library")

    # if no ptr provided, create an instance of LIGGGHTS
    #   don't know how to pass an MPI communicator from PyPar
    #   but we can pass an MPI communicator from mpi4py v2.0.0 and later
    #   no_mpi call lets LIGGGHTS use MPI_COMM_WORLD
    #   cargs = array of C strings from args
    # if ptr, then are embedding Python in LIGGGHTS input script
    #   ptr is the desired instance of LIGGGHTS
    #   just convert it to ctypes ptr and store in self.lmp

    if MPI._sizeof(MPI.Comm) == ctypes.sizeof(ctypes.c_int):
      MPI_Comm = ctypes.c_int
    else:
      MPI_Comm = ctypes.c_void_p

    if not ptr:
      # with mpi4py v2, can pass MPI communicator to LIGGGHTS
      # need to adjust for type of MPI communicator object
      # allow for int (like MPICH) or void* (like OpenMPI)

        narg = 0
        cargs = 0

        if cmdargs:
          cmdargs.insert(0, "liggghts.py")
          narg = len(cmdargs)
          for i in range(narg):
            if isinstance(cmdargs[i], str):
              cmdargs[i] = cmdargs[i].encode()

          cargs = (ctypes.c_char_p*narg)(*cmdargs)
          self.lib.lammps_open.argtypes = [ctypes.c_int, ctypes.c_char_p*narg, \
                                           MPI_Comm, ctypes.c_void_p]
        else:
          self.lib.lammps_open.argtypes = [ctypes.c_int, ctypes.c_int, \
                                           MPI_Comm, ctypes.c_void_p]

        self.lib.lammps_open.restype = None
        self.opened = 1
        self.lmp = ctypes.c_void_p()
        comm_ptr = MPI._addressof(comm)
        comm_val = MPI_Comm.from_address(comm_ptr)
        self.lib.lammps_open(narg,cargs,comm_val,ctypes.byref(self.lmp))

        self.opened = True
    else:
      self.opened = False

      if sys.version_info >= (3, 0):
        # Python 3 (uses PyCapsule API)
        ctypes.pythonapi.PyCapsule_GetPointer.restype = ctypes.c_void_p
        ctypes.pythonapi.PyCapsule_GetPointer.argtypes = [ctypes.py_object, ctypes.c_char_p]
        self.lmp = ctypes.c_void_p(ctypes.pythonapi.PyCapsule_GetPointer(ptr, None))
      else:
        # Python 2 (uses PyCObject API)
        ctypes.pythonapi.PyCObject_AsVoidPtr.restype = ctypes.c_void_p
        ctypes.pythonapi.PyCObject_AsVoidPtr.argtypes = [ctypes.py_object]
        self.lmp = ctypes.c_void_p(ctypes.pythonapi.PyCObject_AsVoidPtr(ptr))
Code example #34
 def testSizeOf(self):
     for obj in self.objects:
         n1 = MPI._sizeof(obj)
         n2 = MPI._sizeof(type(obj))
         self.assertEqual(n1, n2)
Code example #35
File: __init__.py Project: hoangducthuong/libconviqt
# Imports implied by this truncated excerpt (the original file defines them earlier):
import ctypes as ct
import ctypes.util as ctu

from mpi4py import MPI

_conviqt = None
try:
    _conviqt = ct.CDLL("_conviqt.so")
except OSError:
    path = ctu.find_library("conviqt")
    if path is not None:
        _conviqt = ct.CDLL(path)

available = _conviqt is not None

if available:

    try:
        if MPI._sizeof(MPI.Comm) == ct.sizeof(ct.c_int):
            MPI_Comm = ct.c_int
        else:
            MPI_Comm = ct.c_void_p
    except Exception as e:
        raise Exception(
            'Failed to set the portable MPI communicator datatype: "{}". '
            "MPI4py is probably too old. ".format(e))

    def encode_comm(comm):
        comm_ptr = MPI._addressof(comm)
        return MPI_Comm.from_address(comm_ptr)

    # Beam functions

    _conviqt.conviqt_beam_new.restype = ct.c_void_p
Code example #36
File: heffte.py Project: af-ayala/heffte
# Imports implied by this truncated excerpt (the original module defines them,
# along with libheffte_path, earlier in the file):
from ctypes import Structure, POINTER, c_int, c_void_p, sizeof, cdll
from numpy.ctypeslib import ndpointer
from mpi4py import MPI as mpi


class heffte_plan(Structure):
    _fields_ = [("backend_type", c_int), ("using_r2c", c_int),
                ("fft", c_void_p)]


LP_plan = POINTER(heffte_plan)


class plan_options(Structure):
    _fields_ = [("use_reorder", c_int), ("use_alltoall", c_int),
                ("use_pencils", c_int)]


# double-check this!
MPI_Comm = c_int if mpi._sizeof(mpi.COMM_WORLD) == sizeof(c_int) else c_void_p

libheffte = cdll.LoadLibrary(libheffte_path)

# create and destroy
libheffte.heffte_plan_create.argtypes = [c_int, ndpointer(c_int, flags="C_CONTIGUOUS"), ndpointer(c_int, flags="C_CONTIGUOUS"), \
                                         ndpointer(c_int, flags="C_CONTIGUOUS"), ndpointer(c_int, flags="C_CONTIGUOUS"), \
                                         ndpointer(c_int, flags="C_CONTIGUOUS"), ndpointer(c_int, flags="C_CONTIGUOUS"), \
                                         MPI_Comm, POINTER(plan_options), POINTER(LP_plan)]
libheffte.heffte_plan_create.restype = c_int
libheffte.heffte_plan_create_r2c.argtypes = [c_int, ndpointer(c_int, flags="C_CONTIGUOUS"), ndpointer(c_int, flags="C_CONTIGUOUS"), \
                                             ndpointer(c_int, flags="C_CONTIGUOUS"), ndpointer(c_int, flags="C_CONTIGUOUS"), \
                                             ndpointer(c_int, flags="C_CONTIGUOUS"), ndpointer(c_int, flags="C_CONTIGUOUS"), \
                                             c_int, MPI_Comm, POINTER(plan_options), POINTER(LP_plan)]
libheffte.heffte_plan_create_r2c.restype = c_int