Example 1
 def testHandleValue(self):
     typemap = {ctypes.sizeof(ctypes.c_uint32): ctypes.c_uint32,
                ctypes.sizeof(ctypes.c_uint64): ctypes.c_uint64}
     for obj in self.objects:
         uintptr_t = typemap[MPI._sizeof(obj)]
         handle = uintptr_t.from_address(MPI._addressof(obj))
         self.assertEqual(handle.value, MPI._handleof(obj))
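For reference, a self-contained version of the same check (a sketch, assuming mpi4py and a running MPI environment): the integer stored at MPI._addressof(obj) is the raw MPI handle and should equal MPI._handleof(obj).

import ctypes
from mpi4py import MPI

# Width of the C handle type (e.g. MPI_Comm) mapped to a ctypes unsigned integer.
typemap = {ctypes.sizeof(ctypes.c_uint32): ctypes.c_uint32,
           ctypes.sizeof(ctypes.c_uint64): ctypes.c_uint64}

comm = MPI.COMM_WORLD
uint_t = typemap[MPI._sizeof(comm)]
handle = uint_t.from_address(MPI._addressof(comm))
assert handle.value == MPI._handleof(comm)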
Example 2
 def testAHandleOf(self):
     for obj in self.objects:
         if isinstance(obj, MPI.Status):
             hdl = lambda: MPI._handleof(obj)
             self.assertRaises(NotImplementedError, hdl)
             continue
         hdl = MPI._handleof(obj)
Example 3
def to_mpi_ptr(mpi_obj):
    """
    to_mpi_ptr(mpi_obj)

    Returns the ptr to the underlying C mpi object
    """
    return _np.uint64(_MPI._handleof(mpi_obj))
Example 4
 def testHandleValue(self):
     ffi = cffi.FFI()
     typemap = {ffi.sizeof('uint32_t'): 'uint32_t',
                ffi.sizeof('uint64_t'): 'uint64_t',}
     for obj in self.objects:
         uintptr_t = typemap[MPI._sizeof(obj)]
         handle = ffi.cast(uintptr_t+'*', MPI._addressof(obj))[0]
         self.assertEqual(handle, MPI._handleof(obj))
Example 5
 def setup_md(self, icomm_grid, xyzL, xyz_orig):
     """
     setup_md(self, dt, icomm_grid, xyzL, xyz_orig)
     Keyword arguments:
     real -- the real part (default 0.0)
     imag -- the imaginary part (default 0.0)
     """
     self.py_setup_md(MPI._handleof(icomm_grid), xyzL, xyz_orig)
Example 6
def to_mpi_handle(mpi_obj):
    """
    Returns the handle of the underlying C mpi object.

    Only defined for some MPI types (such as MPI_Comm), throws NotImplementedError
    otherwise.

    Note: This is not a pointer, but the actual C integer representation of the object.
    """
    return _np.uintp(_MPI._handleof(mpi_obj))
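As a rough illustration of the distinction the docstring draws between a handle and a pointer (a sketch, assuming mpi4py is available as MPI):

from mpi4py import MPI

comm = MPI.COMM_WORLD
MPI._handleof(comm)   # the C MPI_Comm value itself, as a Python int
MPI._addressof(comm)  # the address where that MPI_Comm is stored inside the Python object
MPI._sizeof(comm)     # sizeof(MPI_Comm) for the MPI implementation in use

# Types without an integer handle, such as MPI.Status, raise NotImplementedError:
try:
    MPI._handleof(MPI.Status())
except NotImplementedError:
    pass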
Example 7
def to_mpi_ptr(mpi_obj):
    """
    to_mpi_ptr(mpi_obj)

    Returns the ptr to the underlying C mpi object
    """
    try:
        addr = _MPI._handleof(mpi_obj)
    except NotImplementedError:
        # some objects like Status only work with addressof
        addr = _MPI._addressof(mpi_obj)

    return _np.uint64(addr)
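A quick check of the fallback branch (a sketch reusing the aliases _MPI and _np from the snippet above): MPI.Status has no integer handle, so _handleof raises NotImplementedError and the object's address is returned instead.

status = _MPI.Status()
assert to_mpi_ptr(status) == _np.uint64(_MPI._addressof(status))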
Example 8
    def setup_md(self, icomm_grid, xyzL, xyz_orig):
        """
        setup_md(icomm_grid, xyzL, xyz_orig)

        """
        if ((isinstance(icomm_grid, list) and len(icomm_grid) == 3) or
                (isinstance(icomm_grid, np.ndarray) and icomm_grid.shape[0] == 3)):
            icomm_grid = self.COMM.Create_cart(
                [icomm_grid[0], icomm_grid[1], icomm_grid[2]])

        if ((type(xyzL) is list) or (xyzL.dtype != np.float64)
                or (not xyzL.flags["F_CONTIGUOUS"])):
            xyzL = np.array(xyzL, order='F', dtype=np.float64)

        if ((type(xyz_orig) is list) or (xyz_orig.dtype != np.float64)
                or (not xyz_orig.flags["F_CONTIGUOUS"])):
            xyz_orig = np.array(xyz_orig, order='F', dtype=np.float64)

        self.py_setup_md(MPI._handleof(icomm_grid), xyzL, xyz_orig)
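A hypothetical call matching the argument handling above (the md instance and the numbers are illustrative only): a 3-element list is turned into a Cartesian communicator, and the extents and origin are converted to Fortran-ordered float64 arrays.

import numpy as np

# md is assumed to be an instance of the class that defines setup_md above.
md.setup_md([2, 2, 1],                      # becomes a Cartesian communicator
            np.array([10.0, 10.0, 10.0]),   # domain extents, converted to F-ordered float64
            [0.0, 0.0, 0.0])                # origin, likewise converted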
Example 9
    def __init__(
        self,
        global_lattice=None,
        block_lattice=None,
        procs=None,
        comm=None,
        boundary_conditions=-1,
        number_of_levels=1,
        number_openmp_threads=1,
        **kwargs,
    ):
        """
        Initialize a new DDalphaAMG solver class.
        
        Parameters
        ----------
        global_lattice: int[4]
            Size of the lattice. The directions order is T, Z, Y, X.
        block_lattice: int[4]
            Size of the first level blocking. The directions order is T, Z, Y, X.
        procs: int[4]
            Number of processes per direction. The directions order is T, Z, Y, X.
        comm: MPI.Comm
            It can be (a) MPI_COMM_WORLD, (b) a split of MPI_COMM_WORLD, or
            (c) a Cartesian communicator with 4 dims and the number of processes
            in each direction equal to procs, with proper boundary conditions.
        boundary_conditions: int or int[4]
            It can be +1 (periodic), -1 (anti-periodic) or four floats (twisted),
            i.e. a phase proportional to M_PI * [T, Z, Y, X] multiplies
            the links in the respective directions.
        number_of_levels: int
            Number of levels for the multigrid, from 1 (no MG) to 4 (max number of levels)
        number_openmp_threads: int
            Number of openmp threads, from 1 to omp_get_num_threads()
        """
        self._init_params = lib.DDalphaAMG_init()
        self._run_params = lib.DDalphaAMG_parameters()
        self._status = lib.DDalphaAMG_status()
        self._setup = 0
        self.updated = True

        global_lattice, block_lattice, procs, comm = get_lattice_partitioning(
            global_lattice, block_lattice, procs, comm)

        self._init_params.comm_cart = ll.cast["MPI_Comm"](MPI._handleof(comm))
        self._init_params.Cart_rank = nullptr
        self._init_params.Cart_coords = nullptr

        if boundary_conditions == 1:
            self._init_params.bc = 0
        elif boundary_conditions == -1:
            self._init_params.bc = 1
        else:
            assert (hasattr(boundary_conditions, "__len__")
                    and len(boundary_conditions) == 4), """
            boundary_conditions can be +1 (periodic), -1 (anti-periodic) or four floats
            (twisted), i.e. a phase proportional to M_PI * [T, Z, Y, X] multiplies links
            in the respective directions.
            """
            self._init_params.bc = 2

        for i in range(4):
            self._init_params.global_lattice[i] = global_lattice[i]
            self._init_params.procs[i] = procs[i]

            self._init_params.block_lattice[i] = block_lattice[i]

            if self._init_params.bc == 2:
                self._init_params.theta[i] = boundary_conditions[i]
            else:
                self._init_params.theta[i] = 0

        self._init_params.number_of_levels = number_of_levels
        self._init_params.number_openmp_threads = number_openmp_threads

        self._init_params.kappa = kwargs.pop("kappa", 0)
        self._init_params.mu = kwargs.pop("mu", 0)
        self._init_params.csw = kwargs.pop("csw", 0)

        # self._init_params.init_file = nullptr
        # self._init_params.rnd_seeds = nullptr

        if Solver.initialized:
            self.__del__()
            logging.warning("""
                The solver library was already initialized on this node.
                The previously initialized Solver class cannot be used anymore!
                NOTE: The DDalphaAMG library supports only one Solver at a time.
                """)
        lib.DDalphaAMG_initialize(self._init_params, self._run_params,
                                  self._status)
        Solver.initialized = True

        kwargs.setdefault("print", 1)
        if kwargs:
            self.update_params(**kwargs)
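An illustrative construction based on the parameters documented in the docstring (lattice sizes, process grid and physics parameters are placeholders, not values from the original project):

from mpi4py import MPI

solver = Solver(
    global_lattice=[8, 8, 8, 8],   # T, Z, Y, X
    block_lattice=[4, 4, 4, 4],
    procs=[1, 1, 1, 1],
    comm=MPI.COMM_WORLD,
    boundary_conditions=-1,        # anti-periodic
    number_of_levels=2,
    kappa=0.137,                   # consumed by kwargs.pop("kappa", 0) above
)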
Example 10
comm = pympi.COMM_WORLD

# Activate the desired julia environment
jl.using('Pkg')
from julia import Pkg
Pkg.activate(".")

jl.using('MPI')
jl.using('Random')  # for seed! function
jl.using('Statistics')  # for mean function

from julia import Main
from julia import MPI as jlmpi

# Convert pympi comm to jlmpi comm
Main.handle = pympi._handleof(comm)  # make handle accessible to julia
jl.eval('comm = MPI.Comm(MPI.MPI_Comm(handle))')  # create julia comm

# WARNING: You might think that we could use a statement like
#     Main.comm = jlmpi.Comm(jlmpi.MPI_Comm(pympi._handleof(comm)))
# to turn the python MPI comm into a julia MPI comm instead of the above `eval`.
# However, this will fail when using MPICH (it works with OpenMPI). The reason
# is that MPICH uses integers to differentiate MPI comms (OpenMPI uses raw
# pointers). So for MPICH, `jlmpi.MPI_Comm(pympi._handleof(comm))` returns a
# `Cint` (which is a specialized julia Int32 for interfacing with C/Fortran
# libraries). When it comes back to python, it is converted to a python `int`
# which is then converted to a Julia Int64 when given to `jlmpi.Comm` as an
# argument. The result is a type error. We can avoid this MPICH incompatibility
# by using the above `eval` statement.

# Initialize Julia MPI without initializing the libmpi--this is part of the
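If you want to check which handle representation the local MPI library uses, the handle size reported by mpi4py is one indicator (a sketch, reusing pympi from the snippet above): MPICH-family builds report an int-sized MPI_Comm, while OpenMPI reports a pointer-sized one.

import ctypes

size = pympi._sizeof(pympi.COMM_WORLD)   # sizeof(MPI_Comm) in the underlying C library
if size == ctypes.sizeof(ctypes.c_void_p):
    print("pointer-sized MPI_Comm (OpenMPI-style)")
else:
    print("int-sized MPI_Comm (MPICH-style)")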
Example 11
def sum_inplace_jax_primitive(x, comm):
    comm_ptr = _np.uint64(MPI._handleof(comm))
    return sum_inplace_p.bind(x, comm=comm_ptr)
Example 12
 def testAHandleOf(self):
     for obj in self.objects:
         if isinstance(obj, MPI.Status): continue
         hdl = MPI._handleof(obj)