Example 1
 def setUp(self):
     nbytes = 100 * MPI.DOUBLE.size
     try:
         self.mpi_memory = MPI.Alloc_mem(nbytes)
         self.memory = self.mpi_memory
         memzero(self.memory)
     except MPI.Exception:
         import array
         self.mpi_memory = None
         self.memory = array.array('B', [0] * nbytes)
     self.WIN = MPI.Win.Create(self.memory, 1, self.INFO, self.COMM)
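The setUp above only allocates the window and its backing memory; a minimal sketch of the matching teardown, assuming MPI.COMM_WORLD and the same allocate-with-fallback pattern (the isinstance check and variable names are illustrative, not part of the original test), could look like this:

from mpi4py import MPI

COMM = MPI.COMM_WORLD
nbytes = 100 * MPI.DOUBLE.size
try:
    memory = MPI.Alloc_mem(nbytes)              # MPI-provided memory (MPI.memory object)
except (MPI.Exception, NotImplementedError):
    memory = bytearray(nbytes)                  # fallback: any writable buffer works
win = MPI.Win.Create(memory, 1, MPI.INFO_NULL, COMM)
# ... one-sided communication on win ...
win.Free()                                      # free the window before its memory
if isinstance(memory, MPI.memory):
    MPI.Free_mem(memory)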
Example 2
 def testAttachDetach(self):
     mem1 = MPI.Alloc_mem(8)
     mem2 = MPI.Alloc_mem(16)
     mem3 = MPI.Alloc_mem(32)
     for mem in (mem1, mem2, mem3):
         self.WIN.Attach(mem)
         self.testMemory()
         self.WIN.Detach(mem)
     for mem in (mem1, mem2, mem3):
         self.WIN.Attach(mem)
     self.testMemory()
     for mem in (mem1, mem2, mem3):
         self.WIN.Detach(mem)
     for mem in (mem1, mem2, mem3):
         self.WIN.Attach(mem)
     self.testMemory()
     for mem in (mem3, mem2, mem1):
         self.WIN.Detach(mem)
     MPI.Free_mem(mem1)
     MPI.Free_mem(mem2)
     MPI.Free_mem(mem3)
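The Attach/Detach calls above presuppose a window created without initial memory; a minimal sketch of that setup, with illustrative sizes and names, might be:

from mpi4py import MPI

comm = MPI.COMM_WORLD
win = MPI.Win.Create_dynamic(MPI.INFO_NULL, comm)   # dynamic window, no memory attached yet
mem = MPI.Alloc_mem(64)
win.Attach(mem)        # expose this region for one-sided access
# remote ranks address it via MPI.Get_address(mem), communicated out of band
win.Detach(mem)        # detach before freeing the memory
MPI.Free_mem(mem)
win.Free()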
Example 3
    def testMessageBad(self):
        sbuf = MPI.Alloc_mem(4)
        rbuf = MPI.Alloc_mem(4)

        def f():
            Alltoallw([sbuf], [rbuf])

        self.assertRaises(ValueError, f)

        def f():
            Alltoallw([sbuf, [0], [0], [MPI.BYTE], None],
                      [rbuf, [0], [0], [MPI.BYTE]])

        self.assertRaises(ValueError, f)

        def f():
            Alltoallw([sbuf, [0], [0], [MPI.BYTE]],
                      [rbuf, [0], [0], [MPI.BYTE], None])

        self.assertRaises(ValueError, f)
        MPI.Free_mem(sbuf)
        MPI.Free_mem(rbuf)
Example 4
 def testCompareAndSwap(self):
     group = self.WIN.Get_group()
     size = group.Get_size()
     rank = group.Get_rank()
     group.Free()
     self.WIN.Fence()
     obuf = MPI.Alloc_mem(1)
     memzero(obuf)
     cbuf = MPI.Alloc_mem(1)
     memzero(cbuf)
     rbuf = MPI.Alloc_mem(1)
     memzero(rbuf)
     try:
         try:
             self.WIN.Compare_and_swap([obuf, 1, MPI.BYTE],
                                       [cbuf, 1, MPI.BYTE],
                                       [rbuf, 1, MPI.BYTE], rank, 0)
         finally:
             MPI.Free_mem(obuf)
             MPI.Free_mem(cbuf)
             MPI.Free_mem(rbuf)
     except NotImplementedError:
         self.skipTest('mpi-win-compare_and_swap')
     self.WIN.Fence()
     for array, typecode in arrayimpl.subTest(self):
         if unittest.is_mpi_gpu('openmpi', array): continue
         if unittest.is_mpi_gpu('mvapich2', array): continue
         if typecode in 'fdg': continue
         if typecode in 'FDG': continue
         obuf = array(+1, typecode)
         cbuf = array(0, typecode)
         rbuf = array(-1, typecode, 2)
         for rank in range(size):
             for disp in range(3):
                 self.WIN.Lock(rank)
                 self.WIN.Compare_and_swap(obuf.as_mpi(), cbuf.as_mpi(),
                                           rbuf.as_mpi_c(1), rank, disp)
                 self.WIN.Unlock(rank)
                 self.assertEqual(rbuf[1], -1)
Example 5
 def testFetchAndOp(self):
     group = self.WIN.Get_group()
     size = group.Get_size()
     rank = group.Get_rank()
     group.Free()
     self.WIN.Fence()
     blen = MPI.INT.Get_size()
     obuf = MPI.Alloc_mem(blen)
     memzero(obuf)
     rbuf = MPI.Alloc_mem(blen)
     memzero(rbuf)
     try:
         try:
             self.WIN.Fetch_and_op([obuf, 1, MPI.INT], [rbuf, 1, MPI.INT],
                                   rank)
         finally:
             MPI.Free_mem(obuf)
             MPI.Free_mem(rbuf)
     except NotImplementedError:
         self.skipTest('mpi-win-fetch_and_op')
     self.WIN.Fence()
     for array, typecode in arrayimpl.subTest(self):
         if unittest.is_mpi_gpu('openmpi', array): continue
         if unittest.is_mpi_gpu('mvapich2', array): continue
         if typecode in 'FDG': continue
         obuf = array(+1, typecode)
         rbuf = array(-1, typecode, 2)
         for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN, MPI.REPLACE,
                    MPI.NO_OP):
             for rank in range(size):
                 for disp in range(3):
                     self.WIN.Lock(rank)
                     self.WIN.Fetch_and_op(obuf.as_mpi(),
                                           rbuf.as_mpi_c(1),
                                           rank,
                                           disp,
                                           op=op)
                     self.WIN.Unlock(rank)
                     self.assertEqual(rbuf[1], -1)
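For context, Fetch_and_op is often used as an atomic fetch-and-add on a shared counter; the following is a small self-contained sketch assuming NumPy is available (window size and names are illustrative):

from mpi4py import MPI
import numpy as np

comm = MPI.COMM_WORLD
win = MPI.Win.Allocate(MPI.INT.size, disp_unit=MPI.INT.size, comm=comm)
counter = np.frombuffer(win.tomemory(), dtype='i')
counter[0] = 0                                # each rank zeroes its own window memory
comm.Barrier()

one = np.ones(1, dtype='i')
old = np.zeros(1, dtype='i')
win.Lock(0)
win.Fetch_and_op(one, old, 0, 0, MPI.SUM)     # atomically add 1 to rank 0's counter
win.Unlock(0)
# 'old' now holds the counter value this rank observed before its increment
comm.Barrier()
win.Free()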
Example 6
 def testMessageBad(self):
     buf = MPI.Alloc_mem(4)
     empty = [None, 0, [0], "B"]
     def f(): Alltoallv([buf, 0, [0], "i", None], empty)
     self.assertRaises(ValueError, f)
     def f(): Alltoallv([buf, 0, [0], "\0"], empty)
     self.assertRaises(KeyError, f)
     MPI.Free_mem(buf)
     buf = [1,2,3,4]
     def f(): Alltoallv([buf, 0,  0, "i"], empty)
     self.assertRaises(TypeError, f)
     buf = {1:2,3:4}
     def f(): Alltoallv([buf, 0,  0, "i"], empty)
     self.assertRaises(TypeError, f)
Example 7
 def testCompareAndSwap(self):
     group = self.WIN.Get_group()
     size = group.Get_size()
     rank = group.Get_rank()
     group.Free()
     self.WIN.Fence()
     obuf = MPI.Alloc_mem(1)
     memzero(obuf)
     cbuf = MPI.Alloc_mem(1)
     memzero(cbuf)
     rbuf = MPI.Alloc_mem(1)
     memzero(rbuf)
     try:
         try:
             self.WIN.Compare_and_swap([obuf, 1, MPI.BYTE],
                                       [cbuf, 1, MPI.BYTE],
                                       [rbuf, 1, MPI.BYTE], rank, 0)
         finally:
             MPI.Free_mem(obuf)
             MPI.Free_mem(cbuf)
             MPI.Free_mem(rbuf)
     except NotImplementedError:
         return
     self.WIN.Fence()
     for array in arrayimpl.ArrayTypes:
         for typecode in arrayimpl.TypeMap:
             if typecode in 'fdg': continue
             obuf = array(+1, typecode)
             cbuf = array(0, typecode)
             rbuf = array(-1, typecode, 2)
             for rank in range(size):
                 for disp in range(3):
                     self.WIN.Lock(rank)
                     self.WIN.Compare_and_swap(obuf.as_mpi(), cbuf.as_mpi(),
                                               rbuf.as_mpi_c(1), rank, disp)
                     self.WIN.Unlock(rank)
                     self.assertEqual(rbuf[1], -1)
Example 8
 def setUp(self):
     nbytes = 100*MPI.DOUBLE.size
     try:
         self.mpi_memory = MPI.Alloc_mem(nbytes)
         self.memory = self.mpi_memory
         try:
             zero = bytearray([0])
         except NameError:
             zero = str('\0')
         self.memory[:] = zero * len(self.memory)
     except MPI.Exception:
         from array import array
         self.mpi_memory = None
         self.memory = array('B',[0]*nbytes)
     self.WIN = MPI.Win.Create(self.memory, 1, self.INFO, self.COMM)
Example 9
 def testFetchAndOp(self):
     group = self.WIN.Get_group()
     size = group.Get_size()
     rank = group.Get_rank()
     group.Free()
     self.WIN.Fence()
     obuf = MPI.Alloc_mem(1)
     memzero(obuf)
     rbuf = MPI.Alloc_mem(1)
     memzero(rbuf)
     try:
         try:
             self.WIN.Fetch_and_op([obuf, 1, MPI.BYTE], [rbuf, 1, MPI.BYTE],
                                   rank)
         finally:
             MPI.Free_mem(obuf)
             MPI.Free_mem(rbuf)
     except NotImplementedError:
         return
     self.WIN.Fence()
     for array in arrayimpl.ArrayTypes:
         for typecode in arrayimpl.TypeMap:
             obuf = array(+1, typecode)
             rbuf = array(-1, typecode, 2)
             for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN, MPI.REPLACE,
                        MPI.NO_OP):
                 for rank in range(size):
                     for disp in range(3):
                         self.WIN.Lock(rank)
                         self.WIN.Fetch_and_op(obuf.as_mpi(),
                                               rbuf.as_mpi_c(1),
                                               rank,
                                               disp,
                                               op=op)
                         self.WIN.Unlock(rank)
                         self.assertEqual(rbuf[1], -1)
Example 10
    def testMessageBad(self):
        buf = MPI.Alloc_mem(5)
        empty = [None, 0, [0], "B"]

        def f():
            Alltoallv([buf, 0, [0], "i", None], empty)

        self.assertRaises(ValueError, f)

        def f():
            Alltoallv([buf, 0, [0], "\0"], empty)

        self.assertRaises(KeyError, f)

        def f():
            Alltoallv([buf, None, [0], MPI.DATATYPE_NULL], empty)

        self.assertRaises(ValueError, f)

        def f():
            Alltoallv([buf, None, [0], "i"], empty)

        self.assertRaises(ValueError, f)
        try:
            t = MPI.INT.Create_resized(0, -4).Commit()

            def f():
                Alltoallv([buf, None, [0], t], empty)

            self.assertRaises(ValueError, f)
            t.Free()
        except NotImplementedError:
            pass
        MPI.Free_mem(buf)
        buf = [1, 2, 3, 4]

        def f():
            Alltoallv([buf, 0, 0, "i"], empty)

        self.assertRaises(TypeError, f)
        buf = {1: 2, 3: 4}

        def f():
            Alltoallv([buf, 0, 0, "i"], empty)

        self.assertRaises(TypeError, f)
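For comparison with the malformed specifications rejected above, a well-formed Alltoallv message is a four-part list of buffer, counts, displacements and datatype; a minimal sketch assuming NumPy buffers:

from mpi4py import MPI
import numpy as np

comm = MPI.COMM_WORLD
size = comm.Get_size()

sbuf = np.arange(size, dtype='i')       # one int destined for each rank
rbuf = np.empty(size, dtype='i')
counts = [1] * size
displs = list(range(size))
comm.Alltoallv([sbuf, counts, displs, MPI.INT],
               [rbuf, counts, displs, MPI.INT])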
Example 11
 def __init__(self, comm, init=0):
     #
     size = comm.Get_size()
     rank = comm.Get_rank()
     mask = 1
     while mask < size:
         mask <<= 1
     mask >>= 1
     idx = 0
     get_idx = []
     acc_idx = []
     while mask >= 1:
         left = idx + 1
         right = idx + (mask << 1)
         if rank < mask:
             acc_idx.append(left)
             get_idx.append(right)
             idx = left
         else:
             acc_idx.append(right)
             get_idx.append(left)
             idx = right
         rank = rank % mask
         mask >>= 1
     #
     typecode = 'i'
     datatype = MPI.INT
     itemsize = datatype.Get_size()
     #
     root = 0
     rank = comm.Get_rank()
     if rank == root:
         nlevels = len(get_idx) + 1
         nentries = (1 << nlevels) - 1
         self.mem = MPI.Alloc_mem(nentries * itemsize, MPI.INFO_NULL)
         self.mem[:] = _struct.pack(typecode, init) * nentries
     else:
         self.mem = None
     #
     self.win = MPI.Win.Create(self.mem, itemsize, MPI.INFO_NULL, comm)
     self.acc_type = datatype.Create_indexed_block(1, acc_idx).Commit()
     self.get_type = datatype.Create_indexed_block(1, get_idx).Commit()
     self.acc_buf = _array_new(len(acc_idx), typecode)
     self.get_buf = _array_new(len(get_idx), typecode)
     self.myval = 0
Example 12
 def __init__(self, comm):
     #
     size = comm.Get_size()
     rank = comm.Get_rank()
     #
     itemsize = MPI.INT.Get_size()
     if rank == 0:
         mem = MPI.Alloc_mem(itemsize * size, MPI.INFO_NULL)
         mem[:] = _struct.pack('i', 0) * size
     else:
         mem = MPI.BOTTOM
     self.win = MPI.Win.Create(mem, itemsize, MPI.INFO_NULL, comm)
     #
     blens = [rank, size - rank - 1]
     disps = [0, rank + 1]
     self.dt_get = MPI.INT.Create_indexed(blens, disps).Commit()
     #
     self.myval = 0
Example 13
 def setUp(self):
     try:
         self.mpi_memory = MPI.Alloc_mem(10)
         self.memory = self.mpi_memory
         memzero(self.memory)
     except MPI.Exception:
         from array import array
         self.mpi_memory = None
         self.memory = array('B', [0] * 10)
     refcnt = getrefcount(self.memory)
     self.WIN = MPI.Win.Create(self.memory, 1, self.INFO, self.COMM)
     if type(self.memory).__name__ == 'buffer':
         self.assertEqual(getrefcount(self.memory), refcnt + 1)
     else:
         if sys.version_info[:3] < (3, 3):
             self.assertEqual(getrefcount(self.memory), refcnt)
         else:
             self.assertEqual(getrefcount(self.memory), refcnt + 1)
Example 14
 def testProcNull(self):
     comm = self.COMM
     #
     comm.Sendrecv(None, MPI.PROC_NULL, 0, None, MPI.PROC_NULL, 0)
     comm.Sendrecv_replace(None, MPI.PROC_NULL, 0, MPI.PROC_NULL, 0)
     #
     comm.Send(None, MPI.PROC_NULL)
     comm.Isend(None, MPI.PROC_NULL).Wait()
     req = comm.Send_init(None, MPI.PROC_NULL)
     req.Start()
     req.Wait()
     req.Free()
     #
     comm.Ssend(None, MPI.PROC_NULL)
     comm.Issend(None, MPI.PROC_NULL).Wait()
     req = comm.Ssend_init(None, MPI.PROC_NULL)
     req.Start()
     req.Wait()
     req.Free()
     #
     buf = MPI.Alloc_mem(MPI.BSEND_OVERHEAD)
     MPI.Attach_buffer(buf)
     comm.Bsend(None, MPI.PROC_NULL)
     comm.Ibsend(None, MPI.PROC_NULL).Wait()
     req = comm.Bsend_init(None, MPI.PROC_NULL)
     req.Start()
     req.Wait()
     req.Free()
     MPI.Detach_buffer()
     MPI.Free_mem(buf)
     #
     comm.Rsend(None, MPI.PROC_NULL)
     comm.Irsend(None, MPI.PROC_NULL).Wait()
     req = comm.Rsend_init(None, MPI.PROC_NULL)
     req.Start()
     req.Wait()
     req.Free()
     #
     comm.Recv(None, MPI.PROC_NULL)
     comm.Irecv(None, MPI.PROC_NULL).Wait()
     req = comm.Recv_init(None, MPI.PROC_NULL)
     req.Start()
     req.Wait()
     req.Free()
Example 15
 def testIRecvAndBSend(self):
     comm = self.COMM
     rank = comm.Get_rank()
     buf = MPI.Alloc_mem((1<<16)+MPI.BSEND_OVERHEAD)
     MPI.Attach_buffer(buf)
     for smess in messages:
         src = dst = rank
         req1 = comm.irecv(None, src, 1)
         req2 = comm.irecv(None, src, 2)
         req3 = comm.irecv(None, src, 3)
         comm.bsend(smess, dst, 3)
         comm.bsend(smess, dst, 2)
         comm.bsend(smess, dst, 1)
         self.assertEqual(smess, req3.wait())
         self.assertEqual(smess, req2.wait())
         self.assertEqual(smess, req1.wait())
         comm.bsend(smess, MPI.PROC_NULL, 3)
     MPI.Detach_buffer()
     MPI.Free_mem(buf)
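The attached buffer above is sized generously; in general the attach/bsend/detach pattern needs room for every pending message plus MPI.BSEND_OVERHEAD per message. A minimal sketch with a hypothetical payload and an extra safety margin:

from mpi4py import MPI
import pickle

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

payload = {"data": list(range(100))}
nbytes = len(pickle.dumps(payload)) + MPI.BSEND_OVERHEAD + 512   # margin for pickling differences
buf = MPI.Alloc_mem(nbytes)
MPI.Attach_buffer(buf)
try:
    comm.bsend(payload, dest=rank, tag=0)    # completes locally, copied into the attached buffer
    obj = comm.recv(source=rank, tag=0)
    assert obj == payload
finally:
    MPI.Detach_buffer()                      # waits until buffered messages are delivered
    MPI.Free_mem(buf)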
Example 16
 def setUp(self):
     try:
         self.mpi_memory = MPI.Alloc_mem(10)
         self.memory = self.mpi_memory
         try:
             zero = bytearray([0])
         except NameError:
             zero = str('\0')
         self.memory[:] = zero * len(self.memory)
     except MPI.Exception:
         from array import array
         self.mpi_memory = None
         self.memory = array('B', [0] * 10)
     refcnt = sys.getrefcount(self.memory)
     self.WIN = MPI.Win.Create(self.memory, 1, self.INFO, self.COMM)
     if type(self.memory).__name__ == 'buffer':
         self.assertEqual(sys.getrefcount(self.memory), refcnt + 1)
     else:
         self.assertEqual(sys.getrefcount(self.memory), refcnt)
Example 17
 def testMessageBad(self):
     buf = MPI.Alloc_mem(5)
     empty = [None, 0, "B"]
     def f(): Sendrecv([buf, 0, 0, "i", None], empty)
     self.assertRaises(ValueError, f)
     def f(): Sendrecv([buf,  0, "\0"], empty)
     self.assertRaises(KeyError, f)
     def f(): Sendrecv([buf, -1, "i"], empty)
     self.assertRaises(ValueError, f)
     def f(): Sendrecv([buf, 0, -1, "i"], empty)
     self.assertRaises(ValueError, f)
     def f(): Sendrecv([buf, 0, +2, "i"], empty)
     self.assertRaises(ValueError, f)
     def f(): Sendrecv([None, 1,  0, "i"], empty)
     self.assertRaises(ValueError, f)
     def f(): Sendrecv([buf, None,  0, "i"], empty)
     self.assertRaises(ValueError, f)
     def f(): Sendrecv([buf, 0, 1, MPI.DATATYPE_NULL], empty)
     self.assertRaises(ValueError, f)
     def f(): Sendrecv([buf, None, 0, MPI.DATATYPE_NULL], empty)
     self.assertRaises(ValueError, f)
     try:
         t = MPI.INT.Create_resized(0, -4).Commit()
         def f(): Sendrecv([buf, None, t], empty)
         self.assertRaises(ValueError, f)
         def f(): Sendrecv([buf, 0, 1, t], empty)
         self.assertRaises(ValueError, f)
         t.Free()
     except NotImplementedError:
         pass
     MPI.Free_mem(buf)
     buf = [1,2,3,4]
     def f(): Sendrecv([buf, 4,  0, "i"], empty)
     self.assertRaises(TypeError, f)
     buf = {1:2,3:4}
     def f(): Sendrecv([buf, 4,  0, "i"], empty)
     self.assertRaises(TypeError, f)
     def f(): Sendrecv(b"abc", b"abc")
     self.assertRaises((BufferError, TypeError, ValueError), f)
Example 18
 def testIRecvAndIBSend(self):
     comm = self.COMM
     rank = comm.Get_rank()
     buf = MPI.Alloc_mem((1 << 16) + MPI.BSEND_OVERHEAD)
     MPI.Attach_buffer(buf)
     try:
         for smess in messages:
             src = dst = rank
             req1 = comm.irecv(None, src, 1)
             req2 = comm.irecv(None, src, 2)
             req3 = comm.irecv(None, src, 3)
             req4 = comm.ibsend(smess, dst, 3)
             req5 = comm.ibsend(smess, dst, 2)
             req6 = comm.ibsend(smess, dst, 1)
             MPI.Request.waitall([req4, req5, req6])
             self.assertEqual(smess, req3.wait())
             self.assertEqual(smess, req2.wait())
             self.assertEqual(smess, req1.wait())
             comm.ibsend(smess, MPI.PROC_NULL, 3).wait()
     finally:
         MPI.Detach_buffer()
         MPI.Free_mem(buf)
Example 19
    def __init__(self, shape, dtype):
        self._length = reduce(lambda x, y: x * y, shape)
        if self._length > 0:
            self._mpi_alloc_ptr = MPI.Alloc_mem(
                ctypes.sizeof(dtype) * self._length)
            pp = ctypes.cast(self._mpi_alloc_ptr.address,
                             ctypes.POINTER(dtype))

            # numpy/ctypes will sometimes raise a RuntimeWarning (PEP3118)
            # relating to the deduced itemsize of the datatype passed.
            # Here we check that the created array has the correct itemsize,
            # shape and dtype.
            self._array = np.ctypeslib.as_array(pp, shape=shape)
            assert self._array.shape == tuple(shape)
            assert self._array.itemsize == ctypes.sizeof(dtype)
            assert self._array.dtype == dtype

        else:
            self._array = np.zeros(shape, dtype)

        self.array = self._array.view(dtype)
        """Numpy array formed from allocated memory."""

        self.array.fill(0)
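On reasonably recent mpi4py versions the MPI.memory object returned by Alloc_mem supports the buffer protocol, so the ctypes detour above can often be avoided; a minimal sketch (sizes illustrative):

from mpi4py import MPI
import numpy as np

n = 10
mem = MPI.Alloc_mem(n * MPI.DOUBLE.size)
arr = np.frombuffer(mem, dtype='d', count=n)   # view over the MPI-allocated region, no copy
arr[:] = 0.0
# ... use arr, e.g. as window memory for MPI.Win.Create(mem, ...) ...
del arr                                        # drop the view before releasing the memory
MPI.Free_mem(mem)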
Example 20
 def testBSendAndRecv(self):
     buf = MPI.Alloc_mem((1 << 16) + MPI.BSEND_OVERHEAD)
     MPI.Attach_buffer(buf)
     try:
         size = self.COMM.Get_size()
         rank = self.COMM.Get_rank()
         for smess in messages:
             self.COMM.bsend(smess, MPI.PROC_NULL)
             rmess = self.COMM.recv(None, MPI.PROC_NULL, 0)
             self.assertEqual(rmess, None)
         if size == 1: return
         for smess in messages:
             if rank == 0:
                 self.COMM.bsend(smess, rank + 1, 0)
                 rmess = smess
             elif rank == size - 1:
                 rmess = self.COMM.recv(None, rank - 1, 0)
             else:
                 rmess = self.COMM.recv(None, rank - 1, 0)
                 self.COMM.bsend(rmess, rank + 1, 0)
             self.assertEqual(rmess, smess)
     finally:
         MPI.Detach_buffer()
         MPI.Free_mem(buf)
Example 21
    buf[0] = b'x'
except TypeError as e:
    print(e)
# create a memory view of mem
mv = memoryview(mem)
# create a memory view of mem1
mv1 = memoryview(mem1)
print('before change: mv[0] = %s, mv1[0] = %s' % (mv[0], mv1[0]))
# change mv[0]
mv[0] = ord('x')
print('after change: mv[0] = %s, mv1[0] = %s' % (mv[0], mv1[0]))
# release the memory object
mem.release()

print()

# allocate 40 bytes, returned as an MPI.memory object
mem = MPI.Alloc_mem(10 * 4)
print('len(mem):', len(mem))
print('mem.address:', MPI.Get_address(mem))
# create a numpy array from the allocated memory
# NOTE: copy=False avoids copying the underlying buffer
buf = np.array(mem, dtype='B', copy=False)
# reinterpret the bytes as an int array
npary = np.ndarray(buffer=buf, dtype='i', shape=(10,))
# the buffer can now be manipulated with the usual numpy array operations
npary[:] = np.arange(10)
print('npary.tobytes:', npary.tobytes())
# release the memory object
MPI.Free_mem(mem)
print('len(mem) after free:', len(mem))
Example 22
    def slave(self):  #{{{
        #setup prerequests
        #messages.STATUS {{{
        self.status_reqs = array(0, dtype=int)
        self.status_prequest = comm.Recv_init([self.status_reqs, MPI.INT],
                                              source=0,
                                              tag=messages.STATUS)
        self.control_reqlist += [self.status_prequest]
        self.control_reqhandlers += [self.status]
        #}}}
        #messages.ABORT {{{
        self.abort_reqs = array(0, dtype=int)
        self.abort_prequest = comm.Recv_init([self.abort_reqs, MPI.INT],
                                             source=0,
                                             tag=messages.ABORT)
        self.control_reqlist += [self.abort_prequest]
        self.control_reqhandlers += [self.abort]
        #}}}
        #messages.STATE {{{
        self.state_req = array([0] * 300,
                               dtype='uint8')  #XXX, calculate exact state size
        self.state_prequest = comm.Recv_init([self.state_req, MPI.BYTE],
                                             source=MPI.ANY_SOURCE,
                                             tag=messages.STATE)
        self.computation_reqlist += [self.state_prequest]
        self.computation_reqhandlers += [self.state_received]
        #}}}

        self.setupGoalMessageHandler()
        self.termination_detection.slave_init()

        self.reqlist = self.control_reqlist + self.computation_reqlist
        self.reqhandlers = self.control_reqhandlers + self.computation_reqhandlers

        #print self.reqlist, self.reqhandlers

        #MPI buffer send space
        MPI.Attach_buffer(MPI.Alloc_mem(1024 * 1024))  #1Mb

        MPI.Prequest.Startall(self.reqlist)
        print "Slave", rank, "awaiting your command"

        self.running = True
        while self.running:
            #Do control comm, if any
            (i, comm_todo) = MPI.Prequest.Testany(self.control_reqlist)
            if comm_todo:
                self.control_reqhandlers[i]()
                self.control_reqlist[i].Start()
            #Do computation comm, if any
            (i, comm_todo) = MPI.Prequest.Testany(self.computation_reqlist)
            if comm_todo:
                #print "calling ", self.computation_reqhandlers[i]
                self.computation_reqhandlers[i]()
                self.computation_reqlist[i].Start()

            #Do a computation
            try:
                self.mcReachability.compute()
            except NoMoreStatesException:
                #No more work...
                #print "Rank %d has no more work, len(pwlist) = %d" % (rank, len(self.pwlist.waiting))
                self.termination_detection.noMoreWork()
                #... wait for something to arrive
                i = MPI.Prequest.Waitany(self.reqlist)
                #print "calling ", self.reqhandlers[i]
                self.reqhandlers[i]()
                self.reqlist[i].Start()
            except GoalFoundException:
                #goal state found: Profit!
                comm.Bsend([None, MPI.INT], dest=0, tag=messages.GOAL_REACHED)
Example 23
"""

from mpi4py import MPI
import sys

nprocs = MPI.COMM_WORLD.Get_size()
myid = MPI.COMM_WORLD.Get_rank()

fh = MPI.File.Open(MPI.COMM_WORLD,
                   'C:/Users/wilso/Programming_analytics/small.txt',
                   MPI.MODE_RDONLY)
size = fh.Get_size()
fh.Close()

blocksize = size // nprocs  # integer division: byte offsets must be whole numbers
block_start = blocksize * myid
buf = MPI.Alloc_mem(100)
if myid == nprocs - 1:
    block_end = size
else:
    block_end = block_start + blocksize - 1

print("beginning", myid, block_start)

if myid != 0:
    fh = open('C:/Users/wilso/Programming_analytics/small.txt')
    fh.seek(block_start)
    lines = fh.readlines(100)
    block_start += len(lines[0])  # advance past the partial line at the start of this block
    print("ending", myid, block_start)
Example 24
 def setUp(self):
     self.memory = MPI.Alloc_mem(10)
     memzero(self.memory)
     self.WIN = MPI.Win.Create(self.memory, 1, self.INFO, self.COMM)
Example 25
    def testSequence(self):
        n = 16
        try:
            mem = MPI.Alloc_mem(n, MPI.INFO_NULL)
        except NotImplementedError:
            self.skipTest('mpi-alloc_mem')
        try:
            self.assertTrue(type(mem) is MPI.memory)
            self.assertTrue(mem.address != 0)
            self.assertEqual(mem.nbytes, n)
            self.assertFalse(mem.readonly)
            self.assertEqual(len(mem), n)

            def delitem():
                del mem[n]

            def getitem1():
                return mem[n]

            def getitem2():
                return mem[::2]

            def getitem3():
                return mem[None]

            def setitem1():
                mem[n] = 0

            def setitem2():
                mem[::2] = 0

            def setitem3():
                mem[None] = 0

            self.assertRaises(Exception, delitem)
            self.assertRaises(IndexError, getitem1)
            self.assertRaises(IndexError, getitem2)
            self.assertRaises(TypeError, getitem3)
            self.assertRaises(IndexError, setitem1)
            self.assertRaises(IndexError, setitem2)
            self.assertRaises(TypeError, setitem3)
            for i in range(n):
                mem[i] = i
            for i in range(n):
                self.assertEqual(mem[i], i)
            mem[:] = 0
            for i in range(n):
                self.assertEqual(mem[i], 0)
            mem[:] = 255
            for i in range(n):
                self.assertEqual(mem[i], 255)
            mem[:n // 2] = 1
            mem[n // 2:] = 0
            for i in range(n // 2):
                self.assertEqual(mem[i], 1)
            for i in range(n // 2, n):
                self.assertEqual(mem[i], 0)
            mem[:] = 0
            mem[1:5] = b"abcd"
            mem[10:13] = b"xyz"
            self.assertEqual(mem[0], 0)
            for i, c in enumerate("abcd"):
                self.assertEqual(mem[1 + i], ord(c))
            for i in range(5, 10):
                self.assertEqual(mem[i], 0)
            for i, c in enumerate("xyz"):
                self.assertEqual(mem[10 + i], ord(c))
            for i in range(13, n):
                self.assertEqual(mem[i], 0)
            self.assertEqual(mem[1:5].tobytes(), b"abcd")
            self.assertEqual(mem[10:13].tobytes(), b"xyz")
        finally:
            MPI.Free_mem(mem)
            self.assertEqual(mem.address, 0)
            self.assertEqual(mem.nbytes, 0)
            self.assertFalse(mem.readonly)
Example 26
    win = MPI.Win.Allocate(0, disp_unit=4, comm=comm)
    # Lock_all
    win.Lock_all()
    reqs = []
    for rk in [1, 2, 3]:
        a = np.array([rk, rk], dtype='i')
        # Rput
        req = win.Rput(a, target_rank=rk)
        reqs.append(req)
        print('rank %d put %s to rank %d' % (rank, a, rk))
    # complete all outstanding Rput requests
    MPI.Request.Waitall(reqs)
    # Unlock_all
    win.Unlock_all()
    comm.Barrier()
else:
    win = MPI.Win.Allocate(8, disp_unit=4, comm=comm)
    comm.Barrier()
    # convert the memory of win to numpy array
    buf = np.array(win.tomemory(), dtype='B', copy=False)
    mem = np.ndarray(buffer=buf, dtype='i', shape=(2, ))

    print('rank %d get %s' % (rank, mem))

# Create_dynamic
win = MPI.Win.Create_dynamic(comm=comm)
mem = MPI.Alloc_mem(8)
# Attach and Detach
win.Attach(mem)
win.Detach(mem)
MPI.Free_mem(mem)
Example 27

class NodeType(IntEnum):
    MASTER = 0
    FOLLOWER = 1


class Tags(IntEnum):
    INITIAL_EDGES = 0
    PARTITION_MAP = 1
    OWNERSHIPS = 2
    EDGE_QUERY = 3
    EDGE_RESPONSE = 4
    STITCHING = 5
    MERGE = 6


PICKLE_SET_OVERHEAD = 31
PICKLE_LIST_OVERHEAD = 6
PICKLE_TUPLE_OVERHEAD = 3
PICKLE_BIG_INT_OVERHEAD = 25
MAX_NODES = 200000

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()

buf = MPI.Alloc_mem(MAX_NODES * PICKLE_BIG_INT_OVERHEAD +
                    PICKLE_SET_OVERHEAD * size + MPI.BSEND_OVERHEAD * size)
MPI.Attach_buffer(buf)
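The buffer attached above stays attached for the lifetime of the program; assuming an orderly shutdown, the matching teardown pairs Detach_buffer with Free_mem, for example:

from mpi4py import MPI

buf = MPI.Alloc_mem(1 << 20)   # illustrative size
MPI.Attach_buffer(buf)
# ... Bsend / bsend traffic ...
MPI.Detach_buffer()            # blocks until all buffered messages have been delivered
MPI.Free_mem(buf)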
Example 28
    # Generate MPI thread-safe info message
    if mpi_initialized and not is_mpi_thread_safe:
        mpi_thread_safe_info_msg = "Provided MPI implementation (%s) is not thread safe configured, " % (
            mpi_vendor_str)
        mpi_thread_safe_info_msg = mpi_thread_safe_info_msg + "maximum thread safe level supported is: %s" % (
            mpi_thread_safe_level_str)
        mpi_thread_safe_info_msg = mpi_thread_safe_info_msg + "\nNOTE: In most MPI implementations thread-safety "
        mpi_thread_safe_info_msg = mpi_thread_safe_info_msg + "can be enabled at pre-compile, "
        mpi_thread_safe_info_msg = mpi_thread_safe_info_msg + "by setting explicit thread-safe configuration options, "
        mpi_thread_safe_info_msg = mpi_thread_safe_info_msg + "\n      e.g. (MPI 1.6.5) --enable-mpi-thread-multiple"

    # Allocate memory for buffered sends
    if mpi_initialized and mpi_world_size > 1 and is_mpi_thread_safe:
        try:
            mpi_buffer_size_in_mb = 100
            __mpi_buffer = __mpi_factory.Alloc_mem(mpi_buffer_size_in_mb *
                                                   1024 * 1024)
            __mpi_factory.Attach_buffer(__mpi_buffer)
            mpi_buffer_allocated = True
        except Exception as instance:
            mpi_buffer_allocated = False
            mpi_buffer_allocation_error_msg = traceback.format_exc()

    # Check if MPI is effectively enabled
    if mpi_initialized and mpi_world_size > 1 and is_mpi_thread_safe and mpi_buffer_allocated:
        is_mpi_enabled = True
    else:
        is_mpi_enabled = False
        if not mpi_initialized:
            mpi_error_msg = mpi_initialization_error_msg
        elif mpi_world_size < 2:
            mpi_error_msg = "Only 1 MPI process found"