Example #1
 def testFetchAndOp(self):
     group = self.WIN.Get_group()
     size = group.Get_size()
     rank = group.Get_rank()
     group.Free()
     self.WIN.Fence()
     obuf = MPI.Alloc_mem(1); memzero(obuf)
     rbuf = MPI.Alloc_mem(1); memzero(rbuf)
     try:
         try:
             self.WIN.Fetch_and_op([obuf, 1, MPI.BYTE], [rbuf, 1, MPI.BYTE], rank)
         finally:
             MPI.Free_mem(obuf)
             MPI.Free_mem(rbuf)
     except NotImplementedError:
         self.skipTest('mpi-win-fetch_and_op')
     self.WIN.Fence()
     for array in arrayimpl.ArrayTypes:
         for typecode in arrayimpl.TypeMap:
             obuf = array(+1, typecode)
             rbuf = array(-1, typecode, 2)
             for op in (MPI.SUM, MPI.PROD,
                        MPI.MAX, MPI.MIN,
                        MPI.REPLACE, MPI.NO_OP):
                 for rank in range(size):
                     for disp in range(3):
                         self.WIN.Lock(rank)
                         self.WIN.Fetch_and_op(obuf.as_mpi(),
                                               rbuf.as_mpi_c(1),
                                               rank, disp, op=op)
                         self.WIN.Unlock(rank)
                         self.assertEqual(rbuf[1], -1)
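The test above relies on a window self.WIN and helpers (arrayimpl, memzero) created by the surrounding test fixture, which this listing does not show. Below is a minimal, self-contained sketch of the same Fetch_and_op round trip; the window setup via Win.Allocate and the NumPy buffers are assumptions for illustration, not part of the test suite.

import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

# One integer slot per rank, owned by an allocated window.
itemsize = MPI.INT.Get_size()
win = MPI.Win.Allocate(itemsize, disp_unit=itemsize, comm=comm)

# Zero the local window memory before any one-sided access.
mem = np.frombuffer(win.tomemory(), dtype='i')
mem[:] = 0
comm.Barrier()

obuf = np.array([1], dtype='i')   # value to combine into the target
rbuf = np.array([-1], dtype='i')  # receives the target value seen before the update
win.Lock(rank)
win.Fetch_and_op([obuf, 1, MPI.INT], [rbuf, 1, MPI.INT], rank, 0, op=MPI.SUM)
win.Unlock(rank)
assert rbuf[0] == 0               # the slot started at zero

comm.Barrier()
del mem                           # drop the NumPy view before the window frees its memory
win.Free()

Run it under mpiexec: each rank atomically adds 1 to its own slot and gets back the previous contents in rbuf.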
Example #2
 def testGetAccumulate(self):
     group = self.WIN.Get_group()
     size = group.Get_size()
     rank = group.Get_rank()
     group.Free()
     self.WIN.Fence()
     obuf = MPI.Alloc_mem(1)
     memzero(obuf)
     rbuf = MPI.Alloc_mem(1)
     memzero(rbuf)
     try:
         try:
             self.WIN.Get_accumulate([obuf, 0, MPI.BYTE],
                                     [rbuf, 0, MPI.BYTE], rank)
             self.WIN.Fence()
         finally:
             MPI.Free_mem(obuf)
             MPI.Free_mem(rbuf)
     except NotImplementedError:
         self.skipTest('mpi-win-get_accumulate')
     self.WIN.Fence()
     for array, typecode in arrayimpl.loop():
         with arrayimpl.test(self):
             if unittest.is_mpi_gpu('openmpi', array): continue
             if unittest.is_mpi_gpu('mvapich2', array): continue
             if typecode in 'FDG': continue
             for count in range(10):
                 for rank in range(size):
                     with self.subTest(rank=rank, count=count):
                         ones = array([1] * count, typecode)
                         sbuf = array(range(count), typecode)
                         rbuf = array(-1, typecode, count + 1)
                         gbuf = array(-1, typecode, count + 1)
                         for op in (
                                 MPI.SUM,
                                 MPI.PROD,
                                 MPI.MAX,
                                 MPI.MIN,
                                 MPI.REPLACE,
                                 MPI.NO_OP,
                         ):
                             self.WIN.Lock(rank)
                             self.WIN.Put(ones.as_mpi(), rank)
                             self.WIN.Flush(rank)
                             self.WIN.Get_accumulate(sbuf.as_mpi(),
                                                     rbuf.as_mpi_c(count),
                                                     rank,
                                                     op=op)
                             self.WIN.Flush(rank)
                             self.WIN.Get(gbuf.as_mpi_c(count), rank)
                             self.WIN.Flush(rank)
                             self.WIN.Unlock(rank)
                             #
                             for i in range(count):
                                 self.assertEqual(sbuf[i], i)
                                 self.assertEqual(rbuf[i], 1)
                                 self.assertEqual(gbuf[i], op(1, i))
                             self.assertEqual(rbuf[-1], -1)
                             self.assertEqual(gbuf[-1], -1)
Example #3
 def testMessageBad(self):
     sbuf = MPI.Alloc_mem(4)
     rbuf = MPI.Alloc_mem(4)
     def f(): Alltoallw([sbuf, [0], [0], [MPI.BYTE], None],
                        [rbuf, [0], [0], [MPI.BYTE]])
     self.assertRaises(ValueError, f)
     def f(): Alltoallw([sbuf, [0], [0], [MPI.BYTE]],
                        [rbuf, [0], [0], [MPI.BYTE], None])
     self.assertRaises(ValueError, f)
     MPI.Free_mem(sbuf)
     MPI.Free_mem(rbuf)
Example #4
    def testFetchAndOp(self):
        typemap = MPI._typedict
        group = self.WIN.Get_group()
        size = group.Get_size()
        rank = group.Get_rank()
        group.Free()
        self.WIN.Fence()
        blen = MPI.INT.Get_size()
        obuf = MPI.Alloc_mem(blen)
        memzero(obuf)
        rbuf = MPI.Alloc_mem(blen)
        memzero(rbuf)
        try:
            try:
                self.WIN.Fetch_and_op([obuf, 1, MPI.INT], [rbuf, 1, MPI.INT],
                                      rank)
                self.WIN.Fence()
            finally:
                MPI.Free_mem(obuf)
                MPI.Free_mem(rbuf)
        except NotImplementedError:
            self.skipTest('mpi-win-fetch_and_op')
        self.WIN.Fence()
        for array, typecode in arrayimpl.loop():
            with arrayimpl.test(self):
                if unittest.is_mpi_gpu('openmpi', array): continue
                if unittest.is_mpi_gpu('mvapich2', array): continue
                if typecode in 'FDG': continue
                obuf = array(+1, typecode)
                rbuf = array(-1, typecode, 2)
                datatype = typemap[typecode]
                for op in (
                        MPI.SUM,
                        MPI.PROD,
                        MPI.MAX,
                        MPI.MIN,
                        MPI.REPLACE,
                        MPI.NO_OP,
                ):
                    for rank in range(size):
                        for disp in range(3):
                            with self.subTest(disp=disp, rank=rank):
                                self.WIN.Lock(rank)
                                self.WIN.Fetch_and_op(obuf.as_mpi(),
                                                      rbuf.as_mpi_c(1),
                                                      rank,
                                                      disp * datatype.size,
                                                      op=op)

                                self.WIN.Unlock(rank)
                                self.assertEqual(rbuf[1], -1)
Example #5
 def testGetAccumulate(self):
     group = self.WIN.Get_group()
     size = group.Get_size()
     rank = group.Get_rank()
     group.Free()
     self.WIN.Fence()
     obuf = MPI.Alloc_mem(1)
     memzero(obuf)
     rbuf = MPI.Alloc_mem(1)
     memzero(rbuf)
     try:
         try:
             self.WIN.Get_accumulate([obuf, 0, MPI.BYTE],
                                     [rbuf, 0, MPI.BYTE], rank)
         finally:
             MPI.Free_mem(obuf)
             MPI.Free_mem(rbuf)
     except NotImplementedError:
         self.skipTest('mpi-win-get_accumulate')
     self.WIN.Fence()
     for array in arrayimpl.ArrayTypes:
         for typecode in arrayimpl.TypeMap:
             for count in range(10):
                 for rank in range(size):
                     ones = array([1] * count, typecode)
                     sbuf = array(range(count), typecode)
                     rbuf = array(-1, typecode, count + 1)
                     gbuf = array(-1, typecode, count + 1)
                     for op in (MPI.SUM, MPI.PROD, MPI.MAX, MPI.MIN,
                                MPI.REPLACE, MPI.NO_OP):
                         self.WIN.Lock(rank)
                         self.WIN.Put(ones.as_mpi(), rank)
                         self.WIN.Flush(rank)
                         self.WIN.Get_accumulate(sbuf.as_mpi(),
                                                 rbuf.as_mpi_c(count),
                                                 rank,
                                                 op=op)
                         self.WIN.Flush(rank)
                         self.WIN.Get(gbuf.as_mpi_c(count), rank)
                         self.WIN.Flush(rank)
                         self.WIN.Unlock(rank)
                         #
                         for i in range(count):
                             self.assertEqual(sbuf[i], i)
                             self.assertEqual(rbuf[i], 1)
                             self.assertEqual(gbuf[i], op(1, i))
                         self.assertEqual(rbuf[-1], -1)
                         self.assertEqual(gbuf[-1], -1)
Example #6
 def testSendrecv(self):
     size = self.COMM.Get_size()
     rank = self.COMM.Get_rank()
     for smess in messages:
         rmess = self.COMM.sendrecv(smess, MPI.PROC_NULL, 0, None,
                                    MPI.PROC_NULL, 0)
         self.assertEqual(rmess, None)
     if isinstance(self.COMM, pkl5.Comm):
         rbuf = MPI.Alloc_mem(32)
     else:
         rbuf = None
     for smess in messages:
         dest = (rank + 1) % size
         source = (rank - 1) % size
         rmess = self.COMM.sendrecv(None, dest, 0, None, source, 0)
         self.assertEqual(rmess, None)
         rmess = self.COMM.sendrecv(smess, dest, 0, None, source, 0)
         self.assertEqual(rmess, smess)
         status = MPI.Status()
         rmess = self.COMM.sendrecv(smess, dest, 42, rbuf, source, 42,
                                    status)
         self.assertEqual(status.source, source)
         self.assertEqual(status.tag, 42)
         self.assertEqual(status.error, 0)
     if rbuf is not None:
         MPI.Free_mem(rbuf)
Example #7
 def testIBSendAndRecv(self):
     buf = MPI.Alloc_mem((1 << 16) + MPI.BSEND_OVERHEAD)
     MPI.Attach_buffer(buf)
     try:
         size = self.COMM.Get_size()
         rank = self.COMM.Get_rank()
         for smess in messages:
             req = self.COMM.ibsend(smess, MPI.PROC_NULL)
             self.assertTrue(req)
             req.wait()
             self.assertFalse(req)
             rmess = self.COMM.recv(None, MPI.PROC_NULL, 0)
             self.assertEqual(rmess, None)
         for smess in messages:
             req = self.COMM.ibsend(smess, rank, 0)
             self.assertTrue(req)
             rmess = self.COMM.recv(None, rank, 0)
             self.assertTrue(req)
             flag, _ = req.test()
             self.assertTrue(flag)
             self.assertFalse(req)
             self.assertEqual(rmess, smess)
         for smess in messages:
             dst = (rank + 1) % size
             src = (rank - 1) % size
             req = self.COMM.ibsend(smess, dst, 0)
             self.assertTrue(req)
             rmess = self.COMM.recv(None, src, 0)
             req.wait()
             self.assertFalse(req)
             self.assertEqual(rmess, smess)
     finally:
         MPI.Detach_buffer()
         MPI.Free_mem(buf)
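The buffered-send tests in this listing all follow the same lifecycle: allocate a buffer that includes MPI.BSEND_OVERHEAD, attach it, send, then detach before freeing. A minimal, self-contained sketch of that pattern (the message contents and tag are illustrative assumptions):

from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

# Reserve room for one pickled message plus MPI's per-message bookkeeping.
buf = MPI.Alloc_mem((1 << 16) + MPI.BSEND_OVERHEAD)
MPI.Attach_buffer(buf)
try:
    payload = {"rank": rank, "data": list(range(4))}
    comm.bsend(payload, dest=rank, tag=0)       # buffered send to self returns immediately
    received = comm.recv(None, source=rank, tag=0)
    assert received == payload
finally:
    MPI.Detach_buffer()   # detach must happen before the memory is freed
    MPI.Free_mem(buf)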
Example #8
    def testMessageBad(self):
        buf = MPI.Alloc_mem(4)
        empty = [None, 0, [0], "B"]

        def f():
            Alltoallv([buf, 0, [0], "i", None], empty)

        self.assertRaises(ValueError, f)

        def f():
            Alltoallv([buf, 0, [0], "\0"], empty)

        self.assertRaises(KeyError, f)
        MPI.Free_mem(buf)
        buf = [1, 2, 3, 4]

        def f():
            Alltoallv([buf, 0, 0, "i"], empty)

        self.assertRaises(TypeError, f)
        buf = {1: 2, 3: 4}

        def f():
            Alltoallv([buf, 0, 0, "i"], empty)

        self.assertRaises(TypeError, f)
Example #9
 def testMessageBad(self):
     buf = MPI.Alloc_mem(5)
     empty = [None, 0, [0], "B"]
     def f(): Alltoallv([buf, 0, [0], "i", None], empty)
     self.assertRaises(ValueError, f)
     def f(): Alltoallv([buf, 0, [0], "\0"], empty)
     self.assertRaises(KeyError, f)
     def f(): Alltoallv([buf, None, [0], MPI.DATATYPE_NULL], empty)
     self.assertRaises(ValueError, f)
     def f(): Alltoallv([buf, None, [0], "i"], empty)
     self.assertRaises(ValueError, f)
     try:
         t = MPI.INT.Create_resized(0, -4).Commit()
         def f(): Alltoallv([buf, None, [0], t], empty)
         self.assertRaises(ValueError, f)
         t.Free()
     except NotImplementedError:
         pass
     MPI.Free_mem(buf)
     buf = [1,2,3,4]
     def f(): Alltoallv([buf, 0,  0, "i"], empty)
     self.assertRaises(TypeError, f)
     buf = {1:2,3:4}
     def f(): Alltoallv([buf, 0,  0, "i"], empty)
     self.assertRaises(TypeError, f)
Example #10
 def testMessageBad(self):
     comm = MPI.COMM_WORLD
     buf = MPI.Alloc_mem(4)
     empty = [None, 0, "B"]
     def f(): comm.Alltoall([buf, None, "i"], empty)
     self.assertRaises(ValueError, f)
     MPI.Free_mem(buf)
Example #11
 def testProcNullPersistent(self):
     comm = self.COMM
     #
     req = comm.Send_init(None, MPI.PROC_NULL)
     req.Start()
     req.Wait()
     req.Free()
     #
     req = comm.Ssend_init(None, MPI.PROC_NULL)
     req.Start()
     req.Wait()
     req.Free()
     #
     buf = MPI.Alloc_mem(MPI.BSEND_OVERHEAD)
     MPI.Attach_buffer(buf)
     req = comm.Bsend_init(None, MPI.PROC_NULL)
     req.Start()
     req.Wait()
     req.Free()
     MPI.Detach_buffer()
     MPI.Free_mem(buf)
     #
     req = comm.Rsend_init(None, MPI.PROC_NULL)
     req.Start()
     req.Wait()
     req.Free()
     #
     req = comm.Recv_init(None, MPI.PROC_NULL)
     req.Start()
     req.Wait()
     req.Free()
Example #12
    def testCompareAndSwap(self):
        typemap = MPI._typedict
        group = self.WIN.Get_group()
        size = group.Get_size()
        rank = group.Get_rank()
        group.Free()
        self.WIN.Fence()
        obuf = MPI.Alloc_mem(1)
        memzero(obuf)
        cbuf = MPI.Alloc_mem(1)
        memzero(cbuf)
        rbuf = MPI.Alloc_mem(1)
        memzero(rbuf)
        try:
            try:
                self.WIN.Compare_and_swap([obuf, 1, MPI.BYTE],
                                          [cbuf, 1, MPI.BYTE],
                                          [rbuf, 1, MPI.BYTE], rank, 0)
                self.WIN.Fence()
            finally:
                MPI.Free_mem(obuf)
                MPI.Free_mem(cbuf)
                MPI.Free_mem(rbuf)
        except NotImplementedError:
            self.skipTest('mpi-win-compare_and_swap')
        self.WIN.Fence()
        for array, typecode in arrayimpl.loop():
            with arrayimpl.test(self):
                if unittest.is_mpi_gpu('openmpi', array): continue
                if unittest.is_mpi_gpu('mvapich2', array): continue
                if typecode in 'fdg': continue
                if typecode in 'FDG': continue
                obuf = array(+1, typecode)
                cbuf = array(0, typecode)
                rbuf = array(-1, typecode, 2)
                datatype = typemap[typecode]
                for rank in range(size):
                    for disp in range(3):
                        with self.subTest(disp=disp, rank=rank):
                            self.WIN.Lock(rank)
                            self.WIN.Compare_and_swap(obuf.as_mpi(),
                                                      cbuf.as_mpi(),
                                                      rbuf.as_mpi_c(1), rank,
                                                      disp * datatype.size)

                            self.WIN.Unlock(rank)
                            self.assertEqual(rbuf[1], -1)
Example #13
 def testMemory2(self):
     for size in range(0, 10000, 100):
         try:
             mem2 = MPI.Alloc_mem(size, MPI.INFO_NULL)
             self.assertEqual(len(mem2), size)
             MPI.Free_mem(mem2)
         except NotImplementedError:
             return
Example #14
 def testMemory1(self):
     for size in range(0, 10000, 100):
         try:
             mem1 = MPI.Alloc_mem(size)
             self.assertEqual(len(mem1), size)
             MPI.Free_mem(mem1)
         except NotImplementedError:
             return
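Both memory tests only check that Alloc_mem returns an object whose length matches the request. As an illustrative aside (not from the test suite), the returned object also supports the buffer protocol, so plain Python code can read and write it until Free_mem releases it:

from mpi4py import MPI

mem = MPI.Alloc_mem(16)           # 16 bytes of MPI-allocated memory
assert len(mem) == 16
view = memoryview(mem)            # expose it through the buffer protocol
view[:4] = b"\x01\x02\x03\x04"    # write into the allocation
assert bytes(view[:4]) == b"\x01\x02\x03\x04"
view.release()                    # drop the exported view before freeing
MPI.Free_mem(mem)                 # hand the memory back to the MPI library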
Example #15
 def tearDown(self):
     refcnt = sys.getrefcount(self.memory)
     self.WIN.Free()
     if type(self.memory).__name__ == 'buffer':
         self.assertEqual(sys.getrefcount(self.memory), refcnt - 1)
     else:
         self.assertEqual(sys.getrefcount(self.memory), refcnt)
     if self.mpi_memory:
         MPI.Free_mem(self.mpi_memory)
Example #16
 def testMemory1(self):
     for size in range(0, 10000, 100):
         size = max(1, size)  # Open MPI
         try:
             mem1 = MPI.Alloc_mem(size)
             self.assertEqual(len(mem1), size)
             MPI.Free_mem(mem1)
         except NotImplementedError:
             self.skipTest('mpi-alloc_mem')
Example #17
 def free(self):
     if self.win:
         self.win.Free()
     if self.mem:
         MPI.Free_mem(self.mem)
         self.mem = None
     if self.get_type:
         self.get_type.Free()
     if self.acc_type:
         self.acc_type.Free()
Example #18
 def tearDown(self):
     refcnt = getrefcount(self.memory)
     self.WIN.Free()
     if type(self.memory).__name__ == 'buffer':
         self.assertEqual(getrefcount(self.memory), refcnt - 1)
     else:
         if sys.version_info[:3] < (3, 3):
             self.assertEqual(getrefcount(self.memory), refcnt)
         else:
             self.assertEqual(getrefcount(self.memory), refcnt - 1)
     if self.mpi_memory:
         MPI.Free_mem(self.mpi_memory)
Example #19
 def testAttachDetach(self):
     mem1 = MPI.Alloc_mem(8)
     mem2 = MPI.Alloc_mem(16)
     mem3 = MPI.Alloc_mem(32)
     for mem in (mem1, mem2, mem3):
         self.WIN.Attach(mem)
         self.testMemory()
         self.WIN.Detach(mem)
     for mem in (mem1, mem2, mem3):
         self.WIN.Attach(mem)
     self.testMemory()
     for mem in (mem1, mem2, mem3):
         self.WIN.Detach(mem)
     for mem in (mem1, mem2, mem3):
         self.WIN.Attach(mem)
     self.testMemory()
     for mem in (mem3, mem2, mem1):
         self.WIN.Detach(mem)
     MPI.Free_mem(mem1)
     MPI.Free_mem(mem2)
     MPI.Free_mem(mem3)
Example #20
 def testCompareAndSwap(self):
     group = self.WIN.Get_group()
     size = group.Get_size()
     rank = group.Get_rank()
     group.Free()
     self.WIN.Fence()
     obuf = MPI.Alloc_mem(1); memzero(obuf)
     cbuf = MPI.Alloc_mem(1); memzero(cbuf)
     rbuf = MPI.Alloc_mem(1); memzero(rbuf)
     try:
         try:
             self.WIN.Compare_and_swap([obuf, 1, MPI.BYTE],
                                       [cbuf, 1, MPI.BYTE],
                                       [rbuf, 1, MPI.BYTE],
                                       rank, 0)
         finally:
             MPI.Free_mem(obuf)
             MPI.Free_mem(cbuf)
             MPI.Free_mem(rbuf)
     except NotImplementedError:
         self.skipTest('mpi-win-compare_and_swap')
     self.WIN.Fence()
     for array in arrayimpl.ArrayTypes:
         for typecode in arrayimpl.TypeMap:
             if typecode in 'fdg': continue
             obuf = array(+1, typecode)
             cbuf = array( 0, typecode)
             rbuf = array(-1, typecode, 2)
             for rank in range(size):
                 for disp in range(3):
                     self.WIN.Lock(rank)
                     self.WIN.Compare_and_swap(obuf.as_mpi(),
                                               cbuf.as_mpi(),
                                               rbuf.as_mpi_c(1),
                                               rank, disp)
                     self.WIN.Unlock(rank)
                     self.assertEqual(rbuf[1], -1)
Example #21
 def testProcNull(self):
     comm = self.COMM
     #
     comm.Sendrecv(None, MPI.PROC_NULL, 0, None, MPI.PROC_NULL, 0)
     comm.Sendrecv_replace(None, MPI.PROC_NULL, 0, MPI.PROC_NULL, 0)
     #
     comm.Send(None, MPI.PROC_NULL)
     comm.Isend(None, MPI.PROC_NULL).Wait()
     req = comm.Send_init(None, MPI.PROC_NULL)
     req.Start()
     req.Wait()
     req.Free()
     #
     comm.Ssend(None, MPI.PROC_NULL)
     comm.Issend(None, MPI.PROC_NULL).Wait()
     req = comm.Ssend_init(None, MPI.PROC_NULL)
     req.Start()
     req.Wait()
     req.Free()
     #
     buf = MPI.Alloc_mem(MPI.BSEND_OVERHEAD)
     MPI.Attach_buffer(buf)
     comm.Bsend(None, MPI.PROC_NULL)
     comm.Ibsend(None, MPI.PROC_NULL).Wait()
     req = comm.Bsend_init(None, MPI.PROC_NULL)
     req.Start()
     req.Wait()
     req.Free()
     MPI.Detach_buffer()
     MPI.Free_mem(buf)
     #
     comm.Rsend(None, MPI.PROC_NULL)
     comm.Irsend(None, MPI.PROC_NULL).Wait()
     req = comm.Rsend_init(None, MPI.PROC_NULL)
     req.Start()
     req.Wait()
     req.Free()
     #
     comm.Recv(None, MPI.PROC_NULL)
     comm.Irecv(None, MPI.PROC_NULL).Wait()
     req = comm.Recv_init(None, MPI.PROC_NULL)
     req.Start()
     req.Wait()
     req.Free()
Example #22
 def testIRecvAndBSend(self):
     comm = self.COMM
     rank = comm.Get_rank()
     buf = MPI.Alloc_mem((1<<16)+MPI.BSEND_OVERHEAD)
     MPI.Attach_buffer(buf)
     for smess in messages:
         src = dst = rank
         req1 = comm.irecv(None, src, 1)
         req2 = comm.irecv(None, src, 2)
         req3 = comm.irecv(None, src, 3)
         comm.bsend(smess, dst, 3)
         comm.bsend(smess, dst, 2)
         comm.bsend(smess, dst, 1)
         self.assertEqual(smess, req3.wait())
         self.assertEqual(smess, req2.wait())
         self.assertEqual(smess, req1.wait())
         comm.bsend(smess, MPI.PROC_NULL, 3)
     MPI.Detach_buffer()
     MPI.Free_mem(buf)
Example #23
 def testMessageBad(self):
     buf = MPI.Alloc_mem(5)
     empty = [None, 0, "B"]
     def f(): Sendrecv([buf, 0, 0, "i", None], empty)
     self.assertRaises(ValueError, f)
     def f(): Sendrecv([buf,  0, "\0"], empty)
     self.assertRaises(KeyError, f)
     def f(): Sendrecv([buf, -1, "i"], empty)
     self.assertRaises(ValueError, f)
     def f(): Sendrecv([buf, 0, -1, "i"], empty)
     self.assertRaises(ValueError, f)
     def f(): Sendrecv([buf, 0, +2, "i"], empty)
     self.assertRaises(ValueError, f)
     def f(): Sendrecv([None, 1,  0, "i"], empty)
     self.assertRaises(ValueError, f)
     def f(): Sendrecv([buf, None,  0, "i"], empty)
     self.assertRaises(ValueError, f)
     def f(): Sendrecv([buf, 0, 1, MPI.DATATYPE_NULL], empty)
     self.assertRaises(ValueError, f)
     def f(): Sendrecv([buf, None, 0, MPI.DATATYPE_NULL], empty)
     self.assertRaises(ValueError, f)
     try:
         t = MPI.INT.Create_resized(0, -4).Commit()
         def f(): Sendrecv([buf, None, t], empty)
         self.assertRaises(ValueError, f)
         def f(): Sendrecv([buf, 0, 1, t], empty)
         self.assertRaises(ValueError, f)
         t.Free()
     except NotImplementedError:
         pass
     MPI.Free_mem(buf)
     buf = [1,2,3,4]
     def f(): Sendrecv([buf, 4,  0, "i"], empty)
     self.assertRaises(TypeError, f)
     buf = {1:2,3:4}
     def f(): Sendrecv([buf, 4,  0, "i"], empty)
     self.assertRaises(TypeError, f)
     def f(): Sendrecv(b"abc", b"abc")
     self.assertRaises((BufferError, TypeError, ValueError), f)
Example #24
 def testIRecvAndIBSend(self):
     comm = self.COMM
     rank = comm.Get_rank()
     buf = MPI.Alloc_mem((1 << 16) + MPI.BSEND_OVERHEAD)
     MPI.Attach_buffer(buf)
     try:
         for smess in messages:
             src = dst = rank
             req1 = comm.irecv(None, src, 1)
             req2 = comm.irecv(None, src, 2)
             req3 = comm.irecv(None, src, 3)
             req4 = comm.ibsend(smess, dst, 3)
             req5 = comm.ibsend(smess, dst, 2)
             req6 = comm.ibsend(smess, dst, 1)
             MPI.Request.waitall([req4, req5, req6])
             self.assertEqual(smess, req3.wait())
             self.assertEqual(smess, req2.wait())
             self.assertEqual(smess, req1.wait())
             comm.ibsend(smess, MPI.PROC_NULL, 3).wait()
     finally:
         MPI.Detach_buffer()
         MPI.Free_mem(buf)
Example #25
 def testBSendAndRecv(self):
     buf = MPI.Alloc_mem((1 << 16) + MPI.BSEND_OVERHEAD)
     MPI.Attach_buffer(buf)
     try:
         size = self.COMM.Get_size()
         rank = self.COMM.Get_rank()
         for smess in messages:
             self.COMM.bsend(smess, MPI.PROC_NULL)
             rmess = self.COMM.recv(None, MPI.PROC_NULL, 0)
             self.assertEqual(rmess, None)
         if size == 1: return
         for smess in messages:
             if rank == 0:
                 self.COMM.bsend(smess, rank + 1, 0)
                 rmess = smess
             elif rank == size - 1:
                 rmess = self.COMM.recv(None, rank - 1, 0)
             else:
                 rmess = self.COMM.recv(None, rank - 1, 0)
                 self.COMM.bsend(rmess, rank + 1, 0)
             self.assertEqual(rmess, smess)
     finally:
         MPI.Detach_buffer()
         MPI.Free_mem(buf)
Example #26
 def tearDown(self):
     self.WIN.Free()
     if self.mpi_memory:
         MPI.Free_mem(self.mpi_memory)
Example #27
 def free(self):
     self.dt_get.Free()
     mem = self.win.memory
     self.win.Free()
     if mem: MPI.Free_mem(mem)
Example #28
 def __del__(self):
     del self.array
     del self._array
     if self._length > 0:
         MPI.Free_mem(self._mpi_alloc_ptr)
         del self._mpi_alloc_ptr
Example #29
    # Lock_all
    win.Lock_all()
    reqs = []
    for rk in [1, 2, 3]:
        a = np.array([rk, rk], dtype='i')
        # Rput
        req = win.Rput(a, target_rank=rk)
        reqs.append(req)
        print 'rank %d put %s to rank %d' % (rank, a, rk)
    # compute all Rput
    MPI.Request.Waitall(reqs)
    # Unlock_all
    win.Unlock_all()
    comm.Barrier()
else:
    win = MPI.Win.Allocate(8, disp_unit=4, comm=comm)
    comm.Barrier()
    # convert the memory of win to numpy array
    buf = np.array(buffer(win.tomemory()), dtype='B', copy=False)
    mem = np.ndarray(buffer=buf, dtype='i', shape=(2, ))

    print 'rank %d get %s' % (rank, mem)

# Create_dynamic
win = MPI.Win.Create_dynamic(comm=comm)
mem = MPI.Alloc_mem(8)
# Attach and Detach
win.Attach(mem)
win.Detach(mem)
MPI.Free_mem(mem)
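The fragment above is Python 2 code (print statements, the buffer() builtin) and starts mid-script, so it does not run as shown. A self-contained Python 3 sketch of the same two patterns follows: a Win.Allocate window viewed through NumPy, and a dynamic window with Attach/Detach backed by Alloc_mem/Free_mem. Sizes and dtypes are illustrative assumptions.

import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

# A window that owns its memory: view it as two 32-bit ints without copying.
win = MPI.Win.Allocate(8, disp_unit=4, comm=comm)
mem = np.frombuffer(win.tomemory(), dtype='i')
mem[:] = rank
comm.Barrier()
print('rank %d sees %s' % (rank, mem))
del mem            # release the NumPy view before the window frees its memory
win.Free()

# A dynamic window: memory is attached and detached explicitly, and the
# caller remains responsible for freeing it with MPI.Free_mem.
dwin = MPI.Win.Create_dynamic(comm=comm)
dmem = MPI.Alloc_mem(8)
dwin.Attach(dmem)
# ... one-sided communication against the attached region would go here ...
dwin.Detach(dmem)
MPI.Free_mem(dmem)
dwin.Free()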
Example #30
 def tearDown(self):
     self.WIN.Free()
     MPI.Free_mem(self.memory)