Example #1
    def parallelRunTest(self):

        #Every process sends six messages to itself
        myMsgs = ["I", "talk", "to", "myself", "next message is BIG", ""]

        #The last message is BIG to test the new message model
        for i in range(512):
            myMsgs[5] += str(i)

        #Do all the asynchronous sends: each process sends to ITSELF
        for x in range(6):
            mpi.isend( myMsgs[x], mpi.rank, x )  

        #Get receive handles for all the receives  
        recvHandles = [0,0,0,    0,0,0]
        for x in range(6):
            recvHandles[x] = mpi.irecv( mpi.rank, x )

        #Wait for all receives to complete
        mpi.waitall(recvHandles)

        #Check for correct answers
        for x in range(6):
            if recvHandles[x].message != myMsgs[x]:
                failStr = "Self-Selding non-blocking communication test fail"
                failStr += "\nFailure on process " + str(mpi.rank) + ", test "
                failStr += str(x)
                self.fail( failStr )

        return
Example #2
    def parallelRunTest(self):

        #Every process sends six messages to itself
        myMsgs = ["I", "talk", "to", "myself", "next message is BIG", ""]

        #The last message is BIG to test the new message model
        for i in range(512):
            myMsgs[5] += str(i)

        #Do all the asynchronous sends: each process sends to ITSELF
        for x in range(6):
            mpi.isend(myMsgs[x], mpi.rank, x)

        #Get receive handles for all the receives
        recvHandles = [0, 0, 0, 0, 0, 0]
        for x in range(6):
            recvHandles[x] = mpi.irecv(mpi.rank, x)

        #Wait for all receives to complete
        mpi.waitall(recvHandles)

        #Check for correct answers
        for x in range(6):
            if recvHandles[x].message != myMsgs[x]:
                failStr = "Self-Selding non-blocking communication test fail"
                failStr += "\nFailure on process " + str(mpi.rank) + ", test "
                failStr += str(x)
                self.fail(failStr)

        return
Example #3
	def send(self, task, dest=0):
		#print 'sending task'
		if isinstance( task, Task ):
			
			if dest == rank:
				self.display( OUTPUT_MPI, 'cannot send task to self %s' % task.id() )
				self.queue( task )
				
			elif not dest == None:
				if isinstance( task, ControlTask ):
					self.display( OUTPUT_DEBUG, 'removing node associated for control task' )
					task.node = None

				data = task.encodeTask()

				if type(dest) is int:
					if self._send:
						self._send = isend( data, dest )
						self.display( OUTPUT_DEBUG, 'sent task %s to node %d' % (task.id(),dest) )

				else:
					for d in dest:
						if self._send:
							self._send = isend( data, d )
							self.display( OUTPUT_DEBUG, 'sent task %s to node %d' % (task.id(),d) )
						else:
							self._send.wait()				
Example #4
    def parallelRunTest(self):
        if mpi.procs < 2:
            self.fail("This test needs at least 2 processes to run")

        mySmallData = "Hello from " + str(mpi.rank)
        myBigData = [0, "Foo", "goo"]
        for x in range(90):
            myBigData = [x + 1, x * x, 12.4, ("c", "a"), myBigData]

        to = (mpi.rank + 1) % mpi.procs
        frm = (mpi.rank - 1 + mpi.procs) % mpi.procs

        #First we send asynchronously and receive synchronously
        sendHandle1 = mpi.isend(myBigData, to, 0)
        sendHandle2 = mpi.isend(mySmallData, to, 1)
        msgReceived1, status = mpi.recv(frm, 0)
        msgReceived2, status = mpi.recv(frm, 1)

        #Check for failures
        if msgReceived1 != myBigData:
            self.fail("Complex NonBlock failed on first test with big data")
        if msgReceived2 != "Hello from " + str(frm):
            self.fail("Complex NonBlock failed on first test with small data")

        #Next we will do a blocking send and a non-blocking receive
        if mpi.rank == 0:

            #Change the data we're sending just for the heck of it
            myBigData[0] = ("changed")
            myBigData[1] = "Also changed"
            mySmallData = ("Hi", mpi.rank)

            #Perform 2 blocking sends to send the data
            mpi.send(myBigData, 1, 1)
            mpi.send(mySmallData, 1, 2)

        elif mpi.rank == 1:

            #Get recv handles for the two messages
            recvHandle1 = mpi.irecv(0, 1)
            recvHandle2 = mpi.irecv(0, 2)
            finished = [0, 0]

            #Loop until both messages come in
            while finished[0] == 0 or finished[1] == 0:
                if finished[0] == 0:
                    finished[0] = recvHandle1.test()
                if finished[1] == 0:
                    finished[1] = recvHandle2.test()

            #We got the messages, now check them
            if recvHandle1.message != myBigData:
                self.fail("Complex non-block failed on 2nd test with big data")
            if recvHandle2.message != ("Hi", 0):
                self.fail(
                    "Complex non-block failed on 2nd test with small data")

        return
Example #5
    def parallelRunTest(self):
        if mpi.procs < 2:
            self.fail("This test needs at least 2 processes to run")

        mySmallData = "Hello from " + str(mpi.rank)
        myBigData = [0,"Foo", "goo"]
        for x in range(90):
          myBigData = [x+1,x*x, 12.4, ("c", "a"), myBigData]

        to = (mpi.rank + 1)%mpi.procs
        frm = (mpi.rank-1+mpi.procs)%mpi.procs

        #First we send asynchronously and receive synchronously
        sendHandle1 = mpi.isend( myBigData,   to, 0)
        sendHandle2 = mpi.isend( mySmallData, to, 1)
        msgReceived1, status = mpi.recv(frm,0)
        msgReceived2, status = mpi.recv(frm,1)

        #Check for failures
        if msgReceived1 != myBigData:
            self.fail("Complex NonBlock failed on first test with big data")
        if msgReceived2 != "Hello from " + str(frm):
            self.fail("Complex NonBlock failed on first test with small data")

        #Next we will do a blocking send and a non-blocking receive
        if mpi.rank==0:

          #Change the data we're sending just for the heck of it
          myBigData[0] = ("changed")
          myBigData[1] = "Also changed"
          mySmallData = ("Hi", mpi.rank)

          #Perform 2 blocking sends to send the data
          mpi.send( myBigData, 1, 1 )
          mpi.send( mySmallData, 1, 2 )

        elif mpi.rank==1:

          #Get recv handles for the two messages
          recvHandle1 = mpi.irecv( 0,1)
          recvHandle2 = mpi.irecv( 0,2)
          finished = [0,0]

          #Loop until both messages come in
          while finished[0] == 0 or finished[1] == 0:
            if finished[0] == 0:
              finished[0] = recvHandle1.test()
            if finished[1] == 0:
              finished[1] = recvHandle2.test()

          #We got the messages, now check them
          if recvHandle1.message != myBigData:
            self.fail( "Complex non-block failed on 2nd test with big data")
          if recvHandle2.message != ("Hi", 0):
            self.fail( "Complex non-block failed on 2nd test with small data")

        return
Example #6
 def testNonBlockSend(self):
     for sendProc in xrange(mpi.procs):
         if mpi.rank == sendProc:
             for j in xrange(mpi.procs):
                 if j != mpi.rank:
                     obj = 10 * mpi.rank + 1
                     mpi.isend(obj, dest=j, tag=100)
         else:
             obj = mpi.recv(sendProc, 100)[0]
             assert obj == 10 * sendProc + 1
Example #7
    def parallelRunTest(self):
        if mpi.procs < 2:
            self.fail('This test needs at least 2 processes')

        if mpi.rank == 0:
            req1 = mpi.isend("hello", 1, 0)
            req2 = mpi.isend("world", 1, 1)

            req3 = mpi.isend(",", 1, 2)
            req4 = mpi.isend("this", 1, 3)
            req5 = mpi.isend("is", 1, 4)
            req6 = mpi.isend("your", 1, 5)
            req7 = mpi.isend("new", 1, 6)
            req8 = mpi.isend("master", 1, 7)

            try:
                mpi.waitall((req1, req2, req3, req4, req5, req6, req7, req8))
            except:
                self.fail("waitall()")

        elif mpi.rank == 1:
            req1 = mpi.irecv(0, 0)
            req2 = mpi.irecv(0, 1)
            req3 = mpi.irecv(0, 2)
            req4 = mpi.irecv(0, 3)
            req5 = mpi.irecv(0, 4)
            req6 = mpi.irecv(0, 5)
            req7 = mpi.irecv(0, 6)
            req8 = mpi.irecv(0, 7)
            try:
                mpi.waitall((req1, req2, req3, req4, req5, req6, req7, req8))
            except:
                self.fail("waitall()")

        return
Example #8
    def parallelRunTest(self):
        if mpi.procs < 2:
            self.fail('This test needs at least 2 processes')

        if mpi.rank == 0:
            req1 = mpi.isend("hello",1,0)
            req2 = mpi.isend("world",1,1)

            req3 = mpi.isend(",",1,2)
            req4 = mpi.isend("this",1,3)
            req5 = mpi.isend("is",1,4)
            req6 = mpi.isend("your",1,5)
            req7 = mpi.isend("new",1,6)
            req8 = mpi.isend("master",1,7)

            try:
                mpi.waitall((req1, req2, req3, req4, req5, req6, req7, req8))
            except:
                self.fail("waitall()")

        elif mpi.rank == 1:
            req1 = mpi.irecv(0,0)
            req2 = mpi.irecv(0,1)
            req3 = mpi.irecv(0,2)
            req4 = mpi.irecv(0,3)
            req5 = mpi.irecv(0,4)
            req6 = mpi.irecv(0,5)
            req7 = mpi.irecv(0,6)
            req8 = mpi.irecv(0,7)
            try:
                mpi.waitall((req1,req2,req3,req4,req5,req6,req7,req8))
            except:
                self.fail("waitall()")

        return
Example #9
def main():
    rank,size = mpi.init()
    
    serial_dict = pickle.dumps(somedict)

    mpi.isend( serial_dict, len(serial_dict), mpi.MPI_CHAR, 0, 0, mpi.MPI_COMM_WORLD )

    new_serial_dict = mpi.recv( len( serial_dict), mpi.MPI_CHAR, 0, 0, mpi.MPI_COMM_WORLD )
    print new_serial_dict

    mpi.finalize()

    newdict = pickle.loads( new_serial_dict )
    print newdict
    return
Example #10
    def parallelRunTest(self):
        if mpi.procs < 2:
            self.fail('This test needs at least 2 processes')

        if mpi.rank == 0:
            req = mpi.isend("hi there, bubba,", 1, 0)
            print 'send'
            req1 = mpi.isend("this is", 1, 1)
            req2 = mpi.isend("opportunity", 1, 2)
            req3 = mpi.isend("knocking....", 1, 3)
            try:
                mpi.waitany((req, req1, req2))
            except:
                self.fail("mpi.waitany() failed")
        elif mpi.rank == 1:
            req = []
            print 'recv0'
            req.append(mpi.irecv(0, 0))
            print 'recv1'
            try:
                req.append(mpi.irecv(0, 1))
            except:
                print 'bad?'
                print sys.exc_info()[1]
                raise
            print 'recv2'
            req.append(mpi.irecv(0, 2))
            print 'recv3'
            req.append(mpi.irecv(0, 3))
            print 'recv4'

            i = 0
            print 'while'
            while i < 4:
                print 'i is', i
                result = -1
                try:
                    print 'waitany?'
                    result = mpi.waitany(req)
                    print 'got', result
                except:
                    self.fail("mpi.waitany() failed")
                req.remove(req[result])
                i = i + 1

        return
Example #11
    def parallelRunTest(self):
        if mpi.procs < 2:
            self.fail('This test needs at least 2 processes')

        if mpi.rank == 0:
            req  = mpi.isend("hi there, bubba,",1,0)
            print 'send'
            req1 = mpi.isend("this is",1,1)
            req2 = mpi.isend("opportunity",1,2)
            req3 = mpi.isend("knocking....",1,3)
            try:
                mpi.waitany((req,req1,req2))
            except:
                self.fail("mpi.waitany() failed")
        elif mpi.rank == 1:
            req = []
            print 'recv0'
            req.append( mpi.irecv(0,0))
            print 'recv1'
            try:
                req.append( mpi.irecv(0,1))
            except:
                print 'bad?'
                print sys.exc_info()[1]
                raise
            print 'recv2'
            req.append( mpi.irecv(0,2))
            print 'recv3'
            req.append( mpi.irecv(0,3))
            print 'recv4'

            i = 0
            print 'while'
            while i < 4:
                print 'i is',i
                result = -1
                try:
                    print 'waitany?'
                    result = mpi.waitany(req)
                    print 'got',result
                except:
                    self.fail("mpi.waitany() failed")
                req.remove(req[result])
                i = i + 1

        return
Example #12
def wake_nodes(tag):
    """

    Informs nodes asleep via sleep_nodes(int) to wake up.  Tag argument must be the same
    as the tag used to sleep them.  Tag must be an integer.

    Tags used in ConsensusCluster:
    1 - Start
    2 - Wait for PCA results
    3 - Exit


    """

    if MPI_ENABLED:
        if mpi.rank == 0:
            for r in mpi.WORLD:
                mpi.isend(1, r, tag)
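
Only the waking side is shown above; a minimal counterpart sketch follows, assuming sleep_nodes simply blocks on a pyMPI receive from rank 0 with the matching tag. This body is an illustration, not the project's actual implementation.

def sleep_nodes(tag):
    # Hypothetical counterpart sketch (not the project's actual code): block
    # until rank 0 calls wake_nodes(tag).  Relies on the same module-level
    # MPI_ENABLED flag and pyMPI 'mpi' module used above, where
    # mpi.recv(source, tag) returns a (message, status) pair.
    if MPI_ENABLED and mpi.rank != 0:
        msg, status = mpi.recv(0, tag)   # wake_nodes() sends the value 1
        return msg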
Example #13
    def parallelRunTest(self):
        hello = "Hello World!!"

        to = mpi.rank + 1
        if mpi.rank == mpi.procs -1: to = 0
        frm = mpi.rank - 1
        if mpi.rank == 0: frm = mpi.procs -1

        mpi.isend(hello,to)
        handle = mpi.irecv(frm)
        handle.wait()
        if handle.message != hello:
            self.fail("Received unexpected reply from:%d "%(frm))
        mpi.isend(hello,to)
        handle = mpi.irecv(frm)
        if handle.message != hello:
            self.fail("Received unexpected reply from:%d "%(frm))
        mpi.isend(hello,to)
        handle = mpi.irecv(frm)
        while handle.test() == 0:
            pass
        if handle.message != hello:
            self.fail("Received unexpected reply from:%d "%(frm))

        #Try to isend/irecv a long message to fully test the new msg model
        longMsg = []
        for i in range(64):
            longMsg = ["foo", i, longMsg]

        mpi.isend( longMsg, to )
        handle = mpi.irecv(frm)
        handle.wait()
        if handle.message != longMsg:
            self.fail( "irecv failed on long message.")
        longMsg.reverse()   # reverse in place; list.reverse() returns None
        mpi.isend( longMsg, to )
        handle = mpi.irecv(frm)
        while handle.test() == 0:
            pass
        if handle.message != longMsg:
            self.fail( "irecv using wait failed on long message" )

        return
Example #14
    def parallelRunTest(self):
        hello = "Hello World!!"

        to = mpi.rank + 1
        if mpi.rank == mpi.procs - 1: to = 0
        frm = mpi.rank - 1
        if mpi.rank == 0: frm = mpi.procs - 1

        mpi.isend(hello, to)
        handle = mpi.irecv(frm)
        handle.wait()
        if handle.message != hello:
            self.fail("Received unexpected reply from:%d " % (frm))
        mpi.isend(hello, to)
        handle = mpi.irecv(frm)
        if handle.message != hello:
            self.fail("Received unexpected reply from:%d " % (frm))
        mpi.isend(hello, to)
        handle = mpi.irecv(frm)
        while handle.test() == 0:
            pass
        if handle.message != hello:
            self.fail("Received unexpected reply from:%d " % (frm))

        #Try to isend/irecv a long message to fully test the new msg model
        longMsg = []
        for i in range(64):
            longMsg = ["foo", i, longMsg]

        mpi.isend(longMsg, to)
        handle = mpi.irecv(frm)
        handle.wait()
        if handle.message != longMsg:
            self.fail("irecv failed on long message.")
        longMsg.reverse()   # reverse in place; list.reverse() returns None
        mpi.isend(longMsg, to)
        handle = mpi.irecv(frm)
        while handle.test() == 0:
            pass
        if handle.message != longMsg:
            self.fail("irecv using wait failed on long message")

        return
Example #15
def bibandwidth(cnt, bytes):
    if mpi.rank == 0:
        TIMER_START()
        for i in range(cnt):
            r1 = mpi.irecv(slave)
            r0 = mpi.isend(message[:bytes], slave)
            mpi.waitall([r0, r1])
        TIMER_STOP()

        total = TIMER_ELAPSED()
        return (((2.0 * bytes * cnt)) / 1024.0) / (total * 1e-6), "KB/sec"

    elif mpi.rank == slave:
        for i in range(cnt):
            r1 = mpi.irecv(master)
            r0 = mpi.isend(message[:bytes], master)
            mpi.waitall([r0, r1])

        return 0.0, "KB/sec"

    else:
        return 0.0, "KB/sec"
Example #16
def bibandwidth(cnt,bytes):
    if mpi.rank == 0:
        TIMER_START()
        for i in range(cnt):
            r1 = mpi.irecv(slave)
            r0 = mpi.isend(message[:bytes],slave)
            mpi.waitall([r0,r1])
        TIMER_STOP()

        total = TIMER_ELAPSED()
        return (((2.0*bytes*cnt))/1024.0) / (total*1e-6),"KB/sec"
    
    elif mpi.rank == slave:
        for i in range(cnt):
            r1 = mpi.irecv(master)
            r0 = mpi.isend(message[:bytes],master)
            mpi.waitall([r0,r1])

        return 0.0,"KB/sec"

    else:
        return 0.0,"KB/sec"
Example #17
    def testGlobalMeshNodeIDs(self):
        mesh, void = generatePolyhedralMesh([self.nodes],
                                            xmin=xmin,
                                            xmax=xmax,
                                            generateParallelConnectivity=True)
        globalIDs = mesh.globalMeshNodeIDs()

        # Check that all our local IDs are unique.
        uniqueIDs = set()
        for i in globalIDs:
            uniqueIDs.add(i)
        self.failUnless(
            len(uniqueIDs) == len(globalIDs),
            "Global mesh node IDs not unique!  %i != %i" %
            (len(globalIDs), len(uniqueIDs)))

        # Check that the IDs are unique and consistent across domains.
        if mpi.procs > 1:
            neighbors = mesh.neighborDomains
            sharedNodes = mesh.sharedNodes
            assert len(neighbors) == len(sharedNodes)

            # Translate to the shared nodes to global IDs.
            sharedGlobalIDs = [[globalIDs[i] for i in localIDs]
                               for localIDs in sharedNodes]
            assert len(sharedGlobalIDs) == len(neighbors)

            # Do non-blocking sends to all our neighbors.
            sendRequests = []
            for neighbor, ids in zip(neighbors, sharedGlobalIDs):
                sendRequests.append(mpi.isend(ids, dest=neighbor))
            assert len(sendRequests) == len(neighbors)

            # Recv the IDs from our neighbors and do the testing.
            for neighbor, localIDs in zip(neighbors, sharedGlobalIDs):
                otherIDs = mpi.recv(source=neighbor)[0]
                self.failUnless(
                    otherIDs == list(localIDs),
                    "Global IDs don't match between domains %i <-> %i\n%s\n%s"
                    % (mpi.rank, neighbor, list(localIDs), otherIDs))

            # Wait until all our sends have completed.
            for req in sendRequests:
                req.Wait()
Example #18
"""
Shape is not retained between sender and receiver.

This really should be fixed.
"""
import Numeric
import mpi
A = Numeric.ones( (3,4), 'i' )
rank,size = mpi.init()
if rank == 0:
    mpi.isend( A, (3*4), mpi.MPI_INT, 0, 0, mpi.MPI_COMM_WORLD )
    #[ valid send request#: -1409286144, count: 12, datatype: 1275069445, destination: 0, tag: 0, comm: [communicator#:1140850688,size:1] ]
    B = mpi.recv( (3*4), mpi.MPI_INT, 0, 0, mpi.MPI_COMM_WORLD )
    # array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],'i')
else:
    pass

mpi.finalize()
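
As the note above says, the receiver only gets back a flat length-12 buffer. A possible workaround, sketched below under the assumption that the old Numeric package's reshape function is available, is to restore the shape by hand on the receiving side:

B = mpi.recv( (3*4), mpi.MPI_INT, 0, 0, mpi.MPI_COMM_WORLD )
B = Numeric.reshape( B, (3, 4) )   # manually recover the original (3,4) shape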
Example #19
def testSharedNodes(mesh):
    assert len(mesh.neighborDomains) == len(mesh.sharedNodes)

    # First check that everyone agrees about who is talking to who.
    myNeighborDomains = list(mesh.neighborDomains)
    for sendProc in xrange(mpi.procs):
        otherProcs = mpi.bcast(myNeighborDomains, root=sendProc)
        if mpi.rank != sendProc:
            assert (mpi.rank in otherProcs) == (sendProc in mesh.neighborDomains)

    # Build our set of global shared node IDs.
    globalIDs = mesh.globalMeshNodeIDs()
    globalSharedNodes = [[globalIDs[i] for i in localNodes] for localNodes in mesh.sharedNodes]
    assert len(globalSharedNodes) == len(mesh.neighborDomains)

    # Check that the shared nodes are consistent.
    sendRequests = []
    for (otherProc, ids) in zip(mesh.neighborDomains, globalSharedNodes):
        sendRequests.append(mpi.isend(ids, dest=otherProc))
    for (otherProc, ids) in zip(mesh.neighborDomains, globalSharedNodes):
        otherIDs = mpi.recv(source=otherProc)[0]
        assert ids == otherIDs

    # Check that all shared nodes have been found.
    localSharedNodes = [[i for i in localNodes] for localNodes in mesh.sharedNodes]
    positions = vector_of_Vector()
    for i in xrange(mesh.numNodes):
        positions.append(mesh.node(i).position())
    xmin, xmax = Vector(), Vector()
    boundingBox(positions, xmin, xmax)
    xmin = Vector(mpi.allreduce(xmin.x, mpi.MIN), mpi.allreduce(xmin.y, mpi.MIN))
    xmax = Vector(mpi.allreduce(xmax.x, mpi.MAX), mpi.allreduce(xmax.y, mpi.MAX))
    boxInv = Vector(1.0/(xmax.x - xmin.x),
                    1.0/(xmax.y - xmin.y))
    nodeHashes = [hashPosition(mesh.node(i).position(), xmin, xmax, boxInv) for i in xrange(mesh.numNodes)]
    nodeHashes2ID = {}
    for i in xrange(len(nodeHashes)):
        nodeHashes2ID[nodeHashes[i]] = i
    for sendProc in xrange(mpi.procs):
        otherNodeHashes = mpi.bcast(nodeHashes, root=sendProc)
        if sendProc != mpi.rank:
            for hashi in otherNodeHashes:
                if hashi in nodeHashes:
                    assert sendProc in myNeighborDomains
                    idomain = myNeighborDomains.index(sendProc)
                    i = nodeHashes2ID[hashi]
                    assert i in localSharedNodes[idomain]

    # Same for faces.
    localSharedFaces = [[i for i in localFaces] for localFaces in mesh.sharedFaces]
    positions = vector_of_Vector()
    for i in xrange(mesh.numFaces):
        positions.append(mesh.face(i).position())
    faceHashes = [hashPosition(mesh.face(i).position(), xmin, xmax, boxInv) for i in xrange(mesh.numFaces)]
    faceHashes2ID = {}
    for i in xrange(len(faceHashes)):
        faceHashes2ID[faceHashes[i]] = i
    for sendProc in xrange(mpi.procs):
        otherFaceHashes = mpi.bcast(faceHashes, root=sendProc)
        if sendProc != mpi.rank:
            for hashi in otherFaceHashes:
                if hashi in faceHashes:
                    assert sendProc in myNeighborDomains
                    idomain = myNeighborDomains.index(sendProc)
                    i = faceHashes2ID[hashi]
                    assert i in localSharedFaces[idomain]

    return True
Example #20
import mpi
import sys, Numeric

print "Creating Data Array..."
data = Numeric.array( [1,2,3,4], Numeric.Int32 )

print "Initializing MPI: (%s,%s)"%(len(sys.argv),sys.argv)
rank, size = mpi.init( len(sys.argv), sys.argv )
print "(%s,%s): initialized..." %(rank,size)

if( rank == 0 ):
    print "(%s,%s): sending: %s" %( rank, size, data )
    request = mpi.isend( data, 4, mpi.MPI_INT, 1, 0, mpi.MPI_COMM_WORLD )
    print "(%s,%s): request#: %s" %( rank, size, request )
    data2 = Numeric.array([ -1, -1, -1, -1 ], Numeric.Int32 )
elif(rank == 1):
    print "(%s,%s): receiving..." %(rank,size)
    data2 = mpi.recv( 4, mpi.MPI_INT, 0, 0, mpi.MPI_COMM_WORLD )
else:
    data2 = None   # ranks other than 0 and 1 take no part in the exchange

print "(%s,%s): received: %s" % ( rank, size, data2 )

mpi.finalize()
Example #21
    def getobsp(self, snum, stime, tetrad, zerotime=0.0, debug=0):
        """
        
        LISApar.getobsp(length,deltat,tetrad,zerotime=0.0)
        is the parallel-computing equivalent of getobs and
        getobsc, and it is used to compute the TDI responses
        of large sets of Wave objects. It must be called
        from an instance of LISApar, with the following
        parameters:
        
        - length is the total length of the TDI-observable
          arrays that will be returned;
        
        - deltat is the cadence of the time series;
        
        - zerotime is the initial time for the time series;
        
        - tetrad is a tuple (lisa,wavefactory,parameterlist,
          observables) of four elements:

          * lisa is an instance of a LISA class, which
            should be the same for every CPU taking part in
            the computation;

          * wavefactory is a Python function taking any
            number of parameters, and returning an instance of
            a synthLISA Wave object; the function must be
            defined for every CPU taking part in the
            computation;

          * parameterlist is a list of source parameters (or
            of parameter n-tuples, if wavefactory takes more
            than one parameter), which will be distributed
            among the CPUs, and passed to the Wave Factory to
            construct synthLISA Wave objects; the parameter
            sets need to be defined only on the root CPU, but
            it won't hurt to define them everywhere. They can
            contain any Python types (they are pickled before
            distribution), but not synthLISA objects;

          * observables is a list or tuple of TDI
            observables, which must be given as unbound
            methods, such as synthlisa.TDI.X1 or
            synthlisa.TDI.time.
        
        The distribution of the parameter sets among the
        CPUs tries to balance the load of the computation.
        If the number of sources is not divisible by the
        number of CPUs, it will assign a smaller number of
        sources to the root CPU, and the same number of
        sources to all other CPUs."""

        # accept four levels (0-4) of debugging info

        inittime = time.time()

        myrank = self.rank
        size = self.size

        try:
            (lisa, srcfunc, parameters, obs) = tetrad
        except:
            if myrank == 0:
                print "LISApar.getobsp(...): third parameter must be a 4-tuple containing a",
                print "LISA instance, a Wave factory, an array of parameters for the factory,",
                print "and a set of TDI observables given as class methods (such as synthlisa.TDI.X)."
            raise IndexError

        if type(parameters) not in (list, tuple, numpy.ndarray):
            if myrank == 0:
                print "LISApar.getobsp(...): needs a list of parameters to feed to the factory!"
            raise IndexError

        if size == 1:
            if myrank == 0:
                print "LISApar.getobsp(...): must be run with more than one cpu!"
            raise NotImplementedError

        if size > len(parameters):
            if myrank == 0:
                print "LISApar.getobsp(...): needs to run with more sources than cpus!"
            raise IndexError

        # root may get zero processors

        blocksize, remain = divmod(len(parameters), size)

        if remain > 0:
            blockadd, remain = divmod(remain, size - 1)
            blocksize = blocksize + blockadd

        if myrank == 0 and debug > 2:
            print "Standard block: ", blocksize,
            print "; root block: ", len(parameters) - blocksize * (size - 1)

        if myrank == 0:
            if debug > 3:
                print "Preparing for parallel execution..."

            for cpu in range(1, size):
                blockstart, blockend = (cpu - 1) * blocksize, cpu * blocksize

                serial_pars = pickle.dumps(parameters[blockstart:blockend])
                len_pars = len(serial_pars)

                mpi.isend(len_pars, 1, mpi.MPI_INT, cpu, 0, mpi.MPI_COMM_WORLD)
                mpi.isend(serial_pars, len_pars, mpi.MPI_CHAR, cpu, 1,
                          mpi.MPI_COMM_WORLD)

            mypars = parameters[blockend:]
        else:
            len_pars = mpi.recv(1, mpi.MPI_INT, 0, 0, mpi.MPI_COMM_WORLD)
            serial_pars = mpi.recv(len_pars, mpi.MPI_CHAR, 0, 1,
                                   mpi.MPI_COMM_WORLD)

            mypars = pickle.loads(serial_pars)

        if debug > 2:
            print "CPU ", myrank, " received ", len(
                mypars), " source parameters ", mypars

        try:
            if type(mypars[0]) in (list, tuple, numpy.ndarray):
                sources = map(lambda x: srcfunc(*x), mypars)
            else:
                sources = map(srcfunc, mypars)

            if len(filter(lambda x: not isinstance(x, synthlisa.Wave),
                          sources)) > 0:
                raise TypeError
        except:
            if myrank == 0:
                print "LISApar.getobsp(...): srcfunc must return a synthlisa.Wave when applied",
                print "to each element of the parameter list"
            raise TypeError

        if debug > 3:
            print "CPU ", myrank, " created sources ", sources

        wavearray = synthlisa.WaveArray(sources)

        if not isinstance(lisa, synthlisa.LISA):
            if myrank == 0:
                print "LISApar.getobsp(...): lisa must be an instance of synthlisa.LISA."
            raise TypeError

        tdisignal = synthlisa.TDIsignal(lisa, wavearray)

        # is it possible to permanently bind an unbound method?
        # yes, by doing bound_obs = obs.__get__(tdisignal)
        # but it's not clear this will yield a faster call

        if type(obs) == list or type(obs) == tuple:
            multobs = len(obs)

            array = numpy.zeros((snum, multobs), dtype='d')
            for i in numpy.arange(0, snum):
                for j in range(0, multobs):
                    array[i, j] = obs[j](tdisignal, zerotime + i * stime)
        else:
            multobs = 1

            array = numpy.zeros(snum, dtype='d')
            for i in numpy.arange(0, snum):
                array[i] = obs(tdisignal, zerotime + i * stime)

        sumresults = mpi.reduce(array, snum * multobs, mpi.MPI_DOUBLE,
                                mpi.MPI_SUM, 0, mpi.MPI_COMM_WORLD)

        if myrank == 0 and debug > 0:
            currenttime = time.time() - inittime

            vel = snum / currenttime
            print "Completed in %d s [%d (multi)samples/s]." % (
                int(currenttime), int(vel))

        if myrank == 0:
            if multobs == 1:
                return sumresults
            else:
                return sumresults.reshape(snum, multobs)
        else:
            return None
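
The block-partitioning arithmetic in getobsp above is easy to check by hand. A standalone sketch with illustrative numbers (11 parameter sets over 3 CPUs, not taken from the source) shows how the root CPU ends up with the smaller share, as the docstring promises:

nparams, size = 11, 3                            # illustrative numbers only
blocksize, remain = divmod(nparams, size)        # -> 3, 2
if remain > 0:
    blockadd, remain = divmod(remain, size - 1)  # -> 1, 0
    blocksize = blocksize + blockadd             # -> 4
# CPUs 1 and 2 each receive blocksize = 4 parameter sets (8 in total);
# the root CPU keeps the remaining 11 - 8 = 3, i.e. the smaller share.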
Example #22
    def shuffleIntoBlocks(ndim, vals, xmin, xmax, nglobal):

        if ndim == 2:
            import Spheral2d as sph
        else:
            import Spheral3d as sph

        dx = [(xmax[j] - xmin[j]) / nglobal[j] for j in xrange(ndim)]
        ntot = reduce(mul, nglobal)

        # Which dimension should we divide up into?
        jsplit = min(ndim - 1, max(enumerate(nglobal), key=lambda x: x[1])[0])

        # Find the offset to the global lattice numbering on this domain.
        # This is based on knowing the native lattice sampling method stripes the original data
        # according to (i + j*nx + k*nx*ny), and simply divides that 1D serialization sequentially
        # between processors.
        offset = 0
        for sendproc in xrange(mpi.procs):
            n = mpi.bcast(len(vals), root=sendproc)
            if sendproc < mpi.rank:
                offset += n
        if mpi.rank == mpi.procs - 1:
            assert offset + len(vals) == ntot

        # A function to turn an index into the integer lattice coordinates
        def latticeCoords(iglobal):
            return (iglobal % nglobal[0],
                    (iglobal % (nglobal[0] * nglobal[1])) // nglobal[0],
                    iglobal // (nglobal[0] * nglobal[1]))

        # A function to tell us which block to assign a global index to
        slabsperblock = max(1, nglobal[jsplit] // mpi.procs)
        remainder = max(0, nglobal[jsplit] - mpi.procs * slabsperblock)
        islabdomain = [
            min(nglobal[jsplit], iproc * slabsperblock + min(iproc, remainder))
            for iproc in xrange(mpi.procs + 1)
        ]

        #sys.stderr.write("Domain splitting: %s %i %s\n" % (nglobal, jsplit, islabdomain))
        #sys.stderr.write("islabdomain : %s\n" % str(islabdomain))
        def targetBlock(index):
            icoords = latticeCoords(offset + index)
            return bisect.bisect_right(islabdomain, icoords[jsplit]) - 1

        # Build a list of (global_index, value, target_proc) for each of the lattice values.
        id_val_procs = [(offset + i, val, targetBlock(i))
                        for i, val in enumerate(vals)]
        #sys.stderr.write("id_val_procs : %s\n" % str(id_val_procs))
        #sys.stderr.write("map index -> slab : %s\n" % str([(offset + i, latticeCoords(offset + i), targetBlock(i)) for i in xrange(len(vals))]))
        #sys.stderr.write("id_val_procs : %s\n" % str([(i, tb, latticeCoords(i)) for (i, val, tb) in id_val_procs if i % 100 < 10 and tb != 0]))

        # Send our values to other domains.
        sendreqs, sendvals = [], []
        for iproc in xrange(mpi.procs):
            if iproc != mpi.rank:
                sendvals.append([(i, val) for (i, val, proc) in id_val_procs
                                 if proc == iproc])
                sendreqs.append(mpi.isend(sendvals[-1], dest=iproc, tag=100))

        # Now we can build the dang result.
        xminblock, xmaxblock = sph.Vector(*xmin), sph.Vector(*xmax)
        xminblock[jsplit] = xmin[jsplit] + islabdomain[mpi.rank] * dx[jsplit]
        xmaxblock[jsplit] = xmin[jsplit] + islabdomain[mpi.rank + 1] * dx[jsplit]
        nblock = list(nglobal)
        nblock[jsplit] = islabdomain[mpi.rank + 1] - islabdomain[mpi.rank]
        #sys.stderr.write("nblock : %s\n" % str(nblock))
        newvals = []
        for iproc in xrange(mpi.procs):
            if iproc == mpi.rank:
                recvvals = [(i, val) for (i, val, proc) in id_val_procs
                            if proc == mpi.rank]
            else:
                recvvals = mpi.recv(source=iproc, tag=100)[0]
            newvals += recvvals
        newvals.sort()
        valsblock = sph.vector_of_double()
        for i, val in newvals:
            valsblock.append(val)
        #sys.stderr.write("len(valsblock) = %s\n" % len(valsblock))
        assert len(valsblock) == reduce(mul, nblock)

        # Wait 'til all communication is done.
        for req in sendreqs:
            req.wait()

        # That should be it.
        return valsblock, xminblock, xmaxblock, nblock, jsplit
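
The comment in shuffleIntoBlocks notes that the lattice values are striped as i + j*nx + k*nx*ny. A quick standalone check of the inverse mapping used by latticeCoords, with assumed lattice dimensions:

nx, ny, nz = 4, 3, 2                   # assumed nglobal = (4, 3, 2), purely illustrative
i, j, k = 1, 1, 1
iglobal = i + j*nx + k*nx*ny           # -> 17
assert (iglobal % nx,
        (iglobal % (nx*ny)) // nx,
        iglobal // (nx*ny)) == (i, j, k)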
Example #23
import sys
import Numeric
import mpi

try:
    rank,size = mpi.init( len(sys.argv), sys.argv )

    request,buffer = mpi.irecv( 10, mpi.MPI_INT, 0, 0, mpi.MPI_COMM_WORLD )

    print "Request #: %s"%(request)
    print "buffer: %s"%(buffer)

    A = Numeric.array([1,2,3,4,5,6,7,8,9,10],Numeric.Int32)
    send_request = mpi.isend( A, 10, mpi.MPI_INT, 0, 0, mpi.MPI_COMM_WORLD )
    print "Sending Request: %s"%(send_request)
    status = mpi.wait( request )
    status = mpi.wait( send_request )
    print "buffer(after send): %s"%(buffer)
    print "status:",status
    mpi.finalize()
except:
    mpi.finalize()
    raise