Example 1
    def parallelRunTest(self):
        if mpi.procs < 2:
            self.fail("This test needs at least 2 processes to run")

        mySmallData = "Hello from " + str(mpi.rank)
        myBigData = [0, "Foo", "goo"]
        for x in range(90):
            myBigData = [x + 1, x * x, 12.4, ("c", "a"), myBigData]

        to = (mpi.rank + 1) % mpi.procs
        frm = (mpi.rank - 1 + mpi.procs) % mpi.procs

        #First we send asynchronously and receive synchronously
        sendHandle1 = mpi.isend(myBigData, to, 0)
        sendHandle2 = mpi.isend(mySmallData, to, 1)
        msgReceived1, status = mpi.recv(frm, 0)
        msgReceived2, status = mpi.recv(frm, 1)

        #Check for failures
        if msgReceived1 != myBigData:
            self.fail("Complex NonBlock failed on first test with big data")
        if msgReceived2 != "Hello from " + str(frm):
            self.fail("Complex NonBlock failed on first test with small data")

        #Next we will do a blocking send and a non-blocking receive.
        #Mutate the data on every rank so the receiver can check the
        #received values against the same changes the sender made.
        myBigData[0] = "changed"
        myBigData[1] = "Also changed"
        mySmallData = ("Hi", mpi.rank)

        if mpi.rank == 0:

            #Perform 2 blocking sends to send the data
            mpi.send(myBigData, 1, 1)
            mpi.send(mySmallData, 1, 2)

        elif mpi.rank == 1:

            #Get recv handles for the two messages
            recvHandle1 = mpi.irecv(0, 1)
            recvHandle2 = mpi.irecv(0, 2)
            finished = [0, 0]

            #Loop until both messages come in
            while finished[0] == 0 or finished[1] == 0:
                if finished[0] == 0:
                    finished[0] = recvHandle1.test()
                if finished[1] == 0:
                    finished[1] = recvHandle2.test()

            #We got the messages, now check them
            if recvHandle1.message != myBigData:
                self.fail("Complex non-block failed on 2nd test with big data")
            if recvHandle2.message != ("Hi", 0):
                self.fail(
                    "Complex non-block failed on 2nd test with small data")

        return
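The pattern above condenses to a small core. Here is a minimal sketch of the same pyMPI handle API these examples exercise (mpi.isend/mpi.irecv return request handles with test(), wait(), and message); it assumes pyMPI is running the script under at least one process:

import mpi

to = (mpi.rank + 1) % mpi.procs
frm = (mpi.rank - 1 + mpi.procs) % mpi.procs

send = mpi.isend("ping from %d" % mpi.rank, to, 0)  #handle returned immediately
recv = mpi.irecv(frm, 0)                            #post the matching receive
recv.wait()                                         #block until the message lands
assert recv.message == "ping from %d" % frm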
Example 2
    def parallelRunTest(self):
        if mpi.procs < 2:
            self.fail('This test needs at least 2 processes')

        if mpi.rank == 0:
            req1 = mpi.isend("hello", 1, 0)
            req2 = mpi.isend("world", 1, 1)

            req3 = mpi.isend(",", 1, 2)
            req4 = mpi.isend("this", 1, 3)
            req5 = mpi.isend("is", 1, 4)
            req6 = mpi.isend("your", 1, 5)
            req7 = mpi.isend("new", 1, 6)
            req8 = mpi.isend("master", 1, 7)

            try:
                #waitall expects a sequence of requests; wait on all eight sends
                mpi.waitall((req1, req2, req3, req4, req5, req6, req7, req8))
            except:
                self.fail("waitall()")

        elif mpi.rank == 1:
            req1 = mpi.irecv(0, 0)
            req2 = mpi.irecv(0, 1)
            req3 = mpi.irecv(0, 2)
            req4 = mpi.irecv(0, 3)
            req5 = mpi.irecv(0, 4)
            req6 = mpi.irecv(0, 5)
            req7 = mpi.irecv(0, 6)
            req8 = mpi.irecv(0, 7)
            try:
                mpi.waitall((req1, req2, req3, req4, req5, req6, req7, req8))
            except:
                self.fail("waitall()")

        return
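The eight numbered requests can also be built in a loop. A compact sketch under the same assumed pyMPI API:

import mpi

words = ["hello", "world", ",", "this", "is", "your", "new", "master"]
if mpi.rank == 0:
    #one nonblocking send per word, tagged by its position
    reqs = [mpi.isend(w, 1, tag) for tag, w in enumerate(words)]
    mpi.waitall(reqs)
elif mpi.rank == 1:
    #matching nonblocking receives; block until all eight arrive
    reqs = [mpi.irecv(0, tag) for tag in range(len(words))]
    mpi.waitall(reqs)
    received = [r.message for r in reqs]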
Example 3
    def parallelRunTest(self):

        #Every process sends six messages to itself
        myMsgs = ["I", "talk", "to", "myself", "next message is BIG", ""]

        #The last message is BIG to test the new message model
        for i in range(512):
            myMsgs[5] += str(i)

        #Do all the asynchronous sends: each process sends to ITSELF
        for x in range(6):
            mpi.isend(myMsgs[x], mpi.rank, x)

        #Get receive handles for all the receives
        recvHandles = [0, 0, 0, 0, 0, 0]
        for x in range(6):
            recvHandles[x] = mpi.irecv(mpi.rank, x)

        #Wait for all receives to complete
        mpi.waitall(recvHandles)

        #Check for correct answers
        for x in range(6):
            if recvHandles[x].message != myMsgs[x]:
                failStr = "Self-Selding non-blocking communication test fail"
                failStr += "\nFailure on process " + str(mpi.rank) + ", test "
                failStr += str(x)
                self.fail(failStr)

        return
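Note that the sends here must be nonblocking: each process is both the sender and the receiver of the same six messages, so a blocking mpi.send to oneself could deadlock as soon as a message (such as the deliberately large sixth one) exceeds whatever internal buffering the implementation provides.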
Example 4
    def parallelRunTest(self):
        if mpi.procs < 2:
            self.fail('This test needs at least 2 processes')

        if mpi.rank == 0:
            req = mpi.isend("hi there, bubba,", 1, 0)
            print 'send'
            req1 = mpi.isend("this is", 1, 1)
            req2 = mpi.isend("opportunity", 1, 2)
            req3 = mpi.isend("knocking....", 1, 3)
            try:
                #waitany takes a sequence and returns the index of a completed request
                mpi.waitany([req, req1, req2, req3])
            except:
                self.fail("mpi.waitany() failed")
        elif mpi.rank == 1:
            req = []
            print 'recv0'
            req.append(mpi.irecv(0, 0))
            print 'recv1'
            try:
                req.append(mpi.irecv(0, 1))
            except:
                print 'bad?'
                print sys.exc_info()[1]
                raise
            print 'recv2'
            req.append(mpi.irecv(0, 2))
            print 'recv3'
            req.append(mpi.irecv(0, 3))
            print 'recv4'

            i = 0
            print 'while'
            while i < 4:
                print 'i is', i
                result = -1
                try:
                    print 'waitany?'
                    result = mpi.waitany(req)
                    print 'got', result
                except:
                    self.fail("mpi.waitany() failed")
                del req[result]  #drop the completed request by its index
                i = i + 1

        return
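As used here, mpi.waitany takes a sequence of requests, blocks until one of them completes, and returns its index; deleting the finished handle from the list keeps the remaining indices valid on the next iteration, so the loop drains all four receives in completion order.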
Example 5
    def parallelRunTest(self):
        hello = "Hello World!!"

        to = mpi.rank + 1
        if mpi.rank == mpi.procs - 1: to = 0
        frm = mpi.rank - 1
        if mpi.rank == 0: frm = mpi.procs - 1

        mpi.isend(hello, to)
        handle = mpi.irecv(frm)
        handle.wait()
        if handle.message != hello:
            self.fail("Received unexpected reply from:%d " % (frm))
        mpi.isend(hello, to)
        handle = mpi.irecv(frm)
        handle.wait()   #the message is only valid once the receive completes
        if handle.message != hello:
            self.fail("Received unexpected reply from:%d " % (frm))
        mpi.isend(hello, to)
        handle = mpi.irecv(frm)
        while handle.test() == 0:
            pass
        if handle.message != hello:
            self.fail("Received unexpected reply from:%d " % (frm))

        #Try to isend/irecv a long message to fully test the new msg model
        longMsg = []
        for i in range(64):
            longMsg = ["foo", i, longMsg]

        mpi.isend(longMsg, to)
        handle = mpi.irecv(frm)
        handle.wait()
        if handle.message != longMsg:
            self.fail("irecv failed on long message.")
        longMsg.reverse()  #reverse() works in place and returns None
        mpi.isend(longMsg, to)
        handle = mpi.irecv(frm)
        while handle.test() == 0:
            pass
        if handle.message != longMsg:
            self.fail("irecv using wait failed on long message")

        return
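This test exercises both completion styles seen in Example 1: handle.wait() blocks until the receive finishes, while handle.test() returns immediately with a zero or nonzero completion flag, and handle.message is only meaningful once the request has completed.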
Example 6
 def irecv(self):
     # Read data from this specific task, nonblocking
     if self.currentReceive is None:
         self.currentReceive = mpi.irecv(self.tid)
     if mpi.testany(self.currentReceive)[0] is not None:
         msg = self.currentReceive.message
         msg = Message(msg, self, self.manager, self.currentReceive.status)
         self.currentReceive = None
         self.log("recv", msg)
         return msg
     else:
         return 0
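This wrapper keeps at most one outstanding receive per remote task: it lazily posts mpi.irecv(self.tid), polls it, and only clears currentReceive once the message has been wrapped and logged, so the next call posts a fresh receive. Returning 0 tells the caller that nothing has arrived yet.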
Esempio n. 12
0
def bibandwidth(cnt,bytes):
    if mpi.rank == 0:
        TIMER_START()
        for i in range(cnt):
            r1 = mpi.irecv(slave)
            r0 = mpi.isend(message[:bytes],slave)
            mpi.waitall([r0,r1])
        TIMER_STOP()

        total = TIMER_ELAPSED()
        return (((2.0*bytes*cnt))/1024.0) / (total*1e-6),"KB/sec"
    
    elif mpi.rank == slave:
        for i in range(cnt):
            r1 = mpi.irecv(master)
            r0 = mpi.isend(message[:bytes],master)
            mpi.waitall([r0,r1])

        return 0.0,"KB/sec"

    else:
        return 0.0,"KB/sec"
Example 7
def bibandwidth(cnt, bytes):
    if mpi.rank == 0:
        TIMER_START()
        for i in range(cnt):
            r1 = mpi.irecv(slave)
            r0 = mpi.isend(message[:bytes], slave)
            mpi.waitall([r0, r1])
        TIMER_STOP()

        total = TIMER_ELAPSED()
        return (((2.0 * bytes * cnt)) / 1024.0) / (total * 1e-6), "KB/sec"

    elif mpi.rank == slave:
        for i in range(cnt):
            r1 = mpi.irecv(master)
            r0 = mpi.isend(message[:bytes], master)
            mpi.waitall([r0, r1])

        return 0.0, "KB/sec"

    else:
        return 0.0, "KB/sec"
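The returned figure follows from the units, assuming the TIMER_* helpers report elapsed time in microseconds: total * 1e-6 converts to seconds, and the numerator counts 2.0 * bytes * cnt bytes because every iteration moves one message in each direction; dividing by 1024 yields KB, giving KB/sec.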
Example 8
	def recv(self):
		try:
			if self._recv:
				self.display( OUTPUT_DEBUG, 'incoming message from node %d' % self._recv.status.source )
				module = __import__( self._recv.message['class'].lower() )
				task = eval( "module.%s(stateobj=%s)" % ( self._recv.message['class'], self._recv.message) )
				dest = self._recv.status.source
				self.display( OUTPUT_DEBUG, 'received task %s from node %d' % (task.id(),dest) )
				task.sender = dest
				task.hostname = self._name
				if isinstance( task, ControlTask ):
					task.node = self
					self.queueFirst( task )

				elif task.state == NEW:
					self.queue( task )
				else:
					self.queueFirst( task )
				self._recv = irecv()
		except:
			displayExcept()
			self.display( OUTPUT_ERROR, 'failed to receive incoming task' )
			self._recv = irecv()
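Here the incoming message is a dict that carries its own class name: the handler imports the module named by the lowercased class, rebuilds the task by evaluating a constructor call with the full message as its state, then requeues it according to its type and state before posting the next irecv. Since eval runs whatever class name arrives, this pattern trusts the sender completely.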
Example 9
 def irecv(self):
     # Receive a message from anywhere, create Message
     # round robin irecvs
     if self.currentReceive is None:
         self.currentReceive = mpi.irecv()
     if self.currentReceive:
         # Find source task
         sourcet = self.currentReceive.status.source
         t = self.tasks[sourcet]
         data = self.currentReceive.message
         msg = Message(data, t, self, self.currentReceive.status)
         self.log("recv", msg)
         self.currentReceive = None
         self.idle.append(t)
         return msg
     else:
         return None
Example 10
def sleep_nodes(tag):
    """Tell the non-rank-0 nodes to sleep until they are woken.

    The tag argument must be an integer and specifies which wake_nodes(int)
    call they should be waiting for.

    Tags used in ConsensusCluster:
    1 - Start
    2 - Wait for PCA results
    3 - Exit
    """

    if MPI_ENABLED:
        if mpi.rank != 0:
            r = mpi.irecv(0, tag)

            while not r:
                time.sleep(1)
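The docstring refers to a matching wake_nodes(int) call on rank 0, whose source is not shown here. A hypothetical counterpart consistent with the tag protocol above might look like this:

def wake_nodes(tag):
    #Hypothetical sketch, not from the source: rank 0 releases the sleepers.
    if MPI_ENABLED and mpi.rank == 0:
        for node in range(1, mpi.procs):
            mpi.isend("wake", node, tag)   #any payload completes the irecv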
Example 11
	def __init__(self, pollfunc):
		self._done = False
		self._rank = rank
		self._msgq = []
		self._pollq = []
		self._root = environ['STARSPATH']

		self._data_access = DataAccess( 'stars.config' )

		# share everyone's name with everyone, so we can scp and other such goodies
		
		self._name = self._data_access._host
			
		self._workers = {}
		
		self._cpu_cores = cpu_count()
		if 0 == system( 'stat cpu_ht &> /dev/null' ):
			self._cpu_cores = int(cpu_count()/2)
			self.display( OUTPUT_DEBUG, 'cpus have HT lowering to %d cpus' % self._cpu_cores )
		elif 0 == system( 'stat cpu_one &> /dev/null' ):
			self._cpu_cores = 1
			self.display( OUTPUT_DEBUG, 'altering detected %d cpus to %d cpus' % ( cpu_count(), self._cpu_cores ) )

		workers = allgather( (self._rank, self._name, self._cpu_cores ) )
		#print workers
		w = 3
		while w < len( workers ):
			self._workers[ workers[w] ] = { 'name': workers[w+1], 'mslots': workers[w+2], 'slots': workers[w+2], 'proc': [], 'reserved':False }
			w = w + 3

		if pollfunc is not None:
			self._pollfunc = pollfunc

		self._recv = irecv()
		self._send = True

		self.display( OUTPUT_VERBOSE, 'initialized on %s' % self._name )
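The allgather call evidently returns one flat list with a (rank, name, cores) triple per process, hence the stride-3 walk; starting at w = 3 skips the first triple, presumably because rank 0 acts as the coordinator rather than a worker.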
Example 12
import sys
import Numeric
import mpi

try:
    rank,size = mpi.init( len(sys.argv), sys.argv )

    request,buffer = mpi.irecv( 10, mpi.MPI_INT, 0, 0, mpi.MPI_COMM_WORLD )

    print "Request #: %s"%(request)
    print "buffer: %s"%(buffer)

    A = Numeric.array([1,2,3,4,5,6,7,8,9,10],Numeric.Int32)
    send_request = mpi.isend( A, 10, mpi.MPI_INT, 0, 0, mpi.MPI_COMM_WORLD )
    print "Sending Request: %s"%(send_request)
    status = mpi.wait( request )
    status = mpi.wait( send_request )
    print "buffer(after send): %s"%(buffer)
    print "status:",status
    mpi.finalize()
except:
    mpi.finalize()
    raise
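Unlike the earlier examples, this one uses a lower-level binding that mirrors the C API: mpi.init takes argc/argv and returns the rank and size, isend and irecv take explicit counts, datatypes, and a communicator, irecv returns a (request, buffer) pair backed by a Numeric array, and mpi.wait blocks on a single request and returns its status.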