Example No. 1
    def parallelRunTest(self):
        if mpi.procs < 2:
            self.fail("This test needs at least 2 processes to run")

        mySmallData = "Hello from " + str(mpi.rank)
        myBigData = [0, "Foo", "goo"]
        for x in range(90):
            myBigData = [x + 1, x * x, 12.4, ("c", "a"), myBigData]

        to = (mpi.rank + 1) % mpi.procs
        frm = (mpi.rank - 1 + mpi.procs) % mpi.procs

        #First we send asynchronously and receive synchronously
        sendHandle1 = mpi.isend(myBigData, to, 0)
        sendHandle2 = mpi.isend(mySmallData, to, 1)
        msgReceived1, status = mpi.recv(frm, 0)
        msgReceived2, status = mpi.recv(frm, 1)

        #Check for failures
        if msgReceived1 != myBigData:
            self.fail("Complex NonBlock failed on first test with big data")
        if msgReceived2 != "Hello from " + str(frm):
            self.fail("Complex NonBlock failed on first test with small data")

        #Next we will do a blocking send and a non-blocking receive
        if mpi.rank == 0:

            #Change the data we're sending just for the heck of it
            myBigData[0] = ("changed")
            myBigData[1] = "Also changed"
            mySmallData = ("Hi", mpi.rank)

            #Perform 2 blocking sends to send the data
            mpi.send(myBigData, 1, 1)
            mpi.send(mySmallData, 1, 2)

        elif mpi.rank == 1:

            #Get recv handles for the two messages
            recvHandle1 = mpi.irecv(0, 1)
            recvHandle2 = mpi.irecv(0, 2)
            finished = [0, 0]

            #Loop until both messages come in
            while finished[0] == 0 or finished[1] == 0:
                if finished[0] == 0:
                    finished[0] = recvHandle1.test()
                if finished[1] == 0:
                    finished[1] = recvHandle2.test()

            #We got the messages, now check them
            if recvHandle1.message != myBigData:
                self.fail("Complex non-block failed on 2nd test with big data")
            if recvHandle2.message != ("Hi", 0):
                self.fail(
                    "Complex non-block failed on 2nd test with small data")

        return
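
A condensed sketch of the same non-blocking pattern, assuming the pyMPI-style API this example uses (mpi.isend/mpi.irecv return handles with a test() method and a .message attribute):

import mpi

to = (mpi.rank + 1) % mpi.procs
frm = (mpi.rank - 1 + mpi.procs) % mpi.procs

# Post a non-blocking send and a non-blocking receive, then poll the handle.
sendHandle = mpi.isend("ping from %d" % mpi.rank, to, 0)
recvHandle = mpi.irecv(frm, 0)
while not recvHandle.test():
    pass  # real code would do useful work here instead of spinning
print "%d got: %s" % (mpi.rank, recvHandle.message)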
Example No. 2
    def parallelRunTest(self):
        if mpi.procs < 2:
            self.fail("This test needs at least 2 processes to run")

        mySmallData = "Hello from " + str(mpi.rank)
        myBigData = [0,"Foo", "goo"]
        for x in range(90):
          myBigData = [x+1,x*x, 12.4, ("c", "a"), myBigData]

        to = (mpi.rank + 1)%mpi.procs
        frm = (mpi.rank-1+mpi.procs)%mpi.procs

        #First we send asynchronously and receive synchronously
        sendHandle1 = mpi.isend( myBigData,   to, 0)
        sendHandle2 = mpi.isend( mySmallData, to, 1)
        msgReceived1, status = mpi.recv(frm,0)
        msgReceived2, status = mpi.recv(frm,1)

        #Check for failures
        if msgReceived1 != myBigData:
            self.fail("Complex NonBlock failed on first test with big data")
        if msgReceived2 != "Hello from " + str(frm):
            self.fail("Complex NonBlock failed on first test with small data")

        #Next we will do a blocking send and a non-blocking receive
        if mpi.rank==0:

          #Change the data we're sending just for the heck of it
          myBigData[0] = ("changed")
          myBigData[1] = "Also changed"
          mySmallData = ("Hi", mpi.rank)

          #Perform 2 blocking sends to send the data
          mpi.send( myBigData, 1, 1 )
          mpi.send( mySmallData, 1, 2 )

        elif mpi.rank==1:

          #Get recv handles for the two messages
          recvHandle1 = mpi.irecv( 0,1)
          recvHandle2 = mpi.irecv( 0,2)
          finished = [0,0]

          #Loop until both messages come in
          while finished[0] == 0 or finished[1] == 0:
            if finished[0] == 0:
              finished[0] = recvHandle1.test()
            if finished[1] == 0:
              finished[1] = recvHandle2.test()

          #We got the messages, now check them
          if recvHandle1.message != myBigData:
            self.fail( "Complex non-block failed on 2nd test with big data")
          if recvHandle2.message != ("Hi", 0):
            self.fail( "Complex non-block failed on 2nd test with small data")

        return
Example No. 3
 def parallelRunTest(self):
     msg = "I am from proc %d" % mpi.rank
     if mpi.procs % 2 != 0:
         self.fail("Test needs even number of processes")
     if mpi.rank % 2 == 0:
         sendResult = mpi.send(msg, mpi.rank + 1)
         recv, stat = mpi.recv()
     else:
         recv, stat = mpi.recv()
         sendResult = mpi.send(msg, mpi.rank - 1)
     return
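
The even/odd ordering above keeps the two ranks in each pair from blocking in send at the same time. A minimal alternative sketch, assuming the mpi.sendrecv call shown in Examples No. 17 and 18 below, pairs the send and the receive in a single call so no ordering is needed:

import mpi

msg = "I am from proc %d" % mpi.rank
partner = mpi.rank + 1 if mpi.rank % 2 == 0 else mpi.rank - 1
# sendrecv posts the send and the receive together.
received, status = mpi.sendrecv(msg, partner, partner)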
Example No. 4
 def parallelRunTest(self):
     msg = "I am from proc %d"%mpi.rank
     if mpi.procs % 2 != 0:
         self.fail("Test needs even number of processes")
     if mpi.rank % 2 == 0:
        sendResult = mpi.send(msg,mpi.rank+1)
        recv,stat = mpi.recv()
     else:
        recv,stat = mpi.recv()
        sendResult = mpi.send(msg,mpi.rank-1)
     return
Example No. 5
def main():
    myrank, size = mpi.init()

    # split the problem in chunks

    if problemlength % size == 0:
        blocksize = problemlength / size
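        # e.g. with problemlength = 12 and size = 4, blocksize = 3 and rank r
        # ends up working on data[3*r : 3*(r + 1)]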
    else:
        print "Sorry, I don't know how to split up the problem, aborting!"
        mpi.finalize()
        return

    if myrank == 0:
        data = range(1,problemlength + 1)  # create a toy dataset...
        random.shuffle(data)               # ...modifies data in place

        mydata = data[0:blocksize] # get some data for me...
                                   # and communicate the rest to slaves

        for host in range(1,size):
            hisdata = data[blocksize*host:blocksize*(host+1)]
            mpi.send(hisdata,blocksize,mpi.MPI_INT,host,0,mpi.MPI_COMM_WORLD)
    else:
        mydata = mpi.recv(blocksize,mpi.MPI_INT,0,0,mpi.MPI_COMM_WORLD)

    mymax = max(mydata)

    maximums = mpi.gather(mymax,1,mpi.MPI_INT, size, mpi.MPI_INT, 0, mpi.MPI_COMM_WORLD)

    if myrank == 0:
        mymax = max(maximums)
        print "The maximum value is:", mymax

    mpi.finalize()            
Example No. 6
    def dumpState(self, cacheFileName):
        def writeNodeData(f, iproc, pos, m, H, vol, surface):
            f.writeObject(pos, "proc%06i/pos" % iproc)
            f.writeObject(m, "proc%06i/m" % iproc)
            f.writeObject(H, "proc%06i/H" % iproc)
            f.writeObject(vol, "proc%06i/vol" % iproc)
            f.writeObject(surface, "proc%06i/surface" % iproc)
            return

        if os.path.splitext(cacheFileName)[1] != ".silo":
            cacheFileName += ".silo"
        if mpi.rank == 0:
            dire = os.path.dirname(cacheFileName)
            if dire and not os.path.exists(dire):
                os.makedirs(dire)
            f = SiloFileIO(cacheFileName, Create)
            f.writeObject(mpi.procs, "numGeneratingProcs")
            writeNodeData(f, 0, self.pos, self.m, self.H, self.vol, self.surface)
            for iproc in xrange(1, mpi.procs):
                pos, m, H, vol, surface = mpi.recv(source=iproc)[0]
                writeNodeData(f, iproc, pos, m, H, vol, surface)
            f.close()
        else:
            mpi.send((self.pos, self.m, self.H, self.vol, self.surface), dest=0)
        mpi.barrier()
        return
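
The funnel-to-rank-0 idiom used here (every other rank sends, rank 0 receives in rank order and keeps element [0] of the returned (data, status) pair) recurs throughout these examples. A stripped-down sketch of just that idiom, assuming the same pyMPI keyword arguments:

import mpi

def gather_to_root(value):
    # Collect one value per process on rank 0, in rank order.
    if mpi.rank == 0:
        values = [value]
        for iproc in xrange(1, mpi.procs):
            values.append(mpi.recv(source=iproc)[0])
        return values
    else:
        mpi.send(value, dest=0)
        return None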
Example No. 7
def bandwidth(cnt, bytes):
    if mpi.rank == 0:
        TIMER_START()
        for i in range(cnt):
            mpi.send(message[:bytes], slave)
        msg, status = mpi.recv(slave)
        TIMER_STOP()

        total = TIMER_ELAPSED()
        return ((4 + (bytes * cnt)) / 1024.0) / (total * 1e-6), "KB/sec"

    elif mpi.rank == slave:
        for i in range(cnt):
            msg, status = mpi.recv(master)
        mpi.send(message[:bytes], master)
        return 0.0, "KB/sec"

    else:
        return 0.0, "KB/sec"
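
TIMER_START, TIMER_STOP and TIMER_ELAPSED are not defined in these snippets. A minimal stand-in, assuming TIMER_ELAPSED is meant to return microseconds (the bandwidth code above multiplies it by 1e-6 to get seconds, and the latency example reports it directly as "u-sec"):

import time

_t0 = 0.0
_t1 = 0.0

def TIMER_START():
    global _t0
    _t0 = time.time()

def TIMER_STOP():
    global _t1
    _t1 = time.time()

def TIMER_ELAPSED():
    # Elapsed wall-clock time in microseconds.
    return (_t1 - _t0) * 1e6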
Example No. 8
def roundtrip(cnt, bytes):
    if mpi.rank == 0:
        TIMER_START()
        for i in range(cnt):
            mpi.send(message[:bytes], slave)
            msg, status = mpi.recv(slave)
        TIMER_STOP()

        total = TIMER_ELAPSED()
        return cnt / (total * 1e-6), "transactions/sec"

    elif mpi.rank == slave:
        for i in range(cnt):
            msg, status = mpi.recv(master)
            mpi.send(message[:bytes], master)
        return 0.0, "transactions/sec"

    else:
        return 0.0, "transactions/sec"
Example No. 9
def latency(cnt, bytes):
    if mpi.rank == 0:
        TIMER_START()
        for i in range(cnt):
            mpi.send(message[:bytes], slave)
        TIMER_STOP()
        msg, status = mpi.recv(slave)

        total = TIMER_ELAPSED()
        return total / cnt, "u-sec"

    elif mpi.rank == slave:
        for i in range(cnt):
            msg, status = mpi.recv(master)
        mpi.send(message[:4], master)
        return 0.0, "u-sec"

    else:
        return 0.0, "u-sec"
Example No. 10
 def testNonBlockSend(self):
     for sendProc in xrange(mpi.procs):
         if mpi.rank == sendProc:
             for j in xrange(mpi.procs):
                 if j != mpi.rank:
                     obj = 10 * mpi.rank + 1
                     mpi.isend(obj, dest=j, tag=100)
         else:
             obj = mpi.recv(sendProc, 100)[0]
             assert obj == 10 * sendProc + 1
Example No. 11
def latency(cnt,bytes):
    if mpi.rank == 0:
        TIMER_START()
        for i in range(cnt):
            mpi.send(message[:bytes],slave)
        TIMER_STOP()
        msg,status = mpi.recv(slave)

        total = TIMER_ELAPSED()
        return total/cnt,"u-sec"
    
    elif mpi.rank == slave:
        for i in range(cnt):
            msg,status = mpi.recv(master)
        mpi.send(message[:4],master)
        return 0.0,"u-sec"

    else:
        return 0.0,"u-sec"
Example No. 12
 def ping(self, msg):
     if mpi.procs < 2: return
     try:
         if mpi.rank == 0:
             received, status = mpi.recv(1)
             self.failUnless(received == msg, 'Bad message received')
         elif mpi.rank == 1:
             mpi.send(msg, 0)
     finally:
         mpi.barrier()
Example No. 13
def roundtrip(cnt,bytes):
    if mpi.rank == 0:
        TIMER_START()
        for i in range(cnt):
            mpi.send(message[:bytes],slave)
            msg,status = mpi.recv(slave)
        TIMER_STOP()

        total = TIMER_ELAPSED()
        return cnt / ( total *1e-6 ),"transactions/sec"
    
    elif mpi.rank == slave:
        for i in range(cnt):
            msg,status = mpi.recv(master)
            mpi.send(message[:bytes],master)
        return 0.0,"transactions/sec"

    else:
        return 0.0,"transactions/sec"
Example No. 14
 def ping(self,msg):
     if mpi.procs < 2: return
     try:
         if mpi.rank == 0:
             received,status = mpi.recv(1)
             self.failUnless(received==msg,'Bad message received')
         elif mpi.rank == 1:
             mpi.send(msg,0)
     finally:
         mpi.barrier()
Example No. 15
 def recv(self):
     # blocking receive from anywhere
     (data, status) = mpi.recv()
     src = self.tasks[status.source]
     # Put back into idle list
     if not src in self.idle:
         self.idle.append(src)
     msg = Message(data, src, self, status)
     self.log("recv", msg)
     return msg
Example No. 16
def bandwidth(cnt,bytes):
    if mpi.rank == 0:
        TIMER_START()
        for i in range(cnt):
            mpi.send(message[:bytes],slave)
        msg,status = mpi.recv(slave)
        TIMER_STOP()

        total = TIMER_ELAPSED()
        return ((4+(bytes*cnt))/1024.0) / (total*1e-6),"KB/sec"
    
    elif mpi.rank == slave:
        for i in range(cnt):
            msg,status = mpi.recv(master)
        mpi.send(message[:bytes],master)
        return 0.0,"KB/sec"

    else:
        return 0.0,"KB/sec"
Example No. 17
    def parallelRunTest(self):
        if mpi.procs%2 == 1:
            self.fail("Simple sendrecv must be run with an even number of processes")

        if mpi.procs > 1:
            # Swap ranks even/odd, evens send first
            if mpi.rank % 2 == 0:
                mpi.send(mpi.rank,mpi.rank+1)
                nextRank,stat = mpi.recv(mpi.rank+1)
                if nextRank != mpi.rank+1:
                    self.fail("Received incorrect rank")
            else:
                prevRank,stat = mpi.recv(mpi.rank-1)
                mpi.send(mpi.rank,mpi.rank-1)
                if prevRank != mpi.rank-1:
                    self.fail("Received incorrect rank.  Expected %r, got %r"%(
                        mpi.rank-1,prevRank
                        ))

        # Try an around the horn sendrecv check
        me = mpi.rank
        rightside = (me+1)%mpi.procs
        leftside = (me-1+mpi.procs)%mpi.procs
        msg,stat2 = mpi.sendrecv(me,rightside,leftside)

        if msg != leftside:
            failStr = "Failed simple sendrecv "
            failStr += "Process " + str(mpi.rank) + " received "
            failStr += str(msg) + " instead of " + str(leftside)
            self.fail(failStr)

        #Do another check with a longer message
        longMsg = ()
        for i in range(256):
          longMsg += (i, str(i) )

        msg, stat2 = mpi.sendrecv( longMsg, leftside, rightside )
        if msg != longMsg:
          failStr = "Failed simple sendrecv for long messages"
          self.fail( failStr )

        return
Example No. 18
    def parallelRunTest(self):
        if mpi.procs % 2 == 1:
            self.fail(
                "Simple sendrecv must be run with an even number of processes")

        if mpi.procs > 1:
            # Swap ranks even/odd, evens send first
            if mpi.rank % 2 == 0:
                mpi.send(mpi.rank, mpi.rank + 1)
                nextRank, stat = mpi.recv(mpi.rank + 1)
                if nextRank != mpi.rank + 1:
                    self.fail("Received incorrect rank")
            else:
                prevRank, stat = mpi.recv(mpi.rank - 1)
                mpi.send(mpi.rank, mpi.rank - 1)
                if prevRank != mpi.rank - 1:
                    self.fail("Received incorrect rank.  Expected %r, got %r" %
                              (mpi.rank - 1, prevRank))

        # Try an around the horn sendrecv check
        me = mpi.rank
        rightside = (me + 1) % mpi.procs
        leftside = (me - 1 + mpi.procs) % mpi.procs
        msg, stat2 = mpi.sendrecv(me, rightside, leftside)

        if msg != leftside:
            failStr = "Failed simple sendrecv "
            failStr += "Process " + str(mpi.rank) + " received "
            failStr += str(msg) + " instead of " + str(leftside)
            self.fail(failStr)

        #Do another check with a longer message
        longMsg = ()
        for i in range(256):
            longMsg += (i, str(i))

        msg, stat2 = mpi.sendrecv(longMsg, leftside, rightside)
        if msg != longMsg:
            failStr = "Failed simple sendrecv for long messages"
            self.fail(failStr)

        return
Example No. 19
 def gatherVector(vec):
     for i in xrange(len(vec)):
         if mpi.rank == 0:
             for sendProc in xrange(1, mpi.procs):
                 vals = mpi.recv(sendProc)[0]
                 print "Received %i values from processor %i" % (len(vals), sendProc)
                 vec[i] += vals
         else:
             mpi.send(vec[i], 0)
         if mpi.rank == 0:
             assert len(vec[i]) == ntot
Example No. 20
 def run(self):
     while 1:
         print >> sys.stderr, "======= Node #%d waiting..." % mpi.rank
         try:
             mpi.send("request", 0)
             task, message = mpi.recv(0)
             print >> sys.stderr, "Node #%d received task! ---------------------------" % mpi.rank
             task.run()
         except:
             print >> sys.stderr, "======= Node %d done!" % mpi.rank
             break
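
The loop above is the worker half of a simple request/dispatch protocol: it announces itself with a "request" and expects a task back from rank 0. A hypothetical master-side counterpart, sketched with the same pyMPI calls used elsewhere in this collection (the task list and the absence of an explicit shutdown message are assumptions of the sketch):

import mpi

def serve(tasks):
    # Rank 0: hand each task to whichever worker asks for work next.
    pending = list(tasks)
    while pending:
        request, status = mpi.recv(mpi.ANY_SOURCE)
        mpi.send(pending.pop(0), status.source)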
Example No. 21
    def restoreState(self, cacheFileName):
        def readNodeData(f, iproc):
            pos = f.readObject("proc%06i/pos" % iproc)
            m = f.readObject("proc%06i/m" % iproc)
            H = f.readObject("proc%06i/H" % iproc)
            vol = f.readObject("proc%06i/vol" % iproc)
            surface = f.readObject("proc%06i/surface" % iproc)
            return pos, m, H, vol, surface

        if cacheFileName is None:
            return False

        if os.path.splitext(cacheFileName)[1] != ".silo":
            cacheFileName += ".silo"
        result = False
        if mpi.rank == 0:
            result = (cacheFileName and os.path.exists(cacheFileName))
        result = mpi.bcast(result, root=0)
        if result:
            print "Restoring MedialGenerator state from %s" % cacheFileName
            if mpi.rank == 0:
                f = SiloFileIO(cacheFileName, Read)
                numGeneratingProcs = f.readObject("numGeneratingProcs")

                # Decide how to divide the generating domains between our current processes.
                n0 = numGeneratingProcs / mpi.procs
                remainder = numGeneratingProcs % mpi.procs
                for iproc in xrange(mpi.procs):
                    if iproc >= numGeneratingProcs:
                        imin, imax = 0, 0
                    else:
                        imin = iproc * n0 + min(iproc, remainder)
                        imax = imin + n0
                        if iproc < remainder:
                            imax += 1
                    pos, m, H, vol, surface = [], [], [], [], []
                    for igenproc in xrange(imin, imax):
                        posi, mi, Hi, voli, surfacei = readNodeData(
                            f, igenproc)
                        pos += posi
                        m += mi
                        H += Hi
                        vol += voli
                        surface += surfacei
                    if iproc == 0:
                        self.pos, self.m, self.H, self.vol, self.surface = pos, m, H, vol, surface
                    else:
                        mpi.send((pos, m, H, vol, surface), dest=iproc)
                f.close()
            else:
                self.pos, self.m, self.H, self.vol, self.surface = mpi.recv(
                    source=0)[0]
        return result
Example No. 22
def main():
    rank,size = mpi.init()
    
    serial_dict = pickle.dumps(somedict)

    mpi.isend( serial_dict, len(serial_dict), mpi.MPI_CHAR, 0, 0, mpi.MPI_COMM_WORLD )

    new_serial_dict = mpi.recv( len( serial_dict), mpi.MPI_CHAR, 0, 0, mpi.MPI_COMM_WORLD )
    print new_serial_dict

    mpi.finalize()

    newdict = pickle.loads( new_serial_dict )
    print newdict
    return
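
This example sends the pickled dictionary to its own rank, so the receiver already knows the byte count. Between distinct ranks the length has to travel first; a sketch of that two-message pattern under the same low-level binding, mirroring what Example No. 28 does with its length-then-payload messages:

import mpi
import pickle

def send_object(obj, dest):
    data = pickle.dumps(obj)
    # Send the length first so the receiver knows how many chars to expect.
    mpi.send(len(data), 1, mpi.MPI_INT, dest, 0, mpi.MPI_COMM_WORLD)
    mpi.send(data, len(data), mpi.MPI_CHAR, dest, 1, mpi.MPI_COMM_WORLD)

def recv_object(source):
    length = mpi.recv(1, mpi.MPI_INT, source, 0, mpi.MPI_COMM_WORLD)
    data = mpi.recv(length, mpi.MPI_CHAR, source, 1, mpi.MPI_COMM_WORLD)
    return pickle.loads(data)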
Example No. 23
    def testGlobalMeshNodeIDs(self):
        mesh, void = generatePolyhedralMesh([self.nodes],
                                            xmin=xmin,
                                            xmax=xmax,
                                            generateParallelConnectivity=True)
        globalIDs = mesh.globalMeshNodeIDs()

        # Check that all our local IDs are unique.
        uniqueIDs = set()
        for i in globalIDs:
            uniqueIDs.add(i)
        self.failUnless(
            len(uniqueIDs) == len(globalIDs),
            "Global mesh node IDs not unique!  %i != %i" %
            (len(globalIDs), len(uniqueIDs)))

        # Check that the IDs are unique and consistent across domains.
        if mpi.procs > 1:
            neighbors = mesh.neighborDomains
            sharedNodes = mesh.sharedNodes
            assert len(neighbors) == len(sharedNodes)

            # Translate to the shared nodes to global IDs.
            sharedGlobalIDs = [[globalIDs[i] for i in localIDs]
                               for localIDs in sharedNodes]
            assert len(sharedGlobalIDs) == len(neighbors)

            # Do non-blocking sends to all our neighbors.
            sendRequests = []
            for neighbor, ids in zip(neighbors, sharedGlobalIDs):
                sendRequests.append(mpi.isend(ids, dest=neighbor))
            assert len(sendRequests) == len(neighbors)

            # Recv the IDs from our neighbors and do the testing.
            for neighbor, localIDs in zip(neighbors, sharedGlobalIDs):
                otherIDs = mpi.recv(source=neighbor)[0]
                self.failUnless(
                    otherIDs == list(localIDs),
                    "Global IDs don't match between domains %i <-> %i\n%s\n%s"
                    % (mpi.rank, neighbor, list(localIDs), otherIDs))

            # Wait until all our sends have completed.
            for req in sendRequests:
                req.Wait()
Example No. 24
 def action(self):
     mpi.send([self.Actions.GetBreath, []], 0)
     (result, code) = mpi.recv(0)[0]
     print mpi.rank, "] I have", result, "breath power."
     mpi.send([self.Actions.BreatheFire, [10]], 0)
     (result, code) = mpi.recv(0)[0]
     if result == 1:
         print mpi.rank, "] fire breathing attack succeeded!"
     else:
         if code == self.ResultCodes.game_over:
             print mpi.rank, "] fire breathing attack failed because the game was over"
         elif code == self.ResultCodes.not_enough_breath:
             print mpi.rank, "] fire breathing attack failed because we were out of breath"
         elif code == self.ResultCodes.invalid_breathing_direction:
             print mpi.rank, "] can only breath fire straight"
     mpi.send([self.Actions.SetFlyingDirection, [self.Direction.East]], 0)
     (result, code) = mpi.recv(0)[0]
     if code == self.ResultCodes.success:
         print mpi.rank, "] set direction!"
     elif code == self.ResultCodes.invalid_flying_direction:
         print mpi.rank, "] I can't fly that way!"
     mpi.send([self.Actions.GetFlyingDirection, []], 0)
     (dir, code) = mpi.recv(0)[0]
     mpi.send([self.Actions.GetFlyingPosition, []], 0)
     (pos, code) = mpi.recv(0)[0]
     if code == self.ResultCodes.success:
         print mpi.rank, "] I am flying", self.Direction.ToString(dir), "at", "(", pos.x, ",", pos.y, ")"
         print mpi.rank, "] Where is (5, 5)? In relation to me, approx.: ", self.Direction.ToString(
             pos.toward(self.Position(5, 5))
         )
     elif code == self.ResultCodes.game_over:
         print mpi.rank, "] I failed to get my flying position because the game was over"
     print mpi.rank, "] Attempting to fly 2 spaces"
     mpi.send([self.Actions.Fly, [1]], 0)
     (result, code) = mpi.recv(0)[0]
     if code == self.ResultCodes.success:
         print mpi.rank, "] flying successful"
     elif code == self.ResultCodes.invalid_flight_path:
         print mpi.rank, "] flying unsuccessful, I must have flown off the board or attempted to fly too far at once"
     elif code == self.ResultCodes.collision:
         print mpi.rank, "] I collided"
     elif code == self.ResultCodes.game_over:
         print mpi.rank, "] could not fly because game over"
     elif code == self.ResultCodes.slain:
         print mpi.rank, "] could not fly because I have been slain"
Example No. 25
	def action(self):
		mpi.send([self.Actions.GetArrows, []], 0)
		(result, code) = mpi.recv(0)[0]
		print mpi.rank, "] I have", result,"arrows."
		mpi.send([self.Actions.FireBow, [10, self.Direction.randomDir()]], 0)
		(result, code) = mpi.recv(0)[0]
		if (result == 1):
			print mpi.rank, "] firing bow attack succeeded!"
		else:
			if (code == self.ResultCodes.game_over):
				print mpi.rank, "] firing bow attack failed because the game was over"
			elif (code == self.ResultCodes.not_enough_arrows):
				print mpi.rank, "] firing bow attack failed because we were out of arrows"
			elif (code == self.ResultCodes.invalid_bow_direction):
				print mpi.rank, "] could not aim bow in that direction"
		mpi.send([self.Actions.FaceHorse, [self.Direction.West]], 0)
		(result, code) = mpi.recv(0)[0]
		if (code == self.ResultCodes.success):
			print mpi.rank, "] set horse's direction!"
		elif (code == self.ResultCodes.invalid_horse_facing):
			print mpi.rank, "] I can't face my horse that way!"
		mpi.send([self.Actions.GetHorseFacing, []], 0)
		(dir, code) = mpi.recv(0)[0]
		mpi.send([self.Actions.GetHorsePosition, []], 0)
		(pos, code) = mpi.recv(0)[0]
		if (code == self.ResultCodes.success):
			print mpi.rank, "] My horse is faced", self.Direction.ToString(dir), "at", "(", pos.x, ",", pos.y, ")"
			print mpi.rank, "] Where is (5, 5)? In relation to me, approx.: ", self.Direction.ToString(pos.toward(self.Position(5, 5)))
		elif (code == self.ResultCodes.game_over):
			print mpi.rank, "] I failed to get my horse's position because the game was over"
		print mpi.rank, "] Attempting to ride horse 2 spaces"
		mpi.send([self.Actions.RideHorse, [2]], 0)
		(result, code) = mpi.recv(0)[0]
		if (code == self.ResultCodes.success):
			print mpi.rank, "] horse trodded successfully"
		elif (code == self.ResultCodes.invalid_horse_path):
			print mpi.rank, "] horse trodded unsuccessfully, he must have run off the board or attempted to move too far at once"
		elif (code == self.ResultCodes.collision):
			print mpi.rank, "] I collided"
		elif (code == self.ResultCodes.game_over):
			print mpi.rank, "] could not ride horse because game over"
		elif (code == self.ResultCodes.defeated):
			print mpi.rank, "] could not ride horse because I have been defeated"
Example No. 26
def testSharedNodes(mesh):
    assert len(mesh.neighborDomains) == len(mesh.sharedNodes)

    # First check that everyone agrees about who is talking to who.
    myNeighborDomains = list(mesh.neighborDomains)
    for sendProc in xrange(mpi.procs):
        otherProcs = mpi.bcast(myNeighborDomains, root=sendProc)
        if mpi.rank != sendProc:
            assert (mpi.rank in otherProcs) == (sendProc in mesh.neighborDomains)

    # Build our set of global shared node IDs.
    globalIDs = mesh.globalMeshNodeIDs()
    globalSharedNodes = [[globalIDs[i] for i in localNodes] for localNodes in mesh.sharedNodes]
    assert len(globalSharedNodes) == len(mesh.neighborDomains)

    # Check that the shared nodes are consistent.
    sendRequests = []
    for (otherProc, ids) in zip(mesh.neighborDomains, globalSharedNodes):
        sendRequests.append(mpi.isend(ids, dest=otherProc))
    for (otherProc, ids) in zip(mesh.neighborDomains, globalSharedNodes):
        otherIDs = mpi.recv(source=otherProc)[0]
        assert ids == otherIDs

    # Check that all shared nodes have been found.
    localSharedNodes = [[i for i in localNodes] for localNodes in mesh.sharedNodes]
    positions = vector_of_Vector()
    for i in xrange(mesh.numNodes):
        positions.append(mesh.node(i).position())
    xmin, xmax = Vector(), Vector()
    boundingBox(positions, xmin, xmax)
    xmin = Vector(mpi.allreduce(xmin.x, mpi.MIN), mpi.allreduce(xmin.y, mpi.MIN))
    xmax = Vector(mpi.allreduce(xmax.x, mpi.MAX), mpi.allreduce(xmax.y, mpi.MAX))
    boxInv = Vector(1.0/(xmax.x - xmin.x),
                    1.0/(xmax.y - xmin.y))
    nodeHashes = [hashPosition(mesh.node(i).position(), xmin, xmax, boxInv) for i in xrange(mesh.numNodes)]
    nodeHashes2ID = {}
    for i in xrange(len(nodeHashes)):
        nodeHashes2ID[nodeHashes[i]] = i
    for sendProc in xrange(mpi.procs):
        otherNodeHashes = mpi.bcast(nodeHashes, root=sendProc)
        if sendProc != mpi.rank:
            for hashi in otherNodeHashes:
                if hashi in nodeHashes:
                    assert sendProc in myNeighborDomains
                    idomain = myNeighborDomains.index(sendProc)
                    i = nodeHashes2ID[hashi]
                    assert i in localSharedNodes[idomain]

    # Same for faces.
    localSharedFaces = [[i for i in localFaces] for localFaces in mesh.sharedFaces]
    positions = vector_of_Vector()
    for i in xrange(mesh.numFaces):
        positions.append(mesh.face(i).position())
    faceHashes = [hashPosition(mesh.face(i).position(), xmin, xmax, boxInv) for i in xrange(mesh.numFaces)]
    faceHashes2ID = {}
    for i in xrange(len(faceHashes)):
        faceHashes2ID[faceHashes[i]] = i
    for sendProc in xrange(mpi.procs):
        otherFaceHashes = mpi.bcast(faceHashes, root=sendProc)
        if sendProc != mpi.rank:
            for hashi in otherFaceHashes:
                if hashi in faceHashes:
                    assert sendProc in myNeighborDomains
                    idomain = myNeighborDomains.index(sendProc)
                    i = faceHashes2ID[hashi]
                    assert i in localSharedFaces[idomain]

    return True
Example No. 27
time.sleep(1.0)

mpi.barrier() # Wait until everybody gets here


NumPts = Numruns/SaveElec

if rank == Numspots - 1: # Use the last processor to gather the data and plot it
    imaxs = zeros([NumPts])
    sigmaxs = zeros([NumPts])
    sigmays = zeros([NumPts])

    for point in range(NumPts):
        run = point * SaveElec
        src = run % (Numspots - 1)
        spotdata, status = mpi.recv(src, tag = run)
        if run != spotdata[0]:
            print "Communication error! Run = %d, tag = %d"%(run, spotdata[0])
            continue
        else:
            print "Received data for run = %d"%run
            sigmaxs[point] = spotdata[1]
            sigmays[point] = spotdata[2]
            imaxs[point] = spotdata[3]

    PlotSpotlist(Numruns, Numspots, imaxs, sigmaxs, sigmays)

else:                    # Everybody else analyzes one or more runs with all of the spots.
    for point in range(NumPts):
        run = point * SaveElec
        if run % (Numspots - 1) == rank:
Example No. 28
    def getobsp(self, snum, stime, tetrad, zerotime=0.0, debug=0):
        """
        
        LISApar.getobsp(length,deltat,tetrad,zerotime=0.0)
        is the parallel-computing equivalent of getobs and
        getobsc, and it is used to compute the TDI responses
        of large sets of Wave objects. It must be called
        from an instance of LISApar, with the following
        parameters:
        
        - length is the total length of the TDI-observable
          arrays that will be returned;
        
        - deltat is the cadence of the time series;
        
        - zerotime is the initial time for the time series;
        
        - tetrad is a tuple (lisa,wavefactory,parameterlist,
          observables) of four elements:

          * lisa is an instance of a LISA class, which
            should be the same for every CPU taking part in
            the computation;

          * wavefactory is a Python function taking any
            number of parameters, and returning an instance of
            a synthLISA Wave object; the function must be
            defined for every CPU taking part in the
            computation;

          * parameterlist is a list of source parameters (or
            of parameter n-tuples, if wavefactory takes more
            than one parameter), which will be distributed
            among the CPUs, and passed to the Wave Factory to
            construct synthLISA Wave objects; the parameter
            sets need to be defined only on the root CPU, but
            it won't hurt to define them everywhere. They can
            contain any Python types (they are pickled before
            distribution), but not synthLISA objects;

          * observables is a list or tuple of TDI
            observables, which must be given as unbound
            methods, such as synthlisa.TDI.X1 or
            synthlisa.TDI.time.
        
        The distribution of the parameter sets among the
        CPUs tries to balance the load of the computation.
        If the number of sources is not divisible by the
        number of CPUs, it will assign a smaller number of
        sources to the root CPU, and the same number of
        sources to all other CPUs."""

        # accept four levels (0-4) of debugging info

        inittime = time.time()

        myrank = self.rank
        size = self.size

        try:
            (lisa, srcfunc, parameters, obs) = tetrad
        except:
            if myrank == 0:
                print "LISApar.getobsp(...): third parameter must be a 4-tuple containing a",
                print "LISA instance, a Wave factory, an array of parameters for the factory,",
                print "and a set of TDI observables given as class methods (such as synthlisa.TDI.X)."
            raise IndexError

        if type(parameters) not in (list, tuple, numpy.ndarray):
            if myrank == 0:
                print "LISApar.getobsp(...): needs a list of parameters to feed to the factory!"
            raise IndexError

        if size == 1:
            if myrank == 0:
                print "LISApar.getobsp(...): must be run with more than one cpu!"
            raise NotImplementedError

        if size > len(parameters):
            if myrank == 0:
                print "LISApar.getobsp(...): needs to run with more sources than cpus!"
            raise IndexError

        # root may get zero processors

        blocksize, remain = divmod(len(parameters), size)

        if remain > 0:
            blockadd, remain = divmod(remain, size - 1)
            blocksize = blocksize + blockadd
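
        # Worked example: with 11 parameters on 4 CPUs, divmod(11, 4) gives
        # blocksize = 2 and remain = 3; divmod(3, 3) then gives blockadd = 1,
        # so blocksize becomes 3.  CPUs 1-3 each receive 3 parameter sets and
        # the root keeps the remaining 11 - 9 = 2, the smaller share described
        # in the docstring.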

        if myrank == 0 and debug > 2:
            print "Standard block: ", blocksize,
            print "; root block: ", len(parameters) - blocksize * (size - 1)

        if myrank == 0:
            if debug > 3:
                print "Preparing for parallel execution..."

            for cpu in range(1, size):
                blockstart, blockend = (cpu - 1) * blocksize, cpu * blocksize

                serial_pars = pickle.dumps(parameters[blockstart:blockend])
                len_pars = len(serial_pars)

                mpi.isend(len_pars, 1, mpi.MPI_INT, cpu, 0, mpi.MPI_COMM_WORLD)
                mpi.isend(serial_pars, len_pars, mpi.MPI_CHAR, cpu, 1,
                          mpi.MPI_COMM_WORLD)

            mypars = parameters[blockend:]
        else:
            len_pars = mpi.recv(1, mpi.MPI_INT, 0, 0, mpi.MPI_COMM_WORLD)
            serial_pars = mpi.recv(len_pars, mpi.MPI_CHAR, 0, 1,
                                   mpi.MPI_COMM_WORLD)

            mypars = pickle.loads(serial_pars)

        if debug > 2:
            print "CPU ", myrank, " received ", len(
                mypars), " source parameters ", mypars

        try:
            if type(mypars[0]) in (list, tuple, numpy.ndarray):
                sources = map(lambda x: srcfunc(*x), mypars)
            else:
                sources = map(srcfunc, mypars)

            if len(filter(lambda x: not isinstance(x, synthlisa.Wave),
                          sources)) > 0:
                raise TypeError
        except:
            if myrank == 0:
                print "LISApar.getobsp(...): srcfunc must return a synthlisa.Wave when applied",
                print "to each element of the parameter list"
            raise TypeError

        if debug > 3:
            print "CPU ", myrank, " created sources ", sources

        wavearray = synthlisa.WaveArray(sources)

        if not isinstance(lisa, synthlisa.LISA):
            if myrank == 0:
                print "LISApar.getobsp(...): lisa must be an instance of synthlisa.LISA."
            raise TypeError

        tdisignal = synthlisa.TDIsignal(lisa, wavearray)

        # is it possible to permanently bind an unbound method?
        # yes, by doing bound_obs = obs.__get__(tdisignal)
        # but it's not clear this will yield a faster call

        if type(obs) == list or type(obs) == tuple:
            multobs = len(obs)

            array = numpy.zeros((snum, multobs), dtype='d')
            for i in numpy.arange(0, snum):
                for j in range(0, multobs):
                    array[i, j] = obs[j](tdisignal, zerotime + i * stime)
        else:
            multobs = 1

            array = numpy.zeros(snum, dtype='d')
            for i in numpy.arange(0, snum):
                array[i] = obs(tdisignal, zerotime + i * stime)

        sumresults = mpi.reduce(array, snum * multobs, mpi.MPI_DOUBLE,
                                mpi.MPI_SUM, 0, mpi.MPI_COMM_WORLD)

        if myrank == 0 and debug > 0:
            currenttime = time.time() - inittime

            vel = snum / currenttime
            print "Completed in %d s [%d (multi)samples/s]." % (
                int(currenttime), int(vel))

        if myrank == 0:
            if multobs == 1:
                return sumresults
            else:
                return sumresults.reshape(snum, multobs)
        else:
            return None
Example No. 29
    def writeMasterSiloFile(ndim, nblock, jsplit, baseDirectory, baseName,
                            procDir, materials, vars, label, time, cycle):

        nullOpts = silo.DBoptlist()

        # Decide which domains have information.
        if len(vars[0][0]) > 0:
            myvote = mpi.rank + 1
        else:
            myvote = 0
        maxproc = mpi.allreduce(myvote, mpi.MAX)
        assert maxproc <= mpi.procs

        # Pattern for constructing per domain variables.
        domainNamePatterns = [
            os.path.join(procDir, "domain%i.silo:%%s" % i)
            for i in xrange(maxproc)
        ]

        # We need each domains nblock info.
        nblocks = [nblock]
        for sendproc in xrange(1, maxproc):
            if mpi.rank == sendproc:
                mpi.send(nblock, dest=0, tag=50)
            if mpi.rank == 0:
                nblocks.append(mpi.recv(source=sendproc, tag=50)[0])

        # Create the master file.
        if mpi.rank == 0:
            fileName = os.path.join(baseDirectory, baseName + ".silo")
            f = silo.DBCreate(fileName, silo.DB_CLOBBER, silo.DB_LOCAL, label,
                              silo.DB_HDF5)
            nullOpts = silo.DBoptlist()

            # Write the domain file names and types.
            domainNames = Spheral.vector_of_string(
                [p % "hblk0/hydro_mesh" for p in domainNamePatterns])
            meshTypes = Spheral.vector_of_int([silo.DB_QUADMESH] * maxproc)
            optlist = silo.DBoptlist(1024)
            assert optlist.addOption(silo.DBOPT_CYCLE, cycle) == 0
            assert optlist.addOption(silo.DBOPT_DTIME, time) == 0
            assert silo.DBPutMultimesh(f, "hydro_mesh", domainNames, meshTypes,
                                       optlist) == 0

            # Write material names.
            if materials:
                material_names = Spheral.vector_of_string(
                    [p % "/hblk0/Materials" for p in domainNamePatterns])
                matnames = Spheral.vector_of_string(
                    ["void"] + [x.name for x in materials])
                matnos = Spheral.vector_of_int(range(len(materials) + 1))
                assert len(material_names) == maxproc
                assert len(matnames) == len(materials) + 1
                assert len(matnos) == len(materials) + 1
                optlist = silo.DBoptlist(1024)
                assert optlist.addOption(silo.DBOPT_CYCLE, cycle) == 0
                assert optlist.addOption(silo.DBOPT_DTIME, time) == 0
                assert optlist.addOption(silo.DBOPT_MMESH_NAME,
                                         "hydro_mesh") == 0
                assert optlist.addOption(silo.DBOPT_MATNAMES,
                                         silo.DBOPT_NMATNOS, matnames) == 0
                assert optlist.addOption(silo.DBOPT_MATNOS, silo.DBOPT_NMATNOS,
                                         matnos) == 0
                assert silo.DBPutMultimat(f, "Materials", material_names,
                                          optlist) == 0

            # Write the variables descriptors.
            types = Spheral.vector_of_int([silo.DB_QUADVAR] * maxproc)
            for var, varname in vars:
                domainVarNames = Spheral.vector_of_string()
                for iproc, p in enumerate(domainNamePatterns):
                    domainVarNames.append(p % ("/hblk0/" + varname))
                assert len(domainVarNames) == maxproc
                optlistMV = silo.DBoptlist()
                assert optlistMV.addOption(silo.DBOPT_CYCLE, cycle) == 0
                assert optlistMV.addOption(silo.DBOPT_DTIME, time) == 0
                #assert optlistMV.addOption(silo.DBOPT_TENSOR_RANK, silo.DB_VARTYPE_SCALAR) == 0
                assert optlistMV.addOption(silo.DBOPT_BLOCKORIGIN, 0) == 0
                assert optlistMV.addOption(silo.DBOPT_MMESH_NAME,
                                           "hydro_mesh") == 0
                assert silo.DBPutMultivar(f, varname, domainVarNames, types,
                                          optlistMV) == 0

            # Write the dummy variable "akap_0" to fool Hades into thinking we're actually Hydra or something.
            assert silo.DBPutQuadvar1(
                f, "akap_0", "hydro_mesh",
                Spheral.vector_of_double([0.0] * (ndim * ndim)),
                Spheral.vector_of_double(), silo.DB_ZONECENT,
                Spheral.vector_of_int([ndim] * ndim), nullOpts) == 0

            # Write domain and mesh size info.
            assert silo.DBMkDir(f, "Decomposition") == 0
            assert silo.DBWrite(f, "Decomposition/NumDomains", maxproc) == 0
            assert silo.DBWrite(f, "Decomposition/NumLocalDomains",
                                maxproc) == 0
            assert silo.DBWrite(f, "Decomposition/NumBlocks", 1) == 0
            #assert silo.DBWrite(f, "Decomposition/LocalName", "hblk") == 0
            localDomains = Spheral.vector_of_int(range(maxproc))
            domainFiles = Spheral.vector_of_vector_of_int(
                [Spheral.vector_of_int(range(maxproc))])
            assert silo.DBWrite(f, "Decomposition/LocalDomains",
                                localDomains) == 0
            assert silo.DBWrite(f, "DomainFiles", domainFiles) == 0

            for iproc in xrange(maxproc):
                assert silo.DBMkDir(f, "Decomposition/gmap%i" % iproc) == 0
                stuff = Spheral.vector_of_int([0] * 12)
                for jdim in xrange(ndim):
                    stuff[6 + jdim] = nblocks[iproc][jdim]
                if iproc in (0, maxproc - 1):
                    assert silo.DBWrite(
                        f, "Decomposition/gmap%i/NumNeighbors" % iproc, 1) == 0
                else:
                    assert silo.DBWrite(
                        f, "Decomposition/gmap%i/NumNeighbors" % iproc, 2) == 0
                assert silo.DBWrite(f, "Decomposition/gmap%i/gmap" % iproc,
                                    stuff) == 0

        # Close the file.
        if mpi.rank == 0:
            assert silo.DBClose(f) == 0
            del f

        return maxproc
Example No. 30
 def mpirecv(pe=0, ms=0):
     return mpi.recv(pe, ms)
Example No. 31
 def recv(self):
     # Read data from this specific task
     (data, status) = mpi.recv(self.tid)
     return Message(data, self, self.manager, status)
Example No. 32
	if (len(possible) == 0):
		return 0
	(x, y) = random.choice(possible)
	player.SetPosition(x, y)
	board[y][x] = player
	return board

#main code
if (mpi.rank == 0):
	#proc 0 runs the game
	players = range(0, mpi.size) #here we store the master classes; 0 is a placeholder
	board = init_board(BOARD_DIMENSION_WIDTH, BOARD_DIMENSION_HEIGHT)

	#first we need to set up our players and board
	for player in range(1, mpi.size):
		players[player] = instantiate_master(mpi.recv(player)[0], player)
		board = PutPlayerInBoard(board, players[player])
		if (board == 0):
			raise StandardError("No room for player " + player.__str__())

	mpi.barrier()
	
	#now we run the game
	starttime = time.time()
	exited = 0
	
	while(time.time() <= starttime + GAME_LENGTH):
		#Update our queues
		if (len(mpi._recv_queue) > 0):
			packet = mpi.recv(mpi.ANY_SOURCE)
			(data, status) = packet #data[0] is the command string, data[1] is a list of args
Example No. 33
sleepEventStart = mpe.log_get_event_number()
sleepEventEnd = mpe.log_get_event_number()

mpe.describe_state( runEventStart, runEventEnd, "Full Runtime", "blue" )
mpe.describe_state( sendEventStart, sendEventEnd, "send", "red" )
mpe.describe_state( recvEventStart, recvEventEnd, "recv", "green" )
mpe.describe_state( sleepEventStart, sleepEventEnd, "sleep", "turquoise" )

mpe.log_event( runEventStart, rank, "starting run")
# Let's send and receive a 100 messages and generate 100(200?) events.
for i in xrange(100):
    if( rank == 0 ):
        # Generate 100 numbers, send them to rank 1
        mpe.log_event( sendEventStart, i, "start send" )
        data = Numeric.array( range(10000), Numeric.Int32 )
        mpi.send( data, 10000, mpi.MPI_INT, 1, i, mpi.MPI_COMM_WORLD )
        mpe.log_event( sendEventEnd, i, "end send")
    else:
        mpe.log_event( recvEventStart, i, "start recv" )
        rdata = mpi.recv( 10000, mpi.MPI_INT, 0, i, mpi.MPI_COMM_WORLD )
        mpe.log_event( recvEventEnd, i, "end recv" )
    if( i == 50 ):
        mpe.log_event( sleepEventStart, i, "start sleep" )
        time.sleep(1)
        mpi.barrier( mpi.MPI_COMM_WORLD )
        mpe.log_event( sleepEventEnd, i, "end sleep")

mpe.log_event( runEventEnd, rank, "stopping run")
mpe.finish_log("test1")
mpi.finalize()
 
Example No. 34
    # initialize list of image rows
    image = []
    for i in range(ny):
        image.append(-1)
 
    # get all workers started on tasks
    for n in range(1, mpi.size):
        mpi.send(next_row_to_assign, n)
        next_row_to_assign += 1
        workers_running += 1
 
    # master's main loop:
    while workers_running > 0:
        # receive computed result from any worker
        result, status = mpi.recv(mpi.ANY_SOURCE)
        worker_id = status.source
        row_completed, row_data = result
 
        jobs_completed_by_worker[worker_id] += 1
 
        # incorporate newly computed next_row_to_assign into image data
        image[row_completed] = row_data
 
        if next_row_to_assign < ny:
            # send new work unit to the (now) idle worker
            mpi.send(next_row_to_assign, worker_id) 
            next_row_to_assign += 1
        else:
            # use -1 as the row number to signal all done
            mpi.send(-1, worker_id)
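
The master loop above hands out row numbers and uses -1 as a shutdown sentinel. A hypothetical worker-side loop under the same convention (compute_row stands in for whatever per-row work the original program performs):

import mpi

def worker_loop():
    while 1:
        row, status = mpi.recv(0)
        if row == -1:
            break  # master signalled that all rows are done
        mpi.send((row, compute_row(row)), 0)  # compute_row is assumed here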
Example No. 35
import mpi

if mpi.rank == 0:
    for i in range(1, mpi.size):
        print mpi.recv()[0]
else:
    mpi.send("Hello from process " + str(mpi.rank), 0)
Example No. 36
"""
Shape is not retained between sender and receiver.

This really should be fixed.
"""
import Numeric
import mpi
A = Numeric.ones( (3,4), 'i' )
rank,size = mpi.init()
if rank == 0:
    mpi.isend( A, (3*4), mpi.MPI_INT, 0, 0, mpi.MPI_COMM_WORLD )
    #[ valid send request#: -1409286144, count: 12, datatype: 1275069445, destination: 0, tag: 0, comm: [communicator#:1140850688,size:1] ]
    B = mpi.recv( (3*4), mpi.MPI_INT, 0, 0, mpi.MPI_COMM_WORLD )
    # array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],'i')
else:
    pass

mpi.finalize()
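
As the docstring warns, the (3, 4) shape does not survive the round trip; the receiver gets a flat 12-element array. A minimal workaround sketch on the receiving side is to restore the shape by hand with Numeric's reshape:

B = Numeric.reshape(B, (3, 4))  # recover the original (3, 4) layout after recv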
Example No. 37
    # initialize list of image rows
    image = []
    for i in range(ny):
        image.append(-1)

    # get all workers started on tasks
    for n in range(1, mpi.size):
        mpi.send(next_row_to_assign, n)
        next_row_to_assign += 1
        workers_running += 1

    # master's main loop:
    while workers_running > 0:
        # receive computed result from any worker
        result, status = mpi.recv(mpi.ANY_SOURCE)
        worker_id = status.source
        row_completed, row_data = result

        jobs_completed_by_worker[worker_id] += 1

        # incorporate newly computed next_row_to_assign into image data
        image[row_completed] = row_data

        if next_row_to_assign < ny:
            # send new work unit to the (now) idle worker
            mpi.send(next_row_to_assign, worker_id)
            next_row_to_assign += 1
        else:
            # use -1 as the row number to signal all done
            mpi.send(-1, worker_id)
Example No. 38
# Run various size messages
for power in range(k):
     mpi.barrier()
     n = 2 ** power
     msg = 'a'*n

     python = []
     c = []
     # I am a sender
     if mpi.rank in sendmap.keys():
          dst = sendmap[mpi.rank]
          for run in range(runs):
               mpi.barrier()
               t0 = time()
               x = mpi.send(msg,dst)
               y,status = mpi.recv(dst)
               python.append( time()-t0 )
               del t0
               del x

               mpi.barrier()
               t0 = time()
               x = pingpong.send(n,dst)
               y = pingpong.recv(n,dst)
               c.append( time()-t0 )

               del t0
               del x
               del y
               
Example No. 39
from types import TupleType

try:
    import mpi
    me = mpi.rank
    npes = mpi.procs
except ImportError:
    me = 0
    npes = 0

if npes > 0: lparallel = 1
else: lparallel = 0

# --- The interface has changed some in the newest version of pyMPI.
# --- Check the interface to the mpi.recv command. The newer versions
# --- return a tuple instead of just the data itself.
# --- Is there a better way of doing this?
if lparallel:
    mpi.send(me, me)
    _i = mpi.recv(me)
    if type(_i) == TupleType: _newpympi = 1
    else: _newpympi = 0
else:
    _newpympi = 1

if _newpympi:

    def mpirecv(pe=0, ms=0):
        result, stat = mpi.recv(pe, ms)
        return result
else:

    def mpirecv(pe=0, ms=0):
        return mpi.recv(pe, ms)
Example No. 40
def main():
	if sys.argv[-1].startswith("usefs="): sys.argv = sys.argv[:-1]	# remove the runpar fileserver info

	(options,args) =  parse_command_line()
	
	if not options.nolog and (not mpi or (mpi and mpi.rank==0)): EMAN.appinit(sys.argv)

	inputParm = EMAN.ccmlInputParm()
	sf = EMAN.XYData()
	if options.sfFileName != "" :
		readsf = sf.readFile(options.sfFileName)
		if ((readsf == -1) and (options.verbose > 0)) :
			print "The file of scattering factor does NOT exist"
	inputParm.scateringFactor = sf

	startNumOfRawImages = options.startNumOfRawImages
	#endNumOfRawImages = options.endNumOfRawImages

	refImageFileName = args[-1]
	numOfRefImages = options.numOfRefImages
	solutionFile = options.solutionFile

	# write log info to .emanlog file so that eman program can browse the history
	if not options.nolog and (not mpi or (mpi and mpi.rank==0)): 
		pid = EMAN.LOGbegin(sys.argv)
		for f in args[0:-1]: EMAN.LOGInfile(pid,f)
		EMAN.LOGReffile(pid,args[-1])
		if options.solutionFile: EMAN.LOGOutfile(pid,options.solutionFile)
		if options.listFile: EMAN.LOGOutfile(pid,options.listFile)
		if options.mrcSolutionFile: EMAN.LOGOutfile(pid,options.mrcSolutionFile)

	inputParm.sym = options.sym
	inputParm.FFTOverSampleScale = options.FFTOverSampleScale
	inputParm.pftStepSize = options.pftStepSize
	inputParm.deltaR = options.deltaR
	inputParm.RMin = options.RMin
	inputParm.RMax = options.RMax
	inputParm.searchMode = options.searchMode
	inputParm.scalingMode = options.scalingMode
	inputParm.residualMode = options.residualMode
	inputParm.weightMode = options.weightMode
	# inputParm.rawImageFN will be set later
	inputParm.refImagesFN = refImageFileName
	inputParm.rawImageIniParmFN = options.rawImageIniParmFN
	inputParm.rawImagePhaseCorrected = options.phasecorrected

	inputParm.maxNumOfRun = options.maxNumOfRun
	inputParm.zScoreCriterion = options.zScoreCriterion
	inputParm.residualCriterion = options.residualCriterion
	inputParm.solutionCenterDiffCriterion = options.solutionCenterDiffCriterion
	inputParm.solutionOrientationDiffCriterion = options.solutionOrientationDiffCriterion/180.0*pi
	inputParm.maxNumOfIteration = options.maxNumOfIteration
	inputParm.numOfRandomJump = options.numOfRandomJump
	inputParm.numOfFastShrink = options.numOfFastShrink
	inputParm.numOfStartConfigurations = options.numOfStartConfigurations
	inputParm.orientationSearchRange = options.orientationSearchRange/180.0*pi
	inputParm.centerSearchRange = options.centerSearchRange

	inputParm.numOfRefImages = options.numOfRefImages
	inputParm.refEulerConvention = options.refEulerConvention
	#maskR = options.maskR
	#if (maskR<=0): maskR = refImageSizeY/2

	inputParm.verbose = options.verbose
	verbose = options.verbose
	#verboseSolution = options.verboseSolution

	updataHeader = options.updataHeader
	solutionFile = options.solutionFile
	mrcSolutionFile = options.mrcSolutionFile
	iniCenterOrientationMode = options.iniCenterOrientationMode
	refCenterOrientationMode = options.refCenterOrientationMode

	rawImages = []
	if not mpi or (mpi and mpi.rank==0):
		for imgfile in args[0:-1]:
			imgnum = EMAN.fileCount(imgfile)[0]
			for i in range(imgnum): rawImages.append((imgfile, i))
	if mpi: rawImages = mpi.bcast(rawImages)
	
	endNumOfRawImages = options.endNumOfRawImages
	if endNumOfRawImages <=0  or endNumOfRawImages > len(rawImages):
		endNumOfRawImages = len(rawImages)

	numRawImages = endNumOfRawImages - startNumOfRawImages

	if mpi:
		ptclset = range(startNumOfRawImages + mpi.rank, endNumOfRawImages, mpi.size)
	else:
		ptclset = range(startNumOfRawImages, endNumOfRawImages)
	
	solutions = []

	rMask = options.rMask        #mask size is given
	if options.rMask <= 0 : rMask = refImageSizeY/2   #mask size = half image size
	
	rMask1 = options.rMask1             #output tnf mask size is given
	if options.rMask1 <= 0 : rMask1 = rMask    #output tnf mask size = half image size

	inputParm.rMask = rMask
	inputParm.rMask1 = rMask1

	rawImage = EMAN.EMData()
	rawImage.getEuler().setSym(inputParm.sym) #set the symmetry of the raw particle
	inputParm.rawImageFN = rawImages[0][0] #give the initial raw particle filename
	print "start to prepare------"
	rawImage.crossCommonLineSearchPrepare(inputParm) #prepare, create pseudo PFT of ref images
	print "end to prepare------"
	inputParm.rawImage = rawImage
	#for rawImgSN in ptclset:
	for index in range(len(ptclset)):
		rawImgSN = ptclset[index]
		inputParm.rawImageFN = rawImages[rawImgSN][0]
		inputParm.thisRawImageSN = rawImages[rawImgSN][1]
		if mpi: print "rank %d: %d in %d-%d (%d in %d-%d)" % (mpi.rank, rawImgSN, startNumOfRawImages, endNumOfRawImages, index, 0, len(ptclset))
		#rawImage.readImage(rawImages[rawImgSN][0], rawImages[rawImgSN][1])

		#rawImage.applyMask(rMask, 6) #apply mask type 6 [edge mean value] to raw image, center will be image center
		#rawImage.getEuler().setSym("icos")
		#if rawImage.hasCTF() == 1:
			#ctfParm = rawImage.getCTF()
			#inputParm.zScoreCriterion = options.zScoreCriterion + atan(abs(ctfParm[0])-1.5)/(pi/4) +0.59 #adjust zScore criterion -0.6 --> +1.2, 1.5, 2.0
			#inputParm.numOfRefImages = int(min(numOfRefImages, max(numOfRefImages*exp(-(abs(ctfParm[0])/2.0-0.15))+0.5, 5.0))) # adjust maxNumOfRun, the min is 2

		inputParm.thisRawImageSN = rawImgSN

		solutionCenterDiffCriterion = inputParm.solutionCenterDiffCriterion
		solutionOrientationDiffCriterion = inputParm.solutionOrientationDiffCriterion

		#initialize Center And Orientation by one of the following modes

		if iniCenterOrientationMode == "iniparmfile" :
			inputParm.initializeCenterAndOrientationFromIniParmFile() # need to set "refEulerConvention"
		elif iniCenterOrientationMode == "headerfile" :
			inputParm.initializeCenterAndOrientationFromParticle() # need to set "refEulerConvention"
		else :
			inputParm.initializeCenterAndOrientationFromRandom()  # default is random orientation and physical center

		#set the reference Center And Orientation by one of the following modes

		if refCenterOrientationMode == "iniparmfile" : inputParm.setRefCenterAndOrientationFromIniParmFile() # need to set "refEulerConvention"
		elif refCenterOrientationMode == "headerfile" : inputParm.setRefCenterAndOrientationFromParticle() # need to set "refEulerConvention"
		else : inputParm.setRefCenterAndOrientationFromInitializedParms() # default is copy the initial center and orientation

		rawImage.crossCommonLineSearchReadRawParticle(inputParm) #create pseudo PFT of raw image

		maxNumOfRun = inputParm.maxNumOfRun
		outputParmList = []
		numOfRun = 0
		passAllConsistencyCriteria = 0
		while (numOfRun < maxNumOfRun) or (len(outputParmList) < 2):

			if (iniCenterOrientationMode != "iniparmfile") and (iniCenterOrientationMode != "headerfile") :
				inputParm.initializeCenterAndOrientationFromRandom()  # default is random orientation and physical center
			if (refCenterOrientationMode != "iniparmfile") and (refCenterOrientationMode != "headerfile") :
				inputParm.setRefCenterAndOrientationFromInitializedParms() # default is copy the initial center and orientation

			numOfRun = numOfRun + 1
			print "numOfRun = ", numOfRun

			############################################################################
			############ execute cross common line search for reference ################
			############################################################################
			outputParm  = rawImage.crossCommonLineSearch(inputParm)
			############################################################################
			# pass criteria check
			outputParmList.append(outputParm) #if passed criteria, e.g. zscore, residualThreshold, etc
			############################################################################

			outputParmList.sort(lambda x, y: cmp(x.residual, y.residual))

			############################################################################
			########################## consistency check ###############################
			############################################################################
			#passConsistencyCriteria = 0
			finalOutputParmList = []
			lowestResidualList = []
			lengthOfList = len(outputParmList)
			if lengthOfList < 2 : continue
			for i in range(lengthOfList-1):
				thisOutputParm = outputParmList[i]
				numOfPairsPassConsistencyCheck = 0
				for j in range(i+1,lengthOfList):
					refOutputParm = outputParmList[j]
					tmpOutputParm = EMAN.ccmlOutputParm() #create a new output parm object
					tmpOutputParm.rawImageSN = thisOutputParm.rawImageSN #copy all parameters
					tmpOutputParm.residual = thisOutputParm.residual
					tmpOutputParm.sigma = thisOutputParm.sigma
					tmpOutputParm.verbose = thisOutputParm.verbose
					tmpOutputParm.zScore = thisOutputParm.zScore
					tmpOutputParm.zScoreCriterion = thisOutputParm.zScoreCriterion

					tmpOutputParm.passAllCriteria = 0
					tmpOutputParm.setCalculatedCenterAndOrientation(thisOutputParm.cx,thisOutputParm.cy,thisOutputParm.q)
					tmpOutputParm.setRefCenterAndOrientation(refOutputParm.cx, refOutputParm.cy, refOutputParm.q)
					tmpOutputParm.calculateDifferenceWithRefParm() #calculate the difference

					centerDiff = tmpOutputParm.centerDiff
					orientationDiff = tmpOutputParm.orientationDiff
					
					#####  FLIP CASE :  if no consistency found, try flip this orientation
					if ((centerDiff > solutionCenterDiffCriterion) or (orientationDiff > solutionOrientationDiffCriterion)) :
						quatFlip = EMAN.Quaternion(refOutputParm.q.getEuler().alt(), refOutputParm.q.getEuler().az(), refOutputParm.q.getEuler().phi()+pi)
						tmpOutputParm.setRefCenterAndOrientation(refOutputParm.cx, refOutputParm.cy, quatFlip)
						tmpOutputParm.calculateDifferenceWithRefParm() #calculate the difference
						centerDiff = tmpOutputParm.centerDiff
						orientationDiff = tmpOutputParm.orientationDiff
						tmpOutputParm.setRefCenterAndOrientation(refOutputParm.cx, refOutputParm.cy, refOutputParm.q) #set back the exact orientation of reference 

					#Save the configurations with lowest residuals
					if (i<3) and (j==i+1) : lowestResidualList.append(tmpOutputParm)
					
					#make the good/answers list
					if ((centerDiff < solutionCenterDiffCriterion) and (orientationDiff < solutionOrientationDiffCriterion)) :
						numOfPairsPassConsistencyCheck += 1
						if numOfPairsPassConsistencyCheck == 1 : #save to the final list
							tmpOutputParm.passAllCriteria = 1
							finalOutputParmList.append(tmpOutputParm)
						if i==0 and numOfPairsPassConsistencyCheck >= options.numConsistentRun: #if this is the lowest-residual solution, check whether it has enough consistent pairs (options.numConsistentRun)
							passAllConsistencyCriteria = 1
							break
						if i>0 : break #if not the first one, find one pair of consistency, then break
				
				#no break here, just for saving all possible solutions

			if passAllConsistencyCriteria and len(finalOutputParmList) >= options.numConsistentRun: break #stop once enough consistent pairs (options.numConsistentRun) were found


		rawImage.crossCommonLineSearchReleaseParticle(inputParm) # release the memory related to this raw particle

		# if no consistency found, keep the lowest ones as output
		if len(finalOutputParmList) == 0 : finalOutputParmList = lowestResidualList
		for i in range(len(finalOutputParmList)) : 
			if passAllConsistencyCriteria : finalOutputParmList[i].passAllCriteria = 1
			else : finalOutputParmList[i].passAllCriteria = 0

		if options.solutionFile:
			for i in range(len(finalOutputParmList)) : finalOutputParmList[i].outputResult(solutionFile)

		outputParm = finalOutputParmList[0] #just use the lowest residual as regular output
		if outputParm.passAllCriteria: 	passfail = "pass"
		else: passfail = "fail"

		print "Final result: euler=%g\t%g\t%g\tcenter=%g\t%g\tresidue=%g\t%s" % (outputParm.alt*180/pi, outputParm.az*180/pi, outputParm.phi*180/pi, outputParm.cx, outputParm.cy, outputParm.residual, passfail)
		
		defocus = 0
		if options.scoreFile:
			rawImage.readImage(rawImages[rawImgSN][0], rawImages[rawImgSN][1], 1) # read header only
			if rawImage.hasCTF():
				defocus = rawImage.getCTF()[0] # otherwise defocus stays at 0

		solution = (rawImages[rawImgSN][0], rawImages[rawImgSN][1], outputParm.alt, outputParm.az, outputParm.phi, \
					   outputParm.cx, outputParm.cy, defocus, outputParm.residual, outputParm.passAllCriteria)
		solutions.append( solution )

		sys.stdout.flush()

	rawImage.crossCommonLineSearchFinalize(inputParm) #finalize, i.e. delete memories

	if mpi:
		if options.verbose: 
			print "rank %d: done and ready to output" % (mpi.rank)
			sys.stdout.flush()
		mpi.barrier()
		#print "rank %d: %s" % (mpi.rank, solutions)
		if mpi.rank==0:
			for r in range(1,mpi.size):
				msg, status = mpi.recv(source = r, tag = r)
				solutions += msg
			def ptcl_cmp(x, y):
				eq = cmp(x[0], y[0])
				if not eq: return cmp(x[1],y[1])
				else: return eq
			solutions.sort(ptcl_cmp)
		else:
			mpi.send(solutions, 0, tag = mpi.rank)

	if not mpi or (mpi and mpi.rank==0):
		if options.scoreFile:
			sFile = open(options.scoreFile, "w")
			sFile.write("#LST\n")
			for i in solutions:
				if i[-1]: 
					sFile.write("%d\t%s\tdefocus=%g\tresidual=%g\n" % (i[1], i[0], i[7], i[8]))
			sFile.close()
			
		if options.listFile:
			lFile = open(options.listFile, "w")
			lFile.write("#LST\n")
			for i in solutions:
				if i[-1]: 
					lFile.write("%d\t%s\t%g\t%g\t%g\t%g\t%g\n" % (i[1], i[0], i[2]*180.0/pi, i[3]*180.0/pi, i[4]*180.0/pi, i[5], i[6]))
			lFile.close()
		if options.mrcSolutionFile:
			outFile = open(options.mrcSolutionFile, "w")
			for i in solutions:
				if i[-1]:
					#rawImage.readImage(i[0], i[1], 1)
					rawImage.readImage(i[0], i[1])
					thisEu = EMAN.Euler(i[2], i[3], i[4])
					thisEu.convertToMRCAngle()
					alt = thisEu.alt_MRC()*180.0/pi
					az  = thisEu.az_MRC()*180.0/pi
					phi = thisEu.phi_MRC()*180.0/pi
		
					cx  = i[5]
					cy  = i[6]
					dx = cx - rawImage.xSize()/2
					dy = cy - rawImage.ySize()/2
					rawImage.applyMask(rMask1,6,dx,dy,0) #apply mask type 6 [edge mean value] to raw image, center will be the solved center
					#tnfFileName = "%s-%d.tnf" % (os.path.basename(os.path.splitext(rawImages[rawImgSN][0])[0]), rawImages[rawImgSN][1])
					prefix = os.path.dirname(options.mrcSolutionFile).replace(" ", "")
					if prefix != "" : prefix = prefix + "/"
					tnfFileName = "%s%s-%d.tnf" % (prefix,os.path.basename(os.path.splitext(i[0])[0]), i[1])
					rawFFT = rawImage.doFFT()
					rawFFT.writeImage(tnfFileName,0)  #the tnf file has no header information; it is a pure FFT of the raw image file
		
					outFile.write("%s\n" % (os.path.abspath(tnfFileName)))
					outFile.write(" %d, %.4f, %.4f, %.4f, %.4f, %.4f, 0.0\n" % (0, alt, az, phi, cy, cx))
			outFile.close()
		if updataHeader:
			for i in solutions:
				rawImage.readImage(i[0], i[1], 1)
				if options.verbose:
					cx  = rawImage.get_center_x()
					cy  = rawImage.get_center_y()
					alt = rawImage.alt()
					az  = rawImage.az()
					phi = rawImage.phi()
					print "Update header: %s %d\t%7.5f  %7.5f  %7.2f  %7.2f  %7.2f => %7.5f  %7.5f  %7.2f  %7.2f  %7.2f" % \
						(i[0], i[1], alt*180.0/pi, az*180.0/pi, phi*180.0/pi, cx, cy, i[2]*180.0/pi, i[3]*180.0/pi, i[4]*180.0/pi, i[5], i[6])
				rawImage.setRAlign(i[2], i[3], i[4])
				rawImage.set_center_x(i[5])
				rawImage.set_center_y(i[6])
				imgtype = EMAN.EMData.ANY
				rawImage.writeImage(i[0], i[1], imgtype, 1)
	if not options.nolog and (not mpi or (mpi and mpi.rank==0)): EMAN.LOGend()
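
# Standalone sketch (not part of the example above): the rank-0 gather pattern
# used above, assuming the pyMPI-style mpi module where mpi.recv(source, tag)
# returns a (message, status) pair. The per-rank data here is hypothetical.
import mpi

local_solutions = [(mpi.rank, "placeholder-result")]  # hypothetical per-rank results
if mpi.rank == 0:
    all_solutions = list(local_solutions)
    for r in range(1, mpi.size):
        msg, status = mpi.recv(source=r, tag=r)  # one tagged message per worker rank
        all_solutions += msg
    all_solutions.sort()  # deterministic order, independent of arrival order
else:
    mpi.send(local_solutions, 0, tag=mpi.rank)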
Exemplo n.º 41
0
def mpirecv(pe=0, ms=0):
    # Thin wrapper around mpi.recv: return only the received payload and
    # discard the status object.
    result, stat = mpi.recv(pe, ms)
    return result
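
# Hedged usage sketch for the wrapper above, assuming the pyMPI-style mpi
# module used in the other examples (mpi.send(obj, dest, tag) is its
# counterpart); the tag value 7 is arbitrary.
import mpi

if mpi.rank == 0:
    mpi.send("ping", 1, 7)       # send to rank 1 with tag 7
    print mpirecv(1, 7)          # wrapper returns only the payload
elif mpi.rank == 1:
    print mpirecv(0, 7)
    mpi.send("pong", 0, 7)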
Exemplo n.º 42
0
import mpi
import sys, Numeric

print "Creating Data Array..."
data = Numeric.array( [1,2,3,4], Numeric.Int32 )

print "Initializing MPI: (%s,%s)"%(len(sys.argv),sys.argv)
rank, size = mpi.init( len(sys.argv), sys.argv )
print "(%s,%s): initialized..." %(rank,size)

if( rank == 0 ):
    print "(%s,%s): sending: %s" %( rank, size, data )
    request = mpi.isend( data, 4, mpi.MPI_INT, 1, 0, mpi.MPI_COMM_WORLD )
    print "(%s,%s): request#: %s" %( rank, size, request )
    data2 = Numeric.array([ -1, -1, -1, -1 ], Numeric.Int32 )
elif(rank == 1):
    print "(%s,%s): receiving..." %(rank,size)
    data2 = mpi.recv( 4, mpi.MPI_INT, 0, 0, mpi.MPI_COMM_WORLD )
else:
    data2 = None  # ranks other than 0 and 1 take no part in this exchange

print "(%s,%s): received: %s" % ( rank, size, data2 )

mpi.finalize()
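
# For comparison, a hedged sketch of the same exchange using the higher-level
# pyMPI-style API seen in most other examples here: no explicit datatypes or
# communicators, and the send handle is waited on before the buffer is reused.
import mpi

data = [1, 2, 3, 4]
if mpi.rank == 0:
    handle = mpi.isend(data, 1, 0)     # non-blocking send to rank 1, tag 0
    handle.wait()                      # complete the send before moving on
elif mpi.rank == 1:
    received, status = mpi.recv(0, 0)  # blocking receive from rank 0, tag 0
    print "rank 1 received:", received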
Exemplo n.º 43
0
def main():
    EMAN.appinit(sys.argv)
    if sys.argv[-1].startswith("usefs="):
        sys.argv = sys.argv[:-1]  # remove the runpar fileserver info

    (options, rawimage, refmap) = parse_command_line()

    sffile = options.sffile
    verbose = options.verbose
    shrink = options.shrink
    mask = options.mask
    first = options.first
    last = options.last
    scorefunc = options.scorefunc

    projfile = options.projection
    output_ptcls = options.update_rawimage
    cmplstfile = options.cmplstfile
    ortlstfile = options.ortlstfile
    startSym = options.startSym
    endSym = options.endSym

    if not options.nocmdlog:
        pid = EMAN.LOGbegin(sys.argv)
        EMAN.LOGInfile(pid, rawimage)
        EMAN.LOGInfile(pid, refmap)
        if projfile:
            EMAN.LOGOutfile(pid, projfile)
        if output_ptcls:
            EMAN.LOGOutfile(pid, output_ptcls)
        if cmplstfile:
            EMAN.LOGOutfile(pid, cmplstfile)
        if ortlstfile:
            EMAN.LOGOutfile(pid, ortlstfile)

    ptcls = []
    if not (mpi or pypar) or ((mpi and mpi.rank == 0) or (pypar and pypar.rank() == 0)):
        ptcls = EMAN.image2list(rawimage)
        ptcls = ptcls[first:last]

        print "Read %d particle parameters" % (len(ptcls))
        # ptcls = ptcls[0:10]

    if mpi and mpi.size > 1:
        ptcls = mpi.bcast(ptcls)
        print "rank=%d\t%d particles" % (mpi.rank, len(ptcls))
    elif pypar and pypar.size() > 1:
        ptcls = pypar.broadcast(ptcls)
        print "rank=%d\t%d particles" % (pypar.rank(), len(ptcls))

    if sffile:
        sf = EMAN.XYData()
        sf.readFile(sffile)
        sf.logy()

    if not (mpi or pypar) or ((mpi and mpi.rank == 0) or (pypar and pypar.rank() == 0)):
        if cmplstfile and projfile:
            if output_ptcls:
                raw_tmp = output_ptcls
            else:
                raw_tmp = rawimage
            fp = open("tmp-" + cmplstfile, "w")
            fp.write("#LST\n")
            for i in range(len(ptcls)):
                fp.write("%d\t%s\n" % (first + i, projfile))
                fp.write("%d\t%s\n" % (first + i, raw_tmp))
            fp.close()
        if (mpi and mpi.size > 1 and mpi.rank == 0) or (pypar and pypar.size() > 1 and pypar.rank() == 0):
            total_recv = 0
            if output_ptcls:
                total_recv += len(ptcls)
            if projfile:
                total_recv += len(ptcls)
            for r in range(total_recv):
                # print "before recv from %d" % (r)
                if mpi:
                    msg, status = mpi.recv()
                else:
                    msg = pypar.receive(r)
                    # print "after recv from %d" % (r)
                    # print msg, status
                d = emdata_load(msg[0])
                fname = msg[1]
                index = msg[2]
                d.writeImage(fname, index)
                print "wrtie %s %d" % (fname, index)
            if options.ortlstfile:
                solutions = []
                for r in range(1, mpi.size):
                    msg, status = mpi.recv(source=r, tag=r)
                    solutions += msg

                def ptcl_cmp(x, y):
                    eq = cmp(x[0], y[0])
                    if not eq:
                        return cmp(x[1], y[1])
                    else:
                        return eq

                solutions.sort(ptcl_cmp)
    if (not mpi or (mpi and ((mpi.size > 1 and mpi.rank > 0) or mpi.size == 1))) or (
        not pypar or (pypar and ((pypar.size() > 1 and pypar.rank() > 0) or pypar.size() == 1))
    ):
        map3d = EMAN.EMData()
        map3d.readImage(refmap, -1)
        map3d.normalize()
        if shrink > 1:
            map3d.meanShrink(shrink)
        map3d.realFilter(0, 0)  # threshold, remove negative pixels

        imgsize = map3d.ySize()

        img = EMAN.EMData()

        ctffilter = EMAN.EMData()
        ctffilter.setSize(imgsize + 2, imgsize, 1)
        ctffilter.setComplex(1)
        ctffilter.setRI(1)

        if mpi and mpi.size > 1:
            ptclset = range(mpi.rank - 1, len(ptcls), mpi.size - 1)
        elif pypar and pypar.size() > 1:
            ptclset = range(pypar.rank() - 1, len(ptcls), pypar.size() - 1)
        else:
            ptclset = range(0, len(ptcls))

        if mpi:
            print "Process %d/%d: %d/%d particles" % (mpi.rank, mpi.size, len(ptclset), len(ptcls))

        solutions = []
        for i in ptclset:
            ptcl = ptcls[i]
            e = EMAN.Euler(ptcl[2], ptcl[3], ptcl[4])
            dx = ptcl[5] - imgsize / 2
            dy = ptcl[6] - imgsize / 2
            print "%d\talt,az,phi=%8g,%8g,%8g\tx,y=%8g,%8g" % (
                i + first,
                e.alt() * 180 / pi,
                e.az() * 180 / pi,
                e.phi() * 180 / pi,
                dx,
                dy,
            ),

            img.readImage(ptcl[0], ptcl[1])
            img.setTAlign(-dx, -dy, 0)
            img.setRAlign(0, 0, 0)
            img.rotateAndTranslate()  # now img is centered
            img.applyMask(int(mask - max(abs(dx), abs(dy))), 6, 0, 0, 0)
            if img.hasCTF():
                fft = img.doFFT()

                ctfparm = img.getCTF()
                ctffilter.setCTF(ctfparm)
                if options.phasecorrected:
                    if sffile:
                        ctffilter.ctfMap(64, sf)  # Wiener filter with 1/CTF (no sign) correction
                else:
                    if sffile:
                        ctffilter.ctfMap(32, sf)  # Wiener filter with 1/CTF (including sign) correction
                    else:
                        ctffilter.ctfMap(2, EMAN.XYData())  # flip phase

                fft.mult(ctffilter)
                img2 = fft.doIFT()  # now img2 is the CTF-corrected raw image

                img.gimmeFFT()
                del fft
            else:
                img2 = img

            img2.normalize()
            if shrink > 1:
                img2.meanShrink(shrink)
            if scorefunc == "wfsccmp":
                # the weighted FSC score below needs an SNR curve
                if sffile:
                    snrcurve = img2.ctfCurve(9, sf)  # absolute SNR
                else:
                    snrcurve = img2.ctfCurve(3, EMAN.XYData())  # relative SNR

            e.setSym(startSym)
            maxscore = -1e30  # the larger the better
            scores = []
            for s in range(e.getMaxSymEl()):
                ef = e.SymN(s)
                # proj = map3d.project3d(ef.alt(), ef.az(), ef.phi(), -6)		# Wen's direct 2D accumulation projection
                proj = map3d.project3d(
                    ef.alt(), ef.az(), ef.phi(), -1
                )  # Pawel's fast projection, ~3 times faster than mode -6 with 216^3
                # don't use mode -4, it modifies its own data
                # proj2 = proj
                proj2 = proj.matchFilter(img2)
                proj2.applyMask(int(mask - max(abs(dx), abs(dy))), 6, 0, 0, 0)
                if scorefunc == "ncccmp":
                    score = proj2.ncccmp(img2)
                elif scorefunc == "lcmp":
                    score = -proj2.lcmp(img2)[0]
                elif scorefunc == "pcmp":
                    score = -proj2.pcmp(img2)
                elif scorefunc == "fsccmp":
                    score = proj2.fscmp(img2, [])
                elif scorefunc == "wfsccmp":
                    score = proj2.fscmp(img2, snrcurve)
                if score > maxscore:
                    maxscore = score
                    best_proj = proj2
                    best_ef = ef
                    best_s = s
                scores.append(score)
                # proj2.writeImage("proj-debug.img",s)
                # print "\tsym %2d/%2d: euler=%8g,%8g,%8g\tscore=%12.7g\tbest=%2d euler=%8g,%8g,%8g score=%12.7g\n" % \
                # 		   (s,60,ef.alt()*180/pi,ef.az()*180/pi,ef.phi()*180/pi,score,best_s,best_ef.alt()*180/pi,best_ef.az()*180/pi,best_ef.phi()*180/pi,maxscore)
            scores = Numeric.array(scores)
            print "\tbest=%2d euler=%8g,%8g,%8g max score=%12.7g\tmean=%12.7g\tmedian=%12.7g\tmin=%12.7g\n" % (
                best_s,
                best_ef.alt() * 180 / pi,
                best_ef.az() * 180 / pi,
                best_ef.phi() * 180 / pi,
                maxscore,
                MLab.mean(scores),
                MLab.median(scores),
                MLab.min(scores),
            )
            if projfile:
                best_proj.setTAlign(dx, dy, 0)
                best_proj.setRAlign(0, 0, 0)
                best_proj.rotateAndTranslate()

                best_proj.set_center_x(ptcl[5])
                best_proj.set_center_y(ptcl[6])
                best_proj.setRAlign(best_ef)
                # print "before proj send from %d" % (mpi.rank)

                if mpi and mpi.size > 1:
                    mpi.send((emdata_dump(best_proj), projfile, i + first), 0)
                elif pypar and pypar.size() > 1:
                    pypar.send((emdata_dump(best_proj), projfile, i + first), 0)
                # print "after proj send from %d" % (mpi.rank)
                else:
                    best_proj.writeImage(projfile, i + first)

            img2.setTAlign(0, 0, 0)
            img2.setRAlign(best_ef)
            img2.setNImg(1)
            # print "before raw send from %d" % (mpi.rank)
            if output_ptcls:
                if mpi and mpi.size > 1:
                    mpi.send((emdata_dump(img2), output_ptcls, i + first), 0)
                elif pypar and pypar.size() > 1:
                    pypar.send((emdata_dump(img2), output_ptcls, i + first), 0)
                # print "after raw send from %d" % (mpi.rank)
                else:
                    img2.writeImage(output_ptcls, i + first)

            solutions.append((ptcl[0], ptcl[1], best_ef.alt(), best_ef.az(), best_ef.phi(), ptcl[5], ptcl[6]))
        if mpi and (mpi.size > 1 and mpi.rank > 0):
            mpi.send(solutions, 0, tag=mpi.rank)

    if mpi:
        mpi.barrier()
    elif pypar:
        pypar.barrier()
    if mpi:
        mpi.finalize()
    elif pypar:
        pypar.finalize()

    if options.cmplstfile:
        os.rename("tmp-" + cmplstfile, cmplstfile)
    if options.ortlstfile:
        lFile = open(options.ortlstfile, "w")
        lFile.write("#LST\n")
        for i in solutions:
            lFile.write(
                "%d\t%s\t%g\t%g\t%g\t%g\t%g\n"
                % (i[1], i[0], i[2] * 180.0 / pi, i[3] * 180.0 / pi, i[4] * 180.0 / pi, i[5], i[6])
            )
        lFile.close()

    if not options.nocmdlog:
        EMAN.LOGend()
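
# The worker partition used above, shown as a standalone sketch: rank 0 acts
# as the writer, and ranks 1..size-1 each take every (size-1)-th particle
# (assumes the pyMPI-style mpi module; nptcls is a hypothetical count).
import mpi

nptcls = 10
if mpi.size > 1 and mpi.rank > 0:
    ptclset = range(mpi.rank - 1, nptcls, mpi.size - 1)
else:
    ptclset = range(0, nptcls)
print "rank %d handles particles %s" % (mpi.rank, ptclset)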
Exemplo n.º 44
0
import mpi

if mpi.rank == 0:
        for i in range(1,mpi.size):
                print mpi.recv()[0]
else:
        mpi.send("Hello from process " + str(mpi.rank),0)
Exemplo n.º 45
0
# elif mpi.rank == 2:
#     m, rc = mpi.recv(0)
#     print 'rank 2 received ', m
# mpi.barrier()

import mpi            # assumed: the pyMPI-style mpi module used in the other examples
import numpy as np    # assumed: numpy is needed for np.mean() below

array = [0.0] * mpi.size  # assumed initialization: one slot per rank for the gathered results

print('')

for j in range(3):

    # Running neural networks from Ursula Directory
    # depending on what iteration j is on
    if mpi.rank == 0:
        #for every child core, a result is received
        for i in range(1, mpi.size):
            #result is called 's'
            s, rc = mpi.recv(i)
            print 'received ', s, 'from rank ', i
            #saving what is received to array
            array[i] = s
    elif mpi.rank == 1:
        if (j != 0):  #if it is not the first time running
            print 'rank 1 running network '
            #    exec(open('./paths.sh').read())
            exec(
                open('./models/apple_stock_price_predictor_iterative1.py').
                read())
            # result needed is the Mean Squared Error to determine best network
            result = (np.mean(mse_test_loss_seq))
            print 'rank 1 sending result'
            # sending result to Master Core
            mpi.send(result, 0)
Exemplo n.º 46
0
import mpi

rank = mpi.rank
if rank == 0:
    data = {"a": 7, "b": 3.14}
    mpi.send(data, 1)
    print("Sending data from", rank, "data", data)
    data, status = mpi.recv(source=1)
    mpi.barrier()
    print("Receving data at", rank, "data", data)
elif rank == 1:
    data1 = {"a": 7, "b": "abc"}
    mpi.send(data1, 0)
    print("Sending data from", rank, "data", data1)
    data, status = mpi.recv(source=0)
    mpi.barrier()
    print("Recieving data at", rank, "data", data1)
Exemplo n.º 47
0
    def shuffleIntoBlocks(ndim, vals, xmin, xmax, nglobal):

        if ndim == 2:
            import Spheral2d as sph
        else:
            import Spheral3d as sph

        dx = [(xmax[j] - xmin[j]) / nglobal[j] for j in xrange(ndim)]
        ntot = reduce(mul, nglobal)

        # Which dimension should we divide up into?
        jsplit = min(ndim - 1, max(enumerate(nglobal), key=lambda x: x[1])[0])

        # Find the offset to the global lattice numbering on this domain.
        # This is based on knowing the native lattice sampling method stripes the original data
        # according to (i + j*nx + k*nx*ny), and simply divides that 1D serialization sequentially
        # between processors.
        offset = 0
        for sendproc in xrange(mpi.procs):
            n = mpi.bcast(len(vals), root=sendproc)
            if sendproc < mpi.rank:
                offset += n
        if mpi.rank == mpi.procs - 1:
            assert offset + len(vals) == ntot

        # A function to turn an index into the integer lattice coordinates
        def latticeCoords(iglobal):
            return (iglobal % nglobal[0],
                    (iglobal % (nglobal[0] * nglobal[1])) // nglobal[0],
                    iglobal // (nglobal[0] * nglobal[1]))

        # A function to tell us which block to assign a global index to
        slabsperblock = max(1, nglobal[jsplit] // mpi.procs)
        remainder = max(0, nglobal[jsplit] - mpi.procs * slabsperblock)
        islabdomain = [
            min(nglobal[jsplit], iproc * slabsperblock + min(iproc, remainder))
            for iproc in xrange(mpi.procs + 1)
        ]

        #sys.stderr.write("Domain splitting: %s %i %s\n" % (nglobal, jsplit, islabdomain))
        #sys.stderr.write("islabdomain : %s\n" % str(islabdomain))
        def targetBlock(index):
            icoords = latticeCoords(offset + index)
            return bisect.bisect_right(islabdomain, icoords[jsplit]) - 1

        # Build a list of (global_index, value, target_proc) for each of the lattice values.
        id_val_procs = [(offset + i, val, targetBlock(i))
                        for i, val in enumerate(vals)]
        #sys.stderr.write("id_val_procs : %s\n" % str(id_val_procs))
        #sys.stderr.write("map index -> slab : %s\n" % str([(offset + i, latticeCoords(offset + i), targetBlock(i)) for i in xrange(len(vals))]))
        #sys.stderr.write("id_val_procs : %s\n" % str([(i, tb, latticeCoords(i)) for (i, val, tb) in id_val_procs if i % 100 < 10 and tb != 0]))

        # Send our values to other domains.
        sendreqs, sendvals = [], []
        for iproc in xrange(mpi.procs):
            if iproc != mpi.rank:
                sendvals.append([(i, val) for (i, val, proc) in id_val_procs
                                 if proc == iproc])
                sendreqs.append(mpi.isend(sendvals[-1], dest=iproc, tag=100))

        # Now we can build the dang result.
        xminblock, xmaxblock = sph.Vector(*xmin), sph.Vector(*xmax)
        xminblock[jsplit] = xmin[jsplit] + islabdomain[mpi.rank] * dx[jsplit]
        xmaxblock[jsplit] = xmin[jsplit] + islabdomain[mpi.rank +
                                                       1] * dx[jsplit]
        nblock = list(nglobal)
        nblock[jsplit] = islabdomain[mpi.rank + 1] - islabdomain[mpi.rank]
        #sys.stderr.write("nblock : %s\n" % str(nblock))
        newvals = []
        for iproc in xrange(mpi.procs):
            if iproc == mpi.rank:
                recvvals = [(i, val) for (i, val, proc) in id_val_procs
                            if proc == mpi.rank]
            else:
                recvvals = mpi.recv(source=iproc, tag=100)[0]
            newvals += recvvals
        newvals.sort()
        valsblock = sph.vector_of_double()
        for i, val in newvals:
            valsblock.append(val)
        #sys.stderr.write("len(valsblock) = %s\n" % len(valsblock))
        assert len(valsblock) == reduce(mul, nblock)

        # Wait 'til all communication is done.
        for req in sendreqs:
            req.wait()

        # That should be it.
        return valsblock, xminblock, xmaxblock, nblock, jsplit
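
# The communication pattern above, distilled into a hedged sketch: post a
# non-blocking isend to every other rank, do blocking recvs from every other
# rank, then wait on the send handles (pyMPI-style API, as in the function
# above; tag 100 and the payload are arbitrary).
import mpi

sendreqs = []
for iproc in range(mpi.procs):
    if iproc != mpi.rank:
        sendreqs.append(mpi.isend(["data for %d" % iproc], dest=iproc, tag=100))

gathered = ["local data"]                # this rank's own contribution
for iproc in range(mpi.procs):
    if iproc != mpi.rank:
        gathered += mpi.recv(source=iproc, tag=100)[0]

for req in sendreqs:
    req.wait()                           # make sure all sends completed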
Exemplo n.º 48
0
    def run(self):
        # Master loop: wait for a request from any worker, then send that
        # worker the next task from the list.
        for task in self.taskList:
            data, message = mpi.recv()
            source = message.source
            mpi.send(task, source)
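
# A hedged sketch of the worker side that would pair with the master loop
# above: each worker sends a request to rank 0 and processes whatever task it
# gets back. The None-request and None-sentinel protocol is an assumption for
# illustration, not part of the original example.
import mpi

def worker_loop(process_task):
    while True:
        mpi.send(None, 0)            # ask the master (rank 0) for work
        task, status = mpi.recv(0)   # receive the next task from rank 0
        if task is None:             # assumed sentinel meaning "no more work"
            break
        process_task(task)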