Example #1
File: max2.py Project: steder/maroonmpi
import mpi
import random

problemlength = 10000  # assumed: defined at module scope in max2.py

def main():
    myrank, size = mpi.init()

    # split the problem in chunks

    if problemlength % size == 0:
        blocksize = problemlength / size
    else:
        print "Sorry, I don't know how to split up the problem, aborting!"
        mpi.finalize()
        return  # without this, the undefined blocksize would be used below
        
    if myrank == 0:
        data = range(1,problemlength + 1)  # create a toy dataset...
        random.shuffle(data)               # ...modifies data in place

        mydata = data[0:blocksize] # get some data for me...
                                   # and communicate the rest to slaves

        for host in range(1,size):
            hisdata = data[blocksize*host:blocksize*(host+1)]
            mpi.send(hisdata,blocksize,mpi.MPI_INT,host,0,mpi.MPI_COMM_WORLD)
    else:
        mydata = mpi.recv(blocksize,mpi.MPI_INT,0,0,mpi.MPI_COMM_WORLD)

    mymax = max(mydata)

    maximums = mpi.gather(mymax,1,mpi.MPI_INT, size, mpi.MPI_INT, 0, mpi.MPI_COMM_WORLD)

    if myrank == 0:
        mymax = max(maximums)
        print "The maximum value is:", mymax

    mpi.finalize()            
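The example above aborts whenever problemlength is not divisible by the process count. A minimal sketch of an uneven split (pure Python, no MPI calls; the names mirror the example, and the extra elements go to the lowest ranks) that would remove that limitation:

def chunk_bounds(problemlength, size, rank):
    # Each rank gets `base` elements; the first `remainder` ranks get one extra.
    base, remainder = divmod(problemlength, size)
    start = rank * base + min(rank, remainder)
    end = start + base + (1 if rank < remainder else 0)
    return start, end

# e.g. chunk_bounds(10, 3, 0) == (0, 4); ranks 1 and 2 get 3 elements each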
Example #2
    def dumpState(self, cacheFileName):
        def writeNodeData(f, iproc, pos, m, H, vol, surface):
            f.writeObject(pos, "proc%06i/pos" % iproc)
            f.writeObject(m, "proc%06i/m" % iproc)
            f.writeObject(H, "proc%06i/H" % iproc)
            f.writeObject(vol, "proc%06i/vol" % iproc)
            f.writeObject(surface, "proc%06i/surface" % iproc)
            return

        if os.path.splitext(cacheFileName)[1] != ".silo":
            cacheFileName += ".silo"
        if mpi.rank == 0:
            dire = os.path.dirname(cacheFileName)
            if dire and not os.path.exists(dire):
                os.makedirs(dire)
            f = SiloFileIO(cacheFileName, Create)
            f.writeObject(mpi.procs, "numGeneratingProcs")
            writeNodeData(f, 0, self.pos, self.m, self.H, self.vol, self.surface)
            for iproc in xrange(1, mpi.procs):
                pos, m, H, vol, surface = mpi.recv(source=iproc)[0]
                writeNodeData(f, iproc, pos, m, H, vol, surface)
            f.close()
        else:
            mpi.send((self.pos, self.m, self.H, self.vol, self.surface), dest=0)
        mpi.barrier()
        return
Example #3
File: pypar.py Project: uniomni/pypar
def send_vanilla(x, destination, tag=0):
    from cPickle import dumps
    from mpi import send_string as send

    s = dumps(x, 1)
    send(s, destination, tag)
    return len(s)
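send_vanilla pickles an arbitrary object and ships it as a string. A sketch of the matching receiver, assuming the extension module also exposes a receive primitive symmetric to send_string (receive_string below is an assumption, not confirmed pypar API):

def recv_vanilla(source, tag=0):
    from cPickle import loads
    from mpi import receive_string as recv  # assumed counterpart to send_string

    s = recv(source, tag)
    return loads(s)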
Example #4
File: parallel.py Project: umansky/UEDGE
def getarray(src, v, dest=0):
    if not lparallel: return v
    if mpi.rank == src:
        mpi.send(v, dest)
    elif mpi.rank == dest:
        return mpirecv(src)
    return v
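getarray must be called on every rank; it returns v unchanged everywhere except on dest, which receives rank src's value. A usage sketch (the values are illustrative, and at least three ranks are assumed):

local_value = mpi.rank * 10                 # illustrative per-rank data
value = getarray(2, local_value, dest=0)    # collective: every rank calls it
if mpi.rank == 0:
    print "rank 0 now holds rank 2's value:", value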
Example #5
    def parallelRunTest(self):
        if mpi.procs < 2:
            self.fail("This test needs at least 2 processes to run")

        mySmallData = "Hello from " + str(mpi.rank)
        myBigData = [0, "Foo", "goo"]
        for x in range(90):
            myBigData = [x + 1, x * x, 12.4, ("c", "a"), myBigData]

        to = (mpi.rank + 1) % mpi.procs
        frm = (mpi.rank - 1 + mpi.procs) % mpi.procs

        #First we send asynchronously and receive synchronously
        sendHandle1 = mpi.isend(myBigData, to, 0)
        sendHandle2 = mpi.isend(mySmallData, to, 1)
        msgReceived1, status = mpi.recv(frm, 0)
        msgReceived2, status = mpi.recv(frm, 1)

        #Check for failures
        if msgReceived1 != myBigData:
            self.fail("Complex NonBlock failed on first test with big data")
        if msgReceived2 != "Hello from " + str(frm):
            self.fail("Complex NonBlock failed on first test with small data")

        #Next we will do a blocking send and a non-blocking receive
        if mpi.rank == 0:

            #Change the data we're sending just for the heck of it
            myBigData[0] = ("changed")
            myBigData[1] = "Also changed"
            mySmallData = ("Hi", mpi.rank)

            #Perform 2 blocking sends to send the data
            mpi.send(myBigData, 1, 1)
            mpi.send(mySmallData, 1, 2)

        elif mpi.rank == 1:

            #Get recv handles for the two messages
            recvHandle1 = mpi.irecv(0, 1)
            recvHandle2 = mpi.irecv(0, 2)
            finished = [0, 0]

            #Loop until both messages come in
            while finished[0] == 0 or finished[1] == 0:
                if finished[0] == 0:
                    finished[0] = recvHandle1.test()
                if finished[1] == 0:
                    finished[1] = recvHandle2.test()

            #We got the messages, now check them
            if recvHandle1.message != myBigData:
                self.fail("Complex non-block failed on 2nd test with big data")
            if recvHandle2.message != ("Hi", 0):
                self.fail(
                    "Complex non-block failed on 2nd test with small data")

        return
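The polling loop above can be factored into a helper. A sketch built only on the handle interface this test uses (test() returning nonzero on completion, message holding the payload):

def wait_all(handles):
    # Poll the nonblocking-request handles until every message has arrived,
    # then return the received payloads in order.
    done = [0] * len(handles)
    while 0 in done:
        for k in range(len(handles)):
            if not done[k]:
                done[k] = handles[k].test()
    return [h.message for h in handles]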
Example #6
File: PyMPITest.py Project: LLNL/pynamic
    def parallelRunTest(self):
        if mpi.procs < 2:
            self.fail("This test needs at least 2 processes to run")

        mySmallData = "Hello from " + str(mpi.rank)
        myBigData = [0,"Foo", "goo"]
        for x in range(90):
          myBigData = [x+1,x*x, 12.4, ("c", "a"), myBigData]

        to = (mpi.rank + 1)%mpi.procs
        frm = (mpi.rank-1+mpi.procs)%mpi.procs

        #First we send asynchronously and receive synchronously
        sendHandle1 = mpi.isend( myBigData,   to, 0)
        sendHandle2 = mpi.isend( mySmallData, to, 1)
        msgReceived1, status = mpi.recv(frm,0)
        msgReceived2, status = mpi.recv(frm,1)

        #Check for failures
        if msgReceived1 != myBigData:
            self.fail("Complex NonBlock failed on first test with big data")
        if msgReceived2 != "Hello from " + str(frm):
            self.fail("Complex NonBlock failed on first test with small data")

        #Next we will do a blocking send and a non-blocking receive
        if mpi.rank==0:

          #Change the data we're sending just for the heck of it
          myBigData[0] = ("changed")
          myBigData[1] = "Also changed"
          mySmallData = ("Hi", mpi.rank)

          #Perform 2 blocking sends to send the data
          mpi.send( myBigData, 1, 1 )
          mpi.send( mySmallData, 1, 2 )

        elif mpi.rank==1:

          #Get recv handles for the two messages
          recvHandle1 = mpi.irecv( 0,1)
          recvHandle2 = mpi.irecv( 0,2)
          finished = [0,0]

          #Loop until both messages come in
          while finished[0] == 0 or finished[1] == 0:
            if finished[0] == 0:
              finished[0] = recvHandle1.test()
            if finished[1] == 0:
              finished[1] = recvHandle2.test()

          #We got the messages, now check them
          if recvHandle1.message != myBigData:
            self.fail( "Complex non-block failed on 2nd test with big data")
          if recvHandle2.message != ("Hi", 0):
            self.fail( "Complex non-block failed on 2nd test with small data")

        return
Example #7
 def testBlockSend(self):
     for sendProc in xrange(mpi.procs):
         if mpi.rank == sendProc:
             for j in xrange(mpi.procs):
                 if j != mpi.rank:
                     obj = 10 * mpi.rank + 1
                     mpi.send(obj, dest=j, tag=100)
         else:
             obj = mpi.recv(sendProc, 100)[0]
             assert obj == 10 * sendProc + 1
Example #8
 def ping(self, msg):
     if mpi.procs < 2: return
     try:
         if mpi.rank == 0:
             received, status = mpi.recv(1)
             self.failUnless(received == msg, 'Bad message received')
         elif mpi.rank == 1:
             mpi.send(msg, 0)
     finally:
         mpi.barrier()
Example #9
File: PyMPITest.py Project: LLNL/pynamic
 def ping(self,msg):
     if mpi.procs < 2: return
     try:
         if mpi.rank == 0:
             received,status = mpi.recv(1)
             self.failUnless(received==msg,'Bad message received')
         elif mpi.rank == 1:
             mpi.send(msg,0)
     finally:
         mpi.barrier()
Example #10
File: PyMPITest.py Project: LLNL/pynamic
 def parallelRunTest(self):
     msg = "I am from proc %d"%mpi.rank
     if mpi.procs % 2 != 0:
         self.fail("Test needs even number of processes")
     if mpi.rank % 2 == 0:
        sendResult = mpi.send(msg,mpi.rank+1)
        recv,stat = mpi.recv()
     else:
        recv,stat = mpi.recv()
        sendResult = mpi.send(msg,mpi.rank-1)
     return
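The even/odd ordering above exists so that two blocking sends never face each other. mpi.sendrecv (used in Example #25 below) expresses the same pairwise swap in one call; a sketch:

def pairwise_exchange(msg):
    # Even ranks pair with rank+1, odd ranks with rank-1 (even process count).
    if mpi.rank % 2 == 0:
        partner = mpi.rank + 1
    else:
        partner = mpi.rank - 1
    received, status = mpi.sendrecv(msg, partner, partner)
    return received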
Example #11
def FillSpotlist(run, Numspots):
    global spotlist
    # Note sigmas and offset are in pixels, not microns.
    incfgfile = datadir+startdir+'bf.cfg'
    InConfigData = ReadConfigFile(incfgfile)
    # Postage Stamp size
    nx = InConfigData['PixelBoundaryNx']
    ny = InConfigData['PixelBoundaryNy']

    outputfiledir = InConfigData['outputfiledir']
    outputfilebase = InConfigData['outputfilebase']
    GridsPerPixel = InConfigData['GridsPerPixel'] * InConfigData['ScaleFactor']
    PixelSize = InConfigData['PixelSize']
    ChannelStopWidth = InConfigData['ChannelStopWidth']
    cspixels = int(ChannelStopWidth / PixelSize * float(GridsPerPixel / 2)) + 1
    stampxmin = -(int(nx/2)+0.5)
    stampxmax = -stampxmin
    stampymin = -(int(ny/2)+0.5)
    stampymax = -stampymin

    spotlist = Array2dSet(stampxmin,stampxmax,nx,stampymin,stampymax,ny,Numspots-1)

    dirbase = outputfiledir.split('bfrun')
    
    for spot in range(Numspots-1): 
        spotrun = spot + 1 # Don't include run 0 because it's different
        dat = Array3dHDF5Elec(dirbase[0]+'bfrun_%d'%spotrun, outputfilebase, run)
        cfgfile = dirbase[0]+'bfrun_%d'%spotrun+'/bf.cfg'
        ConfigData = ReadConfigFile(cfgfile)

        spotlist.xoffset[spot] = ConfigData['Xoffset'] / ConfigData['PixelSize']
        spotlist.yoffset[spot] = ConfigData['Yoffset'] / ConfigData['PixelSize']

        for i in range(nx):
            nxmin = ((ConfigData['PixelBoundaryLowerLeft'][0] - dat.xmin) / dat.dx) + GridsPerPixel * i
            nxmax = nxmin + GridsPerPixel
            for j in range(ny):
                nymin = ((ConfigData['PixelBoundaryLowerLeft'][1] - dat.ymin) / dat.dy) + GridsPerPixel * j
                nymax = nymin + GridsPerPixel
                electrons_in_pixel = dat.elec[(nxmin+cspixels):(nxmax-cspixels),nymin:nymax,:].sum()
                #print "i = %d, j = %d, nxmin = %d, nymin = %d, electron = %d"%(i,j,nxmin,nymin,electrons_in_pixel)
                spotlist.data[i,j,spot] = electrons_in_pixel

    param0 = [1.00, 1.00]
    args = ()
    Result = fmin_powell(FOM, param0, args)
    
    imax = spotlist.imax.mean()
    ADU_correction = Area(-0.5,0.5,-0.5,0.5,Result[0],Result[1],1.0)

    spotdata = [run, Result[0], Result[1], imax * ADU_correction]
    print spotdata
    mpi.send(spotdata, Numspots - 1, tag = run)
    return
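FillSpotlist ends by sending the fitted parameters for one run to the last rank, tagged with the run number. A sketch of a matching collector on that rank (hypothetical driver, not part of the source; run_ids stands for whatever run numbers were handed out):

if mpi.rank == Numspots - 1:
    results = []
    for run in run_ids:                                   # run_ids: assumed list of runs
        spotdata, status = mpi.recv(mpi.ANY_SOURCE, run)  # match by tag
        results.append(spotdata)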
Example #12
 def run(self):
     while 1:
         print >> sys.stderr, "======= Node #%d waiting..." % mpi.rank
         try:
             mpi.send("request", 0)
             task, message = mpi.recv(0)
             print >> sys.stderr, "Node #%d received task! ---------------------------" % mpi.rank
             task.run()
         except:  # any failure (e.g. the master has shut down) ends the worker loop
             print >> sys.stderr, "======= Node %d done!" % mpi.rank
             break
Example #13
 def gatherVector(vec):
     for i in xrange(len(vec)):
         if mpi.rank == 0:
             for sendProc in xrange(1, mpi.procs):
                 vals = mpi.recv(sendProc)[0]
                 print "Received %i values from processor %i" % (len(vals), sendProc)
                 vec[i] += vals
         else:
             mpi.send(vec[i], 0)
         if mpi.rank == 0:
             assert len(vec[i]) == ntot
Example #14
 def parallelRunTest(self):
     msg = "I am from proc %d" % mpi.rank
     if mpi.procs % 2 != 0:
         self.fail("Test needs even number of processes")
     if mpi.rank % 2 == 0:
         sendResult = mpi.send(msg, mpi.rank + 1)
         recv, stat = mpi.recv()
     else:
         recv, stat = mpi.recv()
         sendResult = mpi.send(msg, mpi.rank - 1)
     return
Example #15
    def restoreState(self, cacheFileName):
        def readNodeData(f, iproc):
            pos = f.readObject("proc%06i/pos" % iproc)
            m = f.readObject("proc%06i/m" % iproc)
            H = f.readObject("proc%06i/H" % iproc)
            vol = f.readObject("proc%06i/vol" % iproc)
            surface = f.readObject("proc%06i/surface" % iproc)
            return pos, m, H, vol, surface

        if cacheFileName is None:
            return False

        if os.path.splitext(cacheFileName)[1] != ".silo":
            cacheFileName += ".silo"
        result = False
        if mpi.rank == 0:
            result = (cacheFileName and os.path.exists(cacheFileName))
        result = mpi.bcast(result, root=0)
        if result:
            print "Restoring MedialGenerator state from %s" % cacheFileName
            if mpi.rank == 0:
                f = SiloFileIO(cacheFileName, Read)
                numGeneratingProcs = f.readObject("numGeneratingProcs")

                # Decide how to divide the generating domains between our current processes.
                n0 = numGeneratingProcs / mpi.procs
                remainder = numGeneratingProcs % mpi.procs
                for iproc in xrange(mpi.procs):
                    if iproc >= numGeneratingProcs:
                        imin, imax = 0, 0
                    else:
                        imin = iproc * n0 + min(iproc, remainder)
                        imax = imin + n0
                        if iproc < remainder:
                            imax += 1
                    pos, m, H, vol, surface = [], [], [], [], []
                    for igenproc in xrange(imin, imax):
                        posi, mi, Hi, voli, surfacei = readNodeData(
                            f, igenproc)
                        pos += posi
                        m += mi
                        H += Hi
                        vol += voli
                        surface += surfacei
                    if iproc == 0:
                        self.pos, self.m, self.H, self.vol, self.surface = pos, m, H, vol, surface
                    else:
                        mpi.send((pos, m, H, vol, surface), dest=iproc)
                f.close()
            else:
                self.pos, self.m, self.H, self.vol, self.surface = mpi.recv(
                    source=0)[0]
        return result
Example #16
File: parallel.py Project: umansky/UEDGE
def gather(obj, dest=0):
    if not lparallel: return [obj]
    if mpi.rank == dest:
        result = []
        for i in range(mpi.procs):
            if i == dest:
                result.append(obj)
            else:
                result.append(mpirecv(i))
        return result
    else:
        mpi.send(obj, dest)
        return [obj]
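A usage sketch for gather: every rank passes its contribution, and only dest (rank 0 here) sees the assembled list:

values = gather(mpi.rank)      # collective: every rank must call it
if mpi.rank == 0:
    print "gathered:", values  # [0, 1, ..., mpi.procs-1]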
Example #17
 def send(self, data, listen=0):
     if self.manager:
         self.manager.messagesSent[self.tid] += 1
     self.log("send", data, self.tid)
     try:
         mpi.send(data, self.tid)
     except:
         if type(data) == list and isinstance(data[0], cPickle.UnpickleableError):
             data[0] = ValueError("Unpickleable!")
             try:
                 mpi.send(data, self.tid)
             except:
                 print "Fail in send:"
                 print data
                 raise
         else:
             print "Fail in send:"
             print data
             raise
Example #18
def latency(cnt, bytes):
    if mpi.rank == 0:
        TIMER_START()
        for i in range(cnt):
            mpi.send(message[:bytes], slave)
        TIMER_STOP()
        msg, status = mpi.recv(slave)

        total = TIMER_ELAPSED()
        return total / cnt, "u-sec"

    elif mpi.rank == slave:
        for i in range(cnt):
            msg, status = mpi.recv(master)
        mpi.send(message[:4], master)
        return 0.0, "u-sec"

    else:
        return 0.0, "u-sec"
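The TIMER_* helpers and the message/master/slave globals sit outside this snippet. A minimal sketch of compatible definitions, assuming microsecond units (which the "u-sec" labels and the 1e-6 factors in the companion benchmarks imply):

import time

master, slave = 0, 1            # assumed rank roles
message = 'a' * (1 << 20)       # assumed preallocated send buffer
_t = [0.0, 0.0]

def TIMER_START():
    _t[0] = time.time()

def TIMER_STOP():
    _t[1] = time.time()

def TIMER_ELAPSED():
    # elapsed wall-clock time in microseconds
    return (_t[1] - _t[0]) * 1e6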
Example #19
def roundtrip(cnt, bytes):
    if mpi.rank == 0:
        TIMER_START()
        for i in range(cnt):
            mpi.send(message[:bytes], slave)
            msg, status = mpi.recv(slave)
        TIMER_STOP()

        total = TIMER_ELAPSED()
        return cnt / (total * 1e-6), "transactions/sec"

    elif mpi.rank == slave:
        for i in range(cnt):
            msg, status = mpi.recv(master)
            mpi.send(message[:bytes], master)
        return 0.0, "transactions/sec"

    else:
        return 0.0, "transactions/sec"
Example #20
def bandwidth(cnt, bytes):
    if mpi.rank == 0:
        TIMER_START()
        for i in range(cnt):
            mpi.send(message[:bytes], slave)
        msg, status = mpi.recv(slave)
        TIMER_STOP()

        total = TIMER_ELAPSED()
        return ((4 + (bytes * cnt)) / 1024.0) / (total * 1e-6), "KB/sec"

    elif mpi.rank == slave:
        for i in range(cnt):
            msg, status = mpi.recv(master)
        mpi.send(message[:bytes], master)
        return 0.0, "KB/sec"

    else:
        return 0.0, "KB/sec"
Example #21
 def send(self, data, listen=0):
     if self.manager:
         self.manager.messagesSent[self.tid] += 1
     self.log("send", data, self.tid)
     try:
         mpi.send(data, self.tid)
     except:
         if type(data) == list and isinstance(data[0], cPickle.UnpickleableError):
             data[0] = ValueError("Unpickleable!")
             try:
                 mpi.send(data, self.tid)
             except:
                 print "Fail in send:"
                 print data
                 raise
         else:
             print "Fail in send:"
             print data
             raise
Example #22
File: benchmark.py Project: LLNL/pynamic
def roundtrip(cnt,bytes):
    if mpi.rank == 0:
        TIMER_START()
        for i in range(cnt):
            mpi.send(message[:bytes],slave)
            msg,status = mpi.recv(slave)
        TIMER_STOP()

        total = TIMER_ELAPSED()
        return cnt / ( total *1e-6 ),"transactions/sec"
    
    elif mpi.rank == slave:
        for i in range(cnt):
            msg,status = mpi.recv(master)
            mpi.send(message[:bytes],master)
        return 0.0,"transactions/sec"

    else:
        return 0.0,"transactions/sec"
Example #23
File: benchmark.py Project: LLNL/pynamic
def bandwidth(cnt,bytes):
    if mpi.rank == 0:
        TIMER_START()
        for i in range(cnt):
            mpi.send(message[:bytes],slave)
        msg,status = mpi.recv(slave)
        TIMER_STOP()

        total = TIMER_ELAPSED()
        return ((4+(bytes*cnt))/1024.0) / (total*1e-6),"KB/sec"
    
    elif mpi.rank == slave:
        for i in range(cnt):
            msg,status = mpi.recv(master)
        mpi.send(message[:bytes],master)
        return 0.0,"KB/sec"

    else:
        return 0.0,"KB/sec"
Example #24
File: benchmark.py Project: LLNL/pynamic
def latency(cnt,bytes):
    if mpi.rank == 0:
        TIMER_START()
        for i in range(cnt):
            mpi.send(message[:bytes],slave)
        TIMER_STOP()
        msg,status = mpi.recv(slave)

        total = TIMER_ELAPSED()
        return total/cnt,"u-sec"
    
    elif mpi.rank == slave:
        for i in range(cnt):
            msg,status = mpi.recv(master)
        mpi.send(message[:4],master)
        return 0.0,"u-sec"

    else:
        return 0.0,"u-sec"
Example #25
File: PyMPITest.py Project: LLNL/pynamic
    def parallelRunTest(self):
        if mpi.procs%2 == 1:
            self.fail("Simple sendrecv must be run with an even number of processes")

        if mpi.procs > 1:
            # Swap ranks even/odd, evens send first
            if mpi.rank % 2 == 0:
                mpi.send(mpi.rank,mpi.rank+1)
                nextRank,stat = mpi.recv(mpi.rank+1)
                if nextRank != mpi.rank+1:
                    self.fail("Received incorrect rank")
            else:
                prevRank,stat = mpi.recv(mpi.rank-1)
                mpi.send(mpi.rank,mpi.rank-1)
                if prevRank != mpi.rank-1:
                    self.fail("Received incorrect rank.  Expected %r, got %r"%(
                        mpi.rank-1,prevRank
                        ))

        # Try an around the horn sendrecv check
        me = mpi.rank
        rightside = (me+1)%mpi.procs
        leftside = (me-1+mpi.procs)%mpi.procs
        msg,stat2 = mpi.sendrecv(me,rightside,leftside)

        if msg != leftside:
            failStr = "Failed simple sendrecv "
            failStr += "Process " + str(mpi.rank) + " received "
            failStr += str(msg) + " instead of " + str(leftside)
            self.fail(failStr)

        #Do another check with a longer message
        longMsg = ()
        for i in range(256):
          longMsg += (i, str(i) )

        msg, stat2 = mpi.sendrecv( longMsg, leftside, rightside )
        if msg != longMsg:
          failStr = "Failed simple sendrecv for long messages"
          self.fail( failStr )

        return
Example #26
    def parallelRunTest(self):
        if mpi.procs % 2 == 1:
            self.fail(
                "Simple sendrecv must be run with an even number of processes")

        if mpi.procs > 1:
            # Swap ranks even/odd, evens send first
            if mpi.rank % 2 == 0:
                mpi.send(mpi.rank, mpi.rank + 1)
                nextRank, stat = mpi.recv(mpi.rank + 1)
                if nextRank != mpi.rank + 1:
                    self.fail("Received incorrect rank")
            else:
                prevRank, stat = mpi.recv(mpi.rank - 1)
                mpi.send(mpi.rank, mpi.rank - 1)
                if prevRank != mpi.rank - 1:
                    self.fail("Received incorrect rank.  Expected %r, got %r" %
                              (mpi.rank - 1, prevRank))

        # Try an around the horn sendrecv check
        me = mpi.rank
        rightside = (me + 1) % mpi.procs
        leftside = (me - 1 + mpi.procs) % mpi.procs
        msg, stat2 = mpi.sendrecv(me, rightside, leftside)

        if msg != leftside:
            failStr = "Failed simple sendrecv "
            failStr += "Process " + str(mpi.rank) + " received "
            failStr += str(msg) + " instead of " + str(leftside)
            self.fail(failStr)

        #Do another check with a longer message
        longMsg = ()
        for i in range(256):
            longMsg += (i, str(i))

        msg, stat2 = mpi.sendrecv(longMsg, leftside, rightside)
        if msg != longMsg:
            failStr = "Failed simple sendrecv for long messages"
            self.fail(failStr)

        return
Example #27
         s, rc = mpi.recv(i)
         print 'received ', s, 'from rank ', i
         #saving what is received to array
         array[i] = s
 elif mpi.rank == 1:
     if (j != 0):  #if it is not the first time running
         print 'rank 1 running network '
         #    exec(open('./paths.sh').read())
         exec(
             open('./models/apple_stock_price_predictor_iterative1.py').
             read())
         # result needed is the Mean Squared Error to determine best network
         result = (np.mean(mse_test_loss_seq))
         print 'rank 1 sending result'
         # sending result to Master Core
         mpi.send(result, 0)
     else:  #if it is the first time it is running
         print 'rank 1 running network '
         #    exec(open('./paths.sh').read())
         exec(
             open(
                 './models/apple_stock_price_predictor_initial1.py').read())
         result = (np.mean(mse_test_loss_seq))
         print 'rank 1 sending result'
         mpi.send(result, 0)
 elif mpi.rank == 2:
     if (j != 0):
         print 'rank 2 running network '
         #    exec(open('./paths.sh').read())
         exec(
             open('./models/apple_stock_price_predictor_iterative2.py').
Example #28
def main():
	if sys.argv[-1].startswith("usefs="): sys.argv = sys.argv[:-1]	# remove the runpar fileserver info

	(options,args) =  parse_command_line()
	
	if not options.nolog and (not mpi or (mpi and mpi.rank==0)): EMAN.appinit(sys.argv)

	inputParm = EMAN.ccmlInputParm()
	sf = EMAN.XYData()
	if options.sfFileName != "" :
		readsf = sf.readFile(options.sfFileName)
		if ((readsf == -1) and (options.verbose > 0)) :
			print "The file of scattering factor does NOT exist"
	inputParm.scateringFactor = sf

	startNumOfRawImages = options.startNumOfRawImages
	#endNumOfRawImages = options.endNumOfRawImages

	refImageFileName = args[-1]
	numOfRefImages = options.numOfRefImages
	solutionFile = options.solutionFile

	# write log info to .emanlog file so that eman program can browse the history
	if not options.nolog and (not mpi or (mpi and mpi.rank==0)): 
		pid = EMAN.LOGbegin(sys.argv)
		for f in args[0:-1]: EMAN.LOGInfile(pid,f)
		EMAN.LOGReffile(pid,args[-1])
		if options.solutionFile: EMAN.LOGOutfile(pid,options.solutionFile)
		if options.listFile: EMAN.LOGOutfile(pid,options.listFile)
		if options.mrcSolutionFile: EMAN.LOGOutfile(pid,options.mrcSolutionFile)

	inputParm.sym = options.sym
	inputParm.FFTOverSampleScale = options.FFTOverSampleScale
	inputParm.pftStepSize = options.pftStepSize
	inputParm.deltaR = options.deltaR
	inputParm.RMin = options.RMin
	inputParm.RMax = options.RMax
	inputParm.searchMode = options.searchMode
	inputParm.scalingMode = options.scalingMode
	inputParm.residualMode = options.residualMode
	inputParm.weightMode = options.weightMode
	# inputParm.rawImageFN will be set later
	inputParm.refImagesFN = refImageFileName
	inputParm.rawImageIniParmFN = options.rawImageIniParmFN
	inputParm.rawImagePhaseCorrected = options.phasecorrected

	inputParm.maxNumOfRun = options.maxNumOfRun
	inputParm.zScoreCriterion = options.zScoreCriterion
	inputParm.residualCriterion = options.residualCriterion
	inputParm.solutionCenterDiffCriterion = options.solutionCenterDiffCriterion
	inputParm.solutionOrientationDiffCriterion = options.solutionOrientationDiffCriterion/180.0*pi
	inputParm.maxNumOfIteration = options.maxNumOfIteration
	inputParm.numOfRandomJump = options.numOfRandomJump
	inputParm.numOfFastShrink = options.numOfFastShrink
	inputParm.numOfStartConfigurations = options.numOfStartConfigurations
	inputParm.orientationSearchRange = options.orientationSearchRange/180.0*pi
	inputParm.centerSearchRange = options.centerSearchRange

	inputParm.numOfRefImages = options.numOfRefImages
	inputParm.refEulerConvention = options.refEulerConvention
	#maskR = options.maskR
	#if (maskR<=0): maskR = refImageSizeY/2

	inputParm.verbose = options.verbose
	verbose = options.verbose
	#verboseSolution = options.verboseSolution

	updataHeader = options.updataHeader
	solutionFile = options.solutionFile
	mrcSolutionFile = options.mrcSolutionFile
	iniCenterOrientationMode = options.iniCenterOrientationMode
	refCenterOrientationMode = options.refCenterOrientationMode

	rawImages = []
	if not mpi or (mpi and mpi.rank==0):
		for imgfile in args[0:-1]:
			imgnum = EMAN.fileCount(imgfile)[0]
			for i in range(imgnum): rawImages.append((imgfile, i))
	if mpi: rawImages = mpi.bcast(rawImages)
	
	endNumOfRawImages = options.endNumOfRawImages
	if endNumOfRawImages <=0  or endNumOfRawImages > len(rawImages):
		endNumOfRawImages = len(rawImages)

	numRawImages = endNumOfRawImages - startNumOfRawImages

	if mpi:
		ptclset = range(startNumOfRawImages + mpi.rank, endNumOfRawImages, mpi.size)
	else:
		ptclset = range(startNumOfRawImages, endNumOfRawImages)
	
	solutions = []

	rMask = options.rMask        #mask size is given
	if options.rMask <= 0 : rMask = refImageSizeY/2   #mask size = half image size
	
	rMask1 = options.rMask1             #output tnf mask size is given
	if options.rMask1 <= 0 : rMask1 = rMask    #output tnf mask size = half image size

	inputParm.rMask = rMask
	inputParm.rMask1 = rMask1

	rawImage = EMAN.EMData()
	rawImage.getEuler().setSym(inputParm.sym) #set the symmetry of the raw particle
	inputParm.rawImageFN = rawImages[0][0] #give the initial raw particle filename
	print "start to prepare------"
	rawImage.crossCommonLineSearchPrepare(inputParm) #prepare, create pseudo PFT of ref images
	print "end to prepare------"
	inputParm.rawImage = rawImage
	#for rawImgSN in ptclset:
	for index in range(len(ptclset)):
		rawImgSN = ptclset[index]
		inputParm.rawImageFN = rawImages[rawImgSN][0]
		inputParm.thisRawImageSN = rawImages[rawImgSN][1]
		if mpi: print "rank %d: %d in %d-%d (%d in %d-%d)" % (mpi.rank, rawImgSN, startNumOfRawImages, endNumOfRawImages, index, 0, len(ptclset))
		#rawImage.readImage(rawImages[rawImgSN][0], rawImages[rawImgSN][1])

		#rawImage.applyMask(rMask, 6) #apply mask type 6 [edge mean value] to raw image, center will be image center
		#rawImage.getEuler().setSym("icos")
		#if rawImage.hasCTF() == 1:
			#ctfParm = rawImage.getCTF()
			#inputParm.zScoreCriterion = options.zScoreCriterion + atan(abs(ctfParm[0])-1.5)/(pi/4) +0.59 #adjust zScore criterion -0.6 --> +1.2, 1.5, 2.0
			#inputParm.numOfRefImages = int(min(numOfRefImages, max(numOfRefImages*exp(-(abs(ctfParm[0])/2.0-0.15))+0.5, 5.0))) # adjust maxNumOfRun, the min is 2

		inputParm.thisRawImageSN = rawImgSN

		solutionCenterDiffCriterion = inputParm.solutionCenterDiffCriterion
		solutionOrientationDiffCriterion = inputParm.solutionOrientationDiffCriterion

		#initialize Center And Orientation by one of the following modes

		if iniCenterOrientationMode == "iniparmfile" :
			inputParm.initializeCenterAndOrientationFromIniParmFile() # need to set "refEulerConvention"
		elif iniCenterOrientationMode == "headerfile" :
			inputParm.initializeCenterAndOrientationFromParticle() # need to set "refEulerConvention"
		else :
			inputParm.initializeCenterAndOrientationFromRandom()  # default is random orientation and physical center

		#set the reference Center And Orientation by one of the following modes

		if refCenterOrientationMode == "iniparmfile" : inputParm.setRefCenterAndOrientationFromIniParmFile() # need to set "refEulerConvention"
		elif refCenterOrientationMode == "headerfile" : inputParm.setRefCenterAndOrientationFromParticle() # need to set "refEulerConvention"
		else : inputParm.setRefCenterAndOrientationFromInitializedParms() # default is copy the initial center and orientation

		rawImage.crossCommonLineSearchReadRawParticle(inputParm) #create pseudo PFT of raw image

		maxNumOfRun = inputParm.maxNumOfRun
		outputParmList = []
		numOfRun = 0
		passAllConsistencyCriteria = 0
		while (numOfRun < maxNumOfRun) or (len(outputParmList) < 2):

			if (iniCenterOrientationMode != "iniparmfile") and (iniCenterOrientationMode != "headerfile") :
				inputParm.initializeCenterAndOrientationFromRandom()  # default is random orientation and physical center
			if (refCenterOrientationMode != "iniparmfile") and (refCenterOrientationMode != "headerfile") :
				inputParm.setRefCenterAndOrientationFromInitializedParms() # default is copy the initial center and orientation

			numOfRun = numOfRun + 1
			print "numOfRun = ", numOfRun

			############################################################################
			############ execute cross common line search for reference ################
			############################################################################
			outputParm  = rawImage.crossCommonLineSearch(inputParm)
			############################################################################
			# pass criteria check
			outputParmList.append(outputParm) #if passed criteria, e.g. zscore, residualThreshold, etc
			############################################################################

			outputParmList.sort(lambda x, y: cmp(x.residual, y.residual))

			############################################################################
			########################## consistency check ###############################
			############################################################################
			#passConsistencyCriteria = 0
			finalOutputParmList = []
			lowestResidualList = []
			lengthOfList = len(outputParmList)
			if lengthOfList < 2 : continue
			for i in range(lengthOfList-1):
				thisOutputParm = outputParmList[i]
				numOfPairsPassConsistencyCheck = 0
				for j in range(i+1,lengthOfList):
					refOutputParm = outputParmList[j]
					tmpOutputParm = EMAN.ccmlOutputParm() #create a new output parm object
					tmpOutputParm.rawImageSN = thisOutputParm.rawImageSN #copy all parameters
					tmpOutputParm.residual = thisOutputParm.residual
					tmpOutputParm.sigma = thisOutputParm.sigma
					tmpOutputParm.verbose = thisOutputParm.verbose
					tmpOutputParm.zScore = thisOutputParm.zScore
					tmpOutputParm.zScoreCriterion = thisOutputParm.zScoreCriterion

					tmpOutputParm.passAllCriteria = 0
					tmpOutputParm.setCalculatedCenterAndOrientation(thisOutputParm.cx,thisOutputParm.cy,thisOutputParm.q)
					tmpOutputParm.setRefCenterAndOrientation(refOutputParm.cx, refOutputParm.cy, refOutputParm.q)
					tmpOutputParm.calculateDifferenceWithRefParm() #calculated the difference

					centerDiff = tmpOutputParm.centerDiff
					orientationDiff = tmpOutputParm.orientationDiff
					
					#####  FLIP CASE :  if no consistency found, try flip this orientation
					if ((centerDiff > solutionCenterDiffCriterion) or (orientationDiff > solutionOrientationDiffCriterion)) :
						quatFlip = EMAN.Quaternion(refOutputParm.q.getEuler().alt(), refOutputParm.q.getEuler().az(), refOutputParm.q.getEuler().phi()+pi)
						tmpOutputParm.setRefCenterAndOrientation(refOutputParm.cx, refOutputParm.cy, quatFlip)
						tmpOutputParm.calculateDifferenceWithRefParm() #calculated the difference
						centerDiff = tmpOutputParm.centerDiff
						orientationDiff = tmpOutputParm.orientationDiff
						tmpOutputParm.setRefCenterAndOrientation(refOutputParm.cx, refOutputParm.cy, refOutputParm.q) #set back the exact orientation of reference 

					#Save the configurations with lowest residuals
					if (i<3) and (j==i+1) : lowestResidualList.append(tmpOutputParm)
					
					#make the good/answers list
					if ((centerDiff < solutionCenterDiffCriterion) and (orientationDiff < solutionOrientationDiffCriterion)) :
						numOfPairsPassConsistencyCheck += 1
						if numOfPairsPassConsistencyCheck == 1 : #save to the final list
							tmpOutputParm.passAllCriteria = 1
							finalOutputParmList.append(tmpOutputParm)
						if i==0 and numOfPairsPassConsistencyCheck >= options.numConsistentRun: #if the first one, check whether it has 3 pair of consistencies
							passAllConsistencyCriteria = 1
							break
						if i>0 : break #if not the first one, find one pair of consistency, then break
				
				#no break here, just for saving all possible solutions

			if passAllConsistencyCriteria and len(finalOutputParmList) >= options.numConsistentRun: break #if 3 consistent pair orientations were found, then stop


		rawImage.crossCommonLineSearchReleaseParticle(inputParm) # release the memory related to this raw particle

		# if no consistency found, keep the lowest ones as output
		if len(finalOutputParmList) == 0 : finalOutputParmList = lowestResidualList
		for i in range(len(finalOutputParmList)) : 
			if passAllConsistencyCriteria : finalOutputParmList[i].passAllCriteria = 1
			else : finalOutputParmList[i].passAllCriteria = 0

		if options.solutionFile:
			for i in range(len(finalOutputParmList)) : finalOutputParmList[i].outputResult(solutionFile)

		outputParm = finalOutputParmList[0] #just use the lowest residual as regular output
		if outputParm.passAllCriteria: 	passfail = "pass"
		else: passfail = "fail"

		print "Final result: euler=%g\t%g\t%g\tcenter=%g\t%g\tresidue=%g\t%s" % (outputParm.alt*180/pi, outputParm.az*180/pi, outputParm.phi*180/pi, outputParm.cx, outputParm.cy, outputParm.residual, passfail)
		
		defocus = 0
		if options.scoreFile:
			rawImage.readImage(rawImages[rawImgSN][0], rawImages[rawImgSN][1], 1) # read header only
			if rawImage.hasCTF():
				defocus = rawImage.getCTF()[0]

		solution = (rawImages[rawImgSN][0], rawImages[rawImgSN][1], outputParm.alt, outputParm.az, outputParm.phi, \
					   outputParm.cx, outputParm.cy, defocus, outputParm.residual, outputParm.passAllCriteria)
		solutions.append( solution )

		sys.stdout.flush()

	rawImage.crossCommonLineSearchFinalize(inputParm) #finalize, i.e. delete memories

	if mpi:
		if options.verbose: 
			print "rank %d: done and ready to output" % (mpi.rank)
			sys.stdout.flush()
		mpi.barrier()
		#print "rank %d: %s" % (mpi.rank, solutions)
		if mpi.rank==0:
			for r in range(1,mpi.size):
				msg, status = mpi.recv(source = r, tag = r)
				solutions += msg
			def ptcl_cmp(x, y):
				eq = cmp(x[0], y[0])
				if not eq: return cmp(x[1],y[1])
				else: return eq
			solutions.sort(ptcl_cmp)
		else:
			mpi.send(solutions, 0, tag = mpi.rank)

	if not mpi or (mpi and mpi.rank==0):
		if options.scoreFile:
			sFile = open(options.scoreFile, "w")
			sFile.write("#LST\n")
			for i in solutions:
				if i[-1]: 
					sFile.write("%d\t%s\tdefocus=%g\tresidual=%g\n" % (i[1], i[0], i[7], i[8]))
			sFile.close()
			
		if options.listFile:
			lFile = open(options.listFile, "w")
			lFile.write("#LST\n")
			for i in solutions:
				if i[-1]: 
					lFile.write("%d\t%s\t%g\t%g\t%g\t%g\t%g\n" % (i[1], i[0], i[2]*180.0/pi, i[3]*180.0/pi, i[4]*180.0/pi, i[5], i[6]))
			lFile.close()
		if options.mrcSolutionFile:
			outFile = open(options.mrcSolutionFile, "w")
			for i in solutions:
				if i[-1]:
					#rawImage.readImage(i[0], i[1], 1)
					rawImage.readImage(i[0], i[1])
					thisEu = EMAN.Euler(i[2], i[3], i[4])
					thisEu.convertToMRCAngle()
					alt = thisEu.alt_MRC()*180.0/pi
					az  = thisEu.az_MRC()*180.0/pi
					phi = thisEu.phi_MRC()*180.0/pi
		
					cx  = i[5]
					cy  = i[6]
					dx = cx - rawImage.xSize()/2
					dy = cy - rawImage.ySize()/2
					rawImage.applyMask(rMask1,6,dx,dy,0) #apply mask type 6 [edge mean value] to raw image, center will be the solved center
					#tnfFileName = "%s-%d.tnf" % (os.path.basename(os.path.splitext(rawImages[rawImgSN][0])[0]), rawImages[rawImgSN][1])
					prefix = os.path.dirname(options.mrcSolutionFile).replace(" ", "")
					if prefix != "" : prefix = prefix + "/"
					tnfFileName = "%s%s-%d.tnf" % (prefix,os.path.basename(os.path.splitext(i[0])[0]), i[1])
					rawFFT = rawImage.doFFT()
					rawFFT.writeImage(tnfFileName,0)  #tnf file no header information, it is a pure FFT of raw image file
		
					outFile.write("%s\n" % (os.path.abspath(tnfFileName)))
					outFile.write(" %d, %.4f, %.4f, %.4f, %.4f, %.4f, 0.0\n" % (0, alt, az, phi, cy, cx))
			outFile.close()
		if updataHeader:
			for i in solutions:
				rawImage.readImage(i[0], i[1], 1)
				if options.verbose:
					cx  = rawImage.get_center_x()
					cy  = rawImage.get_center_y()
					alt = rawImage.alt()
					az  = rawImage.az()
					phi = rawImage.phi()
					print "Update header: %s %d\t%7.5f  %7.5f  %7.2f  %7.2f  %7.2f => %7.5f  %7.5f  %7.2f  %7.2f  %7.2f" % \
						(i[0], i[1], alt*180.0/pi, az*180.0/pi, phi*180.0/pi, cx, cy, i[2]*180.0/pi, i[3]*180.0/pi, i[4]*180.0/pi, i[5], i[6])
				rawImage.setRAlign(i[2], i[3], i[4])
				rawImage.set_center_x(i[5])
				rawImage.set_center_y(i[6])
				imgtype = EMAN.EMData.ANY
				rawImage.writeImage(i[0], i[1], imgtype, 1)
	if not options.nolog and (not mpi or (mpi and mpi.rank==0)): EMAN.LOGend()
Example #29
# each logged state needs a start and an end event number
runEventStart = mpe.log_get_event_number()
runEventEnd = mpe.log_get_event_number()
sendEventStart = mpe.log_get_event_number()
sendEventEnd = mpe.log_get_event_number()
recvEventStart = mpe.log_get_event_number()
recvEventEnd = mpe.log_get_event_number()
sleepEventStart = mpe.log_get_event_number()
sleepEventEnd = mpe.log_get_event_number()

mpe.describe_state( runEventStart, runEventEnd, "Full Runtime", "blue" )
mpe.describe_state( sendEventStart, sendEventEnd, "send", "red" )
mpe.describe_state( recvEventStart, recvEventEnd, "recv", "green" )
mpe.describe_state( sleepEventStart, sleepEventEnd, "sleep", "turquoise" )

mpe.log_event( runEventStart, rank, "starting run")
# Send and receive 100 messages, logging start and end events for each (200 log events per process).
for i in xrange(100):
    if( rank == 0 ):
        # Generate 100 numbers, send them to rank 1
        mpe.log_event( sendEventStart, i, "start send" )
        data = Numeric.array( range(10000), Numeric.Int32 )
        mpi.send( data, 10000, mpi.MPI_INT, 1, i, mpi.MPI_COMM_WORLD )
        mpe.log_event( sendEventEnd, i, "end send")
    else:
        mpe.log_event( recvEventStart, i, "start recv" )
        rdata = mpi.recv( 10000, mpi.MPI_INT, 0, i, mpi.MPI_COMM_WORLD )
        mpe.log_event( recvEventEnd, i, "end recv" )
    if( i == 50 ):
        mpe.log_event( sleepEventStart, i, "start sleep" )
        time.sleep(1)
        mpi.barrier( mpi.MPI_COMM_WORLD )
        mpe.log_event( sleepEventEnd, i, "end sleep")

mpe.log_event( runEventEnd, rank, "stopping run")
mpe.finish_log("test1")
mpi.finalize()
Example #30
File: test.py Project: steder/maroonmpi
import mpi
import sys, Numeric

print "Creating Data Array..."
data = Numeric.array( [1,2,3,4], Numeric.Int32 )

rank, size = mpi.init( len(sys.argv), sys.argv )
assert size == 2
print "(%s,%s): initialized..." %(rank,size)

if( rank == 0 ):
    print "(%s,%s): sending: %s" %( rank, size, data )
    mpi.send( data, 4, mpi.MPI_INT, 1, 0, mpi.MPI_COMM_WORLD )
    data2 = Numeric.array([ -1, -1, -1, -1 ], Numeric.Int32 )
else:
    print "(%s,%s): receiving..." %(rank,size)
    data2 = mpi.recv( 4, mpi.MPI_INT, 0, 0, mpi.MPI_COMM_WORLD )
    print "(%s,%s): received: %s" %(rank, size, data2)

print "(%s,%s): received: %s" % ( rank, size, data2 )

mpi.finalize()
Example #31
import mpi

rank = mpi.rank
if rank == 0:
    data = {"a": 7, "b": 3.14}
    mpi.send(data, 1)
    print("Sending data from", rank, "data", data)
    data, status = mpi.recv(source=1)
    mpi.barrier()
    print("Receving data at", rank, "data", data)
elif rank == 1:
    data1 = {"a": 7, "b": "abc"}
    mpi.send(data1, 0)
    print("Sending data from", rank, "data", data1)
    data, status = mpi.recv(source=0)
    mpi.barrier()
    print("Recieving data at", rank, "data", data1)
Example #32
File: parallel.py Project: umansky/UEDGE
from types import TupleType  # assumed: imported near the top of parallel.py

try:
    import mpi
    me = mpi.rank
    npes = mpi.procs
except ImportError:
    me = 0
    npes = 0

if npes > 0: lparallel = 1
else: lparallel = 0

# --- The interface has changed some in the newest version of pyMPI.
# --- Check the interface to the mpi.recv command. The newer versions
# --- return a tuple instead of just the data itself.
# --- Is there a better way of doing this?
if lparallel:
    mpi.send(me, me)
    _i = mpi.recv(me)
    if type(_i) == TupleType: _newpympi = 1
    else: _newpympi = 0
else:
    _newpympi = 1

if _newpympi:

    def mpirecv(pe=0, ms=0):
        result, stat = mpi.recv(pe, ms)
        return result
else:

    def mpirecv(pe=0, ms=0):
        return mpi.recv(pe, ms)
Example #33
def main():
    EMAN.appinit(sys.argv)
    if sys.argv[-1].startswith("usefs="):
        sys.argv = sys.argv[:-1]  # remove the runpar fileserver info

    (options, rawimage, refmap) = parse_command_line()

    sffile = options.sffile
    verbose = options.verbose
    shrink = options.shrink
    mask = options.mask
    first = options.first
    last = options.last
    scorefunc = options.scorefunc

    projfile = options.projection
    output_ptcls = options.update_rawimage
    cmplstfile = options.cmplstfile
    ortlstfile = options.ortlstfile
    startSym = options.startSym
    endSym = options.endSym

    if not options.nocmdlog:
        pid = EMAN.LOGbegin(sys.argv)
        EMAN.LOGInfile(pid, rawimage)
        EMAN.LOGInfile(pid, refmap)
        if projfile:
            EMAN.LOGOutfile(pid, projfile)
        if output_ptcls:
            EMAN.LOGOutfile(pid, output_ptcls)
        if cmplstfile:
            EMAN.LOGOutfile(pid, cmplstfile)
        if ortlstfile:
            EMAN.LOGOutfile(pid, ortlstfile)

    ptcls = []
    if not (mpi or pypar) or ((mpi and mpi.rank == 0) or (pypar and pypar.rank == 0)):
        ptcls = EMAN.image2list(rawimage)
        ptcls = ptcls[first:last]

        print "Read %d particle parameters" % (len(ptcls))
        # ptcls = ptcls[0:10]

    if mpi and mpi.size > 1:
        ptcls = mpi.bcast(ptcls)
        print "rank=%d\t%d particles" % (mpi.rank, len(ptcls))
    elif pypar and pypar.size() > 1:
        ptcls = pypar.broadcast(ptcls)
        print "rank=%d\t%d particles" % (pypar.rank(), len(ptcls))

    if sffile:
        sf = EMAN.XYData()
        sf.readFile(sffile)
        sf.logy()

    if not mpi or ((mpi and mpi.rank == 0) or (pypar and pypar.rank() == 0)):
        if cmplstfile and projfile:
            if output_ptcls:
                raw_tmp = output_ptcls
            else:
                raw_tmp = rawimage
            fp = open("tmp-" + cmplstfile, "w")
            fp.write("#LST\n")
            for i in range(len(ptcls)):
                fp.write("%d\t%s\n" % (first + i, projfile))
                fp.write("%d\t%s\n" % (first + i, raw_tmp))
            fp.close()
        if (mpi and mpi.size > 1 and mpi.rank == 0) or (pypar and pypar.size() > 1 and pypar.rank() == 0):
            total_recv = 0
            if output_ptcls:
                total_recv += len(ptcls)
            if projfile:
                total_recv += len(ptcls)
            for r in range(total_recv):
                # print "before recv from %d" % (r)
                if mpi:
                    msg, status = mpi.recv()
                else:
                    msg = pypar.receive(r)
                    # print "after recv from %d" % (r)
                    # print msg, status
                d = emdata_load(msg[0])
                fname = msg[1]
                index = msg[2]
                d.writeImage(fname, index)
                print "wrtie %s %d" % (fname, index)
            if options.ortlstfile:
                solutions = []
                for r in range(1, mpi.size):
                    msg, status = mpi.recv(source=r, tag=r)
                    solutions += msg

                def ptcl_cmp(x, y):
                    eq = cmp(x[0], y[0])
                    if not eq:
                        return cmp(x[1], y[1])
                    else:
                        return eq

                solutions.sort(ptcl_cmp)
    if (not mpi or (mpi and ((mpi.size > 1 and mpi.rank > 0) or mpi.size == 1))) or (
        not pypar or (pypar and ((pypar.size() > 1 and pypar.rank() > 0) or pypar.size() == 1))
    ):
        map3d = EMAN.EMData()
        map3d.readImage(refmap, -1)
        map3d.normalize()
        if shrink > 1:
            map3d.meanShrink(shrink)
        map3d.realFilter(0, 0)  # threshold, remove negative pixels

        imgsize = map3d.ySize()

        img = EMAN.EMData()

        ctffilter = EMAN.EMData()
        ctffilter.setSize(imgsize + 2, imgsize, 1)
        ctffilter.setComplex(1)
        ctffilter.setRI(1)

        if (mpi and mpi.size > 1) or (pypar and pypar.size() > 1):
            ptclset = range(mpi.rank - 1, len(ptcls), mpi.size - 1)
        else:
            ptclset = range(0, len(ptcls))

        if mpi:
            print "Process %d/%d: %d/%d particles" % (mpi.rank, mpi.size, len(ptclset), len(ptcls))

        solutions = []
        for i in ptclset:
            ptcl = ptcls[i]
            e = EMAN.Euler(ptcl[2], ptcl[3], ptcl[4])
            dx = ptcl[5] - imgsize / 2
            dy = ptcl[6] - imgsize / 2
            print "%d\talt,az,phi=%8g,%8g,%8g\tx,y=%8g,%8g" % (
                i + first,
                e.alt() * 180 / pi,
                e.az() * 180 / pi,
                e.phi() * 180 / pi,
                dx,
                dy,
            ),

            img.readImage(ptcl[0], ptcl[1])
            img.setTAlign(-dx, -dy, 0)
            img.setRAlign(0, 0, 0)
            img.rotateAndTranslate()  # now img is centered
            img.applyMask(int(mask - max(abs(dx), abs(dy))), 6, 0, 0, 0)
            if img.hasCTF():
                fft = img.doFFT()

                ctfparm = img.getCTF()
                ctffilter.setCTF(ctfparm)
                if options.phasecorrected:
                    if sffile:
                        ctffilter.ctfMap(64, sf)  # Wiener filter with 1/CTF (no sign) correction
                else:
                    if sffile:
                        ctffilter.ctfMap(32, sf)  # Wiener filter with 1/CTF (including sign) correction
                    else:
                        ctffilter.ctfMap(2, EMAN.XYData())  # flip phase

                fft.mult(ctffilter)
                img2 = fft.doIFT()  # now img2 is the CTF-corrected raw image

                img.gimmeFFT()
                del fft
            else:
                img2 = img

            img2.normalize()
            if shrink > 1:
                img2.meanShrink(shrink)
            # if sffile:
            # 	snrcurve = img2.ctfCurve(9, sf)	# absolute SNR
            # else:
            # 	snrcurve = img2.ctfCurve(3, EMAN.XYData())		# relative SNR

            e.setSym(startSym)
            maxscore = -1e30  # the larger the better
            scores = []
            for s in range(e.getMaxSymEl()):
                ef = e.SymN(s)
                # proj = map3d.project3d(ef.alt(), ef.az(), ef.phi(), -6)		# Wen's direct 2D accumulation projection
                proj = map3d.project3d(
                    ef.alt(), ef.az(), ef.phi(), -1
                )  # Pawel's fast projection, ~3 times faster than mode -6 with 216^3
                # don't use mode -4, it modifies its own data
                # proj2 = proj
                proj2 = proj.matchFilter(img2)
                proj2.applyMask(int(mask - max(abs(dx), abs(dy))), 6, 0, 0, 0)
                if scorefunc == "ncccmp":
                    score = proj2.ncccmp(img2)
                elif scorefunc == "lcmp":
                    score = -proj2.lcmp(img2)[0]
                elif scorefunc == "pcmp":
                    score = -proj2.pcmp(img2)
                elif scorefunc == "fsccmp":
                    score = proj2.fscmp(img2, [])
                elif scorefunc == "wfsccmp":
                    score = proj2.fscmp(img2, snrcurve)
                if score > maxscore:
                    maxscore = score
                    best_proj = proj2
                    best_ef = ef
                    best_s = s
                scores.append(score)
                # proj2.writeImage("proj-debug.img",s)
                # print "\tsym %2d/%2d: euler=%8g,%8g,%8g\tscore=%12.7g\tbest=%2d euler=%8g,%8g,%8g score=%12.7g\n" % \
                # 		   (s,60,ef.alt()*180/pi,ef.az()*180/pi,ef.phi()*180/pi,score,best_s,best_ef.alt()*180/pi,best_ef.az()*180/pi,best_ef.phi()*180/pi,maxscore)
            scores = Numeric.array(scores)
            print "\tbest=%2d euler=%8g,%8g,%8g max score=%12.7g\tmean=%12.7g\tmedian=%12.7g\tmin=%12.7g\n" % (
                best_s,
                best_ef.alt() * 180 / pi,
                best_ef.az() * 180 / pi,
                best_ef.phi() * 180 / pi,
                maxscore,
                MLab.mean(scores),
                MLab.median(scores),
                MLab.min(scores),
            )
            if projfile:
                best_proj.setTAlign(dx, dy, 0)
                best_proj.setRAlign(0, 0, 0)
                best_proj.rotateAndTranslate()

                best_proj.set_center_x(ptcl[5])
                best_proj.set_center_y(ptcl[6])
                best_proj.setRAlign(best_ef)
                # print "before proj send from %d" % (mpi.rank)

                if mpi and mpi.size > 1:
                    mpi.send((emdata_dump(best_proj), projfile, i + first), 0)
                elif pypar and pypar.size() > 1:
                    pypar.send((emdata_dump(best_proj), projfile, i + first), 0)
                # print "after proj send from %d" % (mpi.rank)
                else:
                    best_proj.writeImage(projfile, i + first)

            img2.setTAlign(0, 0, 0)
            img2.setRAlign(best_ef)
            img2.setNImg(1)
            # print "before raw send from %d" % (mpi.rank)
            if output_ptcls:
                if mpi and mpi.size > 1:
                    mpi.send((emdata_dump(img2), output_ptcls, i + first), 0)
                elif pypar and pypar.size() > 1:
                    pypar.send((emdata_dump(img2), output_ptcls, i + first), 0)
                # print "after raw send from %d" % (mpi.rank)
                else:
                    img2.writeImage(output_ptcls, i + first)

            solutions.append((ptcl[0], ptcl[1], best_ef.alt(), best_ef.az(), best_ef.phi(), ptcl[5], ptcl[6]))
        if mpi and (mpi.size > 1 and mpi.rank > 0):
            mpi.send(solutions, 0, tag=mpi.rank)

    if mpi:
        mpi.barrier()
    elif pypar:
        pypar.barrier()
    if mpi:
        mpi.finalize()
    elif pypar:
        pypar.finalize()

    if options.cmplstfile:
        os.rename("tmp-" + cmplstfile, cmplstfile)
    if options.ortlstfile:
        lFile = open(options.ortlstfile, "w")
        lFile.write("#LST\n")
        for i in solutions:
            lFile.write(
                "%d\t%s\t%g\t%g\t%g\t%g\t%g\n"
                % (i[1], i[0], i[2] * 180.0 / pi, i[3] * 180.0 / pi, i[4] * 180.0 / pi, i[5], i[6])
            )
        lFile.close()

    if not options.nocmdlog:
        EMAN.LOGend()
Example #34
 def run(self):
     for task in self.taskList:
         data, message = mpi.recv()
         source = message.source
         mpi.send( task, source)
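This master loop pairs with the worker of Example #12, which leaves its loop only when a receive finally fails. A more explicit shutdown is to answer each worker's last request with a sentinel (a sketch, not the project's actual protocol; the worker would test for None before calling task.run()):

def run_with_sentinel(self):
    for task in self.taskList:
        data, message = mpi.recv()          # wait for any worker's request
        mpi.send(task, message.source)
    for _ in range(mpi.procs - 1):          # one stop marker per worker
        data, message = mpi.recv()
        mpi.send(None, message.source)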
Example #35
File: game.py Project: johnmoore/Heorot
		if (board == 0):
			raise StandardError("No room for player " + player.__str__())

	mpi.barrier()
	
	#now we run the game
	starttime = time.time()
	exited = 0
	
	while(time.time() <= starttime + GAME_LENGTH):
		#Update our queues
		if (len(mpi._recv_queue) > 0):
			packet = mpi.recv(mpi.ANY_SOURCE)
			(data, status) = packet #data[0] is the command string, data[1] is a list of args
			if (data == "check_in_game"):
				mpi.send(1, status.source)
			elif (data == "exiting"):
				exited += 1
			else:
				player = players[status.source]
				command = data[0]
				args = data[1]
				player.SetBoard(board)
				player.QueueAction(command, args) #forward our command to the master
				board = player.GetBoard()
		#Process our queues
		for player in range(1, mpi.size):
			players[player].SetBoard(board)
			players[player].ProcessQueue()
			board = players[player].GetBoard()
	print "Game completed. Anything following will not be taken into consideration."
Example #36
import mpi

if mpi.rank == 0:
        for i in range(1,mpi.size):
                print mpi.recv()[0]
else:
        mpi.send("Hello from process " + str(mpi.rank),0)
Example #37
	def action(self):
		mpi.send([self.Actions.GetArrows, []], 0)
		(result, code) = mpi.recv(0)[0]
		print mpi.rank, "] I have", result,"arrows."
		mpi.send([self.Actions.FireBow, [10, self.Direction.randomDir()]], 0)
		(result, code) = mpi.recv(0)[0]
		if (result == 1):
			print mpi.rank, "] firing bow attack succeeded!"
		else:
			if (code == self.ResultCodes.game_over):
				print mpi.rank, "] firing bow attack failed because the game was over"
			elif (code == self.ResultCodes.not_enough_arrows):
				print mpi.rank, "] firing bow attack failed because we were out of arrows"
			elif (code == self.ResultCodes.invalid_bow_direction):
				print mpi.rank, "] could not aim bow in that direction"
		mpi.send([self.Actions.FaceHorse, [self.Direction.West]], 0)
		(result, code) = mpi.recv(0)[0]
		if (code == self.ResultCodes.success):
			print mpi.rank, "] set horse's direction!"
		elif (code == self.ResultCodes.invalid_horse_facing):
			print mpi.rank, "] I can't face my horse that way!"
		mpi.send([self.Actions.GetHorseFacing, []], 0)
		(dir, code) = mpi.recv(0)[0]
		mpi.send([self.Actions.GetHorsePosition, []], 0)
		(pos, code) = mpi.recv(0)[0]
		if (code == self.ResultCodes.success):
			print mpi.rank, "] My horse is faced", self.Direction.ToString(dir), "at", "(", pos.x, ",", pos.y, ")"
			print mpi.rank, "] Where is (5, 5)? In relation to me, approx.: ", self.Direction.ToString(pos.toward(self.Position(5, 5)))
		elif (code == self.ResultCodes.game_over):
			print mpi.rank, "] I failed to get my horse's position because the game was over"
		print mpi.rank, "] Attempting to ride horse 2 spaces"
		mpi.send([self.Actions.RideHorse, [2]], 0)
		(result, code) = mpi.recv(0)[0]
		if (code == self.ResultCodes.success):
			print mpi.rank, "] horse trodded successfully"
		elif (code == self.ResultCodes.invalid_horse_path):
			print mpi.rank, "] horse trodded unsuccessfully, he must have run off the board or attempted to move too far at once"
		elif (code == self.ResultCodes.collision):
			print mpi.rank, "] I collided"
		elif (code == self.ResultCodes.game_over):
			print mpi.rank, "] could not ride horse because game over"
		elif (code == self.ResultCodes.defeated):
			print mpi.rank, "] could not ride horse because I have been defeated"
Example #38
    jobs_completed_by_worker.append(0)
 
if mpi.rank == 0:
    # "master" node:
 
    workers_running = 0
    next_row_to_assign  = 0
 
    # initialize list of image rows
    image = []
    for i in range(ny):
        image.append(-1)
 
    # get all workers started on tasks
    for n in range(1, mpi.size):
        mpi.send(next_row_to_assign, n)
        next_row_to_assign += 1
        workers_running += 1
 
    # master's main loop:
    while workers_running > 0:
        # receive computed result from any worker
        result, status = mpi.recv(mpi.ANY_SOURCE)
        worker_id = status.source
        row_completed, row_data = result
 
        jobs_completed_by_worker[worker_id] += 1
 
        # incorporate the newly computed row into the image data
        image[row_completed] = row_data

        # (Reconstructed tail: without it the loop above never ends.)
        # Hand the worker another row, or send a -1 sentinel to stop it.
        if next_row_to_assign < ny:
            mpi.send(next_row_to_assign, worker_id)
            next_row_to_assign += 1
        else:
            mpi.send(-1, worker_id)
            workers_running -= 1
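# The worker branch was also missing from the scrape; a minimal
# reconstruction, assuming a compute_row(row) helper defined elsewhere
# and the -1 stop sentinel used by the master loop above:
else:
    # "worker" node: compute rows until the master says stop
    while 1:
        row, status = mpi.recv(0)
        if row < 0:
            break
        mpi.send((row, compute_row(row)), 0)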
Example #41
0
# Run various message sizes
# (Fragment: assumes `k`, `runs`, `sendmap`, and a compiled `pingpong`
# benchmark module are defined earlier, plus `from time import time`.)
for power in range(k):
     mpi.barrier()
     n = 2 ** power
     msg = 'a'*n

     python = []
     c = []
     # I am a sender
     if mpi.rank in sendmap:
          dst = sendmap[mpi.rank]
          for run in range(runs):
               mpi.barrier()
               t0 = time()
               x = mpi.send(msg,dst)
               y,status = mpi.recv(dst)
               python.append( time()-t0 )
               del t0
               del x

               mpi.barrier()
               t0 = time()
               x = pingpong.send(n,dst)
               y = pingpong.recv(n,dst)
               c.append( time()-t0 )

               del t0
               del x
               del y
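          # Hedged addition: report the mean round-trip time (seconds)
          # for this message size from the lists filled above.
          print n, sum(python)/len(python), sum(c)/len(c)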
               
Example #42
0
	def ProcessQueue(self):
		if (self.PayloadFinished() == 1 and len(self.ActionQueue) > 0):
			action = self.ActionQueue.popleft()
			command = action[0]
			args = action[1]
			replyto = action[2]
			#process action
			if (self.Game_Over == 1):
				mpi.send((0, self.ResultCodes.game_over), self.proc) #return false because game is over
			elif (self.Defeated == 1):
				mpi.send((0, self.ResultCodes.defeated), self.proc)
				if not (self.Game_Over == 1):
					print self.proc, "] waiting for the game to end..."
					self.AddPayload(2)
			else:
				try:
					if (command.GetActionString() == "Attack"):
						if (len(args) == 1):
							mpi.send(self.Attack(args[0], self.Facing), self.proc)
						elif (len(args) == 2):
							mpi.send(self.Attack(args[0], args[1]), self.proc)
					elif (command.GetActionString() == "GetPower"):
						mpi.send((self.power, self.ResultCodes.success), self.proc)
					elif (command.GetActionString() == "SetFacing"):
						mpi.send(self.SetFacing(args[0]), self.proc)
					elif (command.GetActionString() == "GetFacing"):
						mpi.send((self.Facing, self.ResultCodes.success), self.proc)
					elif (command.GetActionString() == "GetPosition"):
						mpi.send((self.Position, self.ResultCodes.success), self.proc)
					elif (command.GetActionString() == "Move"):
						mpi.send(self.Move(args[0]), self.proc)
					else:
						mpi.send((0, self.ResultCodes.invalid_command), self.proc)
				except IndexError:
					mpi.send((0, self.ResultCodes.invalid_arguments), self.proc)
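	# For context, a minimal QueueAction counterpart (a sketch; the
	# original class body is not shown), matching the (command, args,
	# replyto) triples popped above:
	#     def QueueAction(self, command, args, replyto=None):
	#         self.ActionQueue.append((command, args, replyto))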
Example #43
0
    def writeMasterSiloFile(ndim, nblock, jsplit, baseDirectory, baseName,
                            procDir, materials, vars, label, time, cycle):

        nullOpts = silo.DBoptlist()

        # Decide which domains have information.
        if len(vars[0][0]) > 0:
            myvote = mpi.rank + 1
        else:
            myvote = 0
        maxproc = mpi.allreduce(myvote, mpi.MAX)
        assert maxproc <= mpi.procs

        # Pattern for constructing per domain variables.
        domainNamePatterns = [
            os.path.join(procDir, "domain%i.silo:%%s" % i)
            for i in xrange(maxproc)
        ]

        # We need each domain's nblock info.
        nblocks = [nblock]
        for sendproc in xrange(1, maxproc):
            if mpi.rank == sendproc:
                mpi.send(nblock, dest=0, tag=50)
            if mpi.rank == 0:
                nblocks.append(mpi.recv(source=sendproc, tag=50)[0])
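            # (Every rank walks this loop; only the matching sender and
            # rank 0 act on a given iteration, so sends pair up in order.)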

        # Create the master file.
        if mpi.rank == 0:
            fileName = os.path.join(baseDirectory, baseName + ".silo")
            f = silo.DBCreate(fileName, silo.DB_CLOBBER, silo.DB_LOCAL, label,
                              silo.DB_HDF5)
            nullOpts = silo.DBoptlist()

            # Write the domain file names and types.
            domainNames = Spheral.vector_of_string(
                [p % "hblk0/hydro_mesh" for p in domainNamePatterns])
            meshTypes = Spheral.vector_of_int([silo.DB_QUADMESH] * maxproc)
            optlist = silo.DBoptlist(1024)
            assert optlist.addOption(silo.DBOPT_CYCLE, cycle) == 0
            assert optlist.addOption(silo.DBOPT_DTIME, time) == 0
            assert silo.DBPutMultimesh(f, "hydro_mesh", domainNames, meshTypes,
                                       optlist) == 0

            # Write material names.
            if materials:
                material_names = Spheral.vector_of_string(
                    [p % "/hblk0/Materials" for p in domainNamePatterns])
                matnames = Spheral.vector_of_string(
                    ["void"] + [x.name for x in materials])
                matnos = Spheral.vector_of_int(range(len(materials) + 1))
                assert len(material_names) == maxproc
                assert len(matnames) == len(materials) + 1
                assert len(matnos) == len(materials) + 1
                optlist = silo.DBoptlist(1024)
                assert optlist.addOption(silo.DBOPT_CYCLE, cycle) == 0
                assert optlist.addOption(silo.DBOPT_DTIME, time) == 0
                assert optlist.addOption(silo.DBOPT_MMESH_NAME,
                                         "hydro_mesh") == 0
                assert optlist.addOption(silo.DBOPT_MATNAMES,
                                         silo.DBOPT_NMATNOS, matnames) == 0
                assert optlist.addOption(silo.DBOPT_MATNOS, silo.DBOPT_NMATNOS,
                                         matnos) == 0
                assert silo.DBPutMultimat(f, "Materials", material_names,
                                          optlist) == 0

            # Write the variables descriptors.
            types = Spheral.vector_of_int([silo.DB_QUADVAR] * maxproc)
            for var, varname in vars:
                domainVarNames = Spheral.vector_of_string()
                for iproc, p in enumerate(domainNamePatterns):
                    domainVarNames.append(p % ("/hblk0/" + varname))
                assert len(domainVarNames) == maxproc
                optlistMV = silo.DBoptlist()
                assert optlistMV.addOption(silo.DBOPT_CYCLE, cycle) == 0
                assert optlistMV.addOption(silo.DBOPT_DTIME, time) == 0
                #assert optlistMV.addOption(silo.DBOPT_TENSOR_RANK, silo.DB_VARTYPE_SCALAR) == 0
                assert optlistMV.addOption(silo.DBOPT_BLOCKORIGIN, 0) == 0
                assert optlistMV.addOption(silo.DBOPT_MMESH_NAME,
                                           "hydro_mesh") == 0
                assert silo.DBPutMultivar(f, varname, domainVarNames, types,
                                          optlistMV) == 0

            # Write the dummy variable "akap_0" to fool Hades into thinking we're actually Hydra or something.
            assert silo.DBPutQuadvar1(
                f, "akap_0", "hydro_mesh",
                Spheral.vector_of_double([0.0] * (ndim * ndim)),
                Spheral.vector_of_double(), silo.DB_ZONECENT,
                Spheral.vector_of_int([ndim] * ndim), nullOpts) == 0

            # Write domain and mesh size info.
            assert silo.DBMkDir(f, "Decomposition") == 0
            assert silo.DBWrite(f, "Decomposition/NumDomains", maxproc) == 0
            assert silo.DBWrite(f, "Decomposition/NumLocalDomains",
                                maxproc) == 0
            assert silo.DBWrite(f, "Decomposition/NumBlocks", 1) == 0
            #assert silo.DBWrite(f, "Decomposition/LocalName", "hblk") == 0
            localDomains = Spheral.vector_of_int(range(maxproc))
            domainFiles = Spheral.vector_of_vector_of_int(
                [Spheral.vector_of_int(range(maxproc))])
            assert silo.DBWrite(f, "Decomposition/LocalDomains",
                                localDomains) == 0
            assert silo.DBWrite(f, "DomainFiles", domainFiles) == 0

            for iproc in xrange(maxproc):
                assert silo.DBMkDir(f, "Decomposition/gmap%i" % iproc) == 0
                stuff = Spheral.vector_of_int([0] * 12)
                for jdim in xrange(ndim):
                    stuff[6 + jdim] = nblocks[iproc][jdim]
                if iproc in (0, maxproc - 1):
                    assert silo.DBWrite(
                        f, "Decomposition/gmap%i/NumNeighbors" % iproc, 1) == 0
                else:
                    assert silo.DBWrite(
                        f, "Decomposition/gmap%i/NumNeighbors" % iproc, 2) == 0
                assert silo.DBWrite(f, "Decomposition/gmap%i/gmap" % iproc,
                                    stuff) == 0

        # Close the file.
        if mpi.rank == 0:
            assert silo.DBClose(f) == 0
            del f

        return maxproc
Example #44
0
 def action(self):
     mpi.send([self.Actions.GetBreath, []], 0)
     (result, code) = mpi.recv(0)[0]
     print mpi.rank, "] I have", result, "breath power."
     mpi.send([self.Actions.BreatheFire, [10]], 0)
     (result, code) = mpi.recv(0)[0]
     if result == 1:
         print mpi.rank, "] fire breathing attack succeeded!"
     else:
         if code == self.ResultCodes.game_over:
             print mpi.rank, "] fire breathing attack failed because the game was over"
         elif code == self.ResultCodes.not_enough_breath:
             print mpi.rank, "] fire breathing attack failed because we were out of breath"
         elif code == self.ResultCodes.invalid_breathing_direction:
             print mpi.rank, "] can only breath fire straight"
     mpi.send([self.Actions.SetFlyingDirection, [self.Direction.East]], 0)
     (result, code) = mpi.recv(0)[0]
     if code == self.ResultCodes.success:
         print mpi.rank, "] set direction!"
     elif code == self.ResultCodes.invalid_flying_direction:
         print mpi.rank, "] I can't fly that way!"
     mpi.send([self.Actions.GetFlyingDirection, []], 0)
     (dir, code) = mpi.recv(0)[0]
     mpi.send([self.Actions.GetFlyingPosition, []], 0)
     (pos, code) = mpi.recv(0)[0]
     if code == self.ResultCodes.success:
         print mpi.rank, "] I am flying", self.Direction.ToString(dir), "at", "(", pos.x, ",", pos.y, ")"
         print mpi.rank, "] Where is (5, 5)? In relation to me, approx.: ", self.Direction.ToString(
             pos.toward(self.Position(5, 5))
         )
     elif code == self.ResultCodes.game_over:
         print mpi.rank, "] I failed to get my flying position because the game was over"
     print mpi.rank, "] Attempting to fly 2 spaces"
     mpi.send([self.Actions.Fly, [1]], 0)
     (result, code) = mpi.recv(0)[0]
     if code == self.ResultCodes.success:
         print mpi.rank, "] flying successful"
     elif code == self.ResultCodes.invalid_flight_path:
         print mpi.rank, "] flying unsuccessful, I must have flown off the board or attempted to fly too far at once"
     elif code == self.ResultCodes.collision:
         print mpi.rank, "] I collided"
     elif code == self.ResultCodes.game_over:
         print mpi.rank, "] could not fly because game over"
     elif code == self.ResultCodes.slain:
         print mpi.rank, "] could not fly because I have been slain"