Example #1
        def syncAndApply(self):
            if mpi.rank == 0:
                self.dataLock.acquire()  # this lock probably isn't necessary yet

                try:
                    for i in range(len(self.cursorObjects)):
                        self.cursors[i] = self.cursorObjects[i][0].getPos()

                except:
                    traceback.print_exc()
                finally:
                    self.dataLock.release()

            if mpi.rank == 0:
                resultCursors = mpi.bcast(self.cursors)
            else:
                self.resultCursors = mpi.bcast()

            if mpi.rank != 0:
                for i in range(len(self.cursorObjects)):
                    if len(self.resultCursors) > i:
                        # access: cursorObjects[i][localcopies]
                        for j in range(len(self.cursorObjects[i])):  # set for each local copy
                            self.cursorObjects[i][j].setPos(
                                self.resultCursors[i][0],
                                self.resultCursors[i][1])
Example #2
File: sp.py Project: ursk/sparco
 def iteration(self):
     mpi.bcast(self.phi)
     mpi.scatter(self.rootbufs.x, self.nodebufs.x)
     self.infer_coefficients()
     self.learn_basis()
     if self.t > 0 and self.t % self.update_coefficient_statistics_interval == 0:
         self.update_coefficient_statistics()
Example #3
    def syncAndApply(self):
        tmpQueue = None
        if mpi.rank == 0:
            self.dataLock.acquire()  # this lock probably isn't necessary

            try:
                tmpQueue = self.queue
                self.queue = []
            except:
                traceback.print_exc()
            finally:
                self.dataLock.release()

        if mpi.rank == 0 and len(tmpQueue) > 0:
            print "Bcasting:", tmpQueue
        #print "mpi barrier", mpi.rank
        #mpi.barrier()
        #print "bcast:", mpi.rank
        if mpi.rank == 0:
            resultQueue = mpi.bcast(tmpQueue)
        else:
            resultQueue = mpi.bcast()

        if len(resultQueue) > 0:
            print "Bcasting recv:", resultQueue, mpi.rank

        for panningObj in self.panningObjects:
            for result in resultQueue:
                x = int((result[0] - self.lastX) * self.width)
                y = int((result[1] - self.lastY) * self.height)
                print "panning, ", mpi.rank, ":", x, y
                panningObj.pan(x, y)
                self.lastX = result[0]
                self.lastY = result[1]
Example #4
    def parallelRunTest(self):
        if mpi.rank == 0:
            st = "Hello World!!" 
            recvSt = mpi.bcast(st)
            if recvSt != "Hello World!!":
                self.fail("bcast test failed on short string")

            longStr = ""
            for i in range(2048):
              longStr += "Foo"
              longStr += str(i)

            recvSt = mpi.bcast(longStr)
            if recvSt != longStr:
              self.fail( "bcast test failed on long string")
        else:
            recvSt = mpi.bcast()
            if recvSt != "Hello World!!":
                self.fail("Received incorrect string in broadcast")

            longStr = ""
            for i in range(2048):
              longStr += "Foo"
              longStr += str(i)

            recvSt = mpi.bcast( )
            if recvSt != longStr:
                self.fail( "bcast test failed on long string")

        return
Example #5
    def syncAndApply(self):
        tmpQueue = None
        if mpi.rank == 0:
            self.dataLock.acquire() # this lock probably isn't necessary

            try:
                tmpQueue = self.queue
                self.queue = []
            except:
                traceback.print_exc()
            finally:
                self.dataLock.release()
 
        if mpi.rank == 0 and len(tmpQueue) > 0:
            print "Bcasting:", tmpQueue
        #print "mpi barrier", mpi.rank
        #mpi.barrier()
        #print "bcast:", mpi.rank
        if mpi.rank == 0:
            resultQueue = mpi.bcast(tmpQueue)
        else:
            resultQueue = mpi.bcast()

        if len(resultQueue) > 0:
            print "Bcasting recv:", resultQueue, mpi.rank

        for panningObj in self.panningObjects:
            for result in resultQueue:
                x = int((result[0] - self.lastX) * self.width)
                y = int((result[1] - self.lastY) * self.height)
                print "panning, ", mpi.rank, ":", x, y
                panningObj.pan( x, y)
                self.lastX = result[0]
                self.lastY = result[1]
Example #6
    def do_M_step(self, ess_list):
        # CPU 0
        if mpi.rank==0:
            mus=zeros((self.node_size,1), 'd')
            kappas=zeros(self.node_size, 'd')
            ess_r, ess_w=ess_list.pop()
            for i in range(0, len(ess_list)):
                r, w=ess_list[i]
                ess_r+=r
                ess_w+=w
            for i in range(0, self.node_size):
                r=ess_r[i]
                w=ess_w[i]
                norm_r=norm(r)

                print "do_M_step (%i): r=%s, w=%s, norm_r=%s" % (i,r,w,norm_r)

                if w == 0:
                    print "WARNING: There is no data to estimate mu and kappa from"
                    kappas[i]=self.kappas[i]
                    mus[i]=self.mus[i]
                else:
                    mu_coords=r/norm_r
                    kappas[i]=estimate_kappa(w,r,mu_coords)
                    mus[i]=to_radian(mu_coords)

            vm_list, samplers=self._make_vm_list(mus, kappas, self.node_size)
            self.mus, self.kappas, self.vm_list, self.samplers=\
                    mpi.bcast((mus, kappas, vm_list, samplers))
        # Other CPUs
        else:
            self.mus, self.kappas, self.vm_list, self.samplers=mpi.bcast()
        self.ess_list=[self._get_empty_ess()]
Example #7
File: sp.py Project: ursk/sparco
 def iteration(self):
   mpi.bcast(self.phi)
   mpi.scatter(self.rootbufs.x, self.nodebufs.x)
   self.infer_coefficients()
   self.learn_basis()
   if self.t > 0 and self.t % self.update_coefficient_statistics_interval == 0:
     self.update_coefficient_statistics()
Example #8
    def parallelRunTest(self):
        if mpi.rank == 0:
            st = "Hello World!!"
            recvSt = mpi.bcast(st)
            if recvSt != "Hello World!!":
                self.fail("bcast test failed on short string")

            longStr = ""
            for i in range(2048):
                longStr += "Foo"
                longStr += str(i)

            recvSt = mpi.bcast(longStr)
            if recvSt != longStr:
                self.fail("bcast test failed on long string")
        else:
            recvSt = mpi.bcast()
            if recvSt != "Hello World!!":
                self.fail("Received incorrect string in broadcast")

            longStr = ""
            for i in range(2048):
                longStr += "Foo"
                longStr += str(i)

            recvSt = mpi.bcast()
            if recvSt != longStr:
                self.fail("bcast test failed on long string")

        return
Example #9
 def raw_input(self, prompt=""):
     """
     On the master node:
        1). Write a prompt and read a line.
        2). Broadcast the Line or Terminate
        3). Broadcast the Line
            a).  First, get the length of the line and broadcast it
            b).  Broadcast the line data itself
        4). Terminate
            a).  If the user types the control sequence to exit
                   EOFError is raised.
            b).  Catch EOFError and broadcast length, this time broadcasting negative
            c).  All nodes check for length < 0 and raise an EOFError.
     """
     if( self.rank == 0 ):
         try:
             data = raw_input(prompt)
         except EOFError:
             length = mpi.bcast( -1, 1, mpi.MPI_INT, 0, mpi.MPI_COMM_WORLD )
             self.write("\n")
             raise
         length = mpi.bcast( len(data), 1, mpi.MPI_INT, 0, mpi.MPI_COMM_WORLD )
         data = mpi.bcast(data,length[0], mpi.MPI_CHAR, 0, mpi.MPI_COMM_WORLD )
     else:
         length = mpi.bcast( 0, 1, mpi.MPI_INT, 0, mpi.MPI_COMM_WORLD )
         if (length[0] < 0):
             raise EOFError
         data = mpi.bcast("",length[0], mpi.MPI_CHAR, 0, mpi.MPI_COMM_WORLD )
     s = ""
     for e in data:
         s += "".join(e)
     return s
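The docstring above spells out the length-then-data protocol: rank 0 broadcasts the line length first (negative on EOF), then the characters. As a minimal usage sketch, every rank must invoke the method collectively so the paired mpi.bcast calls match up; the ParallelConsole wrapper name below is an assumption for illustration, not part of the original code.

import mpi

console = ParallelConsole()  # hypothetical object exposing the raw_input() shown above
try:
    # collective call: rank 0 prompts and reads, every rank receives the same line
    line = console.raw_input("command> ")
    print "rank %d read: %s" % (mpi.rank, line)
except EOFError:
    print "rank %d: input stream closed" % mpi.rank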
Example #10
 def _globalValue(self, i, method):
     dom = self.domainForID(i)
     result = None
     if rank == dom:
         j = self.globalIDs[i]
         result = method(j)
     result = mpi.bcast(result, dom)  # every rank picks up the value broadcast from the owning domain
     return result
Example #11
 def wrapped_method(self, *args, **kwargs):
     mpi.bcast((expression, args, kwargs))
     # local execution
     ret = method(self, *args, **kwargs)
     # get return values from all cpus
     ret = mpi.gather(ret)
     if not init:
         # filter object creation which must not return
         return ret
Example #12
def meshScales(xnodes, xmin, xmax):
    nx = len(xnodes)
    xsort = list(xnodes)
    xsort.sort()
    dx = ([(0.5 * (xsort[i + 1] + xsort[i]) - 0.5 * (xsort[i] + xsort[i - 1]))
           for i in xrange(1, nx - 1)] + [0.5 * (xsort[0] + xsort[1]) - xmin] +
          [xmax - 0.5 * (xsort[-2] + xsort[-1])])
    dxmin = mpi.bcast(0.5 * min(dx))
    dxmax = mpi.bcast(0.5 * max(dx))
    return dxmin, dxmax
Example #13
def main( local_comm ):
    name = "test"
    local_rank = mpi.comm_rank( local_comm )
    local_size = mpi.comm_size( local_comm )
    
    print "%s (%s,%s): creating root communicator!"%(name,local_rank,local_size)
    sys.stdout.flush()
    if local_rank == 0:
        tmp_comm = mpi.comm_split( mpi.MPI_COMM_WORLD, 5, 0 )
        print "%s (%s,%s): joined root communicator %s"%(name,local_rank,local_size,tmp_comm)
        sys.stdout.flush()
        ncomponents = mpi.comm_size( tmp_comm )
    else:
        tmp_comm = mpi.comm_split( mpi.MPI_COMM_WORLD, 6, 0 )
        print "%s (%s,%s): Joined non-root communicator %s"%(name,local_rank,local_size,tmp_comm)
        sys.stdout.flush()
        ncomponents = 0
    print "%s (%s,%s): Distributing root communicator!"%(name,local_rank,local_size)
    ncomponents = mpi.bcast( ncomponents, 1, mpi.MPI_INT, 0, local_comm )
    ncomponents = ncomponents[0]
    print "%s (%s,%s): Distributed root communicator!"%(name,local_rank,local_size)
    # Get total number of components and distribute to every node:
    # ncomponents = mpi.allreduce( ncomponents, 1, mpi.MPI_INT, mpi.MPI_SUM, root_comm )
    #print "%s (%s,%s): Root Comm = %s"%(name,local_rank,local_size,root_comm)
    #ncomponents = mpi.comm_size( root_comm )
    print "%s(%s,%s): ncomponents = %s"%(name, local_rank, local_size, ncomponents )
Example #14
def randomDistribute3d(thisDomain,     # domain ID to be calculated
                       nDomains,       # total number of domains
                       nNodesGlobal,   # global number of nodes in this nodelist
                       xyzRangeTotal): # total simulation volume

    assert thisDomain >= 0 and thisDomain < nDomains
    assert nDomains > 0

    import random
    g = random.Random()
    globalNodeIDs = []
    nodePositions = []
    for globalNodeID in xrange(0, nNodesGlobal):
        mpi.barrier()
        domain0 = g.randint(0, nDomains - 1)
        domain = mpi.bcast(domain0)
        if domain == thisDomain:
            globalNodeIDs.append(globalNodeID)
            nodePositions.append(Vector3d(g.uniform(xyzRangeTotal[0][0],
                                                    xyzRangeTotal[1][0]),
                                          g.uniform(xyzRangeTotal[0][1],
                                                    xyzRangeTotal[1][1]),
                                          g.uniform(xyzRangeTotal[0][2],
                                                    xyzRangeTotal[1][2])))

    assert len(globalNodeIDs) == len(nodePositions)
    assert mpi.allreduce(len(globalNodeIDs), mpi.SUM) == nNodesGlobal
    return globalNodeIDs, nodePositions
def randomDistribute(
    thisDomain,  # domain ID to be calculated
    nDomains,  # total number of domains
    nNodesGlobal,  # global number of nodes in this nodelist
    xRangeTotal):  # total simulation volume

    assert thisDomain >= 0 and thisDomain < nDomains
    assert nDomains > 0

    import random
    g = random.Random()
    globalNodeIDs = []
    xNodePositions = []
    dxNodes = (xRangeTotal[1] - xRangeTotal[0]) / nNodesGlobal
    for globalNodeID in xrange(0, nNodesGlobal):
        mpi.barrier()
        domain0 = g.randint(0, nDomains - 1)
        domain = mpi.bcast(domain0)
        if domain == thisDomain:
            globalNodeIDs.append(globalNodeID)
            xNodePositions.append(xRangeTotal[0] +
                                  (globalNodeID + 0.5) * dxNodes)

    assert len(globalNodeIDs) == len(xNodePositions)
    assert mpi.allreduce(len(globalNodeIDs), mpi.SUM) == nNodesGlobal
    return globalNodeIDs, xNodePositions
Example #16
    def checkHashConsistency(myHashes, sharedIDs, allowEmptyComm):
        msg = "ok"
        myHashSet = set(myHashes)
        for sendProc in xrange(numDomains):
            theirHashSet = mpi.bcast(myHashSet, root=sendProc)
            if sendProc != mpi.rank:
                commonHashes = myHashSet.intersection(theirHashSet)
                if len(commonHashes) > 0 and (not sendProc in neighborDomains):
                    msg = "Missed a neighbor domain : %i %i : %i" % (
                        mpi.rank, sendProc, len(commonHashes))

                elif len(commonHashes) == 0 and (
                        sendProc in neighborDomains) and not allowEmptyComm:
                    msg = "Erroneously communicating between domains : %i %i" % (
                        mpi.rank, sendProc)

                elif len(commonHashes) > 0:
                    k = neighborDomains.index(sendProc)
                    if len(commonHashes) != len(sharedIDs[k]):
                        msg = "Size of shared elements does not match: %i %i : %i %i" % (
                            mpi.rank, sendProc, len(commonHashes),
                            len(sharedIDs[k]))
                    else:
                        sharedHashes = set([myHashes[i] for i in sharedIDs[k]])
                        if sharedHashes != commonHashes:
                            msg = ("Set of common hashes does not match: " +
                                   str(sharedHashes) + " != " +
                                   str(commonHashes))
            msg = allReduceMsg(msg)
            if msg != "ok":
                return msg
        return msg
Example #17
 def set_cpd(self, new_cpd=None):
     if mpi.rank==0:
         assert(not new_cpd is None)
         cpd=normalize_cpd(new_cpd)
         # Get rid of very small values that lead to log overflow
         cpd=clip(cpd, _MIN_TRANSITION, 1000) 
         # Calculate log(cpd) for speed reasons
         log_cpd=log(cpd)
         cum_cpd=cumsum(cpd, -1)
         # Avoid bisect errors
         cum_cpd[...,-1]=2.0
         self.cpd, self.log_cpd, self.cum_cpd=mpi.bcast((cpd, log_cpd, cum_cpd))
         if __debug__:
             self._test_cpd(self.cpd, self.cpd_shape)
     else:
         self.cpd, self.log_cpd, self.cum_cpd=mpi.bcast()
Example #18
 def __init__(self, skip_checks=True):
     import mpi
     if mpi.rank == 0:
         finder.__init__(self, skip_checks)
     else:
         finder.__init__(self, skip_checks, False)
     self._syspath, self._cache = mpi.bcast((self._syspath, self._cache))
Example #19
 def __init__(self,skip_checks=True):
     import mpi
     if mpi.rank == 0:
         finder.__init__(self,skip_checks)
     else:
         finder.__init__(self,skip_checks,False)
     self._syspath,self._cache = mpi.bcast((self._syspath,self._cache))
Example #20
 def set_cpd(self, new_cpd=None):
     if mpi.rank == 0:
         assert (not new_cpd is None)
         cpd = normalize_cpd(new_cpd)
         # Get rid of very small values that lead to log overflow
         cpd = clip(cpd, _MIN_TRANSITION, 1000)
         # Calculate log(cpd) for speed reasons
         log_cpd = log(cpd)
         cum_cpd = cumsum(cpd, -1)
         # Avoid bisect errors
         cum_cpd[..., -1] = 2.0
         self.cpd, self.log_cpd, self.cum_cpd = mpi.bcast(
             (cpd, log_cpd, cum_cpd))
         if __debug__:
             self._test_cpd(self.cpd, self.cpd_shape)
     else:
         self.cpd, self.log_cpd, self.cum_cpd = mpi.bcast()
Example #21
def randomString():
    l = range(20)
    random.shuffle(l)
    result = ""
    for x in l:
        result += str(x)
    result = mpi.bcast(result, 0)
    return result
Example #22
 def allReduceMsg(msg):
     if msg == "ok":
         badProc = numDomains
     else:
         badProc = rank
     badProc = mpi.allreduce(badProc, mpi.MIN)
     if badProc != numDomains:
         msg = mpi.bcast(msg, root=badProc)
     return msg
Example #23
    def __init__(self, 
                 fileName,               # Name of the file
                 meshName,               # Name of mesh variable in file
                 rho0,                   # Initial mass density
                 serialFile = True,      # Should this be treated as a serial file or broken up in parallel?
                 nNodePerh = 2.01,       # number of nodes per smoothing scale
                 SPH = False,            # Force round H tensors
                 scale = 1.0):           # Optionally scale the coordinates by some factor

        self.x, self.y, self.z, self.m, self.H = [], [], [], [], []
        self.rho0 = rho0
        if rank == 0 or (not serialFile):

            # Read the file to a set of positions, volumes, and H's.
            pos = vector_of_Vector3d()
            vol = vector_of_double()
            H = vector_of_SymTensor3d()
            readSiloPolyMesh(fileName, meshName, pos, vol, H)
            print "Read %i points from %s." % (len(pos), fileName)
            assert len(pos) == len(vol) == len(H)

            self.x = [scale * x.x for x in pos]
            self.y = [scale * x.y for x in pos]
            self.z = [scale * x.z for x in pos]
            self.m = [scale**3 * x*rho0 for x in vol]
            self.H = [x/scale for x in H]

        self.x = mpi.bcast(self.x, root=0)
        self.y = mpi.bcast(self.y, root=0)
        self.z = mpi.bcast(self.z, root=0)
        self.m = mpi.bcast(self.m, root=0)
        self.H = mpi.bcast(self.H, root=0)

        # Initialize the base class, which will break up the serial node distribution
        # for parallel cases if required.
        NodeGeneratorBase.__init__(self, serialFile,
                                   self.x, self.y, self.z, self.m, self.H)

        # If SPH has been specified, make sure the H tensors are round.
        if SPH:
            self.makeHround()

        return
Example #24
def gatherarray(a, root=0, othersempty=0, bcast=0):
    if not lparallel: return a
    # --- First check if input can be converted to an array
    isinputok = 1
    try:
        if type(a) in [type(0.), type(0)]:
            a = array([a])
        else:
            a = array(a)
    except:
        isinputok = 0
    # --- Make sure the input is ok on all of the processors
    isinputok = globalmin(isinputok)
    # --- If any returned an error, then all exit (to avoid a deadlock)
    if not isinputok:
        print "Object could not be converted to an array"
        return None
    # --- Now, actually gather the array.
    # --- The check of whether the result is ok may not be needed.
    try:
        result = gather(a, root)
        isinputok = 1
    except:
        isinputok = 0
    # --- Make sure again that the input is ok on all of the processors
    isinputok = globalmin(isinputok)
    if not isinputok:
        print "Error in gather object"
        if type(a) == ArrayType: print "Object has shape ", shape(a)
        return None
    # --- All processors but root simply return either the input argument
    # --- or an empty array unless the result is to be broadcast
    if me != root and not bcast:
        if othersempty: return zeros(len(shape(a)) * [0], a.typecode())
        else: return a
    # --- Root processor reshapes the data, removing the first dimension
    # --- Do it bit by bit since the data passed by the other processors may
    # --- not be all the same size.
    if me == root:
        newlen = 0
        for i in range(npes):
            newlen = newlen + shape(result[i])[0]
        newshape = list(shape(result[0]))
        newshape[0] = newlen
        newresult = zeros(newshape, a.typecode())
        i1 = 0
        for i in range(npes):
            i2 = i1 + shape(result[i])[0]
            newresult[i1:i2, ...] = result[i]
            i1 = i2
    else:
        newresult = 0
    if bcast: newresult = mpi.bcast(newresult, root)
    return newresult
Example #25
    def restoreState(self, cacheFileName):
        def readNodeData(f, iproc):
            pos = f.readObject("proc%06i/pos" % iproc)
            m = f.readObject("proc%06i/m" % iproc)
            H = f.readObject("proc%06i/H" % iproc)
            vol = f.readObject("proc%06i/vol" % iproc)
            surface = f.readObject("proc%06i/surface" % iproc)
            return pos, m, H, vol, surface

        if cacheFileName is None:
            return False

        if os.path.splitext(cacheFileName)[1] != ".silo":
            cacheFileName += ".silo"
        result = False
        if mpi.rank == 0:
            result = (cacheFileName and os.path.exists(cacheFileName))
        result = mpi.bcast(result, root=0)
        if result:
            print "Restoring MedialGenerator state from %s" % cacheFileName
            if mpi.rank == 0:
                f = SiloFileIO(cacheFileName, Read)
                numGeneratingProcs = f.readObject("numGeneratingProcs")

                # Decide how to divide the generating domains between our current processes.
                n0 = numGeneratingProcs / mpi.procs
                remainder = numGeneratingProcs % mpi.procs
                for iproc in xrange(mpi.procs):
                    if iproc >= numGeneratingProcs:
                        imin, imax = 0, 0
                    else:
                        imin = iproc * n0 + min(iproc, remainder)
                        imax = imin + n0
                        if iproc < remainder:
                            imax += 1
                    pos, m, H, vol, surface = [], [], [], [], []
                    for igenproc in xrange(imin, imax):
                        posi, mi, Hi, voli, surfacei = readNodeData(
                            f, igenproc)
                        pos += posi
                        m += mi
                        H += Hi
                        vol += voli
                        surface += surfacei
                    if iproc == 0:
                        self.pos, self.m, self.H, self.vol, self.surface = pos, m, H, vol, surface
                    else:
                        mpi.send((pos, m, H, vol, surface), dest=iproc)
                f.close()
            else:
                self.pos, self.m, self.H, self.vol, self.surface = mpi.recv(
                    source=0)[0]
        return result
Example #26
    def setUp(self):
        global itest
        self.testext = "Random%s_%idomain" % (randomString(), mpi.procs)
        eos = GammaLawGasMKS(5.0/3.0, 1.0)
        self.nodes = makeFluidNodeList("test nodes %i" % itest, eos,
                                       numInternal = nperdomain,
                                       nPerh = 2.01,
                                       hmin = 1.0e-5,
                                       hmax = 0.3)
        itest += 1
        self.pos = self.nodes.positions()
        self.H = self.nodes.Hfield()

        # Figure out the domain bounding volumes.
        dxproc = (x1 - x0)/nxproc
        dyproc = (y1 - y0)/nxproc
        ixproc = rank % nxproc
        iyproc = rank / nxproc
        xminproc = Vector(x0 + ixproc*dxproc, y0 + iyproc*dyproc)
        xmaxproc = Vector(x0 + (ixproc + 1)*dxproc, y0 + (iyproc + 1)*dyproc)

        # Randomly seed the generators.  We choose from random cells in order
        # to keep nodes from getting too close together.
        xynodes_all = []
        occupiedCells = set()
        for k in xrange(n):
            i = rangen.randint(0, ncell)
            while i in occupiedCells:
                i = rangen.randint(0, ncell)
            ix = i % nxcell
            iy = i / nxcell
            xynodes_all.append(Vector((ix + 0.5)*dxcell, (iy + 0.5)*dycell))
            occupiedCells.add(i)
        assert len(occupiedCells) == n
        xynodes_all = mpi.bcast(xynodes_all)
        xynodes = [v for v in xynodes_all if testPointInBox(v, xminproc, xmaxproc)]
        dxavg = (x1 - x0)/nx
        dyavg = (y1 - y0)/ny
        self.dxmin = dxavg
        assert mpi.allreduce(len(xynodes), mpi.SUM) == n

        # Now we can set the node conditions.
        self.nodes.numInternalNodes = len(xynodes)
        for i in xrange(len(xynodes)):
            self.pos[i] = xynodes[i]
            self.H[i] = SymTensor(1.0/(2.0*dxavg), 0.0,
                                  0.0, 1.0/(2.0*dyavg))
        self.nodes.neighbor().updateNodes()

        # Fix up the H's.
        iterateThoseHs(self.nodes)
        return
Example #27
    def setUp(self):
        nxperdomain = nx / numDomains
        eos = GammaLawGasMKS(5.0 / 3.0, 1.0)
        self.nodes = makeFluidNodeList("test nodes",
                                       eos,
                                       numInternal=nxperdomain,
                                       nPerh=2.01)
        pos = self.nodes.positions()
        H = self.nodes.Hfield()

        # Generate initial positions, and split them up between domains appropriately.
        gap = 0.9
        dxavg = (x1 - x0 - gap) / nx
        xnodes = [x0 + (i + 0.5) * dxavg for i in xrange(nx / 2)] + [
            0.5 * (x0 + x1 + gap) + (i + 0.5) * dxavg for i in xrange(nx / 2)
        ]
        xnodes.sort()
        self.dxmin, self.dxmax = meshScales(xnodes, x0, x1)
        for proc in xrange(numDomains):
            xnodes[proc * nxperdomain:(proc + 1) * nxperdomain] = mpi.bcast(
                xnodes[proc * nxperdomain:(proc + 1) * nxperdomain])
        xnodes = xnodes[rank * nxperdomain:(rank + 1) * nxperdomain]
        assert len(xnodes) == nxperdomain
        assert mpi.allreduce(len(xnodes), mpi.SUM) == nx

        # We now have the positions for each domain appropriately divided, so shuffle
        # the local positions.
        #random.shuffle(xnodes)

        # Now we can set the node conditions.
        for i in xrange(nxperdomain):
            pos[i] = Vector(xnodes[i])
            H[i] = SymTensor(1.0 / (2.0 * dxavg))
        self.nodes.neighbor().updateNodes()

        # Iterate the H tensors to something reasonable.
        db = DataBase()
        db.appendNodeList(self.nodes)
        for bc in bclist:
            bc.setAllGhostNodes(db)
        for bc in bclist:
            bc.finalizeGhostBoundary()
        db = DataBase()
        db.appendNodeList(self.nodes)
        vecbound = vector_of_Boundary()
        for bc in bclist:
            vecbound.append(bc)
        WT = TableKernel(BSplineKernel(), 1000)
        smooth = SPHSmoothingScale()
        iterateIdealH(db, vecbound, WT, smooth)
        return
Example #28
def __import_module__(partname, fqname, parent):
    try:
        return sys.modules[fqname]
    except KeyError:
        pass
    fp = None         # module's file
    pathname = None   # module's location
    stuff = None      # tuple of (suffix,mode,type) for the module
    ierror = False    # are we propagating an import error from rank 0?

    # Start with the lookup on rank 0. The other processes will be waiting
    # on a broadcast, so we need to send one even if we're bailing out due
    # to an import error.
    if mpi.rank == 0:
        try:
            fp, pathname, stuff = imp.find_module(partname,
                                                  parent and parent.__path__)
        except ImportError:
            ierror = True
            return None
        finally:
            pathname,stuff,ierror = mpi.bcast((pathname,stuff,ierror))
    else:
        pathname,stuff,ierror = mpi.bcast((pathname,stuff,ierror))
        if ierror:
            return None
        # If imp.find_module returned an open file to rank 0, then we should
        # open the corresponding file for this process too.
        if stuff and stuff[1]:
            fp = open(pathname,stuff[1])

    try:
        m = imp.load_module(fqname, fp, pathname, stuff)
    finally:
        if fp: fp.close()
    if parent:
        setattr(parent, partname, m)
    return m
Example #29
def __import_module__(partname, fqname, parent):
    try:
        return sys.modules[fqname]
    except KeyError:
        pass
    fp = None  # module's file
    pathname = None  # module's location
    stuff = None  # tuple of (suffix,mode,type) for the module
    ierror = False  # are we propagating an import error from rank 0?

    # Start with the lookup on rank 0. The other processes will be waiting
    # on a broadcast, so we need to send one even if we're bailing out due
    # to an import error.
    if mpi.rank == 0:
        try:
            fp, pathname, stuff = imp.find_module(partname,
                                                  parent and parent.__path__)
        except ImportError:
            ierror = True
            return None
        finally:
            pathname, stuff, ierror = mpi.bcast((pathname, stuff, ierror))
    else:
        pathname, stuff, ierror = mpi.bcast((pathname, stuff, ierror))
        if ierror:
            return None
        # If imp.find_module returned an open file to rank 0, then we should
        # open the corresponding file for this process too.
        if stuff and stuff[1]:
            fp = open(pathname, stuff[1])

    try:
        m = imp.load_module(fqname, fp, pathname, stuff)
    finally:
        if fp: fp.close()
    if parent:
        setattr(parent, partname, m)
    return m
Example #30
    def integrate(self, rectangles, function):
        # equivalent to mpi.WORLD.bcast(n,0) or rather a
        # C call to MPI_Bcast(MPI_COMM_WORLD,n,0,&status)
        n = mpi.bcast(rectangles)

        h = 1.0/n
        sum = 0.0
        for i in range(mpi.rank+1,n+1,mpi.procs):
            x = h * (i-0.5)
            sum = sum + function(x)

        myAnswer = h * sum
        answer = mpi.allreduce(myAnswer,mpi.SUM)
        return answer
Example #31
    def integrate(self, rectangles, function):
        # equivalent to mpi.WORLD.bcast(n,0) or rather a
        # C call to MPI_Bcast(MPI_COMM_WORLD,n,0,&status)
        n = mpi.bcast(rectangles)

        h = 1.0 / n
        sum = 0.0
        for i in range(mpi.rank + 1, n + 1, mpi.procs):
            x = h * (i - 0.5)
            sum = sum + function(x)

        myAnswer = h * sum
        answer = mpi.allreduce(myAnswer, mpi.SUM)
        return answer
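The comment in the two integrate examples above notes that mpi.bcast(rectangles) plays the role of an MPI_Bcast from rank 0, so only the root's argument is actually used. A minimal collective driver is sketched below; the Integrator wrapper class and the sin integrand are illustrative assumptions, not part of the original code.

import math
import mpi

calc = Integrator()                   # hypothetical class holding the integrate() method above
n = 10000 if mpi.rank == 0 else None  # only rank 0's rectangle count matters after the bcast
result = calc.integrate(n, math.sin)  # collective call; the allreduce returns the sum on every rank
if mpi.rank == 0:
    print "integral of sin(x) over [0,1] is about %g" % result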
Example #32
    def fromLabel(
        cls,
        fileName,  # Name of Abaqus file.
        materialLabel,  # Name of the material we're generating from in the file
        elsetLabel,  # Name of the element set we're generating from in the file
        serialFile=True,  # Should this be treated as a serial file or broken up in parallel?
        nNodePerh=2.01,  # number of nodes per smoothing scale
        SPH=False,  # Force round H tensors
        scale=1.0):  # Optionally scale the coordinates by some factor
        """Construct an Abaqus NodeGenerator for a single element set."""
        lines = []
        vertices = {}
        if rank == 0 or (not serialFile):
            f = open(fileName, "r")
            lines = f.readlines()
            f.close()

            # Find and read the vertex coordinates.
            iline = 0
            while (iline < len(lines) and lines[iline][:5] != "*NODE"):
                iline += 1
            if iline == len(lines):
                raise RuntimeError, "Unable to find *NODE specification in %s" % fileName
            iline += 1
            while lines[iline][0] != "*":
                stuff = lines[iline].split(",")
                assert len(stuff) == 4
                i = int(stuff[0])
                xi, yi, zi = (float(stuff[j]) for j in xrange(1, 4))
                vertices[i] = scale * Vector(xi, yi, zi)
                iline += 1
            print "AbaqusNodeGenerator : Read %i vertices from file %s" % (
                len(vertices), fileName)
        lines = mpi.bcast(lines, root=0)
        vertices = mpi.bcast(vertices, root=0)
        return cls(lines, vertices, materialLabel, elsetLabel, serialFile,
                   nNodePerh, SPH, scale)
Example #33
 def checkConsistentCommInfo(myHashes, sharedIDs):
     msg = "ok"
     positions = [
         quantizedPosition(hashi, xmin, xmax) for hashi in myHashes
     ]
     for sendProc in xrange(numDomains):
         numChecks = mpi.bcast(len(neighborDomains), root=sendProc)
         assert mpi.allreduce(numChecks,
                              mpi.MIN) == mpi.allreduce(numChecks, mpi.MAX)
         for k in xrange(numChecks):
             if rank == sendProc:
                 ksafe = k
             else:
                 ksafe = 0
             recvProc = mpi.bcast(neighborDomains[ksafe], root=sendProc)
             recvHashes = mpi.bcast([myHashes[i] for i in sharedIDs[ksafe]],
                                    root=sendProc)
             recvPos = mpi.bcast([positions[i] for i in sharedIDs[ksafe]],
                                 root=sendProc)
             if rank == recvProc:
                 assert sendProc in neighborDomains
                 kk = neighborDomains.index(sendProc)
                 assert kk < len(sharedIDs)
                 if not ([myHashes[i]
                          for i in sharedIDs[kk]] == recvHashes):
                     msg = (
                         "Shared indices don't match %i %i\n   %s != %s\n    %s\n    %s"
                         % (rank, sendProc,
                            str([myHashes[i]
                                 for i in sharedIDs[kk]]), recvHashes, [
                                     str(positions[i])
                                     for i in sharedIDs[kk]
                                 ], [str(xi) for xi in recvPos]))
             msg = allReduceMsg(msg)
             if msg != "ok":
                 return msg
     return msg
Example #34
    def parallelRunTest(self):
        # -----------------------------------------------
        # See if we can broadcast a python object
        # -----------------------------------------------
        original = [1,2,3]
        if mpi.rank == 0:
            result = mpi.bcast(original)
        else:
            result = mpi.bcast(None)
        if original != result:
            self.fail('Bcast of pickled list fails')

        # -----------------------------------------------
        # See if we can broadcast an advanced python object
        # -----------------------------------------------
        if mpi.rank == 0:
            val = something(33)
        else:
            val = None
        received = mpi.bcast(val)
        if received.x != 33:
            self.fail('Bcast fails with advanced object')

        return
Example #35
        def syncAndApply(self):
            if mpi.rank == 0:
                self.dataLock.acquire() # this lock probably isn't necessary yet

                try:
                    for i in range(len(self.cursorObjects)):
                        self.cursors[i] = self.cursorObjects[i][0].getPos()

                except:
                    traceback.print_exc()
                finally:
                    self.dataLock.release()
 
            if mpi.rank == 0:
                resultCursors = mpi.bcast(self.cursors)
            else:
                self.resultCursors = mpi.bcast()

            if mpi.rank != 0:
                for i in range(len(self.cursorObjects)):
                    if len(self.resultCursors) > i:
                        # access: cursorObjects[i][localcopies]
                        for j in range(len(self.cursorObjects[i])): # set for each local copy
                            self.cursorObjects[i][j].setPos( self.resultCursors[i][0], self.resultCursors[i][1])
Example #36
    def parallelRunTest(self):
        # -----------------------------------------------
        # See if we can broadcast a python object
        # -----------------------------------------------
        original = [1, 2, 3]
        if mpi.rank == 0:
            result = mpi.bcast(original)
        else:
            result = mpi.bcast(None)
        if original != result:
            self.fail('Bcast of pickled list fails')

        # -----------------------------------------------
        # See if we can broadcast an advanced python object
        # -----------------------------------------------
        if mpi.rank == 0:
            val = something(33)
        else:
            val = None
        received = mpi.bcast(val)
        if received.x != 33:
            self.fail('Bcast fails with advanced object')

        return
Example #37
    def parallelRunTest(self):
        myList = [ 0,0,0,0,0,0 ]

        myList[0] = 42
        myList[1] = "Hello"
        myList[2] = ["Another list", 2]
        myList[3] = ("A tuple", 2)
        myList[4] = 4+5j
        myList[5] = 3.14159

        for x in range(6):
            mpi.barrier()
            z = mpi.bcast( myList[x],0 )
            if z != myList[x]:
                self.fail( "Broadcast test 2 failed on test " + str(x))

        return
Example #38
    def parallelRunTest(self):
        myList = [0, 0, 0, 0, 0, 0]

        myList[0] = 42
        myList[1] = "Hello"
        myList[2] = ["Another list", 2]
        myList[3] = ("A tuple", 2)
        myList[4] = 4 + 5j
        myList[5] = 3.14159

        for x in range(6):
            mpi.barrier()
            z = mpi.bcast(myList[x], 0)
            if z != myList[x]:
                self.fail("Broadcast test 2 failed on test " + str(x))

        return
Example #39
def VFSurfaceGenerator(filename,
                       rho,
                       nx,
                       nNodePerh=2.01,
                       SPH=False,
                       scaleFactor=1.0,
                       refineFactor=0,
                       rejecter=None):
    surface = None
    if mpi.rank == 0:
        surface = readPolyhedronOBJ(filename)
        if refineFactor != 0:
            surface = refinePolyhedron(surface, refineFactor)
        if scaleFactor != 1.0:
            surface *= scaleFactor
    surface = mpi.bcast(surface, 0)
    return PolyhedralSurfaceGenerator(surface, rho, nx, nNodePerh, SPH,
                                      rejecter)
Example #40
    def __init__(self,
                 serialInitialization,
                 *vars):

        if serialInitialization:
            ntot = len(vars[0])
            minGlobalID, maxGlobalID = self.globalIDRange(ntot)
            self.globalIDs = range(minGlobalID, maxGlobalID)
            self._cullVars(minGlobalID, maxGlobalID, *vars)

        else:
            ntot = 0
            for proc in xrange(mpi.procs):
                if mpi.rank == proc:
                    self.globalIDs = range(ntot, ntot + len(vars[0]))
                ntot += mpi.bcast(len(vars[0]), proc)

        return
Example #41
def plotPolygonalMesh(mesh, persist=False):
    polylocal = []
    for izone in xrange(mesh.numZones):
        zone = mesh.zone(izone)
        polylocal.append([mesh.node(i).position() for i in zone.nodeIDs])
        polylocal[-1].append(polylocal[-1][0])
    assert len(polylocal) == mesh.numZones

    p = generateNewGnuPlot(persist)
    for sendProc in xrange(mpi.procs):
        polys = mpi.bcast(polylocal, root=sendProc)
        for poly in polys:
            p.replot(
                Gnuplot.Data([x.x for x in poly], [x.y for x in poly],
                             with_="lines lt %i lw 2" % 1,
                             title=None,
                             inline=True))
    return p
Example #42
def readField2String(materialName, fieldName, file):
    delimiter = "$"
    result = ""
    if mpi.rank == 0:
        found = False
        for line in file:
            vals = line.split(delimiter)
            if vals[0][0] != "#":
                if (vals[0] == materialName and vals[1] == fieldName):
                    n = int(vals[2])
                    result = vals[3:-1]
                    assert len(result) == n
                    found = True
                    break
        if not found:
            raise ValueError, "Unable to find %s %s" % (materialName,
                                                        fieldName)

    result = mpi.bcast(result, 0)
    return result
Example #43
    def __call__(self, x, y, z, m, H):
        n = len(x)
        assert len(y) == n
        assert len(z) == n
        assert len(m) == n
        assert len(H) == n

        # We'll take advantage of any available parallelism to split
        # up the containment testing.  The following algorithm is borrowed
        # from NodeGeneratorBase to divvy up the ID range.
        ndomain0 = n / mpi.procs
        remainder = n % mpi.procs
        assert remainder < mpi.procs
        ndomain = ndomain0
        if mpi.rank < remainder:
            ndomain += 1
        imin = mpi.rank * ndomain0 + min(mpi.rank, remainder)
        imax = imin + ndomain

        # Check our local range of IDs.
        xloc, yloc, zloc, mloc, Hloc = [], [], [], [], []
        localIndices = [
            i for i in xrange(imin, imax) if self.accept(x[i], y[i], z[i])
        ]

        # Now cull to the interior values.
        xnew, ynew, znew, mnew, Hnew = [], [], [], [], []
        for iproc in xrange(mpi.procs):
            otherIndices = mpi.bcast(localIndices, iproc)
            for i in otherIndices:
                xnew.append(x[i])
                ynew.append(y[i])
                znew.append(z[i])
                mnew.append(m[i])
                Hnew.append(H[i])

        # That's it.
        return xnew, ynew, znew, mnew, Hnew
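The comment at the top of this example explains the ID-range divvy borrowed from NodeGeneratorBase: the first n % procs ranks each get one extra index and every range stays contiguous. The sketch below reproduces just that arithmetic on its own (the id_range name is illustrative); with n = 10 and 3 ranks it yields [0, 4), [4, 7) and [7, 10).

def id_range(n, procs, rank):
    # same divvy rule as above: the first (n % procs) ranks each get one extra ID
    ndomain0 = n // procs
    remainder = n % procs
    ndomain = ndomain0 + (1 if rank < remainder else 0)
    imin = rank * ndomain0 + min(rank, remainder)
    return imin, imin + ndomain

# id_range(10, 3, 0) == (0, 4); id_range(10, 3, 1) == (4, 7); id_range(10, 3, 2) == (7, 10)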
Example #44
 def testArrayDouble(self):
     gamma = mpi.bcast( [9.9,3.3,4.4],3,mpi.MPI_DOUBLE,0,mpi.MPI_COMM_WORLD )
     self.assertEqual( Numeric.array([9.9,3.3,4.4],'d'), gamma )
Example #45
from os import system, getpid, environ
from subprocess import Popen

from nodefactory import createNode
from util import *

stop = False
node = None
goingdown = False

if size > 1:

	if rank == 0:
		stop = runningCheck()		
	stop = bcast( stop )
		

	if not stop:

		try:
			node = createNode('mpi')

		except:
			displayExcept()
			print 'fatal error creating node %d' % rank
			stop = True

		stop = bcast( stop )

	if not stop:
Example #46
File: sp.py Project: ursk/sparco
 def initialize_phi(self, *dims):
   self.phi = np.empty(basis_dims) if self.phi is None else self.phi
   mpi.bcast(self.phi)
Example #47
plot "pingpong.plt" using 1:2 title "pyMPI" with linespoints, "pingpong.plt" using 1:4 title "C" with linespoints
"""

# Randomly split into senders and receivers
if mpi.rank == 0:
     import random
     ranks = list(range(mpi.size))
     random.shuffle(ranks)
     half = mpi.size/2
     senders = ranks[:half]
     receivers = ranks[half:half*2]
     dead = ranks[half*2:]
     sendmap = dict(zip(senders,receivers))
     recvmap = dict(zip(receivers,senders))

     mpi.bcast(sendmap)
     mpi.bcast(recvmap)
     mpi.bcast(dead)
else:
     sendmap = mpi.bcast()
     recvmap = mpi.bcast()
     dead = mpi.bcast()


runs = 10
k = 25

# Log file
fd = open('pingpong_%d.plt'%mpi.size,'w')
fd.write('# %s\n'%asctime())
fd.write('# %d processors\n'%mpi.size)
Example #48
 def step(self, phi0, a, X):
   if mpi.rank == mpi.root:
     self.update(phi0, a, X)
   mpi.bcast(phi0, mpi.root)
Example #49
 def on_disconnect(self):
     logger.info('Client is disconnected: closing server')
     server.close()
     mpi.bcast(('return', None, None))
Example #50
def run_server(port, settings):
    server = None

    if mpi.size == 1:
        class PodService(rpyc.Service):
            def on_disconnect(self):
                logger.info('Client is disconnected: closing server')
                server.close()
            class exposed_Pod(Pod):
                pass

        logger.info('Creating poder on %s:%i', socket.gethostname(), port)
        server = ThreadedServer(PodService, port=port, auto_register = False,
                                protocol_config={'allow_public_attrs' : True})
        server.start()

    else:
        if mpi.myid == 0:
            # Master serves, it holds a PodMPI instance as opposed to the slaves.
            class PodService(rpyc.Service):
                def on_disconnect(self):
                    logger.info('Client is disconnected: closing server')
                    server.close()
                    mpi.bcast(('return', None, None))

                class exposed_Pod(PodMPI):
                    pass

            logger.info('Creating poder on %s:%i', socket.gethostname(), port)
            server = ThreadedServer(PodService, port=port,
                                    auto_register = False,
                                    protocol_config={'allow_public_attrs' : True})
            server.start()

        else:
            # Slaves work with instances of Pod.
            #
            # Slaves scenario:
            #   * on the first loop occurrence, they create a Pod object named 'pod',
            #   * then on subsequent loop occurrences, they call a pod method and bind its return value to the variable 'ret'.
            #
            # In the body of the loop, a python expression is dynamically executed. The expression is formed of a stub and the function's arguments. The stub contains a function name with a variable assignment (like 'ret = pod.dump'). The positional and optional arguments are passed to the named function in the stub.
            #
            # One loop occurrence goes as follows:
            #   * wait for an expression from the master node,
            #   * execute the expression,
            #   * then return the expression's left hand side (if any) to the master.

            # local namespace scope for using exec,
            # it keeps track of the already created variables over multiple loop iterations
            scope = {}
            while True:
                (expression, args, kwargs) = mpi.bcast()
                # logger.info(expression)
                # logger.info(args)
                # logger.info(kwargs)
                if expression == 'return':
                    # TODO: how to tell exec it's in a function?
                    return
                scope.update(locals())
                exec expression in globals(), scope
                mpi.gather(scope.get('ret'))
Example #51
 def testSingleChar(self):
     sigma = mpi.bcast( 'g', 1, mpi.MPI_CHAR, 0, mpi.MPI_COMM_WORLD )
     self.assertEqual( Numeric.array(['g'],'c'), sigma )
Example #52
 def testNil1(self):
     nil1 = mpi.bcast( Numeric.zeros(0,Numeric.Int32), 1, mpi.MPI_INT, 0, mpi.MPI_COMM_WORLD )
     self.assertEqual( [0], nil1 )
Example #53
 def testSingleFloat(self):
     sigma = mpi.bcast( 7.7, 1, mpi.MPI_FLOAT, 0, mpi.MPI_COMM_WORLD )
     self.assertEqual( Numeric.array([7.7],'f'), sigma)
Example #54
 def testSingleInt(self):
     sigma = mpi.bcast( 7, 1, mpi.MPI_INT, 0, mpi.MPI_COMM_WORLD )
     self.assertEqual( Numeric.array([7],'i'), sigma )
Example #55
 def testSingleDouble(self):
     sigma = mpi.bcast( 7.7, 1, mpi.MPI_DOUBLE, 0, mpi.MPI_COMM_WORLD )
     self.assertEqual( Numeric.array([7.7],'d'), sigma)
Example #56
 def testArrayChar(self):
     gamma = mpi.bcast( ['i','c','d'],3,mpi.MPI_CHAR,0,mpi.MPI_COMM_WORLD )
     self.assertEqual( Numeric.array(['i','c','d'],'c'), gamma )
Example #57
 def testArrayInt(self):
     gamma = mpi.bcast( [9,3,4],3,mpi.MPI_INT,0,mpi.MPI_COMM_WORLD )
     self.assertEqual( Numeric.array([9,3,4],'i'), gamma )
Example #58
 def testArrayFloat(self):
     gamma = mpi.bcast( [9.9,3.3,4.4],3,mpi.MPI_FLOAT,0,mpi.MPI_COMM_WORLD )
     self.assertEqual( Numeric.array([9.9,3.3,4.4],'f'), gamma )
Example #59
def main():
	if sys.argv[-1].startswith("usefs="): sys.argv = sys.argv[:-1]	# remove the runpar fileserver info

	(options,args) =  parse_command_line()
	
	if not options.nolog and (not mpi or (mpi and mpi.rank==0)): EMAN.appinit(sys.argv)

	inputParm = EMAN.ccmlInputParm()
	sf = EMAN.XYData()
	if options.sfFileName != "" :
		readsf = sf.readFile(options.sfFileName)
		if ((readsf == -1) and (options.verbose > 0)) :
			print "The file of scattering factor does NOT exist"
	inputParm.scateringFactor = sf

	startNumOfRawImages = options.startNumOfRawImages
	#endNumOfRawImages = options.endNumOfRawImages

	refImageFileName = args[-1]
	numOfRefImages = options.numOfRefImages
	solutionFile = options.solutionFile

	# write log info to .emanlog file so that eman program can browse the history
	if not options.nolog and (not mpi or (mpi and mpi.rank==0)): 
		pid = EMAN.LOGbegin(sys.argv)
		for f in args[0:-1]: EMAN.LOGInfile(pid,f)
		EMAN.LOGReffile(pid,args[-1])
		if options.solutionFile: EMAN.LOGOutfile(pid,options.solutionFile)
		if options.listFile: EMAN.LOGOutfile(pid,options.listFile)
		if options.mrcSolutionFile: EMAN.LOGOutfile(pid,options.mrcSolutionFile)

	inputParm.sym = options.sym
	inputParm.FFTOverSampleScale = options.FFTOverSampleScale
	inputParm.pftStepSize = options.pftStepSize
	inputParm.deltaR = options.deltaR
	inputParm.RMin = options.RMin
	inputParm.RMax = options.RMax
	inputParm.searchMode = options.searchMode
	inputParm.scalingMode = options.scalingMode
	inputParm.residualMode = options.residualMode
	inputParm.weightMode = options.weightMode
	# inputParm.rawImageFN will be set later
	inputParm.refImagesFN = refImageFileName
	inputParm.rawImageIniParmFN = options.rawImageIniParmFN
	inputParm.rawImagePhaseCorrected = options.phasecorrected

	inputParm.maxNumOfRun = options.maxNumOfRun
	inputParm.zScoreCriterion = options.zScoreCriterion
	inputParm.residualCriterion = options.residualCriterion
	inputParm.solutionCenterDiffCriterion = options.solutionCenterDiffCriterion
	inputParm.solutionOrientationDiffCriterion = options.solutionOrientationDiffCriterion/180.0*pi
	inputParm.maxNumOfIteration = options.maxNumOfIteration
	inputParm.numOfRandomJump = options.numOfRandomJump
	inputParm.numOfFastShrink = options.numOfFastShrink
	inputParm.numOfStartConfigurations = options.numOfStartConfigurations
	inputParm.orientationSearchRange = options.orientationSearchRange/180.0*pi
	inputParm.centerSearchRange = options.centerSearchRange

	inputParm.numOfRefImages = options.numOfRefImages
	inputParm.refEulerConvention = options.refEulerConvention
	#maskR = options.maskR
	#if (maskR<=0): maskR = refImageSizeY/2

	inputParm.verbose = options.verbose
	verbose = options.verbose
	#verboseSolution = options.verboseSolution

	updataHeader = options.updataHeader
	solutionFile = options.solutionFile
	mrcSolutionFile = options.mrcSolutionFile
	iniCenterOrientationMode = options.iniCenterOrientationMode
	refCenterOrientationMode = options.refCenterOrientationMode

	rawImages = []
	if not mpi or (mpi and mpi.rank==0):
		for imgfile in args[0:-1]:
			imgnum = EMAN.fileCount(imgfile)[0]
			for i in range(imgnum): rawImages.append((imgfile, i))
	if mpi: rawImages = mpi.bcast(rawImages)
	
	endNumOfRawImages = options.endNumOfRawImages
	if endNumOfRawImages <=0  or endNumOfRawImages > len(rawImages):
		endNumOfRawImages = len(rawImages)

	numRawImages = endNumOfRawImages - startNumOfRawImages

	if mpi:
		ptclset = range(startNumOfRawImages + mpi.rank, endNumOfRawImages, mpi.size)
	else:
		ptclset = range(startNumOfRawImages, endNumOfRawImages)
	
	solutions = []

	rMask = options.rMask        #mask size is given
	if options.rMask <= 0 : rMask = refImageSizeY/2   #mask size = half image size
	
	rMask1 = options.rMask1             #output tnf mask size is given
	if options.rMask1 <= 0 : rMask1 = rMask    #output tnf mask size = half image size

	inputParm.rMask = rMask
	inputParm.rMask1 = rMask1

	rawImage = EMAN.EMData()
	rawImage.getEuler().setSym(inputParm.sym) #set the symmetry of the raw particle
	inputParm.rawImageFN = rawImages[0][0] #give the initial raw particle filename
	print "start to prepare------"
	rawImage.crossCommonLineSearchPrepare(inputParm) #prepare, create pseudo PFT of ref images
	print "end to prepare------"
	inputParm.rawImage = rawImage
	#for rawImgSN in ptclset:
	for index in range(len(ptclset)):
		rawImgSN = ptclset[index]
		inputParm.rawImageFN = rawImages[rawImgSN][0]
		inputParm.thisRawImageSN = rawImages[rawImgSN][1]
		if mpi: print "rank %d: %d in %d-%d (%d in %d-%d)" % (mpi.rank, rawImgSN, startNumOfRawImages, endNumOfRawImages, index, 0, len(ptclset))
		#rawImage.readImage(rawImages[rawImgSN][0], rawImages[rawImgSN][1])

		#rawImage.applyMask(rMask, 6) #apply mask type 6 [edge mean value] to raw image, center will be image center
		#rawImage.getEuler().setSym("icos")
		#if rawImage.hasCTF() == 1:
			#ctfParm = rawImage.getCTF()
			#inputParm.zScoreCriterion = options.zScoreCriterion + atan(abs(ctfParm[0])-1.5)/(pi/4) +0.59 #adjust zScore criterion -0.6 --> +1.2, 1.5, 2.0
			#inputParm.numOfRefImages = int(min(numOfRefImages, max(numOfRefImages*exp(-(abs(ctfParm[0])/2.0-0.15))+0.5, 5.0))) # adjust maxNumOfRun, the min is 2

		inputParm.thisRawImageSN = rawImgSN

		solutionCenterDiffCriterion = inputParm.solutionCenterDiffCriterion
		solutionOrientationDiffCriterion = inputParm.solutionOrientationDiffCriterion

		#initialize Center And Orientation by one of the following modes

		if iniCenterOrientationMode == "iniparmfile" :
			inputParm.initializeCenterAndOrientationFromIniParmFile() # need to set "refEulerConvention"
		elif iniCenterOrientationMode == "headerfile" :
			inputParm.initializeCenterAndOrientationFromParticle() # need to set "refEulerConvention"
		else :
			inputParm.initializeCenterAndOrientationFromRandom()  # default is random orientation and physical center

		#set the reference Center And Orientation by one of the following modes

		if refCenterOrientationMode == "iniparmfile" : inputParm.setRefCenterAndOrientationFromIniParmFile() # need to set "refEulerConvention"
		elif refCenterOrientationMode == "headerfile" : inputParm.setRefCenterAndOrientationFromParticle() # need to set "refEulerConvention"
		else : inputParm.setRefCenterAndOrientationFromInitializedParms() # default is copy the initial center and orientation

		rawImage.crossCommonLineSearchReadRawParticle(inputParm) #create pseudo PFT of raw image

		maxNumOfRun = inputParm.maxNumOfRun
		outputParmList = []
		numOfRun = 0
		passAllConsistencyCriteria = 0
		while (numOfRun < maxNumOfRun) or (len(outputParmList) < 2):

			if (iniCenterOrientationMode != "iniparmfile") and (iniCenterOrientationMode != "headerfile") :
				inputParm.initializeCenterAndOrientationFromRandom()  # default is random orientation and physical center
			if (refCenterOrientationMode != "iniparmfile") and (refCenterOrientationMode != "headerfile") :
				inputParm.setRefCenterAndOrientationFromInitializedParms() # default is copy the initial center and orientation

			numOfRun = numOfRun + 1
			print "numOfRun = ", numOfRun

			############################################################################
			############ execute cross common line search for reference ################
			############################################################################
			outputParm  = rawImage.crossCommonLineSearch(inputParm)
			############################################################################
			# pass criteria check
			outputParmList.append(outputParm) #if passed criteria, e.g. zscore, residualThreshold, etc
			############################################################################

			outputParmList.sort(lambda x, y: cmp(x.residual, y.residual))

			############################################################################
			########################## consistency check ###############################
			############################################################################
			#passConsistencyCriteria = 0
			finalOutputParmList = []
			lowestResidualList = []
			lengthOfList = len(outputParmList)
			if lengthOfList < 2 : continue
			for i in range(lengthOfList-1):
				thisOutputParm = outputParmList[i]
				numOfPairsPassConsistencyCheck = 0
				for j in range(i+1,lengthOfList):
					refOutputParm = outputParmList[j]
					tmpOutputParm = EMAN.ccmlOutputParm() #create a new output parm object
					tmpOutputParm.rawImageSN = thisOutputParm.rawImageSN #copy all parameters
					tmpOutputParm.residual = thisOutputParm.residual
					tmpOutputParm.sigma = thisOutputParm.sigma
					tmpOutputParm.verbose = thisOutputParm.verbose
					tmpOutputParm.zScore = thisOutputParm.zScore
					tmpOutputParm.zScoreCriterion = thisOutputParm.zScoreCriterion

					tmpOutputParm.passAllCriteria = 0
					tmpOutputParm.setCalculatedCenterAndOrientation(thisOutputParm.cx,thisOutputParm.cy,thisOutputParm.q)
					tmpOutputParm.setRefCenterAndOrientation(refOutputParm.cx, refOutputParm.cy, refOutputParm.q)
					tmpOutputParm.calculateDifferenceWithRefParm() #calculate the difference

					centerDiff = tmpOutputParm.centerDiff
					orientationDiff = tmpOutputParm.orientationDiff
					
					#####  FLIP CASE :  if no consistency found, try flip this orientation
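					# if the direct comparison fails, the reference orientation with phi rotated by pi is also
					# tried, so two runs that presumably differ only by this in-plane flip can still be counted
					# as consistent (the stored reference orientation itself is restored afterwards)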
					if ((centerDiff > solutionCenterDiffCriterion) or (orientationDiff > solutionOrientationDiffCriterion)) :
						quatFlip = EMAN.Quaternion(refOutputParm.q.getEuler().alt(), refOutputParm.q.getEuler().az(), refOutputParm.q.getEuler().phi()+pi)
						tmpOutputParm.setRefCenterAndOrientation(refOutputParm.cx, refOutputParm.cy, quatFlip)
						tmpOutputParm.calculateDifferenceWithRefParm() #calculate the difference
						centerDiff = tmpOutputParm.centerDiff
						orientationDiff = tmpOutputParm.orientationDiff
						tmpOutputParm.setRefCenterAndOrientation(refOutputParm.cx, refOutputParm.cy, refOutputParm.q) #set back the exact orientation of reference 

					#Save the configurations with lowest residuals
					if (i<3) and (j==i+1) : lowestResidualList.append(tmpOutputParm)
					
					#make the good/answers list
					if ((centerDiff < solutionCenterDiffCriterion) and (orientationDiff < solutionOrientationDiffCriterion)) :
						numOfPairsPassConsistencyCheck += 1
						if numOfPairsPassConsistencyCheck == 1 : #save to the final list
							tmpOutputParm.passAllCriteria = 1
							finalOutputParmList.append(tmpOutputParm)
						if i==0 and numOfPairsPassConsistencyCheck >= options.numConsistentRun: #for the lowest-residual solution, require at least numConsistentRun consistent pairs
							passAllConsistencyCriteria = 1
							break
						if i>0 : break #for the other solutions, one consistent pair is enough
				
				#no break here, so that all possible solutions are saved

			if passAllConsistencyCriteria and len(finalOutputParmList) >= options.numConsistentRun: break #stop once the required number of consistent solutions has been found


		rawImage.crossCommonLineSearchReleaseParticle(inputParm) # release the memory related to this raw particle

		# if no consistency was found, keep the lowest-residual solutions as output
		if len(finalOutputParmList) == 0 : finalOutputParmList = lowestResidualList
		for i in range(len(finalOutputParmList)) : 
			if passAllConsistencyCriteria : finalOutputParmList[i].passAllCriteria = 1
			else : finalOutputParmList[i].passAllCriteria = 0

		if options.solutionFile:
			for i in range(len(finalOutputParmList)) : finalOutputParmList[i].outputResult(solutionFile)

		outputParm = finalOutputParmList[0] #just use the lowest residual as regular output
		if outputParm.passAllCriteria: 	passfail = "pass"
		else: passfail = "fail"

		print "Final result: euler=%g\t%g\t%g\tcenter=%g\t%g\tresidue=%g\t%s" % (outputParm.alt*180/pi, outputParm.az*180/pi, outputParm.phi*180/pi, outputParm.cx, outputParm.cy, outputParm.residual, passfail)
		
		defocus = 0 # default when the particle carries no CTF information
		if options.scoreFile:
			rawImage.readImage(rawImages[rawImgSN][0], rawImages[rawImgSN][1], 1) # read header only
			if rawImage.hasCTF():
				defocus = rawImage.getCTF()[0]

		solution = (rawImages[rawImgSN][0], rawImages[rawImgSN][1], outputParm.alt, outputParm.az, outputParm.phi, \
					   outputParm.cx, outputParm.cy, defocus, outputParm.residual, outputParm.passAllCriteria)
		solutions.append( solution )

		sys.stdout.flush()

	rawImage.crossCommonLineSearchFinalize(inputParm) #finalize, i.e. free the allocated memory

	if mpi:
		if options.verbose: 
			print "rank %d: done and ready to output" % (mpi.rank)
			sys.stdout.flush()
		mpi.barrier()
		#print "rank %d: %s" % (mpi.rank, solutions)
		if mpi.rank==0:
			for r in range(1,mpi.size):
				msg, status = mpi.recv(source = r, tag = r)
				solutions += msg
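			# sort the combined solutions by image file name, then by particle index within the file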
			def ptcl_cmp(x, y):
				eq = cmp(x[0], y[0])
				if not eq: return cmp(x[1],y[1])
				else: return eq
			solutions.sort(ptcl_cmp)
		else:
			mpi.send(solutions, 0, tag = mpi.rank)

	if not mpi or (mpi and mpi.rank==0):
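		# only the root process (or the single process in a non-MPI run) writes the output files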
		if options.scoreFile:
			sFile = open(options.scoreFile, "w")
			sFile.write("#LST\n")
			for i in solutions:
				if i[-1]: 
					sFile.write("%d\t%s\tdefocus=%g\tresidual=%g\n" % (i[1], i[0], i[7], i[8]))
			sFile.close()
			
		if options.listFile:
			lFile = open(options.listFile, "w")
			lFile.write("#LST\n")
			for i in solutions:
				if i[-1]: 
					lFile.write("%d\t%s\t%g\t%g\t%g\t%g\t%g\n" % (i[1], i[0], i[2]*180.0/pi, i[3]*180.0/pi, i[4]*180.0/pi, i[5], i[6]))
			lFile.close()
		if options.mrcSolutionFile:
			outFile = open(options.mrcSolutionFile, "w")
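			# for each accepted solution: mask the particle about the solved center, write its FFT to a
			# .tnf file, and record the file path plus the MRC-convention Euler angles and center offsets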
			for i in solutions:
				if i[-1]:
					#rawImage.readImage(i[0], i[1], 1)
					rawImage.readImage(i[0], i[1])
					thisEu = EMAN.Euler(i[2], i[3], i[4])
					thisEu.convertToMRCAngle()
					alt = thisEu.alt_MRC()*180.0/pi
					az  = thisEu.az_MRC()*180.0/pi
					phi = thisEu.phi_MRC()*180.0/pi
		
					cx  = i[5]
					cy  = i[6]
					dx = cx - rawImage.xSize()/2
					dy = cy - rawImage.ySize()/2
					rawImage.applyMask(rMask1,6,dx,dy,0) #apply mask type 6 [edge mean value] to raw image, center will be the solved center
					#tnfFileName = "%s-%d.tnf" % (os.path.basename(os.path.splitext(rawImages[rawImgSN][0])[0]), rawImages[rawImgSN][1])
					prefix = os.path.dirname(options.mrcSolutionFile).replace(" ", "")
					if prefix != "" : prefix = prefix + "/"
					tnfFileName = "%s%s-%d.tnf" % (prefix,os.path.basename(os.path.splitext(i[0])[0]), i[1])
					rawFFT = rawImage.doFFT()
					rawFFT.writeImage(tnfFileName,0)  #tnf file has no header information; it is a pure FFT of the raw image file
		
					outFile.write("%s\n" % (os.path.abspath(tnfFileName)))
					outFile.write(" %d, %.4f, %.4f, %.4f, %.4f, %.4f, 0.0\n" % (0, alt, az, phi, cy, cx))
			outFile.close()
		if updataHeader:
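			# write the refined orientation (alt, az, phi) and center back into each particle's header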
			for i in solutions:
				rawImage.readImage(i[0], i[1], 1)
				if options.verbose:
					cx  = rawImage.get_center_x()
					cy  = rawImage.get_center_y()
					alt = rawImage.alt()
					az  = rawImage.az()
					phi = rawImage.phi()
					print "Update header: %s %d\t%7.5f  %7.5f  %7.2f  %7.2f  %7.2f => %7.5f  %7.5f  %7.2f  %7.2f  %7.2f" % \
						(i[0], i[1], alt*180.0/pi, az*180.0/pi, phi*180.0/pi, cx, cy, i[2]*180.0/pi, i[3]*180.0/pi, i[4]*180.0/pi, i[5], i[6])
				rawImage.setRAlign(i[2], i[3], i[4])
				rawImage.set_center_x(i[5])
				rawImage.set_center_y(i[6])
				imgtype = EMAN.EMData.ANY
				rawImage.writeImage(i[0], i[1], imgtype, 1)
	if not options.nolog and (not mpi or (mpi and mpi.rank==0)): EMAN.LOGend()