Code example #1
    def createWorkOrder(self, activeNodes):
        global _rank
        global _size

        # First the data collection
        nnodes = mpitools.Allgather_Int(len(activeNodes))
        allnodes = mpitools.Allgather_IntVec(
            [n.getIndex() for n in activeNodes], size_known=nnodes)
        allsignatures = mpitools.Allgather_IntVec(
            [n.nshared() for n in activeNodes], size_known=nnodes)
        nshared = [reduce(lambda x, y: x + y, s) for s in allsignatures]
        myshared = [n.sharedWith() for n in activeNodes]
        myshared = reduce(lambda x, y: x + y, myshared)
        allshared = mpitools.Allgather_IntVec(myshared, size_known=nshared)

        def listrize(list, signature):
            nsig = len(signature)
            count = 0
            output = [[] for i in range(nsig)]
            for i in range(nsig):
                for j in range(signature[i]):
                    output[i].append(list[count])
                    count += 1
            return output

        for i in range(len(allshared)):
            allshared[i] = listrize(allshared[i], allsignatures[i])

        scheduler = Scheduler(nnodes, allnodes, allshared, self)
        self.mywork = scheduler(_rank)
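
The listrize helper above re-nests a flat gathered list according to a per-entry signature of sublist lengths. A minimal standalone illustration (plain Python, no MPI needed; the data is invented):

    flat = [10, 11, 20, 30, 31, 32]
    signature = [2, 1, 3]  # lengths of the original sublists
    # Regroups the flat list into [[10, 11], [20], [30, 31, 32]]
    print(listrize(flat, signature))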
Code example #2
File: refineParallel.py  Project: song2001/OOF2
def shareMarkings(markedEdges, skeleton):
    global _rank
    global _size
    segs = getSharedSegments(markedEdges)  # shared segs per process
    sigs = [len(s) for s in segs]  # no. of shared segs per process: [0, m, n]
    # Now, collect all the shared segment data.
    allsigs = mpitools.Allgather_IntVec(sigs)  # Gathered version of sigs
    segs = reduce(lambda x, y: x + y, segs)  # Reduced version of segs
    allsegs = mpitools.Allgather_IntVec(segs)  # Gathered version of segs
    # Listrized version of allsegs: list of "segs" indexed by process id.
    allsegs = [listrize(seg, sig) for seg, sig in zip(allsegs, allsigs)]

    for i in range(_size):

        if i == _rank: continue  # can't share with myself
        # allsegs[i][_rank] contains information for (shared) segments
        # that are marked at i-th process.
        if len(allsegs[i][_rank]) == 0: continue  # nothing in it

        for j in range(len(allsegs[i][_rank]) / 3):
            # Ugly but efficient enough
            nd0 = skeleton.getNodeWithIndex(allsegs[i][_rank][3 * j])
            nd1 = skeleton.getNodeWithIndex(allsegs[i][_rank][3 * j + 1])
            ndivs = allsegs[i][_rank][3 * j + 2]
            markedEdges.mark(nd0, nd1, ndivs)
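
Each run of allsegs[i][_rank] is a flat sequence of (node0 index, node1 index, ndivs) triples, which is why the loop strides by three. A hypothetical payload (indices invented for illustration) decodes like this:

    payload = [4, 7, 2, 9, 12, 1]  # segment (4,7) divided twice, (9,12) once
    for j in range(len(payload) // 3):
        nd0_idx = payload[3 * j]
        nd1_idx = payload[3 * j + 1]
        ndivs = payload[3 * j + 2]
        print(nd0_idx, nd1_idx, ndivs)  # -> 4 7 2, then 9 12 1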
Code example #3
File: skeletonIPC.py  Project: pk-organics/OOF3D
def collect_pieces(skel):
    global _rank
    global _size
    # Gather minimal info for element polygons (to display Skeleton at #0)
    # Only process #0 will store the gathered information.
    # no. of nodes & no. of elements
    nnodes = mpitools.Allgather_Int(skel.nnodes())
    #RCL: Use maxnnodes as base offset for the indices of non-corner mesh nodes
    skel.maxnnodes = max(nnodes)
    nelems = mpitools.Allgather_Int(skel.nelements())
    myCoords = list(
        reduce(lambda x, y: x + y,
               [(skel.nodePosition(nd)[0], skel.nodePosition(nd)[1])
                for nd in skel.node_iterator()]))

    coordSizes = [i*2 for i in nnodes]
    allCoords = mpitools.Allgather_DoubleVec(myCoords,
                                             size_known=coordSizes)
    # allCoords = [[(x0,y0),(x1,y1), ...], [...], ...]
    allCoords = [ [(allCoords[i][2*j],allCoords[i][2*j+1])
                   for j in range(nnodes[i])]
                  for i in range(_size) ]
    # element connectivity signature
    myEConSigs = [el.nnodes() for el in skel.element_iterator()]
    allEConSigs = mpitools.Allgather_IntVec(myEConSigs, size_known=nelems)
    # element connectivity
    myECons = [ [skel.node_index_dict[nd.getIndex()] for nd in el.nodes]
                for el in skel.element_iterator()]
    myECons = reduce(lambda x,y: x+y, myECons)
    conSizes = [reduce(lambda x,y: x+y, aECS) for aECS in allEConSigs]
    temp = mpitools.Allgather_IntVec(myECons, size_known=conSizes)

    def listrize(list, signature):
        nsig = len(signature)
        count = 0
        output = [[] for i in range(nsig)]
        for i in range(nsig):
            for j in range(signature[i]):
                output[i].append(list[count])
                count += 1
        return output

    allECons = [listrize(temp[i], allEConSigs[i]) for i in range(_size)]
    if _rank == 0:
        skel.all_skeletons = {"nodes": allCoords,
                              "elements": allECons}
Code example #4
File: refineParallel.py  Project: song2001/OOF2
def shareCommonNodes(refiner, markedEdges, skeleton, newSkeleton):
    global _rank
    global _size
    segs = getSharedSegments(markedEdges, actual_node=True)
    nodes = [[] for i in range(_size)]
    for i in range(_size):
        if i != _rank and segs[i]:
            for j in range(len(segs[i]) / 3):  # iterate over segments
                nd0 = segs[i][3 * j]
                nd1 = segs[i][3 * j + 1]
                ndivs = segs[i][3 * j + 2]
                nodes[i] += refiner.getNewEdgeNodes(nd0, nd1, ndivs,
                                                    newSkeleton, skeleton)
                # These nodes are shared -- it is guaranteed that the same
                # nodes exist on the other side, but not necessarily in the
                # same order. Once they are re-arranged into the same order,
                # updating process-sharing info for these nodes is easy.
            nodes[i] = geometric_sort(nodes[i])

    sigs = [len(s) for s in nodes]  # Partition signature of nodes
    allsigs = mpitools.Allgather_IntVec(sigs)  # Gathered version of sigs
    nodes = reduce(lambda x, y: x + y, nodes)  # Reduced version of nodes
    allnodes = mpitools.Allgather_IntVec(nodes)  # Gathered version of nodes
    # Listrized version of allnodes
    allnodes = [listrize(nds, sig) for nds, sig in zip(allnodes, allsigs)]

    mynodes = allnodes[_rank]  # Same as "nodes" before it was reduced.
    for i in range(_size):
        if i != _rank and mynodes[i]:
            for j in range(len(mynodes[i])):
                thenode = newSkeleton.getNodeWithIndex(mynodes[i][j])
                # mynodes[i] and allnodes[i][_rank] are lists of the same
                # size. They contain indices of the same nodes
                # (position-wise) in the same order.
                thenode.sharesWith(i,
                                   allnodes[i][_rank][j])  # proc, remote-index
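
The pairing works because both ranks sort their new boundary nodes geometrically, so position j in mynodes[i] corresponds to position j in allnodes[i][_rank]. A toy sketch of that index pairing (all indices invented):

    # Local node indices each rank holds for the boundary shared with the other,
    # indexed first by owning process, then by partner process.
    allnodes = [[[], [105, 106, 107]],   # rank 0: sorted nodes shared with rank 1
                [[212, 213, 214], []]]   # rank 1: sorted nodes shared with rank 0
    _rank, other = 0, 1
    for j in range(len(allnodes[_rank][other])):
        local_idx = allnodes[_rank][other][j]
        remote_idx = allnodes[other][_rank][j]
        print("local", local_idx, "pairs with remote", remote_idx, "on rank", other)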
Code example #5
def collect_pieces(femesh):

    # Build a dictionary instead of using the mesh-assigned indices (via
    # index() or get_index()), which are unique but may have gaps.
    nodedict = {}
    i = 0
    for node in femesh.node_iterator():
        nodedict[node.index()] = i
        i += 1

    global _rank
    global _size
    # Gather minimal info for element polygons (to display mesh Skeleton at #0)
    # Only process #0 will store the reconstituted information.

    #RCL: Collect number of nodes from each process into an array.
    # One gets this for nnodes: [nnodes0, nnodes1, ...]
    nnodes = mpitools.Allgather_Int(femesh.nnodes())

    #RCL: Same for the elements.
    # One gets this for nelems: [nelems0, nelems1, ...]
    nelems = mpitools.Allgather_Int(femesh.nelements())
    myCoords = reduce(lambda x, y: x + y,
                      [[nd.position().x, nd.position().y]
                       for nd in femesh.node_iterator()])

    coordSizes = [i * 2 for i in nnodes]
    #RCL: Collect (x,y) coordinates of nodes from each process
    # coordSizes contains the array of number of coordinates (counting x and y separately) from each process
    # The return value is a 2-D array, indexed first by process rank
    # One gets this for allCoords: [[x0,y0,x1,y1,...], [x0',y0',x1',y1',...], ...]
    allCoords = mpitools.Allgather_DoubleVec(myCoords, size_known=coordSizes)

    #RCL: One gets the following format after the list comprehension operations
    # allCoords = [[(x0,y0),(x1,y1), ...], [...], ...]
    allCoords = [[(allCoords[i][2 * j], allCoords[i][2 * j + 1])
                  for j in range(nnodes[i])] for i in range(_size)]

    # element connectivity signature
    myEConSigs = [len(el.perimeter()) for el in femesh.element_iterator()]
    #RCL: One gets this for allEConSigs: [[elnnodes0,elnnodes1,...],[elnnodes0',elnnodes1',...],...]
    allEConSigs = mpitools.Allgather_IntVec(myEConSigs, size_known=nelems)

    # element connectivity
    #RCL: nodedict must be a map to the 0-based indices of the nodes
    myECons = [[nodedict[nd.index()] for nd in el.node_iterator()]
               for el in femesh.element_iterator()]
    myECons = reduce(lambda x, y: x + y, myECons)

    #RCL: conSizes looks like [elnnodes0+elnnodes1+..., elnnodes0'+elnnodes1'+..., ...]
    conSizes = [reduce(lambda x, y: x + y, aECS) for aECS in allEConSigs]

    #RCL: temp looks like
    # [[el0_nodeindex1,el0_nodeindex2,...el1_nodeindex1,el1_nodeindex2,...],[el0'_nodeindex1,el0'_nodeindex2,...,el1'_nodeindex1,el1'_nodeindex2,...],...]
    temp = mpitools.Allgather_IntVec(myECons, size_known=conSizes)

    #RCL: The connectivity information could still be shrunk, but it's hard...
    # If we store the nodes for each element, duplicate nodes result. Storage becomes ~4*(number of (double x and double y))

    def listrize(list, signature):
        #RCL: nsig is the number of elements if allEConSigs[i] is passed as the signature
        nsig = len(signature)
        count = 0
        output = [[] for i in range(nsig)]
        for i in range(nsig):
            for j in range(signature[i]):
                output[i].append(list[count])
                count += 1
        return output

    #RCL: allECons looks like [[[nodeindex1,nodeindex2,...],[nodeindex1',nodeindex2',...],...], [], ...]
    allECons = [listrize(temp[i], allEConSigs[i]) for i in range(_size)]
    if _rank == 0:
        femesh.all_meshskeletons = {"nodes": allCoords, "elements": allECons}
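
The nodedict built at the top of this example compacts possibly gapped mesh indices into consecutive 0-based ones, so the connectivity lists stay dense. The same compaction on invented indices:

    mesh_indices = [3, 7, 12]  # gapped indices, as node.index() might return them
    nodedict = {}
    i = 0
    for idx in mesh_indices:
        nodedict[idx] = i
        i += 1
    print(nodedict)  # {3: 0, 7: 1, 12: 2}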