Example 1
    def passiveProcess(self, stopper):
        self.mover.passive(self.skeleton, stopper)  # Non-trivial for Smooth
        moveData = cfiddler.Recv_MoveData(stopper, tag=self.move_channel)
        node = self.skeleton.getNodeWithIndex(moveData.index)
##        REPORT("HELPING", stopper, "FOR NODE #", node.remoteIndex(stopper))
        
        # recording energy-before (should this use periodic neighbor Elements?)
        neighbors = node.aperiodicNeighborElements(self.skeleton)
        reportData = [el.energyHomogeneity(self.skeleton) for el in neighbors]
        reportData += [el.energyShape() for el in neighbors]
        # move to the position -- self.skeleton is a DeputySkeleton
        self.skeleton.moveNodeTo(
            node, primitives.Point(moveData.x, moveData.y))
        # Check & send illegality
        mpitools.Send_Bool(bool(node.illegal()),
                           stopper,
                           tag=self.illegal_channel)

        # if the move is illegal in any process, it must be aborted
        if mpitools.Recv_Bool(
            stopper, tag=self.verdict_channel):  # True:continue, False:abort
            # recording energy-after
            reportData += [el.energyHomogeneity(self.skeleton)
                            for el in neighbors]
            reportData += [el.energyShape() for el in neighbors]
            # reporting
            mpitools.Send_DoubleVec(reportData,
                                    stopper,
                                    tag=self.report_channel)
            # receiving verdict: True = stay, False = move back
            if not mpitools.Recv_Bool(stopper, tag=self.verdict_channel):
                self.skeleton.moveNodeBack(node)
        else:  # Illegal!
            self.skeleton.moveNodeBack(node)
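
For orientation, the passive routine above is one half of a three-phase handshake: receive the trial move, report legality, then report energies and wait for the final verdict. Below is a minimal sketch of the matching active side, written with mpi4py rather than OOF2's mpitools wrapper; the channel tags, the helpers list, and the acceptance test are hypothetical stand-ins, not OOF2 API.

    from mpi4py import MPI

    comm = MPI.COMM_WORLD
    MOVE, ILLEGAL, VERDICT, REPORT = 0, 1, 2, 3  # hypothetical channel tags

    def active_side(helpers, move_data, accept_move):
        # Phase 1: send the trial move to every helper process.
        for h in helpers:
            comm.send(move_data, dest=h, tag=MOVE)
        # Phase 2: gather illegality flags; receive from ALL helpers before
        # deciding, so no message is left stranded in a queue.
        flags = [comm.recv(source=h, tag=ILLEGAL) for h in helpers]
        legal = not any(flags)
        for h in helpers:
            comm.send(legal, dest=h, tag=VERDICT)  # True: continue, False: abort
        if not legal:
            return False
        # Phase 3: collect energy reports, then broadcast the final
        # stay/move-back verdict computed from them.
        reports = [comm.recv(source=h, tag=REPORT) for h in helpers]
        verdict = accept_move(reports)  # hypothetical acceptance test
        for h in helpers:
            comm.send(verdict, dest=h, tag=VERDICT)
        return verdict
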
Example 2
    def passiveProcess(self, stopper):
        # the node to move
        myindex = mpitools.Recv_Int(stopper, tag=self.move_channel)
        node = self.skeleton.getNodeWithIndex(myindex)
        self.mover.passive(self.skeleton, node, stopper)

        # getting no. of move candidates
        nmoves = mpitools.Recv_Int(stopper, tag=self.move_channel)

        for i in range(nmoves):
            moveData = cfiddler.Recv_MoveData(stopper, tag=self.move_channel)
            ##            REPORT("HELPING", stopper, "FOR NODE #", node.remoteIndex(stopper))

            # recording energy-before
            neighbors = node.aperiodicNeighborElements(self.skeleton)
            reportData = [
                el.energyHomogeneity(self.skeleton) for el in neighbors
            ]
            reportData += [el.energyShape() for el in neighbors]
            # move to the position -- self.skeleton is a DeputySkeleton
            self.skeleton.moveNodeTo(node,
                                     primitives.Point(moveData.x, moveData.y))
            # Check & send illegality
            mpitools.Send_Bool(bool(node.illegal()),
                               stopper,
                               tag=self.illegal_channel)

            # if the move is illegal in any process, it must be aborted
            if mpitools.Recv_Bool(
                    stopper,
                    tag=self.verdict_channel):  # True:continue, False:abort
                # recording energy-after
                reportData += [
                    el.energyHomogeneity(self.skeleton) for el in neighbors
                ]
                reportData += [el.energyShape() for el in neighbors]
                # reporting
                mpitools.Send_DoubleVec(reportData,
                                        stopper,
                                        tag=self.report_channel)
            # reset for the next one
            self.skeleton.moveNodeBack(node)
##            REPORT("DONE HELPING", moveData.master, " ON NODE #",
##                   node.remoteIndex(moveData.master))

        # receiving the final verdict: True = a move was accepted and its
        # coordinates follow, False = the node stays where it is
        if mpitools.Recv_Bool(stopper, tag=self.verdict_channel):
            x, y = mpitools.Recv_DoubleVec(stopper,
                                           tag=self.move_channel,
                                           size=2)
            self.skeleton.moveNodeTo(node, primitives.Point(x, y))
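
Note the layout of the report vector the passive side builds: homogeneity-before and shape-before for each neighbor, followed by homogeneity-after and shape-after when the move was legal. A hypothetical unpacking helper on the receiving side (not part of OOF2) might look like:

    def split_report(report, nneighbors):
        # Four equal blocks of len(neighbors) doubles each:
        # homogeneity/shape before the move, homogeneity/shape after it.
        k = nneighbors
        return (report[:k], report[k:2*k], report[2*k:3*k], report[3*k:])
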
Example 3
def _postProcess(self, context):
    if _rank == 0:
        pBar = progressbar.getProgress()
        pBar.set_message(self.header)

    skeleton = context.getObject()
    before = mpitools.Allreduce_DoubleSum(
        skeleton.energyTotal(self.criterion.alpha))

    if _rank == 0:
        if self.pbar_type == "continuous":
            n = self.iteration.iterations

    self.count = 0
    while self.iteration.goodToGo():
        self.count += 1
        # the context acquires the writing permissions
        # inside coreProcess.

        mpitools.Barrier()
        self.coreProcess_parallel(context)
        self.updateIteration_parallel()

        if _rank == 0:
            if pBar.query_stop():
                pBar.set_failure()
                pBar.set_message("Failed")
                # Sending a break signal
                mpitools.Isend_Bool(False, range(1, _size))
                break
            else:
                if self.pbar_type == "continuous":
                    pBar.set_progress(1.0 * self.count / n)
                    # does this ever get displayed?
                    pBar.set_message("%s%d/%d" % (self.header, self.count, n))
                    # Sending a continue signal
                    mpitools.Isend_Bool(True, range(1, _size))
        else:
            if not mpitools.Recv_Bool(0):
                break

    switchboard.notify("skeleton nodes moved", context)

    if _rank == 0:
        if pBar.query_stop():  # or pBar.get_success() <0:
            pBar.set_failure()
            pBar.set_message("Failed")
            mpitools.Isend_Bool(False, range(1, _size))
            return
        else:
            mpitools.Isend_Bool(True, range(1, _size))
    else:
        if not mpitools.Recv_Bool(0):
            return

    mpitools.Barrier()
    after = mpitools.Allreduce_DoubleSum(
        skeleton.energyTotal(self.criterion.alpha))

    # Reporting to the message window
    if _rank == 0:
        if before:
            rate = 100.0 * (before - after) / before
        else:
            rate = 0.0
        diffE = after - before
        reporter.report("%s deltaE = %10.4e (%6.3f%%)" %
                        (self.outro, diffE, rate))
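
The loop body above is a rank-0-driven continue/abort broadcast: rank 0 polls the progress bar and pushes a boolean verdict to every other rank, and each rank blocks on that verdict before its next iteration. A minimal sketch of the same pattern with mpi4py (the step and should_stop callables are hypothetical):

    from mpi4py import MPI

    comm = MPI.COMM_WORLD
    rank, size = comm.Get_rank(), comm.Get_size()

    def controlled_loop(niter, step, should_stop):
        for i in range(niter):
            comm.Barrier()
            step(i)  # one collective iteration of work
            if rank == 0:
                go = not should_stop()  # e.g. the stop button was pressed
                for dest in range(1, size):
                    comm.send(go, dest=dest)
                if not go:
                    break
            else:
                # non-root ranks follow rank 0's verdict
                if not comm.recv(source=0):
                    break
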
Example 4
File: oof.py Project: creuzige/OOF2
def run(no_interp=None):
    global _rank
    global startupfiles

    process_inline_options()  # execute well-formed oof options

    # Look for .oof2rc in the user's home directory.
    if not no_rc:
        oofrcpath = os.path.join(os.path.expanduser("~"), ".oof2rc")
        if os.path.exists(oofrcpath):
            startupfiles = [StartUpScriptNoLog(oofrcpath)] + startupfiles

    if thread_enable.query() and not (runtimeflags.text_mode
                                      or config.no_gui()):
        # TODO: Is this still necessary?
        garbage.disable()  # work-around for gtk bug?

    start_parallel_machine()  # start parallel suite (if available)

    if _rank == 0:
        if parallel_enable.enabled():
            from ooflib.SWIG.common import mpitools
            _size = mpitools.Size()
            mpitools.Isend_Bool(thread_enable.enabled(), range(1, _size))
            from ooflib.common.IO import socket2me

        if config.petsc():
            print "Going to InitPETSc"
            from ooflib.SWIG.engine.PETSc.petsc_solverdriver import InitPETSc
            InitPETSc(sys.argv)
            for s in sys.argv:
                print s

        start_sockets_Front_End()
        # Import mainmenu only *after* processing command line options, so
        # that the options can affect which menus are loaded.
        global mainmenu
        from ooflib.common.IO import mainmenu
        front_end(no_interp)  # all non-parallel menu items are executed here.
    else:
        # parallel back-end
        parallel_enable.set(True)  # notify back-end of its parallel status

        # thread status at the back-ends
        from ooflib.SWIG.common import mpitools
        thread_enable.set(mpitools.Recv_Bool(0))
        if not thread_enable.enabled():
            lock.disable_all()

        if parallel_enable.enabled():
            from ooflib.common.IO import socket2me

        if config.petsc():
            print "Going to InitPETSc"
            from ooflib.SWIG.engine.PETSc.petsc_solverdriver import InitPETSc
            InitPETSc(sys.argv)
            for s in sys.argv:
                print s

        debug.set_debug_mode()  # set for debugging parallel mode
        from ooflib.common import quit
        quit.set_quiet()  ## back-end exits quietly.
        start_sockets_Back_End()  # socket initialization
        from ooflib.common import backEnd  # import back end machine
        # The back end shouldn't run the gui!
        runtimeflags.text_mode = True
        backEnd.back_end()  # back end awaits your command
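
The Isend_Bool/Recv_Bool pair that propagates the threading flag from rank 0 to every back-end is, in effect, a broadcast. With mpi4py the same agreement could be written as one collective call (read_local_config is a hypothetical stand-in for querying thread_enable on rank 0):

    from mpi4py import MPI

    comm = MPI.COMM_WORLD
    flag = read_local_config() if comm.Get_rank() == 0 else None  # hypothetical
    flag = comm.bcast(flag, root=0)  # all ranks now hold rank 0's value
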
Example 5
def _refinement(self, skeleton, newSkeleton, context):
    global _rank
    if _rank == 0:
        pBar = progressbar.getProgress()

    markedEdges = refine.EdgeMarkings()
    self.newEdgeNodes = {}  # allows sharing of new edge nodes

    # Primary marking
    self.targets(skeleton, context, self.degree.divisions, markedEdges,
                 self.criterion)
    # Additional marking -- only for (conservative) bisection at this point.
    self.degree.markExtras(skeleton, markedEdges)

    # Now, share the marking information on the boundary
    shareMarkings(markedEdges, skeleton)

    # Refine elements and segments
    segmentdict = {}  # which segments have been handled already.
    elements = skeleton.elements

    # At this point, to update the progress bar properly, every process has
    # to loop over the same range(maxn).
    # TODO: Is progress bar not supposed to work within IPC call?
    n = len(elements)
    alln = mpitools.Allgather_Int(n)
    maxn = max(alln)
    nelem = 0
    ntotal = sum(alln)
    for ii in range(maxn):
        try:
            oldElement = elements[ii]
            count = 1

            oldnnodes = oldElement.nnodes()
            # Get list of number of subdivisions on each edge ("marks")
            marks = markedEdges.getMarks(oldElement)
            # Find the canonical order for the marks. (The order is
            # ambiguous due to the arbitrary choice of the starting
            # edge.  Finding the canonical order allows the refinement
            # rule to be found in the rule table.)  rotation is the
            # offset into the element's node list required to match the
            # refinement rule to the element's marked edges.
            # signature is the canonical ordering of the marks.
            rotation, signature = refine.findSignature(marks)
            # Create new nodes along the subdivided element edges
            edgenodes = [
                self.getNewEdgeNodes(oldElement.nodes[i],
                                     oldElement.nodes[(i + 1) % oldnnodes],
                                     marks[i], newSkeleton, skeleton)
                for i in range(oldnnodes)
            ]
            # Create new elements
            newElements = self.rules[signature].apply(oldElement, rotation,
                                                      edgenodes, newSkeleton,
                                                      self.alpha)
            # If the old element's homogeneity is "1", it's safe to say that
            # new elements' homogeneities are "1".
            if oldElement.homogeneity(skeleton.MS) == 1.:
                for el in newElements:
                    el.copyHomogeneity(oldElement)

            # The calls to Skeleton.newElement() made by the
            # refinement rules have created new SkeletonSegments in
            # newSkeleton, but have not set the parentage of those
            # segments.  We have to fix that here.
            for newElement in newElements:
                for segment in newElement.getSegments(newSkeleton):
                    # Only look at each segment once.
                    if segment not in segmentdict:
                        segmentdict[segment] = 1
                        pseg = refine.findParentSegment(
                            skeleton, newElement, segment, edgenodes)
                        if pseg:
                            pseg.add_child(segment)
                            segment.add_parent(pseg)

        except IndexError:
            count = 0

        # No. of elements done.
        nelem_step = mpitools.Allgather_Int(count)
        nelem += sum(nelem_step)
        if _rank == 0:
            if pBar.query_stop():
                pBar.set_failure()
                pBar.set_message("Failed")
                mpitools.Isend_Bool(False, range(1, _size))
                return None
            else:
                pBar.set_progress(1.0 * (nelem) / ntotal)
                pBar.set_message("refining skeleton: %d/%d" % (nelem, ntotal))
                mpitools.Isend_Bool(True, range(1, _size))
        else:
            if not mpitools.Recv_Bool(0):
                return

    # New nodes created on shared segments have to be
    # communicated between the sharers.
    shareCommonNodes(self, markedEdges, skeleton, newSkeleton)
    # Collecting Skeletons
    skeletonIPC.collect_pieces(newSkeleton)

    newSkeleton.cleanUp()
    ##    report_skeleton(newSkeleton)
    ##    if _rank == 0:
    ##        debug.fmsg(newSkeleton.all_skeletons)
    return newSkeleton
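
The Allgather/maxn idiom in the element loop deserves a note: every rank iterates over the same range(maxn) so that the collective calls inside the loop stay in lockstep, and a rank that has exhausted its local elements simply contributes a count of zero (here via the IndexError handler). A minimal sketch of the pattern with mpi4py, using an explicit bounds check instead of the exception (work is a hypothetical per-item callable):

    from mpi4py import MPI

    comm = MPI.COMM_WORLD

    def lockstep_loop(items, work):
        counts = comm.allgather(len(items))
        maxn, total, done = max(counts), sum(counts), 0
        for i in range(maxn):
            count = 0
            if i < len(items):  # ranks past their last item do nothing
                work(items[i])
                count = 1
            # collective call: every rank must reach it on every pass
            done += sum(comm.allgather(count))
            # rank 0 could update a progress bar with done/total here
        return done, total
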