# Example 1
def _refinement(self, skeleton, newSkeleton, context):
    """Refine `skeleton` into `newSkeleton` in parallel across MPI ranks.

    Marks the element edges that need subdivision, applies the matching
    refinement rule to each element, and repairs the parentage of the
    newly created segments.  Every process iterates over the same
    global range so the collective MPI calls stay matched on each pass
    and the rank-0 progress bar reflects global progress.

    Returns newSkeleton, or None when the user aborts via the progress
    bar on rank 0 (other ranks simply return early in that case).
    """
    global _rank
    # Only the front-end process (rank 0) owns the progress bar.
    if _rank == 0:
        pBar = progressbar.getProgress()

    markedEdges = refine.EdgeMarkings()
    self.newEdgeNodes = {}  # allows sharing of new edge nodes

    # Primary marking: how many times each targeted edge gets divided.
    self.targets(skeleton, context, self.degree.divisions, markedEdges,
                 self.criterion)
    # Additional marking -- only for (conservative) bisection at this point.
    self.degree.markExtras(skeleton, markedEdges)

    # Now, share the marking information on the boundary between processes.
    shareMarkings(markedEdges, skeleton)

    # Refine elements and segments
    segmentdict = {}  # which segments have been handled already.
    elements = skeleton.elements

    # Every process loops over the same range(maxn) -- the largest local
    # element count over all ranks -- so the per-iteration collective
    # calls (Allgather/Isend/Recv below) stay matched everywhere.
    # TODO: Is progress bar not supposed to work within IPC call?
    n = len(elements)
    alln = mpitools.Allgather_Int(n)  # local element counts from all ranks
    maxn = max(alln)
    nelem = 0
    ntotal = reduce(lambda x, y: x + y, alln)  # global element count
    for ii in range(maxn):
        try:
            oldElement = elements[ii]
            count = 1

            oldnnodes = oldElement.nnodes()
            # Get list of number of subdivisions on each edge ("marks")
            marks = markedEdges.getMarks(oldElement)
            # Find the canonical order for the marks. (The order is
            # ambiguous due to the arbitrary choice of the starting
            # edge.  Finding the canonical order allows the refinement
            # rule to be found in the rule table.)  rotation is the
            # offset into the elements node list required to match the
            # refinement rule to the element's marked edges.
            # signature is the canonical ordering of the marks.
            rotation, signature = refine.findSignature(marks)
            # Create new nodes along the subdivided element edges
            edgenodes = [
                self.getNewEdgeNodes(oldElement.nodes[i],
                                     oldElement.nodes[(i + 1) % oldnnodes],
                                     marks[i], newSkeleton, skeleton)
                for i in range(oldnnodes)
            ]
            # Create new elements
            newElements = self.rules[signature].apply(oldElement, rotation,
                                                      edgenodes, newSkeleton,
                                                      self.alpha)
            # If the old element's homogeneity is "1", it's safe to say that
            # new elements' homogeneities are "1".
            if oldElement.homogeneity(skeleton.MS) == 1.:
                for el in newElements:
                    el.copyHomogeneity(oldElement)

            # The calls to Skeleton.newElement() made by the
            # refinement rules have created new SkeletonSegments in
            # newSkeleton, but have not set the parentage of those
            # segments.  We have to fix that here.
            for newElement in newElements:
                for segment in newElement.getSegments(newSkeleton):
                    # Only look at each segment once.
                    if not segmentdict.has_key(segment):
                        segmentdict[segment] = 1
                        pseg = refine.findParentSegment(
                            skeleton, newElement, segment, edgenodes)
                        if pseg:
                            pseg.add_child(segment)
                            segment.add_parent(pseg)

        except IndexError:
            # Ranks with fewer than maxn local elements land here on the
            # padding iterations; count=0 keeps the global tally honest.
            count = 0

        # No. of elements done this iteration, summed over all ranks.
        nelem_step = mpitools.Allgather_Int(count)
        nelem += reduce(lambda x, y: x + y, nelem_step)
        if _rank == 0:
            if pBar.query_stop():
                # User pressed stop: tell all back ends to bail out too.
                pBar.set_failure()
                pBar.set_message("Failed")
                mpitools.Isend_Bool(False, range(1, _size))
                return None
            else:
                pBar.set_progress(1.0 * (nelem) / ntotal)
                pBar.set_message("refining skeleton: %d/%d" % (nelem, ntotal))
                mpitools.Isend_Bool(True, range(1, _size))
        else:
            # Back ends continue only while rank 0 says so.
            if not mpitools.Recv_Bool(0):
                return

    # New nodes that are created from the segments that are shared
    # have to be communicated between the sharing processes.
    shareCommonNodes(self, markedEdges, skeleton, newSkeleton)
    # Collecting Skeletons
    skeletonIPC.collect_pieces(newSkeleton)

    newSkeleton.cleanUp()
    ##    report_skeleton(newSkeleton)
    ##    if _rank == 0:
    ##        debug.fmsg(newSkeleton.all_skeletons)
    return newSkeleton
# Example 2
    def activeProcess(self, index):
        """Attempt to move the locally-owned ("active") node `index`.

        Coordinates the move with every remote process that shares the
        node: announces the node, tries each candidate position proposed
        by the mover, exchanges legality verdicts and per-element
        homogeneity/shape reports, then either accepts the best
        provisional change (broadcasting the chosen position) or
        reports failure.

        NOTE(review): the send/receive sequence must mirror the sharers'
        passive counterpart exactly, or the processes deadlock -- keep
        the message ordering unchanged.

        BUGFIX: the original indexed `move_candidates` with
        `changes.index(bestchange)`, but `changes` omits candidates
        rejected as illegal (`continue` below), so the two lists can be
        misaligned and the wrong coordinates broadcast.  We now keep a
        list of the legal candidates parallel to `changes`.
        """
        node = self.skeleton.getNodeWithIndex(index)
        shared = node.sharedWith()  # ranks that also hold this node
        # send the node (remote) index to each sharer
        for s in shared:
            mpitools.Send_Int(node.remoteIndex(s), s, self.move_channel)

        move_candidates = self.mover.active(self.skeleton, node)
        # Tell the sharers how many candidate moves will be tried.
        mpitools.Isend_Int(len(move_candidates), shared, tag=self.move_channel)

        changes = []
        legal_candidates = []  # candidate positions, parallel to `changes`
        for mc in move_candidates:
            change = deputy.DeputyProvisionalChanges()
            change.moveNode(node, mc, self.skeleton)  # moved the node

            # Building data to be sent to sharers.
            nodeMoves = []
            for s in shared:
                nodeMoves.append(
                    cfiddler.create_movedata(
                        _rank,  # master process
                        node.remoteIndex(s),  # remote index
                        mc.x,  # x
                        mc.y  # y
                    ))
            # Sending move data to shared processes
            cfiddler.Isend_MoveData(nodeMoves, shared, tag=self.move_channel)

            ##            REPORT("STARTED WORKING ON NODE #", index, "WITH", shared)
            # Receiving illegality from shared processes: the move is
            # legal only if no sharer objects and it is legal locally.
            illegal = mpitools.Irecv_Bools(shared, tag=self.illegal_channel)
            legal = True not in illegal and not change.illegal(self.skeleton)
            mpitools.Isend_Bool(legal, shared, tag=self.verdict_channel)
            if not legal:
                continue

            # Receiving report from shared processes.  Each report is a
            # flat vector of four equal quarters:
            # [homog-before | shape-before | homog-after | shape-after].
            reports = mpitools.Irecv_DoubleVecs(shared,
                                                tag=self.report_channel)
            homog0 = []
            shape0 = []
            homog1 = []
            shape1 = []
            for r in reports:
                n = len(r) / 4  # Python 2 integer division: quarter length
                homog0 += r[:n]
                shape0 += r[n:2 * n]
                homog1 += r[2 * n:3 * n]
                shape1 += r[3 * n:4 * n]
            change.augmentData(homog0, homog1, shape0, shape1)
            changes.append(change)
            legal_candidates.append(mc)

        # Now, the decision time
        bestchange = self.criterion(changes, self.skeleton)
        if bestchange is not None:
            self.nok += 1
            self.deltaE += bestchange.deltaE(self.skeleton, self.alpha)
            bestchange.accept(self.skeleton)
            # Tell sharers a move was accepted, then send its position.
            mpitools.Isend_Bool(True, shared, tag=self.verdict_channel)
            theindex = changes.index(bestchange)
            x = legal_candidates[theindex].x
            y = legal_candidates[theindex].y
            mpitools.Isend_DoubleVec([x, y],
                                     shared,
                                     tag=self.move_channel,
                                     size=2)
        else:
            self.nbad += 1
            mpitools.Isend_Bool(False, shared, tag=self.verdict_channel)
# Example 3
 def moveBack(self, node):
     """Record a rejected node move and notify any sharing processes.

     Increments the failure counter.  When the node is shared with
     other processes, broadcasts a False verdict to every sharer so
     they discard the provisional move as well.
     """
     self.nbad = self.nbad + 1
     if not node.isShared():
         return
     sharers = node.sharedWith()
     mpitools.Isend_Bool(False, sharers, tag=self.verdict_channel)
# Example 4
def _postProcess(self, context):
    """Parallel skeleton-modification driver.

    Repeatedly runs the core process until the iteration manager says
    to stop, keeping all MPI ranks in lock step via a per-iteration
    continue/stop handshake from rank 0, then reports the total energy
    change on rank 0.
    """
    if _rank == 0:
        # Only the front-end process (rank 0) owns the progress bar.
        pBar = progressbar.getProgress()
        pBar.set_message(self.header)

    skeleton = context.getObject()
    # Global energy before modification, summed over all processes.
    before = mpitools.Allreduce_DoubleSum(
        skeleton.energyTotal(self.criterion.alpha))

    if _rank == 0:
        if self.pbar_type == "continuous":
            # Expected iteration count; used for the progress fraction.
            n = self.iteration.iterations

    self.count = 0
    while self.iteration.goodToGo():
        self.count += 1
        # The context acquires the writing permissions
        # inside coreProcess.

        mpitools.Barrier()
        self.coreProcess_parallel(context)
        self.updateIteration_parallel()

        if _rank == 0:
            if pBar.query_stop():
                pBar.set_failure()
                pBar.set_message("Failed")
                # Sending a break signal
                mpitools.Isend_Bool(False, range(1, _size))
                break
            else:
                if self.pbar_type == "continuous":
                    pBar.set_progress(1.0 * self.count / n)
                    # does this ever get displayed?
                    pBar.set_message("%s%d/%d" % (self.header, self.count, n))
                    # Sending a continue signal
                    mpitools.Isend_Bool(True, range(1, _size))
                # NOTE(review): when pbar_type is not "continuous" no
                # signal is sent here, yet the other ranks always Recv
                # below -- looks like a potential mismatch; confirm.
        else:
            # Back ends continue only while rank 0 says so.
            if not mpitools.Recv_Bool(0):
                break

    switchboard.notify("skeleton nodes moved", context)

    # Final handshake: rank 0 reports overall success/failure so every
    # rank either proceeds to the energy tally or returns together.
    if _rank == 0:
        if pBar.query_stop():  # or pBar.get_success() <0:
            pBar.set_failure()
            pBar.set_message("Failed")
            mpitools.Isend_Bool(False, range(1, _size))
            return
        else:
            mpitools.Isend_Bool(True, range(1, _size))
    else:
        if not mpitools.Recv_Bool(0):
            return

    mpitools.Barrier()
    # Global energy after modification.
    after = mpitools.Allreduce_DoubleSum(
        skeleton.energyTotal(self.criterion.alpha))

    # Reporting to the message window
    if _rank == 0:
        if before:
            rate = 100.0 * (before - after) / before
        else:
            rate = 0.0  # avoid division by zero when initial energy is 0
        diffE = after - before
        reporter.report("%s deltaE = %10.4e (%6.3f%%)" %
                        (self.outro, diffE, rate))
# Example 5
def run(no_interp=None):
    """Program entry point (Python 2).

    Processes command-line options, loads the user's rc file, starts
    the parallel machine if available, then branches: rank 0 runs the
    front end (menus, GUI or text mode) while every other rank becomes
    a parallel back end awaiting commands.

    no_interp: forwarded to front_end(); presumably suppresses the
    interactive interpreter -- confirm against front_end's signature.
    """
    global _rank
    global startupfiles
    global program_name

    program_name = os.path.basename(sys.argv[0])
    process_inline_options()  # execute well-formed oof options

    # Look for .oof2rc or .oof3drc in the user's home directory.
    if not no_rc:
        oofrcpath = os.path.join(os.path.expanduser("~"),
                                 ".%src" % program_name)
        if os.path.exists(oofrcpath):
            # The rc file runs before any other startup scripts.
            startupfiles = [StartUpScriptNoLog(oofrcpath)] + startupfiles

    if (thread_enable.query()
            and not (runtimeflags.text_mode or config.no_gui())):
        # TODO OPT: Is this still necessary?
        garbage.disable()  # work-around for gtk bug?

    start_parallel_machine()  # start parallel suite (if available)

    if _rank == 0:
        if parallel_enable.enabled():
            from ooflib.SWIG.common import mpitools
            # NOTE(review): _size is a local here; if a module-level
            # _size is expected to be updated, a "global _size"
            # declaration is missing -- confirm.
            _size = mpitools.Size()
            # Tell every back end whether threading is enabled.
            mpitools.Isend_Bool(thread_enable.enabled(), range(1, _size))

        if parallel_enable.enabled():
            # Imported for its side effects, apparently -- confirm.
            from ooflib.common.IO import socket2me

        if config.petsc():
            print "Going to InitPETSc"
            from ooflib.SWIG.engine.PETSc.petsc_solverdriver import InitPETSc
            InitPETSc(sys.argv)
            for s in sys.argv:
                print s

        start_sockets_Front_End()
        # Import mainmenu only *after* processing command line options, so
        # that the options can affect which menus are loaded.
        global mainmenu
        from ooflib.common.IO import mainmenu
        front_end(no_interp)  # all non-parallel menu items are executed here.
    else:
        # parallel back-end
        parallel_enable.set(True)  # notify back-end of its parallel status

        # Thread status at the back-ends mirrors rank 0's setting.
        from ooflib.SWIG.common import mpitools
        thread_enable.set(mpitools.Recv_Bool(0))
        if not thread_enable.enabled():
            lock.disableLocks()

        if parallel_enable.enabled():
            # Imported for its side effects, apparently -- confirm.
            from ooflib.common.IO import socket2me

        if config.petsc():
            print "Going to InitPETSc"
            from ooflib.SWIG.engine.PETSc.petsc_solverdriver import InitPETSc
            InitPETSc(sys.argv)
            for s in sys.argv:
                print s

        debug.set_debug_mode()  # set for debugging parallel mode
        from ooflib.common import quit
        quit.set_quiet()  ## back-end exits quietly.
        start_sockets_Back_End()  # socket initialization
        from ooflib.common import backEnd  # import back end machine
        # The back end shouldn't run the gui!
        runtimeflags.text_mode = True
        backEnd.back_end()  # back-end awaits for your command