def create_skeleton(self):
    """Build this process's local piece of the distributed Skeleton.

    Creates an empty Skeleton on the local Microstructure, adds the
    nodes and elements that the partitioning assigned to this rank,
    records sharing information for nodes on subdomain boundaries,
    sets up the skeleton boundaries, and finally lets process 0
    collect minimal data from the other ranks for display.
    """
    global _rank
    # Create an empty Skeleton on this process's Microstructure.
    MS = microStructures[self.msname].getObject()
    self.skeleton = skeleton.Skeleton(MS)
    # RCL: Store the bounds of the subdomain of this process.
    self.skeleton.localbounds = self.localbounds
    # Create the nodes assigned to this rank.  A node on a subdomain
    # boundary is co-owned by other ranks; record, for each remote
    # owner, the index the node has on that rank.
    for nd in self.nodePartitions[_rank]:
        node = self.skeleton.newNode(nd.position()[0], nd.position()[1])
        for owner in nd.owners():
            if owner != _rank:
                node.sharesWith(owner,
                                self.indexMap[(owner, nd.getIndex())])
    # Create the elements assigned to this rank, resolving each corner
    # node through the local index map.  (The original bound the new
    # element to an unused local variable; the return value is not
    # needed.)
    for el in self.elemPartitions[_rank]:
        nodes = [self.skeleton.getNodeWithIndex(
                     self.indexMap[(_rank, n.getIndex())])
                 for n in el.nodes]
        self.skeleton.newElement(nodes)
    # Take care of boundaries.
    self.set_boundaries()
    # Proc #0 collects minimal data from the others for displaying.
    mpitools.Barrier()
    self.collect_pieces()
def play(self):
    """Execute this rank's share of the node-motion work.

    Gathers the active target nodes owned by this process, shuffles
    them, builds the per-process work order, and runs each work item
    (a ``(callback, arguments)`` pair).  Afterwards the pieces of the
    distributed skeleton are collected for display and the skeleton's
    timestamp is incremented.
    """
    global _rank
    global _size
    # Get the nodes owned by this process and shuffle them.
    # list() is required: in Python 3 filter() returns an iterator,
    # and random.shuffle() needs a mutable sequence to shuffle in
    # place.  (On Python 2 this is a no-op copy.)
    activeNodes = self.targets(self.context)
    activeNodes = list(filter(self.ownNode, activeNodes))
    random.shuffle(activeNodes)
    self.createWorkOrder(activeNodes)
    mpitools.Barrier()
    for work in self.mywork:
        if work is not None:
            # work = (callback function, arguments)
            work[0](work[1])
    mpitools.Barrier()
    # Rank 0 gathers the pieces of the distributed skeleton for display.
    skeletonIPC.collect_pieces(self.skeleton)
    self.skeleton.timestamp.increment()
def _parallel_init(menuitem, name, microstructure, x_elements, y_elements,
                   skeleton_geometry):
    """Menu callback: construct a Skeleton in parallel and register it.

    Builds an InitializeSkeletonParallel worker for the given
    microstructure and grid size, creates the distributed skeleton,
    and files it in the skeleton context collection under
    [microstructure, name].
    """
    initializer = InitializeSkeletonParallel(name, microstructure,
                                             x_elements, y_elements,
                                             skeleton_geometry)
    skel = initializer.create()
    mpitools.Barrier()
    mscontext = microStructures[microstructure]
    import ooflib.engine.skeletoncontext
    skeletoncontext.skeletonContexts.add(
        [microstructure, name], skel, parent=mscontext)
def parallel_move_node(menuitem, origin, destination, allow_illegal, skeletonpath):
    """Move the node nearest to `origin` to `destination` in a
    distributed skeleton.

    Protocol: every rank nominates its locally nearest node (squared
    distance to the click point); rank 0 gathers the distances, finds
    the global minimum, and tells exactly the rank(s) holding that
    minimum (via Send_Int/Recv_Int) to perform the move.  The move is
    then applied under the context's reservation/write lock, with
    legality checks and optional rollback.
    """
    skelcontext = skeletoncontext.skeletonContexts[skeletonpath]
    #skelcontext = self.gfxwindow().topwho('Skeleton')
    if skelcontext:
        skeleton = skelcontext.getObject().deputyCopy()
        skeleton.activate()
        #If one looks at moveNodeGUI.py, origin here is already
        #set to the position of the nearest node.
        node = skeleton.nearestNode(origin)
        nodeindex=node.getIndex()
        # Squared distance from this rank's nearest node to the click point.
        distance2=(node.position()-origin)**2
        ###############################################################
        # Get nearest node distances from all processes
        if _rank==0:
            #Get list of squares of distance of node to the click point
            distance2list=[distance2]
            dmin=-1
            dmin_proc_list=[]
            #Get distances from other processes.  dmin is seeded with
            #the last nonnegative distance seen; the next loop refines
            #it to the true minimum.
            for proc in range(_size):
                if proc!=0:
                    distance2list.append(mpitools.Recv_Double(proc))
                if distance2list[proc]>=0:
                    dmin=distance2list[proc]
            #Find closest node among those "nominated" by each process
            for proc in range(_size):
                if distance2list[proc]>=0:
                    if distance2list[proc]<dmin:
                        dmin=distance2list[proc]
            # Every rank whose distance ties the minimum gets to move.
            for proc in range(_size):
                if distance2list[proc]==dmin:
                    dmin_proc_list.append(proc)
        else:
            #Backend sends report to front end
            mpitools.Send_Double(distance2,0)
        mpitools.Barrier()
        #Tell the processes in dmin_proc_list to try moving their nodes
        #and report back the result. Then really move the node if the
        #move is valid for all of them.
        if _rank==0:
            for proc in range(_size):
                if proc in dmin_proc_list:
                    if proc==0:
                        moveit=1
                    else:
                        mpitools.Send_Int(1,proc)
                else:
                    if proc==0:
                        moveit=0
                    else:
                        mpitools.Send_Int(0,proc)
        else:
            # Backends learn from rank 0 whether they should move.
            moveit=mpitools.Recv_Int(0)
        mpitools.Barrier()
        #
        ###############################################################
        skelcontext.reserve()
        skelcontext.begin_writing()
        try:
            # NOTE(review): indentation reconstructed from collapsed
            # source — the legality checks below are assumed to apply
            # only when this rank actually moved its node; confirm
            # against the serial moveNodeTo handler.
            if moveit:
                skeleton.moveNodeTo(node, destination)
                #TODO 3.1: If the node is shared, the move may be valid
                #in one process but invalid in another.
                if node.illegal():
                    if allow_illegal==1:
                        skeleton.setIllegal()
                    else:
                        # Revert the move rather than accept an
                        # illegal element.
                        node.moveBack()
                elif skeleton.illegal():
                    # node motion may have rehabilitated
                    skeleton.checkIllegality()
            skelcontext.pushModification(skeleton)
        finally:
            # collect_pieces(skeleton)
            # skelcontext.end_writing()
            skelcontext.cancel_reservation()
        skeleton.needsHash()
        switchboard.notify('redraw')
def _modify(menuitem, skeleton, modifier):
    """Apply a skeleton modifier in parallel on all MPI processes.

    All ranks run modifier.apply_parallel() and postProcess_parallel()
    under the skeleton context's reservation and write lock; rank 0
    additionally drives the progress bar and reports success/failure.
    Barriers keep the ranks in lockstep around the modification and
    the post-processing step.
    """
    global _rank
    if _rank == 0:
        ModifyProgressBar = menuitem.getProgressBar(
            modifier.get_progressbar_type())
    context = skeletoncontext.skeletonContexts[skeleton]
    context.reserve()
    try:
        context.begin_writing()
        try:
            if _rank == 0:
                reporter.start_progressbar()
            skel = modifier.apply_parallel(context.getObject(), context)
            if _rank == 0:
                reporter.progressbar_completed()
                reporter.end_progressbar()
            # skel is None whenever the modifier fails
            # or is interrupted from finishing its task
            if skel is None:
                reporter.warn("Modify Process Interrupted")
                # return will force all the finally's to be executed
                return
            mpitools.Barrier()
            context.pushModification(skel)
            skel.needsHash()
        finally:
            context.end_writing()
        if _rank == 0:
            reporter.start_progressbar()
        mpitools.Barrier()
        modifier.postProcess_parallel(context)
        if _rank == 0:
            reporter.progressbar_completed()
            reporter.end_progressbar()
        # If the skeleton is modified in postProcess, use
        # begin/end_writing inside the function call to guarantee
        # that no dead-locking occurs because of possible switchboard
        # calls to READ or REDRAW that may make use of
        # begin/end_reading(). See anneal.py for an example.
    finally:
        # guarantee that the reservation is cancelled even if an
        # exception is raised
        context.cancel_reservation()
    if _rank == 0:
        if ModifyProgressBar.query_stop() or \
           ModifyProgressBar.get_success()<0:
            ModifyProgressBar.set_failure()
            ModifyProgressBar.set_message("Failed")
            return
        else:
            ModifyProgressBar.set_success()
            ModifyProgressBar.set_message("Succeeded")
    # NOTE(review): indentation reconstructed from collapsed source —
    # these notifications are placed at function level (all ranks);
    # confirm whether they should fire only on rank 0.
    switchboard.notify('redraw')
    switchboard.notify('Skeleton modified', skeleton, modifier)
def _postProcess(self, context):
    """Run the iterative core process of this modifier in parallel.

    Loops coreProcess_parallel() until the iteration manager stops or
    the user aborts via the progress bar.  Rank 0 broadcasts a
    continue/break boolean to ranks 1.._size-1 each pass so all ranks
    leave the loop together; total energy before and after is reduced
    across all ranks and the change is reported on rank 0.
    """
    if _rank == 0:
        pBar = progressbar.getProgress()
        pBar.set_message(self.header)
    skeleton = context.getObject()
    # Global energy before modification (summed over all ranks).
    before = mpitools.Allreduce_DoubleSum(
        skeleton.energyTotal(self.criterion.alpha))
    if _rank == 0:
        if self.pbar_type == "continuous":
            n = self.iteration.iterations
    self.count = 0
    while self.iteration.goodToGo():
        self.count += 1
        # The context acquires the writing permissions
        # inside coreProcess.
        mpitools.Barrier()
        self.coreProcess_parallel(context)
        self.updateIteration_parallel()
        if _rank == 0:
            if pBar.query_stop():
                pBar.set_failure()
                pBar.set_message("Failed")
                # Sending a break signal
                mpitools.Isend_Bool(False, range(1, _size))
                break
            else:
                if self.pbar_type == "continuous":
                    pBar.set_progress(1.0 * self.count / n)
                    # does this ever get displayed?
                    pBar.set_message("%s%d/%d" % (self.header, self.count, n))
                # Sending a continue signal
                mpitools.Isend_Bool(True, range(1, _size))
        else:
            # Backends obey rank 0's continue/break decision.
            if not mpitools.Recv_Bool(0):
                break
    switchboard.notify("skeleton nodes moved", context)
    # Final abort check: rank 0 broadcasts one more go/no-go flag so
    # that all ranks either proceed to the energy reduction or return.
    if _rank == 0:
        if pBar.query_stop():  # or pBar.get_success() <0:
            pBar.set_failure()
            pBar.set_message("Failed")
            mpitools.Isend_Bool(False, range(1, _size))
            return
        else:
            mpitools.Isend_Bool(True, range(1, _size))
    else:
        if not mpitools.Recv_Bool(0):
            return
    mpitools.Barrier()
    after = mpitools.Allreduce_DoubleSum(
        skeleton.energyTotal(self.criterion.alpha))
    # Reporting to the message window
    if _rank == 0:
        if before:
            rate = 100.0 * (before - after) / before
        else:
            rate = 0.0
        diffE = after - before
        reporter.report("%s deltaE = %10.4e (%6.3f%%)"
                        % (self.outro, diffE, rate))