def continue_in_execution(self):
    """Resume running the worker's current global context.

    Fast-expands while possible; once fast expansion stops, the current
    gcontext is detached from the worker and finalized: its node is
    created, slow expansion is attempted, and if nothing can be expanded
    the run either ended cleanly (MPI-leak check + record allocations)
    or some process is still unfinished (deadlock error).

    Returns:
        True when fast expansion will continue on this worker,
        False when the current gcontext is finished.

    Fix: removed a stray unterminated triple-quote that trailed the
    original body (it would have been a syntax error as written).
    """
    if self.fast_expand():
        return True
    gcontext = self.gcontext
    gstate = gcontext.gstate
    # We will plan some computation but leaving this function,
    # current gcontext is finished
    self.gcontext = None
    if not gcontext.make_node():
        gcontext.gstate.dispose()  # Node already explored
        return False
    if not self.slow_expand(gcontext):
        node = gcontext.node
        if any(state.status != State.StatusFinished
               for state in gstate.states):
            # Some process never reached Finished -> deadlock
            active_pids = [state.pid
                           for state in gstate.states
                           if state.status != State.StatusFinished]
            gcontext = GlobalContext(self, node, gstate)
            message = errormsg.Deadlock(None,
                                        gcontext=gcontext,
                                        active_pids=active_pids)
            gcontext.add_error_and_throw(message)
        else:
            # Clean termination: check MPI-level leaks and record the
            # surviving heap allocations on the final node
            gstate.mpi_leak_check(self, node)
            node.allocations = sum(
                (state.allocations for state in gstate.states), [])
        gstate.dispose()
    return False
def expand_node(self, node, gstate, action):
    """Expand one state-space node.

    Optionally applies *action*, fast-expands the context, then tries
    slow expansion; a fully blocked, unfinished state is reported as a
    deadlock, while a fully finished one is leak-checked and its
    allocations recorded. Disposes *gstate* when the node is done.
    """
    logging.debug("--------- Expanding node %s %s ------------",
                  node.uid, gstate)
    if self.debug_compare_states is not None \
            and node.uid in self.debug_compare_states:
        # Capture a copy of the state for later debugging comparison
        if self.debug_captured_states is None:
            self.debug_captured_states = []
        self.debug_captured_states.append(gstate.copy())
    gcontext = GlobalContext(self, node, gstate)
    if action:
        action.apply_action(gcontext)
    self.fast_expand_node(gcontext)
    if not gcontext.make_node():
        # Node already explored
        gstate.dispose()
        return
    if self.slow_expand(gcontext):
        return
    node = gcontext.node
    unfinished = [state for state in gstate.states
                  if state.status != State.StatusFinished]
    if unfinished:
        # Nothing can be expanded but processes remain -> deadlock
        error_gcontext = GlobalContext(self, node, gstate)
        error_gcontext.add_error_and_throw(
            errormsg.Deadlock(
                None,
                gcontext=error_gcontext,
                active_pids=[state.pid for state in unfinished]))
    else:
        # Clean end of run: check MPI leaks, keep surviving allocations
        gstate.mpi_leak_check(self, node)
        node.allocations = sum(
            (state.allocations for state in gstate.states), [])
    gstate.dispose()
def start_gcontext(self, node, gstate, action):
    """Install a fresh global context on this worker and run it.

    Applies *action* (when given), remembers it on the context, and
    delegates to continue_in_execution(); returns its result.
    """
    logging.debug("Starting gcontext %s %s %s", self, node, gstate)
    context = GlobalContext(self, node, gstate)
    self.gcontext = context
    if action:
        action.apply_action(context)
    context.action = action
    return self.continue_in_execution()
def init_nonfirst_worker(self):
    """Run the initial step of every process for a non-first worker.

    Returns False as soon as one process fails its (non-first)
    initial run, True when all succeed.
    """
    node = Node("init", None)
    gstate = GlobalState(self.generator.process_count)
    gcontext = GlobalContext(self, node, gstate)
    # all() short-circuits exactly like the explicit early-return loop
    return all(gcontext.get_context(pid).initial_run(False)
               for pid in xrange(self.generator.process_count))
def memory_leak_check(self):
    """Check final nodes for leaked allocations.

    Allocations present on every final node are counted as
    deterministic unallocated memory; allocations present on only some
    final nodes are reported as MemoryLeak errors.
    """
    final_nodes = list(self.statespace.all_final_nodes())
    allocations = [frozenset(node.allocations) if node.allocations
                   else frozenset()
                   for node in final_nodes]
    self.deterministic_unallocated_memory = 0
    if not allocations:
        return
    anywhere = frozenset.union(*allocations)
    everywhere = frozenset.intersection(*allocations)
    for alloc in sorted(anywhere - everywhere):
        # Find some final node that still holds this allocation
        for node in final_nodes:
            if node.allocations and alloc in node.allocations:
                gcontext = GlobalContext(self, node, None)
                message = errormsg.MemoryLeak(
                    gcontext.get_context(alloc.pid),
                    address=alloc.addr,
                    size=alloc.size)
                break
        else:
            assert 0  # This shoud not happen
        self.add_error_message(message)
    self.deterministic_unallocated_memory = sum(
        alloc.size for alloc in everywhere)
def make_initial_node(self):
    """Create the root node of the state space and run every process.

    Returns False if some process fails its initial run; otherwise the
    resulting node is created, queued, and True is returned.
    """
    statespace = self.generator.statespace
    root = Node("init", None)
    statespace.add_node(root)
    statespace.initial_node = root
    gstate = GlobalState(self.generator.process_count)
    gcontext = GlobalContext(self, root, gstate)
    # TODO: Do it in parallel
    for pid in xrange(self.generator.process_count):
        if not gcontext.get_context(pid).initial_run():
            return False
    gcontext.make_node()
    gcontext.add_to_queue(None, False)
    return True
def start_controllers(self):
    """Start and connect all controllers, then build the initial node.

    Returns False if some process fails its initial run, True on
    success (the initial node is then created and queued).
    """
    # We do actions separately to allow parallel initialization
    for controller in self.controllers:
        controller.start(capture_syscalls=["write"])
    for controller in self.controllers:
        controller.connect()
    root = Node("init", None)
    self.statespace.add_node(root)
    self.statespace.initial_node = root
    gstate = GlobalState(self.process_count)
    gcontext = GlobalContext(self, root, gstate)
    # TODO: Do it in parallel
    for pid in xrange(self.process_count):
        if not gcontext.get_context(pid).initial_run():
            return False
    gcontext.make_node()
    gcontext.add_to_queue(None, False)
    return True
def make_context(self, node, state):
    """Build a per-process context for *state* under *node*."""
    # This function exists to avoid importing GlobalContext in state.py
    return GlobalContext(self, node, state.gstate).get_context(state.pid)