def dfs(block_functor, take_edge_functor, start_abbs):
    """Performs a depth first search.

    It first executes block_functor on a block, then collects all
    outgoing edges that satisfy the take_edge_functor.  All child
    blocks are visited in the same manner.  No block is visited more
    than once.  The DFS starts with the list of start_abbs blocks.

    The block_functor takes an _edge_ that leads to a _block_.
    """
    visited = set()
    working_stack = stack()
    # NOTE(review): with list-style append/pop the LAST entry of
    # start_abbs is popped first -- confirm stack.pop() semantics
    # against the original "first in, first popped" comment.
    for abb in start_abbs:
        working_stack.append((None, abb))
        # BUGFIX: mark the start blocks as visited immediately.
        # Previously they were never added to `visited`, so a start
        # block that was also reachable via an edge was visited twice,
        # contradicting the documented contract above.
        visited.add(abb)

    while len(working_stack) > 0:
        leading_edge, current_block = working_stack.pop()

        # Call the block_functor on the freshly popped block
        block_functor(leading_edge, current_block)

        # Sort children by abb_id for a deterministic visit order;
        # reversed() compensates for the LIFO stack so the smallest
        # abb_id is popped first.
        out_edges = sorted(current_block.outgoing_edges,
                           key=lambda e: e.target.abb_id)
        for edge in reversed(out_edges):
            child = edge.target
            # Already visited or already in the working_stack
            if child in visited:
                continue
            # Check if the edge satisfies the caller's condition
            if not take_edge_functor(edge):
                continue
            # PUSH item onto stack; mark visited at push time so a
            # block is never enqueued twice
            working_stack.push((edge, child))
            visited.add(child)
def pass_graph(self):
    """Build a GraphObjectContainer showing the pass dependency graph:
    one sub-container per pass, one edge per requires() relation."""
    graph = GraphObjectContainer("PassManager", 'black', root = True)
    worklist = stack()
    worklist.extend(self.analysis_pipe)
    worklist.extend([x for x in self.passes.values() if x.valid])
    nodes = {}
    dep_edges = []
    while not worklist.isEmpty():
        current = worklist.pop()
        # Each pass is expanded only once
        if current in nodes:
            continue
        nodes[current] = GraphObjectContainer(current.name(), 'black',
                                              data = current.__doc__)
        graph.subobjects.append(nodes[current])
        for required_name in current.requires():
            # requires() returns pass names as strings
            required = self.passes[required_name]
            worklist.push(required)
            dep_edges.append((current, required))
    # Edges point from the required pass to the requiring pass
    for requiring, required in dep_edges:
        graph.edges.append(Edge(nodes[required], nodes[requiring]))
    return graph
def dfs(block_functor, take_edge_functor, start_abbs):
    """Performs a depth first search.

    It first executes block_functor on a block, then collects all
    outgoing edges that satisfy the take_edge_functor.  All child
    blocks are visited in the same manner.  No block is visited more
    than once.  The DFS starts with the list of start_abbs blocks.

    The block_functor takes an _edge_ that leads to a _block_.
    """
    visited = set()
    working_stack = stack()
    # NOTE(review): with list-style append/pop the LAST entry of
    # start_abbs is popped first -- confirm stack.pop() semantics.
    for abb in start_abbs:
        working_stack.append((None, abb))
        # BUGFIX: mark start blocks as visited right away.  They were
        # previously never added to `visited`, so a start block that
        # was also reachable through an edge got visited a second
        # time, violating the documented contract.
        visited.add(abb)

    while working_stack:
        leading_edge, current_block = working_stack.pop()

        # Call the block_functor
        block_functor(leading_edge, current_block)

        # Deterministic child order; reversed() makes the smallest
        # abb_id pop first from the LIFO stack.
        out_edges = sorted(current_block.outgoing_edges,
                           key=lambda e: e.target.abb_id)
        for edge in reversed(out_edges):
            child = edge.target
            # Already visited or in working_stack
            if child in visited:
                continue
            # Check if edge satisfies condition
            if not take_edge_functor(edge):
                continue
            # PUSH item onto stack, marking it visited at push time
            working_stack.push((edge, child))
            visited.add(child)
def __slice_subtree(self, tree):
    """Slices all entries of the tree dict that are below self.root.

    Returns a new dict mapping every node reachable from self.root to
    a fresh list of its children.
    """
    subtree = {}
    worklist = stack()
    worklist.push(self.root)
    while not worklist.isEmpty():
        node = worklist.pop()
        children = tree[node]
        # Copy the child list so the result is independent of `tree`
        subtree[node] = list(children)
        worklist.extend(children)
    return subtree
def find_region(self, start, end):
    """Collect every node reachable from `start` along function_level
    edges; `start` and `end` are always part of the result."""
    region = {start, end}
    pending = stack([start])
    while pending:
        current = pending.pop()
        region.add(current)
        for successor in current.get_outgoing_nodes(E.function_level):
            # Enqueue only nodes we have not collected yet
            if successor not in region:
                pending.push(successor)
    return region
def copy(self):
    """Return a copy of this system state (per-field shallow copies)."""
    # Account for the copy in the global statistics counter
    SystemState.copy_count += 1

    clone = self.new()
    clone.current_abb = self.current_abb
    clone.states = list(self.states)
    clone.continuations = list(self.continuations)
    # Call stacks are copied one by one, wrapped in fresh stack objects
    for idx in range(len(self.states)):
        clone.call_stack[idx] = stack(self.call_stack[idx])
    return clone
def copy(self):
    """Return a copy of this system state."""
    # Increase the copy counter
    SystemState.copy_count += 1

    clone = self.new()
    clone.current_abb = self.current_abb
    clone.states = list(self.states)
    for subtask in self.get_unordered_subtasks():
        sid = subtask.subtask_id
        clone.continuations[sid] = set(self.continuations[sid])
        # The call stack is only used by SymbolicSystemExecution
        if self.call_stack[sid] is not None:
            clone.call_stack[sid] = stack(self.call_stack[sid])
    return clone
def copy(self):
    """Duplicate this system state; bumps the global copy counter."""
    SystemState.copy_count += 1

    duplicate = self.new()
    duplicate.current_abb = self.current_abb
    duplicate.states = list(self.states)
    for subtask in self.get_unordered_subtasks():
        subtask_id = subtask.subtask_id
        duplicate.continuations[subtask_id] = set(
            self.continuations[subtask_id])
        original_call_stack = self.call_stack[subtask_id]
        # Call stacks exist only when SymbolicSystemExecution is used
        if original_call_stack is not None:
            duplicate.call_stack[subtask_id] = stack(original_call_stack)
    return duplicate
def syscall_dominance_regions(self):
    """Returns all subtrees of the syscall_immdom_tree"""
    syscall_tree = self.syscall_dominator_tree()
    regions = []
    # Worklist of (subtree root, tree dict) pairs
    pending = stack()
    pending.push((syscall_tree.root, syscall_tree.tree))
    while not pending.isEmpty():
        subtree_root, tree = pending.pop()
        # Every child of the current root spawns a region of its own
        for child in tree[subtree_root]:
            pending.push((child, tree))
        # The (root, tree) pair itself describes one dominance region
        regions.append(DominatorTree(subtree_root, tree, self.edge_levels))
    return regions
def copy(self):
    """Return a copy of this system state."""
    # Increase the copy counter
    SystemState.copy_count += 1

    clone = self.new()
    clone.current_abb = self.current_abb

    def transfer(dst, src):
        # Element-wise copy: keeps dst's container identity intact
        for idx, value in enumerate(src):
            dst[idx] = value

    transfer(clone.states, self.states)
    transfer(clone.continuations, self.continuations)
    transfer(clone.events, self.events)

    # Call stacks get fresh stack objects, one per subtask slot
    for subtask_id in range(len(self.states)):
        clone.call_stack[subtask_id] = stack(self.call_stack[subtask_id])
    return clone
def transform_isr_transitions(self):
    """Purge state transitions crossing from application level into
    interrupt level, and reconnect each application state directly to
    the states reached after the ISR's iret."""
    # Special casing of sporadic events.  What happens here: in order
    # to separate interrupt space from application space, we purge all
    # state transitions from the application level to the interrupt
    # level.  We connect the interrupt handler continuation with the
    # block that activates the interrupt.
    def is_isr_state(state):
        # A state is at interrupt level iff its current ABB belongs to
        # a subtask marked as ISR.
        if not state.current_abb.function.subtask:
            return False
        return state.current_abb.function.subtask.is_isr
    del_edges = []
    add_edges = []
    for app_level in [x for x in self.states if not is_isr_state(x)]:
        for isr_level in [ x for x in app_level.get_outgoing_nodes(StateTransition)
                           if is_isr_state(x) ]:
            # Remove the Interrupt activation Edge
            del_edges.append((app_level, isr_level))
            # Now we have to find all iret states that can follow.
            # We do this in depth-first-search.
            # NOTE(review): `exits` is never used and there is no
            # visited set -- a cycle among ISR states would make this
            # loop run forever.  Presumably ISR state transitions are
            # acyclic; confirm.
            ws = stack([isr_level])
            exits = set()
            while not ws.isEmpty():
                iret = ws.pop()
                if iret.current_abb.isA(S.iret):
                    # Interrupt return reached: schedule redirection of
                    # its return points to the application-level state.
                    for retpoint in iret.get_outgoing_nodes(StateTransition):
                        del_edges.append((iret, retpoint))
                        add_edges.append((app_level, retpoint))
                else:
                    # Not an IRET -> keep following successors
                    ws.extend(iret.get_outgoing_nodes(StateTransition))
    # Apply the collected changes only after the traversal, so the
    # graph is not mutated while being iterated.
    for source, target in del_edges:
        x = source.remove_cfg_edge(target, StateTransition)
    for source, target in add_edges:
        source.add_cfg_edge(target, StateTransition)
def transform_isr_transitions(self):
    """Purge state transitions that cross from application level into
    interrupt level and reconnect the application state directly to
    the states following the ISR's iret."""
    # Special casing of sporadic events.  What happens here: in order
    # to separate interrupt space from application space, we purge all
    # state transitions from the application level to the interrupt
    # level.  We connect the interrupt handler continuation with the
    # block that activates the interrupt.
    def is_isr_state(state):
        # Interrupt level iff the current ABB's subtask is an ISR
        if not state.current_abb.function.subtask:
            return False
        return state.current_abb.function.subtask.is_isr
    del_edges = []
    add_edges = []
    for app_level in [x for x in self.states if not is_isr_state(x)]:
        for isr_level in [x for x in app_level.get_outgoing_nodes(StateTransition)
                          if is_isr_state(x)]:
            # Remove the Interrupt activation Edge
            del_edges.append((app_level, isr_level))
            # Now we have to find all iret states that can follow.
            # We do this in depth-first-search.
            # NOTE(review): `exits` is unused and no visited set is
            # kept -- a cycle among ISR states would not terminate.
            # Presumably ISR state transitions are acyclic; confirm.
            ws = stack([isr_level])
            exits = set()
            while not ws.isEmpty():
                iret = ws.pop()
                if iret.current_abb.isA(S.iret):
                    # IRET reached: redirect its return points to the
                    # application-level state.
                    for retpoint in iret.get_outgoing_nodes(StateTransition):
                        del_edges.append((iret, retpoint))
                        add_edges.append((app_level, retpoint))
                else:
                    # Not an IRET -> keep following successors
                    ws.extend(iret.get_outgoing_nodes(StateTransition))
    # Apply all edge changes after the traversal, so the graph is not
    # mutated while being iterated.
    for source, target in del_edges:
        x = source.remove_cfg_edge(target, StateTransition)
    for source, target in add_edges:
        source.add_cfg_edge(target, StateTransition)
def do(self):
    """Run the symbolic system execution pass.

    Enumerates all reachable system states by a worklist algorithm,
    starting from the state right before StartOS, applying the system
    call semantic matching each state's current ABB.  Afterwards the
    discovered states are grouped by ABB and precision statistics are
    recorded.
    """
    old_copy_count = SystemState.copy_count
    self.running_task = self.get_analysis(CurrentRunningSubtask.name())
    # Instantiate a new system call semantic
    self.system_call_semantic = SystemCallSemantic(self.system_graph, self.running_task)
    scc = self.system_call_semantic
    # Dispatch table: syscall type -> handler advancing the state
    self.transitions = {S.StartOS : scc.do_StartOS,
                        S.ActivateTask : scc.do_ActivateTask,
                        S.TerminateTask : scc.do_TerminateTask,
                        S.ChainTask : scc.do_ChainTask,
                        S.computation : self.do_computation_with_sporadic_events,
                        S.kickoff : scc.do_computation, # NO ISRS
                        S.SetRelAlarm : scc.do_computation, # ignore
                        S.CancelAlarm : scc.do_computation, # ignore
                        # Done in DynamicPriorityAnalysis
                        S.GetResource : scc.do_computation,
                        S.ReleaseResource : scc.do_computation,
                        # Done in InterruptControlAnalysis
                        S.DisableAllInterrupts : scc.do_computation,
                        S.EnableAllInterrupts : scc.do_computation,
                        S.SuspendAllInterrupts : scc.do_computation,
                        S.ResumeAllInterrupts : scc.do_computation,
                        S.SuspendOSInterrupts : scc.do_computation,
                        S.ResumeOSInterrupts : scc.do_computation,
                        S.Idle : scc.do_Idle,
                        S.iret : scc.do_TerminateTask}
    # Instantiate the big dict (State -> State); it deduplicates equal
    # system states to a single instance/object.
    self.states = {}
    entry_abb = self.system_graph.functions["StartOS"].entry_abb
    # Initial state: right before StartOS runs, with empty call stacks
    before_StartOS = PreciseSystemState(self.system_graph)
    before_StartOS.current_abb = entry_abb
    for subtask in before_StartOS.get_unordered_subtasks():
        before_StartOS.call_stack[subtask.subtask_id] = stack()
    before_StartOS.frozen = True
    self.working_stack = stack()
    self.working_stack.push(before_StartOS)
    state_count = 0
    ignored_count = 0
    while not self.working_stack.isEmpty():
        # Current is a system state
        current = self.working_stack.pop()
        # State was already marked as done -> skip it
        if current in self.states:
            ignored_count += 1
            continue
        # Mark the current state as done
        self.states[current] = current
        state_count += 1
        if (state_count % 10000) == 0 and state_count > 0:
            logging.info(" + already %d states (%d on stack, %d ignored)",
                         state_count, len(self.working_stack), ignored_count)
        # state_functor advances the state; presumably it dispatches
        # through self.transitions (defined elsewhere -- confirm).
        self.state_functor(current)
    logging.info(" + symbolic execution done")
    self.transform_isr_transitions()
    # Group States by ABB
    self.states_by_abb = group_by(self.states, "current_abb")
    logging.info(" + %d system states", len(self.states))
    # Set analysis to valid, since only statistics follow from here.
    self.valid = True

    ################################################################
    # Statistics
    ################################################################

    # Record the number of (copied) system states
    self.system_graph.stats.add_data(self, "system-states", len(self.states),
                                     scalar = True)
    self.system_graph.stats.add_data(self, "copied-system-states",
                                     SystemState.copy_count - old_copy_count,
                                     scalar = True)
    logging.info(" + %d system states copied", SystemState.copy_count - old_copy_count)

    # Record the precision indicators for each abb.
    # Count the number of ABBs in the system the analysis works on.
    is_relevant = self.system_graph.passes["AddFunctionCalls"].is_relevant_function
    abbs = [x for x in self.system_graph.get_abbs() if is_relevant(x.function)]
    precisions = []
    for abb in abbs:
        # Only real threads contribute to the precision metric
        if not abb.function.subtask or not abb.function.subtask.is_real_thread():
            continue
        x = self.for_abb(abb)
        if x:
            precision = x.state_before.precision()
            if not abb.isA(S.StartOS):
                self.stats.add_data(abb, "sse-precision", precision, scalar=True)
            precisions.append(precision)
        else:
            # State will not be visited, this is for sure
            precisions.append(1.0)
    self.stats.add_data(self, "precision", precisions, scalar = True)
def do(self):
    """Run the symbolic system execution pass.

    Enumerates all reachable system states via a worklist, starting
    from the state right before StartOS and applying the system call
    semantic matching each state's current ABB.  The reached states
    are then grouped by ABB and precision statistics are recorded.
    """
    old_copy_count = SystemState.copy_count
    self.running_task = self.get_analysis(CurrentRunningSubtask.name())
    # Instantiate a new system call semantic
    self.system_call_semantic = SystemCallSemantic(self.system_graph,
                                                   self.running_task)
    scc = self.system_call_semantic
    # Dispatch table: syscall type -> handler advancing the state
    self.transitions = {
        S.StartOS: scc.do_StartOS,
        S.ActivateTask: scc.do_ActivateTask,
        S.TerminateTask: scc.do_TerminateTask,
        S.ChainTask: scc.do_ChainTask,
        S.computation: self.do_computation_with_sporadic_events,
        S.kickoff: scc.do_computation,  # NO ISRS
        S.SetRelAlarm: scc.do_computation,  # ignore
        S.CancelAlarm: scc.do_computation,  # ignore
        # Done in DynamicPriorityAnalysis
        S.GetResource: scc.do_computation,
        S.ReleaseResource: scc.do_computation,
        # Done in InterruptControlAnalysis
        S.DisableAllInterrupts: scc.do_computation,
        S.EnableAllInterrupts: scc.do_computation,
        S.SuspendAllInterrupts: scc.do_computation,
        S.ResumeAllInterrupts: scc.do_computation,
        S.SuspendOSInterrupts: scc.do_computation,
        S.ResumeOSInterrupts: scc.do_computation,
        S.Idle: scc.do_Idle,
        S.iret: scc.do_TerminateTask
    }
    # Instantiate the big dict (State -> State); it deduplicates equal
    # system states to a single instance/object.
    self.states = {}
    entry_abb = self.system_graph.functions["StartOS"].entry_abb
    # Initial state: right before StartOS runs, with empty call stacks
    before_StartOS = PreciseSystemState(self.system_graph)
    before_StartOS.current_abb = entry_abb
    for subtask in before_StartOS.get_unordered_subtasks():
        before_StartOS.call_stack[subtask.subtask_id] = stack()
    before_StartOS.frozen = True
    self.working_stack = stack()
    self.working_stack.push(before_StartOS)
    state_count = 0
    ignored_count = 0
    while not self.working_stack.isEmpty():
        # Current is a system state
        current = self.working_stack.pop()
        # State was already marked as done -> skip it
        if current in self.states:
            ignored_count += 1
            continue
        # Mark the current state as done
        self.states[current] = current
        state_count += 1
        if (state_count % 10000) == 0 and state_count > 0:
            logging.info(" + already %d states (%d on stack, %d ignored)",
                         state_count, len(self.working_stack), ignored_count)
        # state_functor advances the state; presumably it dispatches
        # through self.transitions (defined elsewhere -- confirm).
        self.state_functor(current)
    logging.info(" + symbolic execution done")
    self.transform_isr_transitions()
    # Group States by ABB
    self.states_by_abb = group_by(self.states, "current_abb")
    logging.info(" + %d system states", len(self.states))
    # Set analysis to valid, since only statistics follow from here.
    self.valid = True

    ################################################################
    # Statistics
    ################################################################

    # Record the number of (copied) system states
    self.system_graph.stats.add_data(self, "system-states",
                                     len(self.states), scalar=True)
    self.system_graph.stats.add_data(self, "copied-system-states",
                                     SystemState.copy_count - old_copy_count,
                                     scalar=True)
    logging.info(" + %d system states copied",
                 SystemState.copy_count - old_copy_count)

    # Record the precision indicators for each abb.
    # Count the number of ABBs in the system the analysis works on.
    is_relevant = self.system_graph.passes[
        "AddFunctionCalls"].is_relevant_function
    abbs = [
        x for x in self.system_graph.get_abbs() if is_relevant(x.function)
    ]
    precisions = []
    for abb in abbs:
        # Only real threads contribute to the precision metric
        if not abb.function.subtask or not abb.function.subtask.is_real_thread(
        ):
            continue
        x = self.for_abb(abb)
        if x:
            precision = x.state_before.precision()
            if not abb.isA(S.StartOS):
                self.stats.add_data(abb, "sse-precision", precision,
                                    scalar=True)
            precisions.append(precision)
        else:
            # State will not be visited, this is for sure
            precisions.append(1.0)
    self.stats.add_data(self, "precision", precisions, scalar=True)
def do(self):
    """Run the symbolic system execution pass.

    Enumerates all reachable system states via a worklist of
    (predecessor, state) edges, starting from the state right before
    StartOS.  State transitions are recorded as CFG edges, a saved
    copy of the transition graph is kept as SavedStateTransition, and
    the ISR transitions are cut out afterwards to match the SSF GCFG.
    """
    old_copy_count = SystemState.copy_count
    self.running_task = self.get_analysis(CurrentRunningSubtask.name())
    # Instantiate a new system call semantic
    self.system_call_semantic = SystemCallSemantic(self.system_graph, self.running_task)
    scc = self.system_call_semantic
    # Dispatch table: syscall type -> handler advancing the state
    self.transitions = {S.StartOS : scc.do_StartOS,
                        S.ActivateTask : scc.do_ActivateTask,
                        S.TerminateTask : scc.do_TerminateTask,
                        S.ChainTask : scc.do_ChainTask,
                        S.computation : self.do_computation_with_sporadic_events,
                        S.kickoff : scc.do_computation, # NO ISRS
                        S.SetRelAlarm : scc.do_computation, # ignore
                        S.CancelAlarm : scc.do_computation, # ignore
                        S.GetAlarm : scc.do_computation, # ignore
                        S.AdvanceCounter : scc.do_AdvanceCounter,
                        # Done in DynamicPriorityAnalysis
                        S.GetResource : scc.do_computation,
                        S.ReleaseResource : scc.do_computation,
                        # Done in InterruptControlAnalysis
                        S.DisableAllInterrupts : scc.do_computation,
                        S.EnableAllInterrupts : scc.do_computation,
                        S.SuspendAllInterrupts : scc.do_computation,
                        S.ResumeAllInterrupts : scc.do_computation,
                        S.SuspendOSInterrupts : scc.do_computation,
                        S.ResumeOSInterrupts : scc.do_computation,
                        # Dependability Service
                        S.AcquireCheckedObject : scc.do_computation,
                        S.ReleaseCheckedObject : scc.do_computation,
                        # Event Support
                        S.WaitEvent : scc.do_WaitEvent,
                        S.SetEvent : scc.do_SetEvent,
                        S.ClearEvent : scc.do_ClearEvent,
                        # Alarm handler support
                        S.CheckAlarm : scc.do_CheckAlarm,
                        S.Idle : scc.do_Idle,
                        S.iret : scc.do_TerminateTask}
    # Instantiate the big dict (State -> State); it deduplicates equal
    # system states to a single instance/object.
    self.states = {}
    entry_abb = self.system_graph.get(Function, "StartOS").entry_abb
    # Initial state: right before StartOS runs
    before_StartOS = PreciseSystemState(self.system_graph)
    before_StartOS.current_abb = entry_abb
    before_StartOS.frozen = True
    # The working stack consists of possible state edges.  If the
    # first part of the tuple is None, we have the starting
    # condition.
    self.working_stack = stack()
    self.working_stack.push((None, before_StartOS))
    state_count = 0
    ignored_count = 0
    while not self.working_stack.isEmpty():
        # Current is a system state and its state predecessor
        before_current, current = self.working_stack.pop()
        # State was already marked as done!
        if current in self.states:
            # Although it was already done, we have to add the edge
            # noted in the working stack -- against the canonical
            # instance of the state.
            current = self.states[current]
            before_current.add_cfg_edge(current, StateTransition)
            ignored_count += 1
            continue
        elif before_current:
            # Add the state edge
            before_current.add_cfg_edge(current, StateTransition)
        # Mark the current state as done
        self.states[current] = current
        state_count += 1
        if (state_count % 10000) == 0 and state_count > 0:
            logging.info(" + already %d states (%d on stack, %d ignored)",
                         state_count, len(self.working_stack), ignored_count)
        # state_functor advances the state; presumably it dispatches
        # through self.transitions (defined elsewhere -- confirm).
        self.state_functor(current)
    logging.info(" + symbolic execution done")

    # Before we transform the state graph, we copy the original
    # state graph away as SavedStateTransition edges.
    for outgoing_state in self.states:
        for incoming_state in outgoing_state.get_outgoing_nodes(StateTransition):
            outgoing_state.add_cfg_edge(incoming_state, SavedStateTransition)

    # Cut out the isr transitions to match the SSF GCFG
    self.transform_isr_transitions()

    # Group States by ABB
    self.states_by_abb = group_by(self.states, "current_abb")
    logging.info(" + %d system states", len(self.states))

    # Set analysis to valid, since only statistics follow from here.
    self.states = set(self.states.keys())
    self.copied_states = SystemState.copy_count - old_copy_count
    self.valid = True