def do(self):
        start_basic_blocks = []
        # AtomicBasicBlock -> BlockingCounter
        self.values = {}
        # All Atomic basic blocks have a start value
        for abb in self.system_graph.abbs:
            # The default is that interrupts are allowed
            self.values[abb] = self.BlockingCounter(0, 0)
            if "Interrupts" in abb.syscall_type.name:
                start_basic_blocks.append(abb)

        fixpoint = FixpointIteration(start_basic_blocks)
        fixpoint.do(self.block_functor)

        for abb in self.system_graph.abbs:
            states_incoming = [
                self.values[x] for x in abb.get_incoming_nodes(E.task_level)
            ]
            if len(states_incoming) > 0:
                first = states_incoming[0]
                for n in states_incoming:
                    if not n == first:
                        panic("""At %s the interrupt blocking level is
                        ambiguous for the interrupts level. (%s != %s)""" % \
                              (abb.path(), first, n))
            All, OS = self.values[abb]
            if not abb.isA(S.computation):
                All = 1
                OS = 1
            abb.interrupt_block_all = All > 0
            abb.interrupt_block_os = OS > 0
    def do(self):
        start_basic_blocks = []
        # AtomicBasicBlock -> BlockingCounter
        self.values = {}
        # All Atomic basic blocks have a start value
        for abb in self.system_graph.get_abbs():
            # The default is that interrupts are allowed
            self.values[abb] = self.BlockingCounter(0, 0)
            if "Interrupts" in abb.syscall_type.name:
                start_basic_blocks.append(abb)

        fixpoint = FixpointIteration(start_basic_blocks)
        fixpoint.do(self.block_functor)

        for abb in self.system_graph.get_abbs():
            states_incoming = [self.values[x] for x in abb.get_incoming_nodes(E.task_level)]
            if len(states_incoming) > 0:
                first = states_incoming[0]
                for n in states_incoming:
                    if not n == first:
                        panic("""At %s the interrupt blocking level is
                        ambiguous for the All Interrupts level. This is
                        forbidden by the OSEK spec (%s != %s)""" % \
                              (abb.path(), first, n))
            All, OS = self.values[abb]
            if not abb.isA(S.computation):
                All = 1
                OS  = 1
            abb.interrupt_block_all = All > 0
            abb.interrupt_block_os  = OS > 0 
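
For context, here is a minimal, self-contained sketch of the worklist fixpoint this pass relies on. BlockingCounter and FixpointIteration are not shown in the example, so the two-counter shape and the propagation rule below are assumptions for illustration only.

from collections import namedtuple, deque

# Assumed shape: nesting depth for "block all interrupts" and "block OS interrupts".
BlockingCounter = namedtuple("BlockingCounter", ["all_irq", "os_irq"])

def fixpoint(start_nodes, successors, transfer, values):
    """Generic worklist fixpoint: whenever a node's value changes,
    its successors are re-queued until the whole map stabilises."""
    worklist = deque(start_nodes)
    while worklist:
        node = worklist.popleft()
        for succ in successors(node):
            new = transfer(node, succ, values[node])
            if new != values[succ]:
                values[succ] = new
                worklist.append(succ)
    return values

# Toy usage: start at the block that disables interrupts, analogous to the
# "Interrupts" syscall blocks collected into start_basic_blocks above.
graph = {"entry": ["disable"], "disable": ["body"], "body": []}
values = {n: BlockingCounter(0, 0) for n in graph}
def transfer(node, succ, incoming):
    bump = 1 if node == "disable" else 0
    return BlockingCounter(incoming.all_irq + bump, incoming.os_irq)
fixpoint(["disable"], graph.__getitem__, transfer, values)
# values["body"] is now BlockingCounter(all_irq=1, os_irq=0)
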
Example #3
    def do(self):
        self.removed_edges = []
        edge_count_in_ssf = 0
        edge_count_in_sse = 0

        for source_abb in self.system_graph.abbs:
            in_state_flow = set(self.edges_in_state_flow(source_abb))
            in_sse = set(self.edges_in_sse(source_abb))
            edge_count_in_sse += len(in_sse)
            edge_count_in_ssf += len(in_state_flow)

            # Edges found by both analyses are always good
            for target_abb in in_state_flow & in_sse:
                self.__add_gcfg_edge(source_abb, target_abb)

            more_in_state_flow = in_state_flow - in_sse
            more_in_sse = in_sse - in_state_flow

            for target_abb in more_in_state_flow:
                # Found by the dataflow analysis but not by the SSE

                if self.sse:
                    edge = Edge(source_abb, target_abb)
                    logging.debug(" + remove edge from %s -> %s", source_abb,
                                  target_abb)
                    self.removed_edges.append(edge)
                else:
                    # If no symbolic execution was done, we keep the
                    # edges found by the state-flow analysis
                    self.__add_gcfg_edge(source_abb, target_abb)

            for target_abb in more_in_sse:
                # Returns from or to interrupts are not part of the system_level flow
                if source_abb.function.subtask and \
                   (bool(source_abb.subtask.conf.is_isr) \
                    ^ bool(target_abb.subtask.conf.is_isr)):
                    assert False, "Invalid application/ISR transition"

                # There should not be more edges in the symbolic
                # execution, besides a few exceptions
                if self.state_flow:
                    panic(
                        "SSE has found more edges than RunningTask (%s -> %s)",
                        source_abb.path(), target_abb.path())
                else:
                    # If no state_flow analysis is done we use the
                    # edges built by symbolic execution
                    self.__add_gcfg_edge(source_abb, target_abb)

        self.edge_count_in_ssf = edge_count_in_ssf
        self.edge_count_in_sse = edge_count_in_sse

        # GCFG checksum
        chksum = hashlib.md5()
        for x in sorted(self.__hash_edges):
            chksum.update(x.encode("ascii"))
        logging.debug("gcfg hash: %s", chksum.hexdigest())

        logging.info(" + removed %d edges", len(self.removed_edges))
Example #4
    def do_assertion(self, block, assertion):
        if assertion.isA(AssertionType.TaskIsSuspended):
            task = assertion.get_arguments()[0]
            cond = "scheduler_.isSuspended(%s)" % self.task_desc(task)
        elif assertion.isA(AssertionType.TaskIsReady):
            task = assertion.get_arguments()[0]
            cond = "!scheduler_.isSuspended(%s)" % self.task_desc(task)
        elif assertion.isA(AssertionType.TaskWasKickoffed):
            task = assertion.get_arguments()[0]
            cond = "%s.tcb.is_running()" % self.task_desc(task)
        elif assertion.isA(AssertionType.TaskWasNotKickoffed):
            task = assertion.get_arguments()[0]
            cond = "!(%s.tcb.is_running())" % self.task_desc(task)
        else:
            panic("Unsupported assert type %s in %s", assertion, block)

        block.add(Statement("color_assert(%s, COLOR_ASSERT_SYSTEM_STATE)" % \
                            cond))
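
The if/elif ladder maps each assertion type to a C++ condition template. The same mapping can also be written as a lookup table; the table and helper below are a hypothetical restatement (plain strings stand in for the AssertionType members), not part of the original generator.

# Condition template per assertion type; "{task}" is replaced with the
# generated task descriptor expression.
CONDITION_TEMPLATES = {
    "TaskIsSuspended":     "scheduler_.isSuspended({task})",
    "TaskIsReady":         "!scheduler_.isSuspended({task})",
    "TaskWasKickoffed":    "{task}.tcb.is_running()",
    "TaskWasNotKickoffed": "!({task}.tcb.is_running())",
}

def assertion_condition(assertion_name, task_desc):
    template = CONDITION_TEMPLATES.get(assertion_name)
    if template is None:
        raise ValueError("Unsupported assert type %s" % assertion_name)
    return template.format(task=task_desc)

# assertion_condition("TaskIsReady", "task_handler")
#   -> "!scheduler_.isSuspended(task_handler)"
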
Example #5
    def do_assertion(self, block, assertion):
        if assertion.isA(AssertionType.TaskIsSuspended):
            task = assertion.get_arguments()[0]
            cond = "scheduler_.isSuspended(%s)" % self.task_desc(task)
        elif assertion.isA(AssertionType.TaskIsReady):
            task = assertion.get_arguments()[0]
            cond = "!scheduler_.isSuspended(%s)" % self.task_desc(task)
        elif assertion.isA(AssertionType.TaskWasKickoffed):
            task = assertion.get_arguments()[0]
            cond = "%s.tcb.is_running()" % self.task_desc(task)
        elif assertion.isA(AssertionType.TaskWasNotKickoffed):
            task = assertion.get_arguments()[0]
            cond =  "!(%s.tcb.is_running())" % self.task_desc(task)
        else:
            panic("Unsupported assert type %s in %s", assertion, block)

        block.add(Statement("color_assert(%s, COLOR_ASSERT_SYSTEM_STATE)" % \
                            cond))
Example #6
    def __do_assertion(self, assertion):
        task = assertion.get_arguments()[0]

        if assertion.isA(AssertionType.TaskIsSuspended):
            cond = "scheduler_.isReady(%s)" % self.task_desc(task)
            return (True, cond)
        elif assertion.isA(AssertionType.TaskIsReady):
            cond = "scheduler_.isReady(%s)" % self.task_desc(task)
            return (False, cond)
        elif assertion.isA(AssertionType.TaskWasKickoffed):
            cond = "%s.tcb.is_running()" % self.task_desc(task)
            return (False, cond)
        elif assertion.isA(AssertionType.TaskWasNotKickoffed):
            cond = "%s.tcb.is_running()" % self.task_desc(task)
            return (True, cond)
        elif assertion.isA(AssertionType.EventsCheck):
            event_cleared = assertion.get_arguments()[1]
            event_set = assertion.get_arguments()[2]
            var = "event_mask_%s" % task.name
            prepare = Statement(
                "uint32_t {var} = scheduler_.GetEvent_impl({task});".format(
                    task=self.task_desc(task), var=var))
            conds = []
            if event_cleared:
                # (var & mask) must be zero: none of the cleared events may be set
                mask = Event.combine_event_masks(event_cleared)
                conds.append((True, "({var} & {mask})".format(var=var,
                                                              mask=mask)))
            if event_set:
                # ((var & mask) ^ mask) must be zero: all of the set events must be present
                mask = Event.combine_event_masks(event_set)
                conds.append(
                    (True, "(({var} & {mask}) ^ {mask})".format(var=var,
                                                                mask=mask)))

            return (conds, prepare)
        elif assertion.isA(AssertionType.EventsCleared):
            cond = "(scheduler_.GetEvent_impt({task}) & {mask}) == 0".format(
                task=self.task_desc(event_list[0].subtask),
                mask=Event.combine_event_masks(event_list))
            return (True, cond)
        else:
            panic("Unsupported assert type %s", assertion)
Example #7
    def do(self):
        self.removed_edges = []
        edge_count_in_ssf = 0
        edge_count_in_sse = 0

        for source_abb in self.system_graph.get_abbs():
            in_state_flow = set(self.edges_in_state_flow(source_abb))
            in_sse = set(self.edges_in_sse(source_abb))
            edge_count_in_sse += len(in_sse)
            edge_count_in_ssf += len(in_state_flow)

            # Edges found by both analyses are always good
            for target_abb in in_state_flow & in_sse:
                source_abb.add_cfg_edge(target_abb, E.system_level)

            more_in_state_flow = in_state_flow - in_sse
            more_in_sse = in_sse - in_state_flow

            for target_abb in more_in_state_flow:
                # Found by the dataflow analysis but not by the SSE

                if self.sse:
                    edge = Edge(source_abb, target_abb)
                    logging.debug(" + remove edge from %s -> %s", source_abb,
                                  target_abb)
                    self.removed_edges.append(edge)
                else:
                    # If no symbolic execution was done, we keep the
                    # edges found by the state-flow analysis
                    source_abb.add_cfg_edge(target_abb, E.system_level)

            for target_abb in more_in_sse:
                # Returns from or to interrupts are not part of the system_level flow
                if source_abb.function.subtask and \
                   (bool(source_abb.function.subtask.is_isr) \
                    ^ bool(target_abb.function.subtask.is_isr)):
                    assert False, "Invalid application/ISR transition"

                # There should not be more edges in the symbolic
                # execution, besides a few exceptions
                if self.state_flow:
                    panic(
                        "SSE has found more edges than RunningTask (%s -> %s)",
                        source_abb.path(), target_abb.path())
                else:
                    # If no state_flow analysis is done we use the
                    # edges built by symbolic execution
                    source_abb.add_cfg_edge(target_abb, E.system_level)
        logging.info(" + removed %d edges", len(self.removed_edges))

        ################################################################
        # Statistics
        ################################################################

        # Count the number of ABBs in the system the analysis works on
        is_relevant = self.system_graph.passes[
            "AddFunctionCalls"].is_relevant_function
        abbs = [
            x for x in self.system_graph.get_abbs() if is_relevant(x.function)
        ]
        self.stats.add_data(self, "abb-count", len(abbs), scalar=True)

        # Record Edge Count
        self.stats.add_data(self, "sse-edges", edge_count_in_sse, scalar=True)
        self.stats.add_data(self, "ssf-edges", edge_count_in_ssf, scalar=True)

        # Record the number of (possible) preemption edges from lower- to higher-priority subtasks
        static_edges = 0
        dynamic_edges = 0

        for abb1 in abbs:
            # Count only application blocks
            if not abb1.subtask or not abb1.subtask.is_real_thread():
                continue
            for abb2 in abbs:
                # Count only application blocks
                if not abb2.subtask or not abb2.subtask.is_real_thread():
                    continue

                # We would add a preemption edge from abb2 to abb1
                # if there is no natural (task-level) edge ABB2 -> ABB1.
                if abb1 in abb2.get_outgoing_nodes(E.task_level):
                    static_edges += 1
                    dynamic_edges += 1
                    continue

                # Preemption edges can only come from subtask to subtask
                if abb1.subtask == abb2.subtask:
                    continue

                assert abb1.dynamic_priority >= abb1.subtask.static_priority
                assert abb2.dynamic_priority >= abb2.subtask.static_priority

                if abb1.dynamic_priority > abb2.dynamic_priority:
                    dynamic_edges += 1

                if abb1.subtask.static_priority > abb2.subtask.static_priority:
                    static_edges += 1

        assert dynamic_edges <= static_edges, \
            "The number of dynamic edges should never exceed the number of static edges"
        self.stats.add_data(self,
                            "inference-edges-static",
                            static_edges,
                            scalar=True)
        self.stats.add_data(self,
                            "inference-edges-dynamic",
                            dynamic_edges,
                            scalar=True)

        # Record the number of subtasks that can be reached
        subtask_count = 0
        for subtask in self.system_graph.get_subtasks():
            if subtask.is_real_thread() and \
               len(subtask.entry_abb.get_incoming_edges(E.system_level)) > 0:
                subtask_count += 1
        self.system_graph.stats.add_data(self,
                                         "subtask-count",
                                         subtask_count,
                                         scalar=True)

        # ISR Count
        self.stats.add_data(self,
                            "isr-count",
                            len(self.system_graph.isrs +
                                self.system_graph.alarms),
                            scalar=True)

        # Describe the removed edges
        self.stats.add_data(self, "removed-edges", [])  # empty List
        for edge in self.removed_edges:
            self.stats.add_data(
                self, "removed-edges",
                (edge.source.syscall_type.name, edge.target.syscall_type.name))
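
The statistics block counts potential preemption edges between application blocks twice, once by static subtask priority and once by dynamic (possibly ceiling-raised) block priority. Below is a condensed sketch of that counting rule over plain data; the Block tuple is a hypothetical stand-in for an ABB.

from collections import namedtuple
from itertools import product

Block = namedtuple("Block", ["subtask", "static_prio", "dynamic_prio"])

def preemption_edge_counts(blocks):
    """Count ordered pairs (hi, lo) of blocks from different subtasks
    where hi outranks lo, once per static and once per dynamic priority."""
    static_edges = dynamic_edges = 0
    for hi, lo in product(blocks, repeat=2):
        if hi.subtask == lo.subtask:          # preemption only across subtasks
            continue
        if hi.dynamic_prio > lo.dynamic_prio:
            dynamic_edges += 1
        if hi.static_prio > lo.static_prio:
            static_edges += 1
    return static_edges, dynamic_edges

blocks = [Block("A", 1, 3), Block("A", 1, 1), Block("B", 2, 2)]
print(preemption_edge_counts(blocks))   # (2, 2)
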
Example #8
    def do_SystemCall(self, block, before, system_calls):
        if block.syscall_type in system_calls:
            after = system_calls[block.syscall_type](block, before)
            return after
        else:
            panic("BlockType %s is not supported yet" % block.syscall_type)
Example #9
    pass_manager.register_analysis(DominanceAnalysis())
    pass_manager.register_analysis(CFGRegions())
    pass_manager.register_analysis(GenerateAssertionsPass())

    # Statistics modules
    pass_manager.register_analysis(GlobalControlFlowMetric("%s/%s_metric" % (options.prefix, options.name)))


    if options.arch == "i386":
        arch_rules = X86Arch()
    elif options.arch == "ARM":
        arch_rules = ARMArch()
    elif options.arch == "posix":
        arch_rules = PosixArch()
    else:
        panic("Unknown --arch=%s", options.arch)

    if options.unencoded:
        os_rules = UnencodedSystem()
    else:
        os_rules = EncodedSystem()

    if options.specialize_systemcalls:
        # Only when we want to specialize the system calls, run the
        # System-Level analyses
        pass_manager.enqueue_analysis("SymbolicSystemExecution")
        pass_manager.enqueue_analysis("SystemStateFlow")

        global_cfg = pass_manager.enqueue_analysis("ConstructGlobalCFG")
        global_abb_information = global_cfg.global_abb_information_provider()
        logging.info("Global control flow information is provided by %s",
Example #10
    pass_manager.register_analysis(CFGRegions())
    pass_manager.register_analysis(GenerateAssertionsPass())

    # Statistics modules
    pass_manager.register_analysis(
        GlobalControlFlowMetric("%s/%s_metric" %
                                (options.prefix, options.name)))

    if options.arch == "i386":
        arch_rules = X86Arch()
    elif options.arch == "ARM":
        arch_rules = ARMArch()
    elif options.arch == "posix":
        arch_rules = PosixArch()
    else:
        panic("Unknown --arch=%s", options.arch)

    if options.unencoded:
        os_rules = UnencodedSystem()
    else:
        os_rules = EncodedSystem()

    if options.specialize_systemcalls:
        # Only when we want to specialize the system calls, run the
        # System-Level analyses
        pass_manager.enqueue_analysis("SymbolicSystemExecution")
        pass_manager.enqueue_analysis("SystemStateFlow")

        global_cfg = pass_manager.enqueue_analysis("ConstructGlobalCFG")
        global_abb_information = global_cfg.global_abb_information_provider()
        logging.info("Global control flow information is provided by %s",
Example #11
    (options, args) = parser.parse_args()

    if len(args) > 0:
        parser.print_help()
        sys.exit(-1)

    setup_logging(options.verbose)
    graph = SystemGraph(options.code_options)
    graph.add_system_objects()
    pass_manager = graph
    pass_manager.read_verify_script(options.verify)

    if options.system_desc:
        if options.system_desc.lower().endswith(".xml"):
            panic("RTSC XMLs no longer supported")
        elif options.system_desc.lower().endswith(".oil"):
            read_oil = OILReadPass(options.system_desc)
            pass_manager.register_analysis(read_oil)
        else:
            print("No valid system description file")
            parser.print_help()
            sys.exit(-1)
    else:
        print("No system description file passed")
        parser.print_help()
        sys.exit(-1)

    if options.llfiles and len(options.llfiles) > 0:
        mergedoutfile = open(options.mergedoutput, 'w')
        if not mergedoutfile: