Code Example #1
File: build_docs.py  Project: aytong/opentitan-1
def generate_testplans():
    for testplan in config["testplan_definitions"]:
        plan = testplan_utils.parse_testplan(SRCTREE_TOP.joinpath(testplan))

        plan_path = config["outdir-generated"].joinpath(testplan + '.testplan')
        plan_path.parent.mkdir(parents=True, exist_ok=True)

        # Write the rendered HTML table; the context manager closes the file.
        with open(str(plan_path), mode='w') as testplan_html:
            testplan_utils.gen_html_testplan_table(plan, testplan_html)
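generate_testplans() leans on module-level state in build_docs.py: config (a dict providing the "testplan_definitions" list and the "outdir-generated" output path), SRCTREE_TOP (a pathlib.Path to the repository root), and the testplan_utils helper module. Below is a minimal sketch of what that state might look like; the path entries are illustrative assumptions, not values taken from the project.

import pathlib

# Hypothetical stand-ins for build_docs.py's module-level state.
SRCTREE_TOP = pathlib.Path(__file__).resolve().parent
config = {
    # Testplan Hjson files, given relative to SRCTREE_TOP (illustrative entry).
    "testplan_definitions": ["hw/ip/uart/data/uart_testplan.hjson"],
    # Output directory; must be a pathlib.Path so joinpath() works as used above.
    "outdir-generated": SRCTREE_TOP / "build" / "docs-generated",
}
# With testplan_utils importable as well, generate_testplans() can then be
# called with no arguments.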
Code Example #2
    def _create_objects(self):
        # Create build and run modes objects
        self.build_modes = Modes.create_modes(BuildModes, self.build_modes)
        self.run_modes = Modes.create_modes(RunModes, self.run_modes)

        # Walk through build modes enabled on the CLI and append the opts
        for en_build_mode in self.en_build_modes:
            build_mode_obj = Modes.find_mode(en_build_mode, self.build_modes)
            if build_mode_obj is not None:
                self.pre_build_cmds.extend(build_mode_obj.pre_build_cmds)
                self.post_build_cmds.extend(build_mode_obj.post_build_cmds)
                self.build_opts.extend(build_mode_obj.build_opts)
                self.pre_run_cmds.extend(build_mode_obj.pre_run_cmds)
                self.post_run_cmds.extend(build_mode_obj.post_run_cmds)
                self.run_opts.extend(build_mode_obj.run_opts)
                self.sw_images.extend(build_mode_obj.sw_images)
            else:
                log.error(
                    "Mode \"%s\" enabled on the the command line is not defined",
                    en_build_mode)
                sys.exit(1)

        # Walk through run modes enabled on the CLI and append the opts
        for en_run_mode in self.en_run_modes:
            run_mode_obj = Modes.find_mode(en_run_mode, self.run_modes)
            if run_mode_obj is not None:
                self.pre_run_cmds.extend(run_mode_obj.pre_run_cmds)
                self.post_run_cmds.extend(run_mode_obj.post_run_cmds)
                self.run_opts.extend(run_mode_obj.run_opts)
                self.sw_images.extend(run_mode_obj.sw_images)
            else:
                log.error(
                    "Mode \"%s\" enabled on the the command line is not defined",
                    en_run_mode)
                sys.exit(1)

        # Create tests from given list of items
        self.tests = Tests.create_tests(self.tests, self)

        # Regressions
        # Parse testplan if provided.
        if self.testplan != "":
            self.testplan = parse_testplan(self.testplan)
            # Extract tests in each milestone and add them as regression target.
            self.regressions.extend(self.testplan.get_milestone_regressions())
        else:
            # Create a dummy testplan with no entries.
            self.testplan = Testplan(name=self.name)

        # Create regressions
        self.regressions = Regressions.create_regressions(
            self.regressions, self, self.tests)
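Both loops above share one pattern: resolve a mode name given on the CLI to a mode object, merge its option lists into the config, and exit on an unknown name. Here is a self-contained sketch of that lookup-or-fail pattern; SimpleMode and find_mode are invented stand-ins for the dvsim Modes classes, not the project's own code.

import logging
import sys

log = logging.getLogger(__name__)

class SimpleMode:
    """Stand-in for a dvsim mode object (illustrative only)."""
    def __init__(self, name, build_opts=None, run_opts=None):
        self.name = name
        self.build_opts = build_opts or []
        self.run_opts = run_opts or []

def find_mode(name, modes):
    # Mirrors the Modes.find_mode contract: return the match or None.
    for mode in modes:
        if mode.name == name:
            return mode
    return None

modes = [SimpleMode("waves", build_opts=["--dump-waves"])]
obj = find_mode("waves", modes)
if obj is not None:
    print(obj.build_opts)
else:
    log.error("Mode \"%s\" enabled on the command line is not defined", "waves")
    sys.exit(1)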
Code Example #3
    def _gen_results(self, fmt="md"):
        '''
        This function is called after the regression has completed. It collates
        the status of all run targets into a dict, parses the testplan, and maps
        the collated results onto the testplan entries to produce the final
        table (a list). The fmt arg selects whether the final result is dumped
        as markdown or HTML.
        '''

        # TODO: add support for html
        def retrieve_result(name, results):
            for item in results:
                if name == item["name"]: return item
            return None

        def gen_results_sub(items, results):
            '''
            Generate the results table from the test runs (builds are ignored).
            The table has 3 columns - name, passing and total as a list of dicts.
            This is populated for all tests. The number of passing and total is
            in reference to the number of iterations or reseeds for that test.
            This list of dicts is directly consumed by the Testplan::results_table
            method for testplan mapping / annotation.
            '''
            if items == []: return results
            for item in items:
                # Only generate results table for runs.
                if item.target == "run":
                    result = retrieve_result(item.name, results)
                    if result is None:
                        result = {"name": item.name, "passing": 0, "total": 0}
                        results.append(result)
                    if item.status == "P": result["passing"] += 1
                    result["total"] += 1
                results = gen_results_sub(item.sub, results)
            return results

        # Generate results table for runs.
        results_str = "# " + self.name.upper() + " Regression Results\n"
        results_str += "  Run on " + self.timestamp_long + "\n"
        results_str += "\n## Test Results\n"
        testplan = testplan_utils.parse_testplan(self.testplan)
        results_str += testplan.results_table(
            regr_results=gen_results_sub(self.deploy, []),
            map_full_testplan=self.map_full_testplan)

        # Write results to the scratch area
        regr_results_file = self.scratch_path + "/regr_results_" + self.timestamp + "." + fmt
        with open(regr_results_file, 'w') as f:
            f.write(results_str)
        return results_str
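To make the data shape concrete, here is a runnable sketch of the tally that gen_results_sub performs. The Item class is an invented stand-in for the real deploy objects (which carry name, target, status, and a sub list of children), and the helper is a module-level port of the nested function above.

class Item:
    """Minimal stand-in for a deploy item (illustrative only)."""
    def __init__(self, name, target, status, sub=None):
        self.name = name
        self.target = target
        self.status = status
        self.sub = sub or []

def gen_results_sub(items, results):
    # The nested helper from above, lifted to module scope for this demo.
    for item in items:
        if item.target == "run":
            result = next((r for r in results if r["name"] == item.name), None)
            if result is None:
                result = {"name": item.name, "passing": 0, "total": 0}
                results.append(result)
            if item.status == "P":
                result["passing"] += 1
            result["total"] += 1
        results = gen_results_sub(item.sub, results)
    return results

# One build fanning out into two reseeds of the same test: the build node
# itself is skipped, and only the passing run increments "passing".
deploy = [
    Item("uart_smoke-build", "build", "P", sub=[
        Item("uart_smoke", "run", "P"),
        Item("uart_smoke", "run", "F"),
    ]),
]
print(gen_results_sub(deploy, []))
# -> [{'name': 'uart_smoke', 'passing': 1, 'total': 2}]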
Code Example #4
File: SimCfg.py  Project: sandy-2018/opentitan
    def _create_objects(self):
        # Create build and run modes objects
        build_modes = Modes.create_modes(BuildModes,
                                         getattr(self, "build_modes"))
        setattr(self, "build_modes", build_modes)

        run_modes = Modes.create_modes(RunModes, getattr(self, "run_modes"))
        setattr(self, "run_modes", run_modes)

        # Walk through build modes enabled on the CLI and append the opts
        for en_build_mode in self.en_build_modes:
            build_mode_obj = Modes.find_mode(en_build_mode, build_modes)
            if build_mode_obj is not None:
                self.build_opts.extend(build_mode_obj.build_opts)
                self.run_opts.extend(build_mode_obj.run_opts)
            else:
                log.error(
                    "Mode \"%s\" enabled on the the command line is not defined",
                    en_build_mode)
                sys.exit(1)

        # Walk through run modes enabled on the CLI and append the opts
        for en_run_mode in self.en_run_modes:
            run_mode_obj = Modes.find_mode(en_run_mode, run_modes)
            if run_mode_obj is not None:
                self.run_opts.extend(run_mode_obj.run_opts)
            else:
                log.error(
                    "Mode \"%s\" enabled on the the command line is not defined",
                    en_run_mode)
                sys.exit(1)

        # Create tests from given list of items
        tests = Tests.create_tests(getattr(self, "tests"), self)
        setattr(self, "tests", tests)

        # Regressions
        # Parse testplan if provided.
        if self.testplan != "":
            self.testplan = testplan_utils.parse_testplan(self.testplan)
            # Extract tests in each milestone and add them as regression target.
            self.regressions.extend(self.testplan.get_milestone_regressions())

        # Create regressions
        regressions = Regressions.create_regressions(
            getattr(self, "regressions"), self, tests)
        setattr(self, "regressions", regressions)
Code Example #5
File: SimCfg.py  Project: raymondrc/opentitan
    def gen_results(self, fmt="md"):
        '''
        This function is called after the regression has completed. It collates
        the status of all run targets into a dict, parses the testplan, and maps
        the collated results onto the testplan entries to produce the final
        table (a list). The fmt arg selects whether the final result is dumped
        as markdown or HTML.
        '''

        # TODO: add support for html
        def retrieve_result(name, results):
            for item in results:
                if name == item["name"]: return item
            return None

        def gen_results_sub(items, results):
            if items == []: return results
            for item in items:
                # Only generate results table for runs.
                if item.target == "run":
                    result = retrieve_result(item.name, results)
                    if result is None:
                        result = {"name": item.name, "passing": 0, "total": 0}
                        results.append(result)
                    if item.status == "P": result["passing"] += 1
                    result["total"] += 1
                results = gen_results_sub(item.sub, results)
            return results

        # Generate results table for runs.
        regr_results = {}
        regr_results["timestamp"] = self.timestamp_long
        regr_results["test_results"] = gen_results_sub(self.deploy, [])
        results_str = "# " + self.name.upper() + " Regression Results\n"
        results_str += "  Run on " + regr_results["timestamp"] + "\n"
        results_str += "\n## Test Results\n"
        testplan = testplan_utils.parse_testplan(self.testplan)
        results_str += testplan.results_table(regr_results["test_results"])
        print(results_str)
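Note that this older revision prints the table instead of writing it to the scratch area as Example #3 does. Also, retrieve_result re-scans the accumulated list once per deploy item; the same tally can be kept in a dict keyed by test name and flattened at the end. A sketch of that alternative (not the project's implementation):

def tally_runs(items, counts=None):
    # Same traversal as gen_results_sub, but with O(1) name lookups.
    counts = {} if counts is None else counts
    for item in items:
        if item.target == "run":
            entry = counts.setdefault(
                item.name, {"name": item.name, "passing": 0, "total": 0})
            if item.status == "P":
                entry["passing"] += 1
            entry["total"] += 1
        tally_runs(item.sub, counts)
    return list(counts.values())

Since dicts preserve insertion order in Python 3.7+, the flattened list comes out in the same order as the list-based version above.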