Example #1
    def _load_child_cfg(self, entry, mk_config):
        '''Load a child configuration for a primary cfg'''
        if type(entry) is str:
            # Treat this as a file entry. Substitute wildcards in cfg_file
            # files since we need to process them right away.
            cfg_file = subst_wildcards(entry, self.__dict__, ignore_error=True)
            self.cfgs.append(self.create_instance(mk_config, cfg_file))

        elif type(entry) is dict:
            # Treat this as a cfg expanded in-line
            temp_cfg_file = self._conv_inline_cfg_to_hjson(entry)
            if not temp_cfg_file:
                return
            self.cfgs.append(self.create_instance(mk_config, temp_cfg_file))

            # Delete the temp_cfg_file once the instance is created
            try:
                log.log(VERBOSE, "Deleting temp cfg file:\n%s", temp_cfg_file)
                os.remove(temp_cfg_file)
            except OSError:
                log.error("Failed to remove temp cfg file:\n%s", temp_cfg_file)

        else:
            log.error(
                "Type of entry \"%s\" in the \"use_cfgs\" key is invalid: %s",
                entry, str(type(entry)))
            sys.exit(1)
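
# A minimal sketch (not from the source) of the two entry forms that
# _load_child_cfg() above accepts in a primary cfg's "use_cfgs" list: plain
# file paths (possibly containing wildcards) and inline dicts. The paths and
# the "uart" name are hypothetical placeholders.
use_cfgs = [
    # String entry: a path to a child cfg file; wildcards such as
    # "{proj_root}" are expanded with subst_wildcards() before loading.
    "{proj_root}/hw/ip/uart/dv/uart_sim_cfg.hjson",

    # Dict entry: a cfg expanded in-line. It is first written out to a
    # temporary hjson file by _conv_inline_cfg_to_hjson() and then loaded
    # like any other cfg file.
    {
        "name": "uart",
        "import_cfgs": ["{proj_root}/hw/dv/tools/dvsim/common_sim_cfg.hjson"],
    },
]

# The caller would then dispatch each entry to the helper, e.g.:
#
#     for entry in use_cfgs:
#         self._load_child_cfg(entry, mk_config)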
Example #2
    def __init__(self, run_items, sim_cfg):
        # Construct the cov_db_dirs right away from the run_items. This is a
        # special variable used in the HJson.
        self.cov_db_dirs = []
        for run in run_items:
            if run.cov_db_dir not in self.cov_db_dirs:
                self.cov_db_dirs.append(run.cov_db_dir)

        # Look up the cov_merge_db_dir early; it is a mandatory misc attribute
        # anyway, and we need it to compute the additional cov db dirs.
        self.cov_merge_db_dir = subst_wildcards("{cov_merge_db_dir}",
                                                sim_cfg.__dict__)

        # Prune previously merged cov directories, keeping the last 7.
        prev_cov_db_dirs = clean_odirs(odir=self.cov_merge_db_dir, max_odirs=7)

        # If the --cov-merge-previous command line switch is passed, then
        # merge coverage with the previous runs.
        if sim_cfg.cov_merge_previous:
            self.cov_db_dirs += [str(item) for item in prev_cov_db_dirs]

        super().__init__(sim_cfg)
        self.dependencies += run_items
        # Run coverage merge even if one test passes.
        self.needs_all_dependencies_passing = False

        # Append cov_db_dirs to the list of exports.
        self.exports["cov_db_dirs"] = "\"{}\"".format(" ".join(
            self.cov_db_dirs))
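
# A standalone sketch (helper name and inputs are assumptions, not source
# code) of the cov_db_dirs bookkeeping done in the constructor above:
# deduplicate the per-run coverage databases and, when merging with previous
# results, fold in the pruned older merge directories.
def collect_cov_db_dirs(run_cov_db_dirs, prev_cov_db_dirs, merge_previous):
    cov_db_dirs = []
    for d in run_cov_db_dirs:
        if d not in cov_db_dirs:  # keep only the first occurrence
            cov_db_dirs.append(d)
    if merge_previous:  # --cov-merge-previous behaviour
        cov_db_dirs += [str(d) for d in prev_cov_db_dirs]
    return cov_db_dirs

# Two runs share a database directory and one previous merge is folded in.
assert collect_cov_db_dirs(
    ["scratch/run1/cov_db", "scratch/run1/cov_db", "scratch/run2/cov_db"],
    ["scratch/cov_merge/prev"],
    merge_previous=True,
) == ["scratch/run1/cov_db", "scratch/run2/cov_db", "scratch/cov_merge/prev"]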
Example #3
def _load_single_file(target, path, is_first, arg_keys):
    '''Load a single hjson file, merging its keys into target

    Returns a list of further includes that should be loaded.

    '''
    hjson = parse_hjson(path)
    if not isinstance(hjson, dict):
        raise RuntimeError(
            '{!r}: Top-level hjson object is not a dictionary.'.format(path))

    import_cfgs = []
    for key, dict_val in hjson.items():
        # If this key was already set from the command-line arguments, ignore
        # any update to it from the hjson file.
        if key in arg_keys:
            continue

        # If key is 'import_cfgs', this should be a list. Add each item to the
        # list of cfgs to process
        if key == 'import_cfgs':
            if not isinstance(dict_val, list):
                raise RuntimeError('{!r}: import_cfgs value is {!r}, but '
                                   'should be a list.'.format(path, dict_val))
            import_cfgs += dict_val
            continue

        # 'use_cfgs' is a bit like 'import_cfgs', but is only used for primary
        # config files (where it is a list of the child configs). This
        # shouldn't be used except at top-level (the first configuration file
        # to be loaded).
        #
        # If defined, check that it's a list, but then allow it to be set in
        # the target dictionary as usual.
        if key == 'use_cfgs':
            if not is_first:
                raise RuntimeError('{!r}: File is included by another one, '
                                   'but defines "use_cfgs".'.format(path))
            if not isinstance(dict_val, list):
                raise RuntimeError(
                    '{!r}: use_cfgs must be a list. Saw {!r}.'.format(
                        path, dict_val))

        # Otherwise, update target with this attribute
        set_target_attribute(path, target, key, dict_val)

    # Expand the names of imported configuration files as we return them
    return [
        subst_wildcards(cfg_path,
                        target,
                        ignored_wildcards=[],
                        ignore_error=False) for cfg_path in import_cfgs
    ]
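
# A minimal sketch (the function name and the 'seen' bookkeeping are
# illustrative assumptions) of how a caller could consume the include list
# returned by _load_single_file() above: load files breadth-first and skip
# paths that were already processed, so diamond-shaped or repeated imports
# are only merged into the target once.
def load_cfg_tree(target, first_path, arg_keys):
    to_load = [first_path]
    seen = set()
    is_first = True
    while to_load:
        path = to_load.pop(0)
        if path in seen:
            continue
        seen.add(path)
        # Merge this file's keys into target; it may name further imports.
        to_load += _load_single_file(target, path, is_first, arg_keys)
        is_first = False
    return target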
Example #4
    def _gen_results(self):
        # '''
        # The function is called after the regression has completed. It looks
        # for a results.hjson file with aggregated results from the
        # synthesis run. The hjson needs to have the following (potentially
        # empty) fields
        #
        # results = {
        #     "tool": "dc",
        #     "top" : <name of toplevel>,
        #
        #     "messages": {
        #         "flow_errors"      : [],
        #         "flow_warnings"    : [],
        #         "analyze_errors"   : [],
        #         "analyze_warnings" : [],
        #         "elab_errors"      : [],
        #         "elab_warnings"    : [],
        #         "compile_errors"   : [],
        #         "compile_warnings" : [],
        #     },
        #
        #     "timing": {
        #         # per timing group (usually a clock domain)
        #         # in nanoseconds
        #         <group>  : {
        #             "tns"    : <value>,
        #             "wns"    : <value>,
        #             "period" : <value>,
        #         ...
        #         }
        #     },
        #
        #     "area": {
        #         # gate equivalent of a NAND2 gate
        #         "ge"     : <value>,
        #
        #         # summary report, in GE
        #         "comb"   : <value>,
        #         "buf"    : <value>,
        #         "reg"    : <value>,
        #         "macro"  : <value>,
        #         "total"  : <value>,
        #
        #         # hierarchical report of the first submodule level
        #         "instances" : {
        #             <name> : {
        #               "comb"  : <value>,
        #               "buf"   : <value>,
        #               "reg"   : <value>,
        #               "macro" : <value>,
        #               "total" : <value>,
        #             },
        #             ...
        #         },
        #     },
        #
        #     "power": {
        #         "net"  : <value>,
        #         "int"  : <value>,
        #         "leak" : <value>,
        #     },
        #
        #     "units": {
        #         "voltage"     : <value>,
        #         "capacitance" : <value>,
        #         "time"        : <value>,
        #         "dynamic"     : <value>,
        #         "static"      : <value>,
        #     }
        # }
        #
        # note that if this is a primary config, the results will
        # be generated using the _gen_results_summary function
        # '''

        def _create_entry(val, norm=1.0, total=None, perctag="%"):
            """
            Create normalized entry with an optional
            percentage appended in brackets.
            """
            if val is not None and norm is not None:
                if total is not None:
                    perc = float(val) / float(total) * 100.0
                    entry = "%2.1f %s" % (perc, perctag)
                else:
                    value = float(val) / norm
                    entry = "%2.1f" % (value)
            else:
                entry = "--"

            return entry

        self.result = {}

        # Generate results table for runs.
        results_str = "## " + self.results_title + "\n\n"
        results_str += "### " + self.timestamp_long + "\n"
        if self.revision:
            results_str += "### " + self.revision + "\n"
        results_str += "### Branch: " + self.branch + "\n"
        results_str += "### Synthesis Tool: " + self.tool.upper() + "\n\n"

        # TODO: extend this to support multiple build modes
        for mode in self.build_modes:

            # results_str += "## Build Mode: " + mode.name + "\n\n"

            result_data = Path(
                subst_wildcards(self.build_dir, {"build_mode": mode.name}) +
                '/results.hjson')
            log.info("looking for result data file at %s", result_data)

            try:
                with result_data.open() as results_file:
                    self.result = hjson.load(results_file, use_decimal=True)
            except IOError as err:
                log.warning("%s", err)
                self.result = {
                    "messages": {
                        "flow_errors": ["IOError: %s" % err],
                        "flow_warnings": [],
                        "analyze_errors": [],
                        "analyze_warnings": [],
                        "elab_errors": [],
                        "elab_warnings": [],
                        "compile_errors": [],
                        "compile_warnings": [],
                    },
                }

            # Message summary
            # results_str += "### Tool Message Summary\n\n"
            if "messages" in self.result:

                header = [
                    "Build Mode", "Flow Warnings", "Flow Errors",
                    "Analyze Warnings", "Analyze Errors", "Elab Warnings",
                    "Elab Errors", "Compile Warnings", "Compile Errors"
                ]
                colalign = ("left", ) + ("center", ) * (len(header) - 1)
                table = [header]

                messages = self.result["messages"]
                table.append([
                    mode.name,
                    str(len(messages["flow_warnings"])) + " W ",
                    str(len(messages["flow_errors"])) + " E ",
                    str(len(messages["analyze_warnings"])) + " W ",
                    str(len(messages["analyze_errors"])) + " E ",
                    str(len(messages["elab_warnings"])) + " W ",
                    str(len(messages["elab_errors"])) + " E ",
                    str(len(messages["compile_warnings"])) + " W ",
                    str(len(messages["compile_errors"])) + " E ",
                ])

                if len(table) > 1:
                    results_str += tabulate(table,
                                            headers="firstrow",
                                            tablefmt="pipe",
                                            colalign=colalign) + "\n\n"
                else:
                    results_str += "No messages found\n\n"
            else:
                results_str += "No messages found\n\n"

            # Hierarchical Area report
            results_str += "### Circuit Complexity in [kGE]\n\n"
            if "area" in self.result:

                header = [
                    "Instance", "Comb ", "Buf/Inv", "Regs", "Macros", "Total",
                    "Total [%]"
                ]
                colalign = ("left", ) + ("center", ) * (len(header) - 1)
                table = [header]

                # print top-level summary first
                row = ["**" + self.result["top"] + "**"]
                try:
                    kge = float(self.result["area"]["ge"]) * 1000.0

                    for field in ["comb", "buf", "reg", "macro", "total"]:
                        row += [
                            "**" +
                            _create_entry(self.result["area"][field], kge) +
                            "**"
                        ]

                    row += ["**--**"]
                    table.append(row)

                    # go through submodules
                    for name in self.result["area"]["instances"].keys():
                        if name == self.result["top"]:
                            continue
                        row = [name]
                        for field in ["comb", "buf", "reg", "macro", "total"]:
                            row += [
                                _create_entry(
                                    self.result["area"]["instances"][name]
                                    [field], kge)
                            ]

                        # add percentage of total
                        row += [
                            _create_entry(
                                self.result["area"]["instances"][name]["total"],
                                kge, self.result["area"]["total"], "%u")
                        ]

                        table.append(row)

                except TypeError:
                    results_str += "Gate equivalent is not properly defined\n\n"

                if len(table) > 1:
                    results_str += tabulate(table,
                                            headers="firstrow",
                                            tablefmt="pipe",
                                            colalign=colalign) + "\n\n"
                else:
                    results_str += "No area report found\n\n"
            else:
                results_str += "No area report found\n\n"

            # Timing report
            results_str += "### Timing in [ns]\n\n"
            if "timing" in self.result and "units" in self.result:

                header = ["Clock", "Period", "WNS", "TNS"]
                colalign = ("left", ) + ("center", ) * (len(header) - 1)
                table = [header]

                for clock in self.result["timing"].keys():
                    row = [clock]
                    row += [
                        _create_entry(
                            self.result["timing"][clock]["period"],
                            1.0E-09 / float(self.result["units"]["time"])),
                        _create_entry(
                            self.result["timing"][clock]["wns"], 1.0E-09 /
                            float(self.result["units"]["time"])) + " EN",
                        _create_entry(
                            self.result["timing"][clock]["tns"], 1.0E-09 /
                            float(self.result["units"]["time"])) + " EN"
                    ]
                    table.append(row)

                if len(table) > 1:
                    results_str += tabulate(table,
                                            headers="firstrow",
                                            tablefmt="pipe",
                                            colalign=colalign) + "\n\n"
                else:
                    results_str += "No timing report found\n\n"
            else:
                results_str += "No timing report found\n\n"

            # Power report
            results_str += "### Power Estimates in [mW]\n\n"
            if "power" in self.result and "units" in self.result:

                header = ["Network", "Internal", "Leakage", "Total"]
                colalign = ("center", ) * len(header)
                table = [header]

                try:
                    power = [
                        float(self.result["power"]["net"]) *
                        float(self.result["units"]["dynamic"]),
                        float(self.result["power"]["int"]) *
                        float(self.result["units"]["dynamic"]),
                        float(self.result["power"]["leak"]) *
                        float(self.result["units"]["static"])
                    ]

                    total_power = sum(power)

                    row = [
                        _create_entry(power[0], 1.0E-3) + " / " +
                        _create_entry(power[0], 1.0E-3, total_power),
                        _create_entry(power[1], 1.0E-3) + " / " +
                        _create_entry(power[1], 1.0E-3, total_power),
                        _create_entry(power[2], 1.0E-3) + " / " +
                        _create_entry(power[2], 1.0E-3, total_power),
                        _create_entry(total_power, 1.0E-3)
                    ]

                    table.append(row)
                # In case floating point values are None.
                except TypeError:
                    results_str += "No power report found\n\n"

                if len(table) > 1:
                    results_str += tabulate(table,
                                            headers="firstrow",
                                            tablefmt="pipe",
                                            colalign=colalign) + "\n\n"
            else:
                results_str += "No power report found\n\n"

            # Append detailed messages if they exist
            # Note that these messages are omitted in publication mode
            hdr_key_pairs = [("Flow Warnings", "flow_warnings"),
                             ("Flow Errors", "flow_errors"),
                             ("Analyze Warnings", "analyze_warnings"),
                             ("Analyze Errors", "analyze_errors"),
                             ("Elab Warnings", "elab_warnings"),
                             ("Elab Errors", "elab_errors"),
                             ("Compile Warnings", "compile_warnings"),
                             ("Compile Errors", "compile_errors")]

            # Synthesis fails if any warning or error message has occurred
            self.errors_seen = False
            fail_msgs = ""
            for _, key in hdr_key_pairs:
                if key in self.result['messages']:
                    if self.result['messages'].get(key):
                        self.errors_seen = True
                        break

            if self.errors_seen:
                fail_msgs += "\n### Errors and Warnings for Build Mode `'" + mode.name + "'`\n"
                for hdr, key in hdr_key_pairs:
                    msgs = self.result['messages'].get(key)
                    fail_msgs += print_msg_list("#### " + hdr, msgs,
                                                self.max_msg_count)

            # The email and published reports will default to self.results_md
            # if they are empty. In case they need to be sanitized, override
            # them and do not append detailed messages.
            if self.sanitize_email_results:
                self.email_results_md = results_str
            if self.sanitize_publish_results:
                self.publish_results_md = results_str

            # locally generated result always contains all details
            self.results_md = results_str + fail_msgs

            # TODO: add support for pie / bar charts for area splits and
            # QoR history

        # Write results to the scratch area
        results_file = self.scratch_path + "/results_" + self.timestamp + ".md"
        with open(results_file, 'w') as f:
            f.write(self.results_md)

        log.log(VERBOSE, "[results page]: [%s] [%s]", self.name, results_file)
        return self.results_md
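
# A condensed standalone copy of the nested _create_entry() helper above,
# with a few worked examples (made-up numbers) showing how raw report values
# become the normalized table cells: divide by 'norm', or express as a
# percentage of 'total', and fall back to "--" when data is missing.
def _create_entry(val, norm=1.0, total=None, perctag="%"):
    if val is None or norm is None:
        return "--"
    if total is not None:
        return "%2.1f %s" % (float(val) / float(total) * 100.0, perctag)
    return "%2.1f" % (float(val) / norm)

assert _create_entry(12500.0, norm=1000.0) == "12.5"   # e.g. GE scaled to kGE
assert _create_entry(300.0, total=1200.0) == "25.0 %"  # share of the total
assert _create_entry(None, norm=1000.0) == "--"        # missing value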
Example #5
    def resolve_hjson_raw(self, hjson_dict):
        attrs = self.__dict__.keys()
        rm_hjson_dict_keys = []
        import_cfgs = []
        use_cfgs = []
        for key in hjson_dict.keys():
            if key in attrs:
                hjson_dict_val = hjson_dict[key]
                self_val = getattr(self, key)
                scalar_types = {str: [""], int: [0, -1], bool: [False]}

                # Case 1: the types of the key's value in the class and in
                # hjson_dict differ - error!
                if type(hjson_dict_val) != type(self_val):
                    log.error("Conflicting key types: \"%s\" {\"%s, \"%s\"}",
                              key,
                              type(hjson_dict_val).__name__,
                              type(self_val).__name__)
                    sys.exit(1)

                # Case 2: the values are scalars (str, int or bool) - set if
                # not already set, else error!
                elif type(hjson_dict_val) in scalar_types.keys():
                    defaults = scalar_types[type(hjson_dict_val)]
                    if self_val == hjson_dict_val:
                        rm_hjson_dict_keys.append(key)
                    elif self_val in defaults and hjson_dict_val not in defaults:
                        setattr(self, key, hjson_dict_val)
                        rm_hjson_dict_keys.append(key)
                    elif self_val not in defaults and hjson_dict_val not in defaults:
                        # If the key exists in the command-line args, use that
                        # value; otherwise flag a conflicting error.
                        # TODO: we could report the conflict but pick one value
                        # and proceed rather than exit.
                        override_with_args_val = False
                        if hasattr(self.args, key):
                            args_val = getattr(self.args, key)
                            if type(args_val) == str and args_val != "":
                                setattr(self, key, args_val)
                                override_with_args_val = True
                        if not override_with_args_val:
                            log.error(
                                "Conflicting values {\"%s\", \"%s\"} encountered for key \"%s\"",
                                str(self_val), str(hjson_dict_val), key)
                            sys.exit(1)

                # Case 3: key value in class and hjson_dict are lists - merge'em
                elif type(hjson_dict_val) is list and type(self_val) is list:
                    self_val.extend(hjson_dict_val)
                    setattr(self, key, self_val)
                    rm_hjson_dict_keys.append(key)

                # Case 4: unknown issue
                else:
                    log.error(
                        "Type of \"%s\" (%s) in %s appears to be invalid (should be %s)",
                        key,
                        type(hjson_dict_val).__name__, hjson_dict,
                        type(self_val).__name__)
                    sys.exit(1)

            # If key is 'import_cfgs' then add to the list of cfgs to
            # process
            elif key == 'import_cfgs':
                import_cfgs.extend(hjson_dict[key])
                rm_hjson_dict_keys.append(key)

            # If this is a master cfg list and the key is 'use_cfgs'
            elif self.is_master_cfg and key == "use_cfgs":
                use_cfgs.extend(hjson_dict[key])

            # If this is not a master cfg and the key is 'use_cfgs'
            elif not self.is_master_cfg and key == "use_cfgs":
                # Throw an error and exit
                log.error(
                    "Key \"use_cfgs\" encountered in a non-master cfg file list \"%s\"",
                    self.flow_cfg_file)
                sys.exit(1)

            else:
                # add key-value to class
                setattr(self, key, hjson_dict[key])
                rm_hjson_dict_keys.append(key)

        # Parse imported cfgs
        for cfg_file in import_cfgs:
            if cfg_file not in self.imported_cfg_files:
                self.imported_cfg_files.append(cfg_file)
                # Substitute wildcards in cfg_file files since we need to process
                # them right away.
                cfg_file = subst_wildcards(cfg_file, self.__dict__)
                self.parse_flow_cfg(cfg_file, False)
            else:
                log.error("Cfg file \"%s\" has already been parsed", cfg_file)

        # Parse master cfg files
        if self.is_master_cfg:
            for entry in use_cfgs:
                if type(entry) is str:
                    # Treat this as a file entry
                    # Substitute wildcards in cfg_file files since we need to process
                    # them right away.
                    cfg_file = subst_wildcards(entry,
                                               self.__dict__,
                                               ignore_error=True)
                    self.cfgs.append(self.create_instance(cfg_file))

                elif type(entry) is dict:
                    # Treat this as a cfg expanded in-line
                    temp_cfg_file = self._conv_inline_cfg_to_hjson(entry)
                    if not temp_cfg_file:
                        continue
                    self.cfgs.append(self.create_instance(temp_cfg_file))

                    # Delete the temp_cfg_file once the instance is created
                    try:
                        log.log(VERBOSE, "Deleting temp cfg file:\n%s",
                                temp_cfg_file)
                        os.remove(temp_cfg_file)
                    except OSError:
                        log.error("Failed to remove temp cfg file:\n%s",
                                  temp_cfg_file)

                else:
                    log.error(
                        "Type of entry \"%s\" in the \"use_cfgs\" key is invalid: %s",
                        entry, str(type(entry)))
                    sys.exit(1)
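
# A condensed sketch (illustrative only, not the source) of the scalar-merge
# precedence implemented by Case 2 above for a single key: an hjson value
# fills in a still-default attribute, a matching or default hjson value is a
# no-op, and when both sides carry real values a command-line argument
# arbitrates, otherwise it is a conflict.
def merge_scalar(self_val, hjson_val, args_val=None):
    defaults = {str: [""], int: [0, -1], bool: [False]}[type(hjson_val)]
    if self_val == hjson_val or hjson_val in defaults:
        return self_val  # nothing new to take from the hjson file
    if self_val in defaults:
        return hjson_val  # our value was still the default: take hjson's
    if args_val not in (None, ""):
        return args_val  # both set: the command line wins
    raise ValueError("conflicting values {!r} / {!r}".format(self_val,
                                                             hjson_val))

assert merge_scalar("", "vcs") == "vcs"                # hjson fills a default
assert merge_scalar("vcs", "xcelium", "vcs") == "vcs"  # command line wins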
Example #6
    def _gen_results(self, results):
        # This function is called after the regression completes. It looks for
        # a results.hjson file with aggregated results from the formal logfile.
        # The hjson file is required to follow this format:
        # {
        #   "messages": {
        #      "errors"      : []
        #      "warnings"    : []
        #      "cex"         : ["property1", "property2"...],
        #      "undetermined": [],
        #      "unreachable" : [],
        #   },
        #
        #   "summary": {
        #      "errors"      : 0
        #      "warnings"    : 2
        #      "proven"      : 20,
        #      "cex"         : 5,
        #      "covered"     : 18,
        #      "undetermined": 7,
        #      "unreachable" : 2,
        #      "pass_rate"   : "90 %",
        #      "cover_rate"  : "90 %"
        #   },
        # }
        # The categories for property results are: proven, cex, undetermined,
        # covered, and unreachable.
        #
        # If coverage was enabled then results.hjson will also have an item that
        # shows formal coverage. It will have the following format:
        #   "coverage": {
        #      stimuli: "90 %",
        #      coi    : "90 %",
        #      proof  : "80 %"
        #   }
        results_str = "## " + self.results_title + "\n\n"
        results_str += "### " + self.timestamp_long + "\n"
        if self.revision:
            results_str += "### " + self.revision + "\n"
        results_str += "### Branch: " + self.branch + "\n"
        results_str += "### Tool: " + self.tool.upper() + "\n"
        summary = [self.name]  # cfg summary for publish results

        assert len(self.deploy) == 1
        mode = self.deploy[0]

        if results[mode] == "P":
            result_data = Path(
                subst_wildcards(self.build_dir, {"build_mode": mode.name}),
                'results.hjson')
            try:
                with open(result_data, "r") as results_file:
                    self.result = hjson.load(results_file, use_decimal=True)
            except IOError as err:
                log.warning("%s", err)
                self.result = {
                    "messages": {
                        "errors": ["IOError: %s" % err],
                    }
                }

        results_str += "\n\n## Formal " + self.sub_flow.upper() + " Results\n"
        formal_result_str, formal_summary = self.get_summary(self.result)
        results_str += formal_result_str
        summary += formal_summary

        if self.cov:
            results_str += "\n\n## Coverage Results\n"
            results_str += ("### Coverage html file dir: " +
                            self.scratch_path + "/default/formal-icarus\n\n")
            cov_result_str, cov_summary = self.get_coverage(self.result)
            results_str += cov_result_str
            summary += cov_summary
        else:
            summary += ["N/A", "N/A", "N/A"]

        if results[mode] != "P":
            results_str += "\n## List of Failures\n" + ''.join(
                mode.launcher.fail_msg.message)

        messages = self.result.get("messages")
        if messages is not None:
            results_str += self.parse_dict_to_str(messages)

        self.results_md = results_str

        # Generate result summary
        self.result_summary[self.name] = summary

        return self.results_md
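
# A hypothetical results.hjson for a formal run, shown as the Python dict
# that hjson.load() would return. It uses only the fields documented in the
# comment above; every count, percentage and property name is invented.
example_formal_result = {
    "messages": {
        "errors": [],
        "warnings": ["unresolved clock constraint on clk_aon_i"],
        "cex": ["u_fsm.StateValid_A"],
        "undetermined": [],
        "unreachable": ["u_fsm.UnusedState_A"],
    },
    "summary": {
        "errors": 0,
        "warnings": 1,
        "proven": 20,
        "cex": 1,
        "covered": 18,
        "undetermined": 0,
        "unreachable": 1,
        "pass_rate": "95 %",
        "cover_rate": "90 %",
    },
    # Present only when coverage collection was enabled for the run.
    "coverage": {"stimuli": "90 %", "coi": "90 %", "proof": "80 %"},
}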
Example #7
    def _gen_results(self):
        # '''
        # The function is called after the regression has completed. It looks
        # for a results.hjson file with aggregated results from the lint run.
        # The hjson needs to have the following (potentially empty) fields
        #
        # {
        #   tool: ""
        #   errors: []
        #   warnings: []
        #   lint_errors: []
        #   lint_warnings: []
        #   lint_infos: []
        # }
        #
        # where each entry is a string representing a lint message. This makes
        # it possible to reuse the same LintCfg class with different tools,
        # since only the parsing script that transforms the tool output into
        # the hjson above needs to be adapted.
        #
        # note that if this is a primary config, the results will
        # be generated using the _gen_results_summary function
        # '''

        # Generate results table for runs.
        results_str = "## " + self.results_title + "\n\n"
        results_str += "### " + self.timestamp_long + "\n"
        if self.revision:
            results_str += "### " + self.revision + "\n"
        results_str += "### Branch: " + self.branch + "\n"
        results_str += "### Lint Tool: " + self.tool.upper() + "\n\n"

        header = [
            "Build Mode", "Tool Warnings", "Tool Errors", "Lint Warnings",
            "Lint Errors"
        ]
        colalign = ("center", ) * len(header)
        table = [header]

        # aggregated counts
        self.result_summary["warnings"] = []
        self.result_summary["errors"] = []
        self.result_summary["lint_warnings"] = []
        self.result_summary["lint_errors"] = []

        fail_msgs = ""
        for mode in self.build_modes:

            result_data = Path(
                subst_wildcards(self.build_dir, {"build_mode": mode.name}) +
                '/results.hjson')
            log.info("[results:hjson]: [%s]: [%s]", self.name, result_data)

            try:
                with result_data.open() as results_file:
                    self.result = hjson.load(results_file, use_decimal=True)
            except IOError as err:
                log.warning("%s", err)
                self.result = {
                    "tool": "",
                    "errors": ["IOError: %s" % err],
                    "warnings": [],
                    "lint_errors": [],
                    "lint_warnings": [],
                    "lint_infos": []
                }
            if self.result:
                table.append([
                    mode.name,
                    str(len(self.result["warnings"])) + " W ",
                    str(len(self.result["errors"])) + " E",
                    # Lint infos are currently not published:
                    # len(self.result["lint_infos"]),
                    str(len(self.result["lint_warnings"])) + " W",
                    str(len(self.result["lint_errors"])) + " E"
                ])
            else:
                self.result = {
                    "tool": "",
                    "errors": [],
                    "warnings": [],
                    "lint_errors": [],
                    "lint_warnings": [],
                    "lint_infos": []
                }

            self.result_summary["warnings"] += self.result["warnings"]
            self.result_summary["errors"] += self.result["errors"]
            self.result_summary["lint_warnings"] += self.result[
                "lint_warnings"]
            self.result_summary["lint_errors"] += self.result["lint_errors"]

            # Append detailed messages if they exist
            hdr_key_pairs = [("Tool Warnings", "warnings"),
                             ("Tool Errors", "errors"),
                             ("Lint Warnings", "lint_warnings"),
                             ("Lint Errors", "lint_errors")]

            # Lint fails if any warning or error message has occurred
            self.errors_seen = False
            for _, key in hdr_key_pairs:
                if key in self.result:
                    if self.result.get(key):
                        self.errors_seen = True
                        break

            if self.errors_seen:
                fail_msgs += "\n### Errors and Warnings for Build Mode `'" + mode.name + "'`\n"
                for hdr, key in hdr_key_pairs:
                    msgs = self.result.get(key)
                    fail_msgs += print_msg_list("#### " + hdr, msgs,
                                                self.max_msg_count)

        if len(table) > 1:
            self.results_md = results_str + tabulate(
                table, headers="firstrow", tablefmt="pipe",
                colalign=colalign) + "\n"

            # The email and published reports will default to self.results_md
            # if they are empty. In case they need to be sanitized, override
            # them and do not append detailed messages.
            if self.sanitize_email_results:
                self.email_results_md = self.results_md
            if self.sanitize_publish_results:
                self.publish_results_md = self.results_md
            # locally generated result always contains all details
            self.results_md += fail_msgs
        else:
            self.results_md = results_str + "\nNo results to display.\n"
            self.email_results_md = self.results_md
            self.publish_results_md = self.results_md

        # Write results to the scratch area
        results_file = self.scratch_path + "/results_" + self.timestamp + ".md"
        with open(results_file, 'w') as f:
            f.write(self.results_md)

        log.log(VERBOSE, "[results page]: [%s] [%s]", self.name, results_file)
        return self.results_md
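
# A minimal sketch of the tabulate() call used above to render the lint
# summary as a Markdown "pipe" table: headers come from the first row and
# every column is centred. The counts here are made up.
from tabulate import tabulate

header = ["Build Mode", "Tool Warnings", "Tool Errors", "Lint Warnings",
          "Lint Errors"]
table = [header, ["default", "2 W ", "0 E", "5 W", "0 E"]]

print(tabulate(table, headers="firstrow", tablefmt="pipe",
               colalign=("center", ) * len(header)))
# Prints a Markdown table along the lines of:
# | Build Mode | Tool Warnings | Tool Errors | Lint Warnings | Lint Errors |
# |:----------:|:-------------:|:-----------:|:-------------:|:-----------:|
# |  default   |     2 W       |     0 E     |      5 W      |     0 E     |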
Example #8
    def _gen_results(self):
        # This function is called after the regression completes. It looks for
        # a results.hjson file with aggregated results from the FPV run.
        # The hjson file is required to follow this format:
        # {
        #   "messages": {
        #      "errors"      : []
        #      "warnings"    : []
        #      "cex"         : ["property1", "property2"...],
        #      "undetermined": [],
        #      "unreachable" : [],
        #   },
        #
        #   "fpv_summary": {
        #      "errors"      : 0
        #      "warnings"    : 2
        #      "proven"      : 20,
        #      "cex"         : 5,
        #      "covered"     : 18,
        #      "undetermined": 7,
        #      "unreachable" : 2,
        #      "pass_rate"   : "90 %",
        #      "cover_rate"  : "90 %"
        #   },
        # }
        # The categories for property results are: proven, cex, undetermined,
        # covered, and unreachable.
        #
        # If coverage was enabled then results.hjson will also have an item that
        # shows FPV coverage. It will have the following format:
        #   "fpv_coverage": {
        #      stimuli: "90 %",
        #      coi    : "90 %",
        #      proof  : "80 %"
        #   }
        results_str = "## " + self.results_title + "\n\n"
        results_str += "### " + self.timestamp_long + "\n"
        results_str += "### FPV Tool: " + self.tool.upper() + "\n"
        results_str += "### LogFile dir: " + self.scratch_path + "/default\n\n"

        summary = [self.name]  # cfg summary for publish results

        if len(self.build_modes) != 1:
            mode_names = [mode.name for mode in self.build_modes]
            log.error(
                "FPV only supports mode 'default', but found these modes: %s",
                mode_names)
        else:
            mode = self.build_modes[0]
            result_data = Path(
                subst_wildcards(self.build_dir, {"build_mode": mode.name}) +
                '/results.hjson')
            try:
                with open(result_data, "r") as results_file:
                    self.result = hjson.load(results_file, use_decimal=True)
            except IOError as err:
                log.warning("%s", err)
                self.result = {
                    "messages": {
                        "errors": ["IOError: %s" % err],
                    }
                }

            fpv_result_str, fpv_summary = self.get_fpv_summary_results(
                self.result)
            results_str += fpv_result_str
            summary += fpv_summary

            if self.cov:
                results_str += "\n\n## Coverage Results\n"
                results_str += ("### Coverage html file dir: " +
                                self.scratch_path +
                                "/default/formal-icarus\n\n")
                cov_result_str, cov_summary = self.get_fpv_coverage_results(
                    self.result)
                results_str += cov_result_str
                summary += cov_summary

            messages = self.result.get("messages")
            if messages is not None:
                results_str += self.parse_dict_to_str(messages)

        # Write results to the scratch area
        self.results_md = results_str
        results_file = self.scratch_path + "/results_" + self.timestamp + ".md"
        with open(results_file, 'w') as f:
            f.write(self.results_md)

        log.info("[results page]: [%s] [%s]", self.name, results_file)

        # Generate result summary
        if not self.cov:
            summary += ["N/A", "N/A", "N/A"]
        self.result_summary[self.name] = summary

        return self.results_md
Example #9
    def _gen_results(self):
        # '''
        # The function is called after the regression has completed. It looks
        # for a results.hjson file with aggregated results from the lint run.
        # The hjson needs to have the following (potentially empty) fields
        #
        # {
        #   tool: ""
        #   errors: []
        #   warnings: []
        #   lint_errors: []
        #   lint_warnings: []
        #   lint_infos: []
        # }
        #
        # where each entry is a string representing a lint message. This makes
        # it possible to reuse the same LintCfg class with different tools,
        # since only the parsing script that transforms the tool output into
        # the hjson above needs to be adapted.
        #
        # note that if this is a master config, the results will
        # be generated using the _gen_results_summary function
        # '''

        # Generate results table for runs.
        results_str = "## " + self.results_title + "\n\n"
        results_str += "### " + self.timestamp_long + "\n"
        results_str += "### Lint Tool: " + self.tool.upper() + "\n\n"

        header = [
            "Build Mode", "Tool Warnings", "Tool Errors", "Lint Warnings",
            "Lint Errors"
        ]
        colalign = ("center", ) * len(header)
        table = [header]

        # aggregated counts
        self.result_summary["warnings"] = []
        self.result_summary["errors"] = []
        self.result_summary["lint_warnings"] = []
        self.result_summary["lint_errors"] = []

        fail_msgs = ""
        for mode in self.build_modes:

            result_data = Path(
                subst_wildcards(self.build_dir, {"build_mode": mode.name}) +
                '/results.hjson')
            log.info("looking for result data file at %s", result_data)

            try:
                with open(result_data, "r") as results_file:
                    self.result = hjson.load(results_file, use_decimal=True)
            except IOError as err:
                log.warning("%s", err)
                self.result = {
                    "tool": "",
                    "errors": ["IOError: %s" % err],
                    "warnings": [],
                    "lint_errors": [],
                    "lint_warnings": [],
                    "lint_infos": []
                }
            if self.result:
                table.append([
                    mode.name,
                    str(len(self.result["warnings"])) + " W ",
                    str(len(self.result["errors"])) + " E",
                    # Lint infos are currently not published:
                    # len(self.result["lint_infos"]),
                    str(len(self.result["lint_warnings"])) + " W",
                    str(len(self.result["lint_errors"])) + " E"
                ])
            else:
                self.result = {
                    "tool": "",
                    "errors": [],
                    "warnings": [],
                    "lint_errors": [],
                    "lint_warnings": [],
                    "lint_infos": []
                }

            self.result_summary["warnings"] += self.result["warnings"]
            self.result_summary["errors"] += self.result["errors"]
            self.result_summary["lint_warnings"] += self.result[
                "lint_warnings"]
            self.result_summary["lint_errors"] += self.result["lint_errors"]

            # Append detailed messages if they exist
            if sum([
                    len(self.result["warnings"]),
                    len(self.result["errors"]),
                    len(self.result["lint_warnings"]),
                    len(self.result["lint_errors"])
            ]):
                fail_msgs += "\n## Errors and Warnings for Build Mode `'" + mode.name + "'`\n"
                fail_msgs += _print_msg_list("Tool Errors",
                                             self.result["errors"])
                fail_msgs += _print_msg_list("Tool Warnings",
                                             self.result["warnings"])
                fail_msgs += _print_msg_list("Lint Errors",
                                             self.result["lint_errors"])
                fail_msgs += _print_msg_list("Lint Warnings",
                                             self.result["lint_warnings"])
                # fail_msgs += _print_msg_list("Lint Infos", results["lint_infos"])

        if len(table) > 1:
            self.results_md = results_str + tabulate(
                table, headers="firstrow", tablefmt="pipe",
                colalign=colalign) + "\n" + fail_msgs
        else:
            self.results_md = results_str + "\nNo results to display.\n"

        # Write results to the scratch area
        self.results_file = self.scratch_path + "/results_" + self.timestamp + ".md"
        with open(self.results_file, 'w') as f:
            f.write(self.results_md)

        log.info("[results page]: [%s] [%s]", self.name, self.results_file)
        return self.results_md
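
# The snippet above calls a _print_msg_list() helper that is not shown. A
# plausible sketch, assuming it renders a sub-heading followed by the
# messages in a fenced block and returns the resulting Markdown (the real
# implementation may differ):
def _print_msg_list(msg_list_name, msg_list):
    md = ""
    if msg_list:
        md += "### {}\n".format(msg_list_name)
        md += "```\n"
        for msg in msg_list:
            md += msg + "\n"
        md += "```\n"
    return md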
Example #10
    def resolve_hjson_raw(self, path, hjson_dict):
        import_cfgs = []
        use_cfgs = []
        for key, dict_val in hjson_dict.items():
            # If key is 'import_cfgs' then add to the list of cfgs to process
            if key == 'import_cfgs':
                import_cfgs.extend(dict_val)
                continue

            # The 'use_cfgs' key is only allowed in a primary cfg. If this is
            # a primary cfg, collect its entries; otherwise it is an error.
            if key == 'use_cfgs':
                if not self.is_primary_cfg:
                    log.error("Key 'use_cfgs' encountered in the non-primary "
                              "cfg file list {!r}.".format(path))
                    sys.exit(1)

                use_cfgs.extend(dict_val)
                continue

            # Otherwise, set an attribute on self.
            self._set_attribute(path, key, dict_val)

        # Parse imported cfgs
        for cfg_file in import_cfgs:
            if cfg_file not in self.imported_cfg_files:
                self.imported_cfg_files.append(cfg_file)
                # Substitute wildcards in cfg_file files since we need to process
                # them right away.
                cfg_file = subst_wildcards(cfg_file, self.__dict__)
                self._parse_cfg(cfg_file, False)
            else:
                log.error("Cfg file \"%s\" has already been parsed", cfg_file)

        # Parse primary cfg files
        if self.is_primary_cfg:
            for entry in use_cfgs:
                if type(entry) is str:
                    # Treat this as a file entry
                    # Substitute wildcards in cfg_file files since we need to process
                    # them right away.
                    cfg_file = subst_wildcards(entry,
                                               self.__dict__,
                                               ignore_error=True)
                    self.cfgs.append(self.create_instance(cfg_file))

                elif type(entry) is dict:
                    # Treat this as a cfg expanded in-line
                    temp_cfg_file = self._conv_inline_cfg_to_hjson(entry)
                    if not temp_cfg_file:
                        continue
                    self.cfgs.append(self.create_instance(temp_cfg_file))

                    # Delete the temp_cfg_file once the instance is created
                    try:
                        log.log(VERBOSE, "Deleting temp cfg file:\n%s",
                                temp_cfg_file)
                        os.remove(temp_cfg_file)
                    except OSError:
                        log.error("Failed to remove temp cfg file:\n%s",
                                  temp_cfg_file)

                else:
                    log.error(
                        "Type of entry \"%s\" in the \"use_cfgs\" key is invalid: %s",
                        entry, str(type(entry)))
                    sys.exit(1)
Example #11
    def _gen_results(self, results):
        # '''
        # The function is called after the regression has completed. It looks
        # for a results.hjson file with aggregated results from the lint run.
        # The hjson needs to have the following format:
        #
        # {
        #     bucket_key: [str],
        #     // other buckets according to message_buckets configuration
        # }
        #
        # Each bucket key points to a list of signatures (strings).
        # The bucket categories and severities are defined in the
        # message_buckets class variable, and can be set via Hjson Dvsim
        # config files.
        #
        # Note that if this is a primary config, the results will
        # be generated using the _gen_results_summary function
        # '''

        # Generate results table for runs.
        results_str = f'## {self.results_title}\n\n'
        results_str += f'### {self.timestamp_long}\n'
        if self.revision:
            results_str += f'### {self.revision}\n'
        results_str += f'### Branch: {self.branch}\n'
        results_str += f'### Tool: {self.tool.upper()}\n\n'

        # Load all result files from all build modes and convert them to
        # message buckets.
        self.result = []
        self.result_summary = MsgBuckets(self.message_buckets)
        for mode in self.build_modes:
            result_path = Path(
                subst_wildcards(self.build_dir, {'build_mode': mode.name}) +
                '/results.hjson')
            log.info('[results:hjson]: [%s]: [%s]', self.name, result_path)
            # TODO(#9079): replace this with native log parser
            msgs = MsgBuckets(self.message_buckets)
            msgs.load_hjson(result_path)
            self.result.append(msgs)
            # Aggregate with summary results
            self.result_summary += msgs

        # Construct Header
        labels = self.result_summary.get_labels(self.report_severities)
        header = ['Build Mode'] + labels
        colalign = ('center', ) * len(header)
        table = [header]
        fail_msgs = ''
        self.errors_seen = 0
        keys = self.result_summary.get_keys(self.report_severities)
        for mode, res in zip(self.build_modes, self.result):
            row = [mode.name] + res.get_counts_md(keys)
            table.append(row)
            self.errors_seen += res.has_signatures(self.fail_severities)
            fail_msgs += f"\n### Messages for Build Mode `'{mode.name}'`\n"
            fail_msgs += res.print_signatures_md(self.report_severities,
                                                 self.max_msg_count)

        if len(table) > 1:
            self.results_md = results_str + tabulate(
                table, headers='firstrow', tablefmt='pipe',
                colalign=colalign) + '\n'

            # The email and published reports will default to self.results_md
            # if they are empty. In case they need to be sanitized, override
            # them and do not append detailed messages.
            if self.sanitize_email_results:
                self.email_results_md = self.results_md
            if self.sanitize_publish_results:
                self.publish_results_md = self.results_md
            # Locally generated result always contains all details.
            self.results_md += fail_msgs
        else:
            self.results_md = f'{results_str}\nNo results to display.\n'
            self.email_results_md = self.results_md
            self.publish_results_md = self.results_md

        return self.results_md
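
# Example #11 relies on a MsgBuckets class that is not shown. Below is a
# skeleton of the interface implied by its usage above; the method bodies
# (and any behaviour beyond the visible call sites) are assumptions.
class MsgBuckets:
    def __init__(self, message_buckets):
        """Set up buckets from the message_buckets configuration."""

    def load_hjson(self, path):
        """Parse one results.hjson file into the buckets."""

    def __iadd__(self, other):
        """Aggregate another MsgBuckets instance into this one."""
        return self

    def get_labels(self, severities):
        """Return the column labels for the given severities."""

    def get_keys(self, severities):
        """Return the bucket keys for the given severities."""

    def get_counts_md(self, keys):
        """Return per-bucket signature counts, formatted for the table."""

    def has_signatures(self, severities):
        """Return whether any messages of these severities were seen."""

    def print_signatures_md(self, severities, max_msg_count):
        """Return a Markdown listing of signatures, capped per bucket."""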