def __test_skip_reports(self, plist_file_path: str,
                            expected_plist_file_path: str,
                            skip_handlers: SkipListHandlers):
        """ Check that applying the skip handlers to the reports of the given
        plist file yields exactly the reports of the reference plist file. """
        actual = reports_handler.skip(
            report_file.get_reports(plist_file_path),
            skip_handlers=skip_handlers)

        expected = report_file.get_reports(expected_plist_file_path)

        self.assertEqual(actual, expected)
Ejemplo n.º 2
0
    def test_clang37_plist(self):
        """
        Plist files produced by clang 3.7 already contain the checker name,
        but generating a report hash is still needed.
        """
        plist_path = os.path.join(self.__plist_test_files,
                                  'clang-3.7.plist')
        reports = report_file.get_reports(plist_path)
        self.assertEqual(len(reports), 3)

        self.assertEqual(get_mentioned_original_files(reports),
                         set(SRC_FILES))

        for report in reports:
            # Every report must carry a real checker name.
            self.assertNotEqual(report.checker_name, 'NOT FOUND')

            if report.checker_name == 'core.DivideZero':
                expected = deepcopy(div_zero_skel)
                expected.report_hash = '51bd152830c2599e98c89cfc78890d0b'

                self.assertEqual(report, expected)
            elif report.checker_name == 'core.StackAddressEscape':
                # The checker name is included in the hash, so the hash
                # differs from the skeleton's default value.
                expected = deepcopy(stack_addr_skel)
                expected.report_hash = '3439d5e09aeb5b69a835a6f0a307dfb6'

                self.assertEqual(report, expected)
Ejemplo n.º 3
0
    def test_clang50_trunk_plist(self):
        """
        Plist files produced by clang 5.0 trunk contain both the checker
        name and the report hash.
        """
        plist_path = os.path.join(self.__plist_test_files,
                                  'clang-5.0-trunk.plist')
        reports = report_file.get_reports(plist_path)
        self.assertEqual(len(reports), 3)

        self.assertEqual(get_mentioned_original_files(reports),
                         set(SRC_FILES))

        for report in reports:
            # The checker name must come from the plist file itself.
            self.assertNotEqual(report.checker_name, 'NOT FOUND')
            self.assertIn(report.checker_name, self.__found_checker_names)

            if report.checker_name == 'core.DivideZero':
                # The stored skeleton is still valid for this version.
                self.assertEqual(report, div_zero_skel)

                self.assertEqual(report.bug_path_events,
                                 div_zero_skel.bug_path_events)
                self.assertEqual(report.bug_path_positions,
                                 div_zero_skel.bug_path_positions)
            elif report.checker_name == 'core.StackAddressEscape':
                expected = deepcopy(stack_addr_skel)
                expected.report_hash = 'a6d3464f8aab9eb31a8ea7e167e84322'

                self.assertEqual(report, expected)
                self.assertEqual(report.bug_path_events,
                                 expected.bug_path_events)
                self.assertEqual(report.bug_path_positions,
                                 expected.bug_path_positions)
Ejemplo n.º 4
0
    def __test_html_builder(self, proj: str):
        """
        Build HTML output from the given project's plist file and check that
        the expected artifacts (report page and index) are created.
        """
        proj_dir = os.path.join(self.test_workspace, 'test_files', proj)
        plist_file = os.path.join(proj_dir, f"{proj}.plist")
        output_dir = os.path.join(proj_dir, 'html')
        output_path = os.path.join(output_dir, f"{proj}.plist.html")

        if not os.path.exists(output_dir):
            os.mkdir(output_dir)

        reports = report_file.get_reports(plist_file)

        builder = report_to_html.HtmlBuilder(self.layout_dir)
        report_to_html.convert(plist_file, reports, output_dir, builder)

        self.assertTrue(os.path.exists(output_path))

        # The index and statistics pages are generated separately.
        builder.create_index_html(output_dir)
        builder.create_statistics_html(output_dir)

        self.assertTrue(
            os.path.exists(os.path.join(output_dir, 'index.html')))
Ejemplo n.º 5
0
    def test_get_report_data_simple(self):
        """ Get report data for plist which contains simple reports. """
        plist_file = os.path.join(self.test_workspace, 'test_files',
                                  'simple', 'simple.plist')

        reports = report_file.get_reports(plist_file)

        builder = report_to_html.HtmlBuilder(self.layout_dir)
        builder._add_html_reports(reports)

        self.assertEqual(len(builder.files), 1)

        html_reports = builder.html_reports
        self.assertEqual(len(html_reports), 2)

        # Both reports are plain event paths: no notes, no macro expansions.
        for checker in ('deadcode.DeadStores', 'core.DivideZero'):
            report = next(r for r in html_reports
                          if r['checkerName'] == checker)
            self.assertEqual(len(report['notes']), 0)
            self.assertEqual(len(report['macros']), 0)
            self.assertGreaterEqual(len(report['events']), 1)
Ejemplo n.º 6
0
def get_reports(analyzer_result_file_path: str,
                checker_labels: CheckerLabels) -> List[Report]:
    """ Get reports from the given analyzer result file.

    CppCheck generates a '0' value for the report hash. In case all of the
    reports in a result file contain only a hash with '0' value, overwrite
    the hash values in the report file with a context free hash value and
    re-read the file.
    """
    reports = report_file.get_reports(analyzer_result_file_path,
                                      checker_labels)

    # Guard against an empty report list: all() is vacuously True on an
    # empty iterable, which would trigger a pointless rewrite and re-read
    # of a result file that contains no reports at all.
    if reports and all(r.report_hash == '0' for r in reports):
        report_file.replace_report_hash(analyzer_result_file_path,
                                        HashType.CONTEXT_FREE)

        reports = report_file.get_reports(analyzer_result_file_path,
                                          checker_labels)

    return reports
Ejemplo n.º 7
0
    def test_compilation_db_relative_file_path(self):
        """
        Test relative path in compilation database.

        If the file/directory paths in the compilation database are relative,
        the ClangSA analyzer will generate plist files where the file paths
        are also relative to the directory where the analyzer was executed.
        After the plist files are created, the report converter will try to
        post-process these files and create absolute paths from the relative
        paths. This test checks whether these file paths exist.
        """
        test_dir = os.path.join(self.test_workspace, "test_rel_file_path")
        os.makedirs(test_dir)

        source_file_name = "success.c"
        shutil.copy(os.path.join(self.test_dir, source_file_name), test_dir)

        cc_files_dir_path = os.path.join(test_dir, "codechecker_files")
        os.makedirs(cc_files_dir_path, exist_ok=True)

        build_json = os.path.join(cc_files_dir_path, "build.json")
        report_dir = os.path.join(cc_files_dir_path, "reports")

        # Create a compilation database with relative paths only.
        build_log = [{
            "directory": ".",
            "command": f"cc -c {source_file_name} -o /dev/null",
            "file": source_file_name
        }]

        with open(build_json, 'w', encoding="utf-8",
                  errors="ignore") as outfile:
            json.dump(build_log, outfile)

        # Analyze the project.
        analyze_cmd = [
            self._codechecker_cmd, "analyze", build_json, "--report-hash",
            "context-free-v2", "-o", report_dir, "--clean"
        ]

        # subprocess.run waits for the process and exposes the return code
        # directly; the captured output itself is not needed here.
        process = subprocess.run(analyze_cmd,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 cwd=test_dir,
                                 encoding="utf-8",
                                 errors="ignore",
                                 check=False)
        self.assertEqual(process.returncode, 0)

        # Check that the file paths stored in the plist files exist.
        plist_files = glob.glob(os.path.join(report_dir, '*.plist'))
        for plist_file in plist_files:
            reports = report_file.get_reports(plist_file)
            for r in reports:
                for file in r.files:
                    self.assertTrue(os.path.exists(file.original_path))
Ejemplo n.º 8
0
def parse(input_path: str,
          output_path: str,
          layout_dir: str,
          html_builder: Optional[HtmlBuilder] = None) -> Set[str]:
    """
    Parses analyzer result files from the given input directory to the output
    directory.

    Any previous content of the output directory is removed first.

    Return a set of source files which changed since the analysis.
    """
    files = []
    input_path = os.path.abspath(input_path)
    output_dir = os.path.abspath(output_path)

    if os.path.exists(output_dir):
        LOG.info(
            "Previous analysis results in '%s' have been removed, "
            "overwriting with current results.", output_dir)
        shutil.rmtree(output_dir)

    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # Accept either a single result file or a directory of result files
    # (non-recursive: only the top level of the directory is scanned).
    if os.path.isfile(input_path):
        files.append(input_path)
    elif os.path.isdir(input_path):
        _, _, file_names = next(os.walk(input_path), ([], [], []))
        files = [
            os.path.join(input_path, file_name) for file_name in file_names
        ]

    # Source files which modification time changed since the last analysis.
    changed_source_files: Set[str] = set()

    if not html_builder:
        html_builder = HtmlBuilder(layout_dir)

    for file_path in files:
        if not report_file.is_supported(file_path):
            LOG.info(
                "\nSkipping input file %s as it is not supported "
                "analyzer result file.", file_path)
            continue

        # NOTE: no f-string prefix here; the message uses lazy %-style
        # arguments, and an f-prefix would silently disable any future
        # placeholder added to the literal.
        LOG.info("\nParsing input file '%s'", file_path)

        reports = report_file.get_reports(file_path)
        changed_source = convert(file_path, reports, output_dir, html_builder)

        if changed_source:
            changed_source_files.update(changed_source)

    return changed_source_files
    def test_gen_report_path_hash(self):
        """ Test path hash generation for multiple errors. """
        test_plist = os.path.join(self.test_file_dir, 'cpp',
                                  'multi_error.plist')

        # Maps the stored report hash to the expected path hash.
        expected_path_hash = {
            'f48840093ef89e291fb68a95a5181612':
                '93cb93bdcee10434f9cf9f486947c88e',
            'e4907182b363faf2ec905fc32cc5a4ab':
                '71a4dc24bf88af2b13be83d8d15bd6f0'
        }

        for report in get_reports(test_plist):
            self.assertEqual(get_report_path_hash(report),
                             expected_path_hash[report.report_hash])
    def test_gen_report_hash_context_free(self):
        """ Test context free hash generation for multi errors. """
        test_plist = os.path.join(self.test_file_dir, 'cpp',
                                  'multi_error.plist')

        # Maps the stored report hash to the expected context free hash.
        expected_report_hash = {
            'f48840093ef89e291fb68a95a5181612':
                'c2a2856f566ed67ed1c3596ad06d42db',
            'e4907182b363faf2ec905fc32cc5a4ab':
                '5a92e13f07c81c6d3197e7d910827e6e'
        }

        for report in get_reports(test_plist):
            self.assertEqual(get_report_hash(report, HashType.CONTEXT_FREE),
                             expected_report_hash[report.report_hash])
    def test_gen_report_hash_path_sensitive(self):
        """ Test path sensitive report hash generation for multiple errors. """
        test_plist = os.path.join(self.test_file_dir, 'cpp',
                                  'multi_error.plist')

        # Maps the stored report hash to the expected path sensitive hash.
        expected_report_hash = {
            'f48840093ef89e291fb68a95a5181612':
                'fdf11db1183dba2da4cd188e70d142e5',
            'e4907182b363faf2ec905fc32cc5a4ab':
                '774799eb31f5fb8514988a7f6736b33e'
        }

        for report in get_reports(test_plist):
            self.assertEqual(
                get_report_hash(report, HashType.PATH_SENSITIVE),
                expected_report_hash[report.report_hash])
    def test_gen_report_hash_diag_messages(self):
        """ Test diagnostic message hash generation for multi errors. """
        test_plist = os.path.join(self.test_file_dir, 'cpp',
                                  'multi_error.plist')

        # Maps the stored report hash to the expected diagnostic message hash.
        expected_report_hash = {
            'f48840093ef89e291fb68a95a5181612':
                'c137804816bf2d5a67b6c067cd2ab5e8',
            'e4907182b363faf2ec905fc32cc5a4ab':
                'd08c2f8c5c4d8533e1de3fa88241f813'
        }

        for report in get_reports(test_plist):
            self.assertEqual(
                get_report_hash(report, HashType.DIAGNOSTIC_MESSAGE),
                expected_report_hash[report.report_hash])
Ejemplo n.º 13
0
    def test_get_report_data_notes(self):
        """ Get report data for plist which contains notes. """
        plist_file = os.path.join(self.test_workspace, 'test_files',
                                  'notes', 'notes.plist')

        reports = report_file.get_reports(plist_file)

        builder = report_to_html.HtmlBuilder(self.layout_dir)
        html_reports, files = builder._get_html_reports(reports)

        self.assertEqual(len(files), 1)
        self.assertEqual(len(html_reports), 1)

        # The single report carries one note and no macro expansions.
        report = html_reports[0]
        self.assertEqual(len(report['notes']), 1)
        self.assertEqual(len(report['macros']), 0)
        self.assertGreaterEqual(len(report['events']), 1)
        self.assertEqual(report['checker']['name'], 'alpha.clone.CloneChecker')
Ejemplo n.º 14
0
    def test_get_report_data_macros(self):
        """ Get report data for plist which contains macro expansion. """
        proj_macros = os.path.join(self.test_workspace, 'test_files', 'macros')
        plist_file = os.path.join(proj_macros, 'macros.plist')

        reports = report_file.get_reports(plist_file)
        self.assertEqual(len(reports), 1)

        html_builder = report_to_html.HtmlBuilder(self.layout_dir)
        html_reports, files = html_builder._get_html_reports(reports)

        self.assertEqual(len(files), 1)
        self.assertEqual(len(html_reports), 1)

        # The single report carries one macro expansion and no notes.
        report = html_reports[0]
        self.assertEqual(len(report['notes']), 0)
        self.assertEqual(len(report['macros']), 1)
        self.assertGreaterEqual(len(report['events']), 1)
        self.assertEqual(report['checker']['name'], 'core.NullDereference')
Ejemplo n.º 15
0
    def postprocess_result(self, skip_handler: Optional[SkipListHandler]):
        """
        Generate analyzer result output file which can be parsed and stored
        into the database.
        """
        # Nothing to do when the analyzer produced no result file.
        if not os.path.exists(self.analyzer_result_file):
            return

        reports = [
            r for r in report_file.get_reports(self.analyzer_result_file,
                                               self.checker_labels)
            if not r.skip(skip_handler)]

        # Map the configured report hash type name to the hash algorithm;
        # unknown/unset names leave the original hashes untouched.
        hash_type = {
            'context-free': HashType.CONTEXT_FREE,
            'context-free-v2': HashType.CONTEXT_FREE,
            'diagnostic-message': HashType.DIAGNOSTIC_MESSAGE
        }.get(self.report_hash_type)

        if hash_type is not None:
            for report in reports:
                report.report_hash = get_report_hash(report, hash_type)

        report_file.create(self.analyzer_result_file, reports,
                           self.checker_labels, self.analyzer_info)
Ejemplo n.º 16
0
    def test_clang38_trunk_plist(self):
        """
        Plist files produced by clang 3.8 trunk contain both the checker
        name and the report hash.
        """
        plist_path = os.path.join(self.__plist_test_files,
                                  'clang-3.8-trunk.plist')
        reports = report_file.get_reports(plist_path)
        self.assertEqual(len(reports), 3)

        self.assertEqual(get_mentioned_original_files(reports),
                         set(SRC_FILES))

        for report in reports:
            self.assertIn(report.checker_name, self.__found_checker_names)

            if report.checker_name == 'core.DivideZero':
                # The stored skeletons are still valid for this version.
                self.assertEqual(report, div_zero_skel)
            elif report.checker_name == 'core.StackAddressEscape':
                self.assertEqual(report, stack_addr_skel)
Ejemplo n.º 17
0
    def get_reports(self, analyzer_result_path: str) -> List[Report]:
        """ Get reports from the given analyzer result.

        The input may be a CppCheck result directory (every *.plist file in
        it is read) or a single plist file.
        """
        if os.path.isdir(analyzer_result_path):
            plist_files = glob.glob(os.path.join(
                analyzer_result_path, "*.plist"))
        elif os.path.isfile(analyzer_result_path) and \
                analyzer_result_path.endswith(".plist"):
            plist_files = [analyzer_result_path]
        else:
            LOG.error("The given input should be an existing CppCheck result "
                      "directory or a plist file.")
            return []

        reports: List[Report] = []
        # Share one file cache across all plist files of the result.
        file_cache: Dict[str, File] = {}
        for plist_file in plist_files:
            reports.extend(report_file.get_reports(
                plist_file, None, file_cache))

        return reports
Ejemplo n.º 18
0
    def __test_html_builder(self, proj: str) -> str:
        """
        Build HTML files from the given project's plist files and verify the
        generated artifacts.

        Returns the directory the HTML output was written to.
        """
        html_builder = report_to_html.HtmlBuilder(self.layout_dir)

        proj_dir = os.path.join(self.test_workspace, 'test_files', proj)
        output_dir = os.path.join(proj_dir, 'html')
        if not os.path.exists(output_dir):
            os.mkdir(output_dir)

        processed_path_hashes = set()
        for file_path in glob.glob(os.path.join(proj_dir, "*.plist")):
            file_name = os.path.basename(file_path)
            output_path = os.path.join(output_dir, f"{file_name}.html")

            reports = report_file.get_reports(file_path)
            # Filter out reports whose path hash was already processed in a
            # previous iteration.
            reports = reports_helper.skip(
                reports, processed_path_hashes)

            report_to_html.convert(
                file_path, reports, output_dir, html_builder)

            # An HTML file is only produced when there is at least one
            # report left to render.
            html_file_exists = os.path.exists(output_path)
            if reports:
                self.assertTrue(html_file_exists)
            else:
                self.assertFalse(html_file_exists)

        html_builder.create_index_html(output_dir)
        html_builder.create_statistics_html(output_dir)

        index_html = os.path.join(output_dir, 'index.html')
        self.assertTrue(os.path.exists(index_html))

        return output_dir
Ejemplo n.º 19
0
 def test_empty_file(self):
     """An empty plist file must yield an empty report list."""
     empty_plist = os.path.join(self.__plist_test_files, 'empty_file')
     self.assertEqual(report_file.get_reports(empty_plist), [])
Ejemplo n.º 20
0
    def __process_report_file(
        self,
        report_file_path: str,
        session: DBSession,
        source_root: str,
        run_id: int,
        file_path_to_id: Dict[str, int],
        run_history_time: datetime,
        skip_handler: skiplist_handler.SkipListHandler,
        hash_map_reports: Dict[str, List[Any]]
    ) -> bool:
        """
        Process and save reports from the given report file to the database.

        Args:
            report_file_path: Path of the analyzer result file to store.
            session: Database session the reports are stored through.
            source_root: Directory the analyzed source files were extracted
                to; used to look up source code review comments.
            run_id: Database id of the run the reports belong to.
            file_path_to_id: Maps (trimmed) file paths to database file ids.
            run_history_time: Timestamp of the current store event.
            skip_handler: Decides which source files should be skipped.
            hash_map_reports: Maps report hashes to previously stored
                reports; used to derive detection status and detection time.

        Returns:
            True (empty report files are also treated as processed).
        """
        reports = report_file.get_reports(report_file_path)

        # An empty result file is considered successfully processed.
        if not reports:
            return True

        def set_review_status(report: Report):
            """
            Set review status for the given report if there is any source code
            comment.
            """
            checker_name = report.checker_name
            last_report_event = report.bug_path_events[-1]

            # The original file path is needed here, not the trimmed, because
            # the source files are extracted as the original file path.
            file_name = report.file.original_path

            source_file_name = os.path.realpath(
                os.path.join(source_root, file_name.strip("/")))

            # Check and store source code comments.
            if not os.path.isfile(source_file_name):
                return

            report_line = last_report_event.range.end_line
            source_file = os.path.basename(file_name)

            src_comment_data = parse_codechecker_review_comment(
                source_file_name, report_line, checker_name)

            if len(src_comment_data) == 1:
                # Exactly one comment: apply it. Statuses other than
                # 'confirmed'/'intentional' fall back to false positive.
                status = src_comment_data[0].status
                rw_status = ttypes.ReviewStatus.FALSE_POSITIVE
                if status == 'confirmed':
                    rw_status = ttypes.ReviewStatus.CONFIRMED
                elif status == 'intentional':
                    rw_status = ttypes.ReviewStatus.INTENTIONAL

                self.__report_server._setReviewStatus(
                    session, report.report_hash, rw_status,
                    src_comment_data[0].message, run_history_time)
            elif len(src_comment_data) > 1:
                # Ambiguous comments are not applied, only reported.
                LOG.warning(
                    "Multiple source code comment can be found "
                    "for '%s' checker in '%s' at line %s. "
                    "This bug will not be suppressed!",
                    checker_name, source_file, report_line)

                self.__wrong_src_code_comments.append(
                    f"{source_file}|{report_line}|{checker_name}")

        def get_missing_file_ids(report: Report) -> List[str]:
            """ Returns file paths which database file id is missing. """
            missing_ids_for_files = []
            for file_path in report.trimmed_files:
                # -1 sentinel: the path has no database id registered yet.
                file_id = file_path_to_id.get(file_path, -1)
                if file_id == -1:
                    missing_ids_for_files.append(file_path)

            return missing_ids_for_files

        # Per-directory metadata: analyzer mapping and analysis info are
        # keyed by the report file's parent directory.
        root_dir_path = os.path.dirname(report_file_path)
        mip = self.__mips[root_dir_path]
        analysis_info = self.__analysis_info.get(root_dir_path)

        for report in reports:
            report.trim_path_prefixes(self.__trim_path_prefixes)

            # Reports referencing files unknown to the database are skipped.
            missing_ids_for_files = get_missing_file_ids(report)
            if missing_ids_for_files:
                LOG.warning("Failed to get database id for file path '%s'! "
                            "Skip adding report: %s:%d:%d [%s]",
                            ' '.join(missing_ids_for_files), report.file.path,
                            report.line, report.column, report.checker_name)
                continue

            self.__all_report_checkers.add(report.checker_name)

            if skip_handler.should_skip(report.file.original_path):
                continue

            # Deduplicate: a report with an already stored path hash is not
            # added again.
            report_path_hash = get_report_path_hash(report)
            if report_path_hash in self.__already_added_report_hashes:
                LOG.debug('Not storing report. Already added: %s', report)
                continue

            LOG.debug("Storing report to the database...")

            detection_status = 'new'
            detected_at = run_history_time

            # A report seen in a previous run keeps its original detection
            # time and becomes 'reopened' (was resolved) or 'unresolved'.
            if report.report_hash in hash_map_reports:
                old_report = hash_map_reports[report.report_hash][0]
                old_status = old_report.detection_status
                detection_status = 'reopened' \
                    if old_status == 'resolved' else 'unresolved'
                detected_at = old_report.detected_at

            # Prefer the analyzer name recorded in the metadata mapping over
            # the one carried by the report itself.
            analyzer_name = mip.checker_to_analyzer.get(
                report.checker_name, report.analyzer_name)

            report_id = self.__add_report(
                session, run_id, report, file_path_to_id,
                detection_status, detected_at, analysis_info, analyzer_name)

            self.__new_report_hashes.add(report.report_hash)
            self.__already_added_report_hashes.add(report_path_hash)

            set_review_status(report)

            LOG.debug("Storing report done. ID=%d", report_id)

        return True
Ejemplo n.º 21
0
def main(args):
    """
    Entry point for parsing some analysis results and printing them to the
    stdout in a human-readable format.

    Exits with status 1 on configuration/argument errors and with status 2
    when at least one report was found.
    """
    # If the given output format is not 'table', redirect logger's output to
    # the stderr.
    stream = None
    if 'export' in args and args.export not in [None, 'table', 'html']:
        stream = 'stderr'

    init_logger(args.verbose if 'verbose' in args else None, stream)

    try:
        cmd_config.check_config_file(args)
    except FileNotFoundError as fnerr:
        LOG.error(fnerr)
        sys.exit(1)

    # Validate the requested export format and its argument combination.
    export = args.export if 'export' in args else None
    if export == 'html' and 'output_path' not in args:
        LOG.error("Argument --export not allowed without argument --output "
                  "when exporting to HTML.")
        sys.exit(1)

    if export == 'gerrit' and not gerrit.mandatory_env_var_is_set():
        sys.exit(1)

    if export and export not in EXPORT_TYPES:
        LOG.error("Unknown export format: %s", export)
        sys.exit(1)

    context = analyzer_context.get_context()

    # To ensure the help message prints the default folder properly,
    # the 'default' for 'args.input' is a string, not a list.
    # But we need lists for the foreach here to work.
    if isinstance(args.input, str):
        args.input = [args.input]

    src_comment_status_filter = args.review_status

    # Build a suppress handler when a suppress file was given, creating the
    # file first if --export-source-suppress requested it.
    suppr_handler = None
    if 'suppress' in args:
        __make_handler = False
        if not os.path.isfile(args.suppress):
            if 'create_suppress' in args:
                with open(args.suppress,
                          'w',
                          encoding='utf-8',
                          errors='ignore') as _:
                    # Just create the file.
                    __make_handler = True
                    LOG.info(
                        "Will write source-code suppressions to "
                        "suppress file: %s", args.suppress)
            else:
                LOG.warning(
                    "Suppress file '%s' given, but it does not exist"
                    " -- will not suppress anything.", args.suppress)
        else:
            __make_handler = True

        if __make_handler:
            suppr_handler = suppress_handler.\
                GenericSuppressHandler(args.suppress,
                                       'create_suppress' in args,
                                       src_comment_status_filter)
    elif 'create_suppress' in args:
        LOG.error("Can't use '--export-source-suppress' unless '--suppress "
                  "SUPPRESS_FILE' is also given.")
        sys.exit(1)

    # Determine where the output goes: a directory for HTML export, a single
    # file for every other export type.
    output_dir_path = None
    output_file_path = None
    if 'output_path' in args:
        output_path = os.path.abspath(args.output_path)

        if export == 'html':
            output_dir_path = output_path
        else:
            if os.path.exists(output_path) and os.path.isdir(output_path):
                # For backward compatibility reason we handle the use case
                # when directory is provided to this command.
                LOG.error(
                    "Please provide a file path instead of a directory "
                    "for '%s' export type!", export)
                sys.exit(1)

            if export == 'baseline' and not baseline.check(output_path):
                LOG.error("Baseline files must have '.baseline' extensions.")
                sys.exit(1)

            output_file_path = output_path
            output_dir_path = os.path.dirname(output_file_path)

        if not os.path.exists(output_dir_path):
            os.makedirs(output_dir_path)

    def get_output_file_path(default_file_name: str) -> Optional[str]:
        """ Return an output file path.

        The explicitly given file path wins; otherwise the default file name
        is placed in the output directory. Returns None when no output
        location was configured at all.
        """
        if output_file_path:
            return output_file_path

        if output_dir_path:
            return os.path.join(output_dir_path, default_file_name)

    # Assemble the skip handlers: an include-only filter built from --file,
    # plus the user-provided skip file if any.
    skip_handlers = SkipListHandlers()
    if 'files' in args:
        items = [f"+{file_path}" for file_path in args.files]
        items.append("-*")
        skip_handlers.append(SkipListHandler("\n".join(items)))
    if 'skipfile' in args:
        with open(args.skipfile, 'r', encoding='utf-8', errors='ignore') as f:
            skip_handlers.append(SkipListHandler(f.read()))

    trim_path_prefixes = args.trim_path_prefix if \
        'trim_path_prefix' in args else None

    all_reports = []
    statistics = Statistics()
    file_cache = {}  # For memory efficiency.
    changed_files: Set[str] = set()
    processed_path_hashes = set()
    processed_file_paths = set()
    print_steps = 'print_steps' in args

    html_builder: Optional[report_to_html.HtmlBuilder] = None
    if export == 'html':
        html_builder = report_to_html.HtmlBuilder(
            context.path_plist_to_html_dist, context.checker_labels)

    # Walk every analyzer result file of every input directory, collecting
    # reports, statistics and changed source files along the way.
    for dir_path, file_paths in report_file.analyzer_result_files(args.input):
        metadata = get_metadata(dir_path)
        for file_path in file_paths:
            reports = report_file.get_reports(file_path,
                                              context.checker_labels,
                                              file_cache)

            reports = reports_helper.skip(reports, processed_path_hashes,
                                          skip_handlers, suppr_handler,
                                          src_comment_status_filter)

            statistics.num_of_analyzer_result_files += 1
            for report in reports:
                if report.changed_files:
                    changed_files.update(report.changed_files)

                statistics.add_report(report)

                if trim_path_prefixes:
                    report.trim_path_prefixes(trim_path_prefixes)

            all_reports.extend(reports)

            # Print reports continuously.
            if not export:
                file_report_map = plaintext.get_file_report_map(
                    reports, file_path, metadata)
                plaintext.convert(file_report_map, processed_file_paths,
                                  print_steps)
            elif export == 'html':
                print(f"Parsing input file '{file_path}'.")
                report_to_html.convert(file_path, reports, output_dir_path,
                                       html_builder)

    # Emit the collected reports in the requested format.
    if export is None:  # Plain text output
        statistics.write()
    elif export == 'html':
        html_builder.finish(output_dir_path, statistics)
    elif export == 'json':
        data = report_to_json.convert(all_reports)
        dump_json_output(data, get_output_file_path("reports.json"))
    elif export == 'codeclimate':
        data = codeclimate.convert(all_reports)
        dump_json_output(data, get_output_file_path("reports.json"))
    elif export == 'gerrit':
        data = gerrit.convert(all_reports)
        dump_json_output(data, get_output_file_path("reports.json"))
    elif export == 'baseline':
        data = baseline.convert(all_reports)
        output_path = get_output_file_path("reports.baseline")
        if output_path:
            baseline.write(output_path, data)

    reports_helper.dump_changed_files(changed_files)

    # Non-zero exit so callers (e.g. CI) can detect that reports were found.
    if statistics.num_of_reports:
        sys.exit(2)
Ejemplo n.º 22
0
 def test_no_bug_file(self):
     """A plist file of a clean analysis must yield an empty report list."""
     no_bug_plist = os.path.join(self.__plist_test_files,
                                 'clang-3.7-noerror.plist')
     self.assertEqual(report_file.get_reports(no_bug_plist), [])