Example 1
    def skip_html_report_data_handler(report_hash, source_file, report_line,
                                      checker_name, diag, files):
        """
        Report handler which skips bugs that were suppressed by source code
        comments. Returns a tuple: the first element decides whether the
        report should be skipped and the second element is the list of source
        code comments related to the report.
        """
        files_dict = {k: v for k, v in enumerate(files)}
        report = Report({'check_name': checker_name},
                        diag['path'],
                        files_dict,
                        metadata=None)
        path_hash = get_report_path_hash(report)
        if path_hash in processed_path_hashes:
            LOG.debug("Skip report because it is a deduplication of an "
                      "already processed report!")
            LOG.debug("Path hash: %s", path_hash)
            LOG.debug(diag)
            return True, []

        skip, source_code_comments = skip_report(report_hash, source_file,
                                                 report_line, checker_name,
                                                 suppr_handler,
                                                 src_comment_status_filter)

        if skip_handler:
            skip |= skip_handler.should_skip(source_file)

        if not skip:
            processed_path_hashes.add(path_hash)

        return skip, source_code_comments
Example 2
    def skip_html_report_data_handler(report_hash, source_file, report_line,
                                      checker_name, diag, files):
        """
        Report handler which skips bugs that were suppressed by source code
        comments.
        """
        report = Report(None, diag['path'], files)
        path_hash = get_report_path_hash(report, files)
        if path_hash in processed_path_hashes:
            LOG.debug("Skip report because it is a deduplication of an "
                      "already processed report!")
            LOG.debug("Path hash: %s", path_hash)
            LOG.debug(diag)
            return True

        skip = skip_report(report_hash,
                           source_file,
                           report_line,
                           checker_name,
                           suppr_handler)
        if skip_handler:
            skip |= skip_handler.should_skip(source_file)

        if not skip:
            processed_path_hashes.add(path_hash)

        return skip
Example 3
    def test_report_to_gerrit_conversion_report_url(self):
        """Conversion report with absolute filepath and CC_REPORT_URL env"""

        main = {
            "location": {
                "file": 0,
                "line": 10,
                "col": 10,
            },
            "description": "some description",
            "check_name": "my_checker",
            "issue_hash_content_of_line_in_context": "dummy_hash",
            "notes": [],
            "macro_expansions": [],
        }
        bugpath = {}
        files = {0: "/home/src/lib/main.cpp"}
        metadata = {}

        report_to_convert = Report(main, bugpath, files, metadata)
        os.environ["CC_REPORT_URL"] = "localhost:8080/index.html"
        got = gerrit.convert([report_to_convert], self.severity_map)

        # Remove environment variable not to influence the other tests.
        os.environ.pop("CC_REPORT_URL")

        expected = {
            "tag": "jenkins",
            "message": "CodeChecker found 1 issue(s) in the code. "
            "See: 'localhost:8080/index.html'",
            "labels": {
                "Code-Review": -1,
                "Verified": -1
            },
            "comments": {
                "/home/src/lib/main.cpp": [{
                    "range": {
                        "start_line": 10,
                        "start_character": 10,
                        "end_line": 10,
                        "end_character": 10,
                    },
                    "message":
                    "[LOW] /home/src/lib/main.cpp:10:10: "
                    "some description [my_checker]\n10",
                }]
            },
        }
        self.assertEqual(got, expected)
Example 4
def reportData_to_report(report_data: ReportData) -> Report:
    """Create a report object from the given thrift report data."""
    main = {
        "check_name": report_data.checkerId,
        "description": report_data.checkerMsg,
        "issue_hash_content_of_line_in_context": report_data.bugHash,
        "location": {
            "line": report_data.line,
            "col": report_data.column,
            "file": 0,
        },
    }
    bug_path = None
    files = {0: report_data.checkedFile}
    # TODO: the metadata cannot be reconstructed because only the analyzer
    # name was stored; it should contain both the analyzer name and version.
    return Report(main, bug_path, files, metadata=None)
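
A minimal, hypothetical usage sketch for the converter above: it builds a thrift ReportData by hand (the checker name, message, hash, and file path are made-up values) and passes it to reportData_to_report. It assumes the thrift-generated ReportData constructor accepts as keyword arguments the same fields the function reads.

# Hypothetical usage sketch for reportData_to_report; all field values are
# made up and only the fields read by the converter above are filled in.
report_data = ReportData(
    checkerId="core.NullDereference",
    checkerMsg="Dereference of null pointer",
    bugHash="dummy_hash",
    line=42,
    column=7,
    checkedFile="/home/src/lib/main.cpp")

report = reportData_to_report(report_data)
# Assumes the Report object exposes the file index map as 'files'.
print(report.files[0])  # /home/src/lib/main.cpp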
Example 5
    def test_report_to_gerrit_conversion_abs_filepath(self):
        """Conversion report with absolute filepath"""

        main = {
            "location": {
                "file": 0,
                "line": 10,
                "col": 10,
            },
            "description": "some description",
            "check_name": "my_checker",
            "issue_hash_content_of_line_in_context": "dummy_hash",
            "notes": [],
            "macro_expansions": [],
        }
        bugpath = {}
        files = {0: "/home/src/lib/main.cpp"}
        metadata = {}

        report_to_convert = Report(main, bugpath, files, metadata)

        got = gerrit.convert([report_to_convert], self.severity_map)
        expected = {
            "tag": "jenkins",
            "message": "CodeChecker found 1 issue(s) in the code.",
            "labels": {
                "Code-Review": -1,
                "Verified": -1
            },
            "comments": {
                "/home/src/lib/main.cpp": [{
                    "range": {
                        "start_line": 10,
                        "start_character": 10,
                        "end_line": 10,
                        "end_character": 10,
                    },
                    "message":
                    "[LOW] /home/src/lib/main.cpp:10:10: "
                    "some description [my_checker]\n10",
                }]
            },
        }
        self.assertEqual(got, expected)
Example 6
def parse_plist_file(path, source_root=None, allow_plist_update=True):
    """
    Parse the reports from a plist file.
    One plist file can contain multiple reports.
    """
    LOG.debug("Parsing plist: %s", path)

    reports = []
    files = []
    try:
        plist = None
        with open(path, 'rb') as plist_file_obj:
            plist = parse_plist(plist_file_obj)

        if not plist:
            LOG.error("Failed to parse plist %s", path)
            return files, reports

        files = plist['files']

        diag_changed = False
        for diag in plist['diagnostics']:

            available_keys = list(diag.keys())

            main_section = {}
            for key in available_keys:
                # Skip the path key; it is handled separately.
                if key != 'path':
                    main_section.update({key: diag[key]})

            # We need to extend information for plist files generated
            # by older clang version (before 3.7).
            main_section['check_name'] = get_checker_name(diag, path)

            # We need to extend information for plist files generated
            # by older clang version (before 3.8).
            file_path = files[diag['location']['file']]
            if source_root:
                file_path = os.path.join(source_root, file_path.lstrip('/'))

            report_hash = diag.get('issue_hash_content_of_line_in_context')

            if not report_hash:
                # Generate hash value if it is missing from the report.
                report_hash = get_report_hash(diag, file_path,
                                              HashType.PATH_SENSITIVE)

                main_section['issue_hash_content_of_line_in_context'] = \
                    report_hash

            if 'issue_hash_content_of_line_in_context' not in diag:
                # If the report hash was not in the plist, we set it in the
                # diagnostic section for later update.
                diag['issue_hash_content_of_line_in_context'] = report_hash
                diag_changed = True

            bug_path_items = [item for item in diag['path']]

            report = Report(main_section, bug_path_items, files)
            reports.append(report)

        if diag_changed and allow_plist_update:
            # If the diagnostic section has changed we update the plist file.
            # This way the client will always send a plist file where the
            # report hash field is filled.
            with open(path, 'wb') as plist_file:
                plistlib.dump(plist, plist_file)
    except IndexError as iex:
        LOG.warning('Indexing error while processing plist file %s', path)
        LOG.warning(type(iex))
        LOG.warning(repr(iex))
        _, _, exc_traceback = sys.exc_info()
        traceback.print_tb(exc_traceback, limit=1, file=sys.stdout)
    except Exception as ex:
        LOG.warning('Error while processing reports from the plist file: %s',
                    path)
        traceback.print_exc()
        LOG.warning(type(ex))
        LOG.warning(ex)
    finally:
        return files, reports
Example 7
    def test_report_to_gerrit_conversion_filter_changed_files(self):
        """Conversion report with changed files filter.

        Reports from the other.cpp file should not be in the converted list.
        """

        reports_to_convert = []

        # Empty for all reports.
        bugpath = {}
        metadata = {}

        main = {
            "location": {
                "file": 0,
                "line": 10,
                "col": 10,
            },
            "description": "some description",
            "check_name": "my_checker",
            "issue_hash_content_of_line_in_context": "dummy_hash",
            "notes": [],
            "macro_expansions": [],
        }
        files = {0: "/home/src/lib/main.cpp"}

        main_report = Report(main, bugpath, files, metadata)
        reports_to_convert.append(main_report)
        reports_to_convert.append(main_report)

        main = {
            "location": {
                "file": 0,
                "line": 10,
                "col": 10,
            },
            "description": "some description",
            "check_name": "my_checker",
            "issue_hash_content_of_line_in_context": "dummy_hash",
            "notes": [],
            "macro_expansions": [],
        }
        files = {0: "/home/src/lib/lib.cpp"}

        lib_report = Report(main, bugpath, files, metadata)
        reports_to_convert.append(lib_report)

        main = {
            "location": {
                "file": 0,
                "line": 10,
                "col": 10,
            },
            "description": "some description",
            "check_name": "my_checker",
            "issue_hash_content_of_line_in_context": "dummy_hash",
            "notes": [],
            "macro_expansions": [],
        }
        files = {0: "/home/src/lib/other.cpp"}

        other_report = Report(main, bugpath, files, metadata)
        reports_to_convert.append(other_report)

        dummy_changed_files_content = {
            "/COMMIT_MSG": {
                "status": "A",
                "lines_inserted": 1,
                "size_delta": 1,
                "size": 100,
            },
            "main.cpp": {
                "lines_inserted": 1,
                "lines_deleted": 1,
                "size_delta": 1,
                "size": 100,
            },
            "lib.cpp": {
                "lines_inserted": 1,
                "size_delta": 1,
                "size": 100
            },
        }
        fd, changed_files_file = tempfile.mkstemp()
        os.write(fd, json.dumps(dummy_changed_files_content).encode("utf-8"))
        os.close(fd)

        os.environ["CC_CHANGED_FILES"] = changed_files_file

        got = gerrit.convert(reports_to_convert, self.severity_map)
        os.remove(os.environ["CC_CHANGED_FILES"])

        # Remove environment variable not to influence the other tests.
        os.environ.pop("CC_CHANGED_FILES")

        review_comments = got["comments"]

        # Reports were found in two source files.
        self.assertEqual(len(review_comments), 2)

        # Two reports in the main.cpp file.
        self.assertEquals(len(review_comments["/home/src/lib/main.cpp"]), 2)

        self.assertEquals("CodeChecker found 3 issue(s) in the code.",
                          got["message"])
        self.assertNotIn("/home/src/lib/other.cpp", review_comments.keys())
Example 8
def parse_plist_file(path: str,
                     allow_plist_update=True) \
                             -> Tuple[Dict[int, str], List[Report]]:
    """
    Parse the reports from a plist file.
    One plist file can contain multiple reports.
    """
    LOG.debug("Parsing plist: %s", path)

    reports = []
    source_files = {}

    try:
        plist = None
        with open(path, 'rb') as plist_file_obj:
            plist = parse_plist(plist_file_obj)

        if not plist:
            LOG.error("Failed to parse plist %s", path)
            return {}, []

        metadata = plist.get('metadata')

        mentioned_files = plist.get('files', [])

        # Map file indices to the file paths referenced by the bug path events.
        source_files = \
            {i: filepath for i, filepath in enumerate(mentioned_files)}
        diag_changed = False
        for diag in plist.get('diagnostics', []):

            available_keys = list(diag.keys())

            main_section = {}
            for key in available_keys:
                # Skip the path key; it is handled separately.
                if key != 'path':
                    main_section.update({key: diag[key]})

            # We need to extend information for plist files generated
            # by older clang version (before 3.7).
            main_section['check_name'] = get_checker_name(diag, path)

            report_hash = diag.get('issue_hash_content_of_line_in_context')

            if not report_hash:
                file_path = mentioned_files[diag['location']['file']]

                # Generate hash value if it is missing from the report.
                report_hash = get_report_hash(diag, file_path,
                                              HashType.PATH_SENSITIVE)

                main_section['issue_hash_content_of_line_in_context'] = \
                    report_hash

            if 'issue_hash_content_of_line_in_context' not in diag:
                # If the report hash was not in the plist, we set it in the
                # diagnostic section for later update.
                diag['issue_hash_content_of_line_in_context'] = report_hash
                diag_changed = True

            bug_path_items = [item for item in diag['path']]
            reports.append(
                Report(main_section, bug_path_items, source_files, metadata))

        if diag_changed and allow_plist_update:
            # If the diagnostic section has changed we update the plist file.
            # This way the client will always send a plist file where the
            # report hash field is filled.
            with open(path, 'wb') as plist_file:
                plistlib.dump(plist, plist_file)
    except IndexError as iex:
        LOG.warning('Indexing error while processing plist file %s', path)
        LOG.warning(type(iex))
        LOG.warning(repr(iex))
        _, _, exc_traceback = sys.exc_info()
        traceback.print_tb(exc_traceback, limit=1, file=sys.stdout)
    except Exception as ex:
        LOG.warning('Error while processing reports from the plist file: %s',
                    path)
        traceback.print_exc()
        LOG.warning(type(ex))
        LOG.warning(ex)
    finally:
        return source_files, reports
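
For orientation, here is a short, hypothetical driver for the parser above. The plist path is a placeholder, and the attribute access on the returned Report objects (a 'main' section holding the checker name and hash) is an assumption based on how the objects are constructed above.

# Hypothetical driver for parse_plist_file; the path below is a placeholder.
source_files, reports = parse_plist_file("reports/main.cpp_clangsa.plist")

print("Files referenced by the reports:", source_files)
for report in reports:
    # Assumes Report exposes its main section as 'main', mirroring the
    # 'main_section' dictionary passed to the constructor above.
    main = report.main
    print(main['check_name'], main['issue_hash_content_of_line_in_context'])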