Example #1
    def test_get_run_results_severity_sort(self):
        """ Get the run results and sort them by severity and filename ASC. """
        runid = self._runid
        logging.debug('Get all run results from the db for runid: ' +
                      str(runid))
        sort_mode1 = SortMode(SortType.SEVERITY, Order.ASC)
        sort_mode2 = SortMode(SortType.FILENAME, Order.ASC)
        sort_types = [sort_mode1, sort_mode2]

        run_result_count = self._cc_client.getRunResultCount(runid, [])
        self.assertTrue(run_result_count)

        run_results = get_all_run_results(self._cc_client, runid, sort_types,
                                          [])
        self.assertIsNotNone(run_results)

        for i in range(run_result_count - 1):
            bug1 = run_results[i]
            bug2 = run_results[i + 1]
            self.assertTrue(bug1.severity <= bug2.severity)
            self.assertTrue((bug1.severity != bug2.severity)
                            or (bug1.checkedFile <= bug2.checkedFile))

        print_run_results(run_results)

        self.assertEqual(run_result_count, len(run_results))
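The adjacent-pair assertions above verify a two-key (severity, checkedFile) ascending order and rely on severity values comparing numerically. The same property can be checked against a locally sorted copy of the key sequence. A minimal sketch, assuming the report objects expose severity and checkedFile exactly as used in the test; the helper is illustrative and not part of the shared test utilities.

    def assert_sorted_by_severity_then_file(test_case, run_results):
        # Project each report onto its sort key and compare against the
        # sorted key sequence; equality means the input was already ordered.
        keys = [(r.severity, r.checkedFile) for r in run_results]
        test_case.assertEqual(keys, sorted(keys))

Calling assert_sorted_by_severity_then_file(self, run_results) inside the test would express the same expectation in a single assertion.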
Example #2
    def test_get_run_results_checker_id_and_file_path(self):
        """ Test if all the bugs are found based
            on the test project configuration. """
        runid = self._runid
        logging.debug('Get all run results from the db for runid: ' +
                      str(runid))

        run_result_count = self._cc_client.getRunResultCount(runid, [])
        self.assertTrue(run_result_count)

        run_results = get_all_run_results(self._cc_client, runid)
        self.assertIsNotNone(run_results)
        self.assertEqual(run_result_count, len(run_results))

        test_project_results = self._testproject_data[
            self._clang_to_test]['bugs']
        for r in test_project_results:
            print(r)

        not_found = find_all(run_results, test_project_results)

        print_run_results(run_results)

        if not_found:
            print("===================")
            print('Bugs not found:')
            for bug in not_found:
                print(bug)
            print("===================")

        self.assertEqual(len(not_found), 0)
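find_all above returns the expected bugs from the test configuration that have no matching stored report. A rough sketch of such a matcher, assuming the expected entries carry 'checker' and 'file' keys (as printed above) and that stored reports expose checkerId and checkedFile; the real helper in the test utilities may additionally match on the bug hash or line number.

    def find_all(run_results, expected_bugs):
        # Collect the expected bug descriptions for which no stored report
        # has the same checker name and a matching file suffix.
        missing = []
        for bug in expected_bugs:
            found = any(res.checkerId == bug['checker'] and
                        res.checkedFile.endswith(bug['file'])
                        for res in run_results)
            if not found:
                missing.append(bug)
        return missing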
Example #3
    def test_get_run_results_sorted2(self):
        """ Get the run results and sort them by file name and
            checker name ASC. """
        runid = self._runid
        logging.debug('Get all run results from the db for runid: ' +
                      str(runid))
        sort_mode1 = SortMode(SortType.FILENAME, Order.ASC)
        sort_mode2 = SortMode(SortType.CHECKER_NAME, Order.ASC)
        sort_types = [sort_mode1, sort_mode2]

        run_result_count = self._cc_client.getRunResultCount(runid, [])
        self.assertTrue(run_result_count)

        run_results = get_all_run_results(self._cc_client, runid, sort_types,
                                          [])
        self.assertIsNotNone(run_results)

        print_run_results(run_results)

        self.assertEqual(run_result_count, len(run_results))

        for i in range(run_result_count - 1):
            bug1 = run_results[i]
            bug2 = run_results[i + 1]
            self.assertTrue(bug1.checkedFile <= bug2.checkedFile)
            self.assertTrue((bug1.checkedFile != bug2.checkedFile)
                            or (bug1.lastBugPosition.startLine <=
                                bug2.lastBugPosition.startLine)
                            or (bug1.checkerId <= bug2.checkerId))
Example #4
    def test_get_diff_res_count_unresolved(self):
        """
        Count the unresolved results with no filter.
        """
        base_run_id = self._base_runid
        new_run_id = self._new_runid

        base_count = self._cc_client.getRunResultCount([base_run_id],
                                                       None,
                                                       None)
        logging.debug("Base run id: %d", base_run_id)
        logging.debug("Base count: %d", base_count)

        base_run_res = get_all_run_results(self._cc_client, base_run_id)

        print_run_results(base_run_res)

        new_count = self._cc_client.getRunResultCount([new_run_id],
                                                      None,
                                                      None)
        logging.debug("New run id: %d", new_run_id)
        logging.debug("New count: %d", new_count)

        new_run_res = get_all_run_results(self._cc_client, new_run_id)

        print_run_results(new_run_res)

        cmp_data = CompareData(runIds=[new_run_id],
                               diffType=DiffType.UNRESOLVED)

        diff_res = self._cc_client.getRunResultCount([base_run_id],
                                                     None,
                                                     cmp_data)

        self.assertEqual(diff_res, 26)
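For context, an UNRESOLVED diff counts the reports that are present in both the base run and the new run. A minimal sketch of that set logic, assuming each stored report exposes a bugHash attribute that identifies it; this is roughly what the server computes when it receives the CompareData above.

    def count_unresolved(base_reports, new_reports):
        # UNRESOLVED: the report appears in both the base and the new run,
        # identified here by its bug hash.
        base_hashes = {r.bugHash for r in base_reports}
        new_hashes = {r.bugHash for r in new_reports}
        return len(base_hashes & new_hashes)

With the data above, count_unresolved(base_run_res, new_run_res) would be expected to match diff_res.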
Example #5
    def test_get_diff_res_count_unresolved(self):
        """
        Count the unresolved results with no filter.
        """
        base_run_id = self._base_runid
        new_run_id = self._new_runid

        base_count = self._cc_client.getRunResultCount([base_run_id],
                                                       None,
                                                       None)
        print("Base run id: %d", base_run_id)
        print("Base count: %d", base_count)

        base_run_res = get_all_run_results(self._cc_client, base_run_id)

        print_run_results(base_run_res)

        new_count = self._cc_client.getRunResultCount([new_run_id],
                                                      None,
                                                      None)
        print("New run id: %d", new_run_id)
        print("New count: %d", new_count)

        new_run_res = get_all_run_results(self._cc_client, new_run_id)

        print_run_results(new_run_res)

        cmp_data = CompareData(runIds=[new_run_id],
                               diffType=DiffType.UNRESOLVED)

        diff_res = self._cc_client.getRunResultCount([base_run_id],
                                                     None,
                                                     cmp_data)

        self.assertEqual(diff_res, 25)
Example #6
    def test_get_diff_res_count_unresolved(self):
        """
        Count the unresolved results with no filter.
        """
        base_run_id = self._base_runid
        new_run_id = self._new_runid

        base_count = self._cc_client.getRunResultCount(base_run_id, [])
        logging.debug("Base run id: %d", base_run_id)
        logging.debug("Base count: %d", base_count)

        base_run_res = get_all_run_results(self._cc_client, base_run_id)

        print_run_results(base_run_res)

        new_count = self._cc_client.getRunResultCount(new_run_id, [])
        logging.debug("New run id: %d", new_run_id)
        logging.debug("New count: %d", new_count)

        new_run_res = get_all_run_results(self._cc_client, new_run_id)

        print_run_results(new_run_res)

        diff_res = self._cc_client.getDiffResultCount(base_run_id, new_run_id,
                                                      DiffType.UNRESOLVED, [])
        # Nothing is resolved.
        self.assertEqual(diff_res, base_count)
Example #7
    def test_get_run_results_checker_id_and_file_path(self):
        """ Test if all the bugs are found based
            on the test project configuration. """
        runid = self._runid
        logging.debug('Get all run results from the db for runid: ' +
                      str(runid))

        run_result_count = self._cc_client.getRunResultCount([runid],
                                                             None,
                                                             None)
        self.assertTrue(run_result_count)

        run_results = get_all_run_results(self._cc_client, runid)
        self.assertIsNotNone(run_results)
        self.assertEqual(run_result_count, len(run_results))

        test_project_results = self._testproject_data[
            self._clang_to_test]['bugs']
        for r in test_project_results:
            print(r)

        not_found = find_all(run_results, test_project_results)

        print_run_results(run_results)

        if not_found:
            print("===================")
            print('Bugs not found:')
            for bug in not_found:
                print(bug)
            print("===================")

        self.assertEqual(len(not_found), 0)
Example #8
    def test_get_run_results_sorted2(self):
        """ Get the run results and sort them by file name and
            checker name ASC. """
        runid = self._runid
        logging.debug('Get all run results from the db for runid: ' +
                      str(runid))
        sort_mode1 = SortMode(SortType.FILENAME, Order.ASC)
        sort_mode2 = SortMode(SortType.CHECKER_NAME, Order.ASC)
        sort_types = [sort_mode1, sort_mode2]

        run_result_count = self._cc_client.getRunResultCount([runid],
                                                             None,
                                                             None)
        self.assertTrue(run_result_count)

        run_results = get_all_run_results(self._cc_client,
                                          runid,
                                          sort_types,
                                          None)
        self.assertIsNotNone(run_results)

        print_run_results(run_results)

        self.assertEqual(run_result_count, len(run_results))

        for i in range(run_result_count - 1):
            bug1 = run_results[i]
            bug2 = run_results[i + 1]
            self.assertTrue(bug1.checkedFile <= bug2.checkedFile)
            self.assertTrue((bug1.checkedFile != bug2.checkedFile) or
                            (bug1.line <= bug2.line) or
                            (bug1.checkerId <= bug2.checkerId))
Example #9
    def test_disable_checker(self):
        """
        This test depends on a run which was configured for update mode.
        Compared to the original analysis of this run,
        the deadcode.DeadStores checker was disabled.
        """

        run_results = get_all_run_results(self._cc_client, self._runid)

        print_run_results(run_results)

        # Get check command for the first storage.
        original_check_command = \
            self._cc_client.getCheckCommand(None, self._runid)

        # Run the analysis again with a different setup.
        test_project_path = self._testproject_data['project_path']
        ret = project.clean(test_project_path)
        if ret:
            sys.exit(ret)

        initial_codechecker_cfg = env.import_test_cfg(
            self._test_workspace)['codechecker_cfg']

        # Disable some checkers for the analysis.
        deadcode = 'deadcode.DeadStores'
        initial_codechecker_cfg['checkers'] = ['-d', deadcode]

        initial_test_project_name = self._run_name

        ret = codechecker.check_and_store(initial_codechecker_cfg,
                                          initial_test_project_name,
                                          test_project_path)
        if ret:
            sys.exit(1)

        # Get the results to compare.
        updated_results = get_all_run_results(self._cc_client, self._runid)

        all_bugs = self._testproject_data[self._clang_to_test]['bugs']
        deadcode_bugs = \
            [bug['hash'] for bug in all_bugs if bug['checker'] == deadcode]

        self.assertEqual(len(updated_results), len(all_bugs))
        # Reports of the disabled checker are matched by bug hash and must
        # stay unresolved (DetectionStatus is assumed to be imported from
        # the report server API).
        self.assertTrue(all(
            r.detectionStatus == DetectionStatus.UNRESOLVED
            for r in updated_results if r.bugHash in deadcode_bugs))

        # Get check command for the updated storage.
        updated_check_command = \
            self._cc_client.getCheckCommand(None, self._runid)

        # Check that the check command is changed.
        self.assertNotEqual(original_check_command, updated_check_command)
        self.assertNotIn(deadcode, original_check_command)
        self.assertIn(deadcode, updated_check_command)
Example #10
    def test_skip(self):
        """ There should be no results from the skipped file. """

        runid = self._runid
        logging.debug('Get all run results from the db for runid: ' +
                      str(runid))

        run_results = get_all_run_results(self._cc_client, runid)
        self.assertIsNotNone(run_results)

        skipped_files = ["file_to_be_skipped.cpp", "skip.h", "path_end.h"]

        # IMPORTANT: This test is checking whether some reports are really not
        # stored because they were skipped during analysis with --skip flag.
        # However, since clang-tidy is not run, there will be no reports from
        # "clang-diagnostic" checker. These are handled separately here,
        # otherwise the test would believe they're missing because of --skip
        # which is not the case.

        test_proj_res = self._testproject_data[self._clang_to_test]['bugs']
        skipped = [
            x for x in test_proj_res if x['file'] in skipped_files
            or x['checker'].startswith('clang-diagnostic-')
        ]

        print("Analysis:")
        for res in run_results:
            print(res)

        print("\nTest config results:")
        for res in test_proj_res:
            print(res)

        print("\nTest config skipped results:")
        for res in skipped:
            print(res)

        missing_results = find_all(run_results, test_proj_res)

        print_run_results(run_results)

        print('Missing results:')
        for mr in missing_results:
            print(mr)

        if missing_results:
            for bug in missing_results:
                if not bug['checker'].startswith('clang-diagnostic-'):
                    self.assertIn(bug['file'], skipped_files)
        else:
            self.fail("There should be missing results because of "
                      "using skip.")

        self.assertEqual(len(run_results), len(test_proj_res) - len(skipped))
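The skip expectation can also be stated directly on the stored reports: none of them may originate from a skipped file. A short illustrative check, assuming reports expose checkedFile as in the test; it complements the count comparison above.

    import os

    def assert_no_skipped_reports(test_case, run_results, skipped_files):
        # Every stored report must come from a file that was not skipped.
        for res in run_results:
            test_case.assertNotIn(os.path.basename(res.checkedFile),
                                  skipped_files)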
Example #11
    def test_get_run_results_no_filter(self):
        """ Get all the run results without any filtering. """
        runid = self._runid
        logging.debug('Get all run results from the db for runid: ' +
                      str(runid))

        run_result_count = self._cc_client.getRunResultCount(runid, [])
        self.assertTrue(run_result_count)

        run_results = get_all_run_results(self._cc_client, runid)

        print_run_results(run_results)

        self.assertIsNotNone(run_results)
        self.assertEqual(run_result_count, len(run_results))
Example #12
    def test_get_run_results_no_filter(self):
        """ Get all the run results without any filtering. """
        runid = self._runid
        logging.debug('Get all run results from the db for runid: ' +
                      str(runid))

        run_result_count = self._cc_client.getRunResultCount([runid],
                                                             None,
                                                             None)
        self.assertTrue(run_result_count)

        run_results = get_all_run_results(self._cc_client, runid)

        print_run_results(run_results)

        self.assertIsNotNone(run_results)
        self.assertEqual(run_result_count, len(run_results))
Example #13
    def test_skip(self):
        """ There should be no results from the skipped file. """

        runid = self._runid
        logging.debug('Get all run results from the db for runid: ' +
                      str(runid))

        run_results = get_all_run_results(self._cc_client, runid)
        self.assertIsNotNone(run_results)

        skipped_files = ["file_to_be_skipped.cpp", "skip.h"]

        test_proj_res = self._testproject_data[self._clang_to_test]['bugs']
        skipped = [x for x in test_proj_res if x['file'] in skipped_files]

        print("Analysis:")
        for res in run_results:
            print(res)

        print("\nTest config results:")
        for res in test_proj_res:
            print(res)

        print("\nTest config skipped results:")
        for res in skipped:
            print(res)

        missing_results = find_all(run_results, test_proj_res)

        print_run_results(run_results)

        print('Missing results:')
        for mr in missing_results:
            print(mr)

        if missing_results:
            for bug in missing_results:
                self.assertIn(bug['file'], skipped_files)
        else:
            self.fail("There should be missing results because of "
                      "using skip.")

        self.assertEqual(len(run_results), len(test_proj_res) - len(skipped))
Example #14
    def test_disable_checker(self):
        """
        This test depends on a run which was configured for update mode.
        Compared to the original analysis of this run,
        the deadcode.DeadStores checker was disabled.
        """

        run_results = get_all_run_results(self._cc_client, self._runid)

        print_run_results(run_results)

        # Run the analysis again with a different setup.
        test_project_path = self._testproject_data['project_path']
        ret = project.clean(test_project_path)
        if ret:
            sys.exit(ret)

        initial_codechecker_cfg = env.import_test_cfg(
            self._test_workspace)['codechecker_cfg']

        # Disable some checkers for the analysis.
        deadcode = 'deadcode.DeadStores'
        initial_codechecker_cfg['checkers'] = ['-d', deadcode]

        initial_test_project_name = self._run_name

        ret = codechecker.check(initial_codechecker_cfg,
                                initial_test_project_name,
                                test_project_path)
        if ret:
            sys.exit(1)

        # Get the results to compare.
        updated_results = get_all_run_results(self._cc_client, self._runid)

        all_bugs = self._testproject_data[self._clang_to_test]['bugs']
        deadcode_bugs = \
            [bug['hash'] for bug in all_bugs if bug['checker'] == deadcode]

        self.assertEqual(len(updated_results), len(all_bugs))
        # Reports of the disabled checker are matched by bug hash and must
        # stay unresolved (DetectionStatus is assumed to be imported from
        # the report server API).
        self.assertTrue(all(
            r.detectionStatus == DetectionStatus.UNRESOLVED
            for r in updated_results if r.bugHash in deadcode_bugs))
Example #15
    def test_disable_checker(self):
        """
        This test depends on a run which was configured for update mode.
        Compared to the original analysis of this run,
        the deadcode.DeadStores checker was disabled.
        """

        run_results = get_all_run_results(self._cc_client, self._runid)

        print_run_results(run_results)

        # Run the analysis again with a different setup.
        test_project_path = self._testproject_data['project_path']
        ret = project.clean(test_project_path)
        if ret:
            sys.exit(ret)

        initial_codechecker_cfg = env.import_test_cfg(
            self._test_workspace)['codechecker_cfg']

        # Disable some checkers for the analysis.
        deadcode = 'deadcode.DeadStores'
        initial_codechecker_cfg['checkers'] = ['-d', deadcode]

        initial_test_project_name = self._run_name

        ret = codechecker.check(initial_codechecker_cfg,
                                initial_test_project_name, test_project_path)
        if ret:
            sys.exit(1)

        # Get the results to compare.
        updated_results = get_all_run_results(self._cc_client, self._runid)

        all_bugs = self._testproject_data[self._clang_to_test]['bugs']
        deadcode_bugs = [bug for bug in all_bugs if bug['checker'] == deadcode]

        self.assertEqual(len(updated_results),
                         len(all_bugs) - len(deadcode_bugs))
Example #16
    def test_disable_checker(self):
        """
        This test depends on a run which was configured for update mode.
        Compared to the original analysis of this run,
        the deadcode.DeadStores checker was disabled.
        In this case the reports are marked as resolved.
        """

        run_results = get_all_run_results(self._cc_client, self._runid)

        print_run_results(run_results)

        # Get check command for the first storage.
        original_check_command = \
            self._cc_client.getCheckCommand(None, self._runid)

        self.assertEqual(original_check_command, "")

        initial_codechecker_cfg = env.import_test_cfg(
            self._test_workspace)['codechecker_cfg']

        initial_test_project_name = self._run_name

        disabled_reports = os.path.join(
            self._testproject_data['test_project_reports'], 'disabled')

        initial_codechecker_cfg['reportdir'] = disabled_reports
        ret = codechecker.store(initial_codechecker_cfg,
                                initial_test_project_name)
        if ret:
            sys.exit(1)

        # Get the results to compare.
        updated_results = get_all_run_results(self._cc_client, self._runid)

        for report in updated_results:
            self.assertEqual(report.detectionStatus, DetectionStatus.RESOLVED)
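Conceptually, re-storing a report directory that no longer contains results from the disabled checker means the previously stored reports are missing from the new snapshot, so they receive the RESOLVED detection status. A tiny sketch of that transition on bug hashes; it only illustrates the idea and is not CodeChecker's storage implementation.

    def resolve_missing(stored_hashes, restored_hashes):
        # Both arguments are sets of bug hashes. Hashes stored earlier but
        # absent from the newly stored report directory become RESOLVED.
        return {h: 'RESOLVED' for h in stored_hashes - restored_hashes}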