def setUp(self):
    """Create the viewer client and load the test project metadata."""
    viewer_port = int(os.environ['CC_TEST_VIEWER_PORT'])
    self._testproject_data = json.loads(
        os.environ['CC_TEST_PROJECT_INFO'])
    self.assertIsNotNone(self._testproject_data)
    self._cc_client = CCViewerHelper('localhost', viewer_port, '/')
def test_privileged_access(self):
    """Tests that initially, a non-authenticating server is accessible,
    but an authenticating one is not."""

    # Fresh auth client with no session token attached.
    auth_client = CCAuthHelper(self.host, self.port, self.uri, True, None)
    handshake = auth_client.getAuthParameters()
    self.assertTrue(handshake.requiresAuthentication,
                    "Privileged server " +
                    "did not report that it requires authentication.")
    self.assertFalse(handshake.sessionStillActive,
                     "Empty session was " +
                     "reported to be still active.")

    # Wrong credentials must not yield a session token.
    sessionToken = auth_client.performLogin("Username:Password",
                                            "invalid:invalid")
    self.assertIsNone(sessionToken,
                      "Invalid credentials gave us a token!")

    # Valid credentials give us a token to use in later requests.
    self.sessionToken = auth_client.performLogin("Username:Password",
                                                 "cc:test")
    self.assertIsNotNone(self.sessionToken,
                         "Valid credentials didn't give us a token!")

    handshake = auth_client.getAuthParameters()
    self.assertTrue(handshake.requiresAuthentication,
                    "Privileged server " +
                    "did not report that it requires authentication.")
    # NOTE(review): auth_client was constructed without a token, so no
    # session is attached to this handshake; the assertion message text
    # looks inverted relative to assertFalse — confirm against
    # CCAuthHelper's behaviour.
    self.assertFalse(handshake.sessionStillActive,
                     "Valid session was " +
                     "reported not to be active.")

    # A viewer client presenting the obtained token must be let in.
    client = CCViewerHelper(self.host, self.port, '/',
                            True, self.sessionToken)
    self.assertIsNotNone(client.getAPIVersion(),
                         "Privileged server didn't respond properly.")

    # Destroy the session, then verify access is revoked.
    auth_client = CCAuthHelper(self.host, self.port, self.uri,
                               True, self.sessionToken)
    result = auth_client.destroySession()
    self.assertTrue(result, "Server did not allow us to destroy session.")

    try:
        client.getAPIVersion()
        success = False
    except TProtocolException as tpe:
        # The server reports a HTTP 401 error which
        # is not a valid Thrift response.
        # But if it does so, it passes the test!
        success = True
    self.assertTrue(success,
                    "Privileged client allowed access after logout.")

    handshake = auth_client.getAuthParameters()
    self.assertFalse(handshake.sessionStillActive,
                     "Destroyed session was " +
                     "reported to be still active.")
class UpdateMode(unittest.TestCase):
    """Tests for a run which was stored in update mode."""

    def setUp(self):
        host = 'localhost'
        port = int(os.environ['CC_TEST_VIEWER_PORT'])
        uri = '/'
        self._testproject_data = json.loads(os.environ['CC_TEST_PROJECT_INFO'])
        self.assertIsNotNone(self._testproject_data)

        self._cc_client = CCViewerHelper(host, port, uri)

    # -----------------------------------------------------
    def test_get_update_run_res(self):
        """
        The test depends on a run which was configured for update mode.
        Compared to the original test analysis in this run the
        deadcode.Deadstores checker was disabled.
        """
        runs = self._cc_client.getRunData()
        self.assertIsNotNone(runs)
        self.assertNotEqual(len(runs), 0)
        self.assertGreaterEqual(len(runs), 2)

        update_run_name = "update_test"
        # Find the run stored with the update-mode run name.
        # (Was a manual loop followed by an unconditional print even when
        # the run was found; the failure reason is now carried by the
        # assertion message instead.)
        updated_run = next((run for run in runs
                            if run.name == update_run_name), None)
        self.assertIsNotNone(updated_run,
                             'There should be a run with this name: ' +
                             update_run_name)

        # Get all the results from the test project config.
        bugs = self._testproject_data['bugs']
        all_results = len(bugs)

        deadcode_results = [b for b in bugs
                            if b['checker'] == 'deadcode.DeadStores']
        deadcode_count = len(deadcode_results)

        results = self._cc_client.getRunResults(updated_run.runId, 500, 0,
                                                [], [])
        update_res_count = len(results)

        # Every configured result except the disabled checker's findings
        # must have been stored.
        self.assertEqual(all_results - deadcode_count, update_res_count)
def test_initial_access(self):
    """Tests that initially, a non-authenticating server is accessible,
    but an authenticating one is not."""

    # CC_TEST_VIEWER_PORT addresses the non-authenticating server;
    # self.port addresses the authenticating one.
    viewer_port = int(os.environ['CC_TEST_VIEWER_PORT'])
    client_unprivileged = CCViewerHelper(self.host, viewer_port, '/',
                                         True, None)
    client_privileged = CCViewerHelper(self.host, self.port, '/',
                                       True, None)

    self.assertIsNotNone(client_unprivileged.getAPIVersion(),
                         "Unprivileged client was not accessible.")

    # Without a session token, the privileged server must reject us.
    try:
        client_privileged.getAPIVersion()
        success = False
    except TProtocolException as tpe:
        # The server reports a HTTP 401 error which
        # is not a valid Thrift response.
        # But if it does so, it passes the test!
        success = True
    self.assertTrue(success,
                    "Privileged client allowed access without session.")
def setUp(self):
    """Create a viewer client, load project metadata and pick a run id."""
    self._testproject_data = json.loads(
        os.environ["CC_TEST_PROJECT_INFO"])
    self.assertIsNotNone(self._testproject_data)
    self._cc_client = CCViewerHelper(
        "localhost", int(os.environ["CC_TEST_VIEWER_PORT"]), "/")
    self._runid = self._select_one_runid()
def delete_results(host, port, run_id, performance_data):
    """Remove the results of one run and record how long removal took."""
    perf = {}
    with CCViewerHelper(host, port, '/') as client:
        # Only the removal itself is timed, not the measurement bookkeeping.
        with Timer('removeRunResults', perf):
            client.removeRunResults([run_id])
        add_measurement_data(run_id, performance_data, perf)
class RunResults(unittest.TestCase):
    """Viewer API tests for a server started with a suppress file."""

    _ccClient = None

    # Selected runid for running the tests.
    _runid = None

    def setUp(self):
        host = 'localhost'
        port = int(os.environ['CC_TEST_VIEWER_PORT'])
        uri = '/'
        self._testproject_data = json.loads(os.environ['CC_TEST_PROJECT_INFO'])
        self.assertIsNotNone(self._testproject_data)

        self._cc_client = CCViewerHelper(host, port, uri)

    # -----------------------------------------------------
    def test_suppress_file_set_in_cmd(self):
        """
        Server is started with a suppress file; check that the API returns a
        non-empty string. A tempfile is used for the suppress file so the
        name will change for each run.
        """
        # assertNotEquals is a deprecated alias of assertNotEqual.
        self.assertNotEqual(self._cc_client.getSuppressFile(), '')
class RunResults(unittest.TestCase):
    """Checks the stored run results of one run through the viewer API."""

    _ccClient = None

    # Selected runid for running the tests.
    _runid = None

    def _select_one_runid(self):
        """Return the id of the first available run."""
        runs = self._cc_client.getRunData()
        self.assertIsNotNone(runs)
        self.assertNotEqual(len(runs), 0)
        # Select one run; the first one is good enough for these tests.
        idx = 0
        return runs[idx].runId

    def setUp(self):
        host = 'localhost'
        port = int(os.environ['CC_TEST_VIEWER_PORT'])
        uri = '/'
        self._testproject_data = json.loads(os.environ['CC_TEST_PROJECT_INFO'])
        self.assertIsNotNone(self._testproject_data)

        self._cc_client = CCViewerHelper(host, port, uri)
        self._runid = self._select_one_runid()

    def test_get_run_results_no_filter(self):
        """
        Get the run results without filtering; just the result count is
        checked.
        """
        runid = self._runid
        logging.debug('Get all run results from the db for runid: ' +
                      str(runid))

        run_result_count = self._cc_client.getRunResultCount(runid, [])
        self.assertTrue(run_result_count)

        run_results = self._cc_client.getRunResults(runid, run_result_count,
                                                    0, [], [])
        self.assertIsNotNone(run_results)
        print_run_results(run_results)
        self.assertEqual(run_result_count, len(run_results))

    def test_get_run_results_checker_id_and_file_path(self):
        """
        Get all the run results and compare with the results in the project
        config.
        """
        runid = self._runid
        logging.debug('Get all run results from the db for runid: ' +
                      str(runid))

        run_result_count = self._cc_client.getRunResultCount(runid, [])
        self.assertTrue(run_result_count)

        run_results = self._cc_client.getRunResults(runid, run_result_count,
                                                    0, [], [])
        self.assertIsNotNone(run_results)
        print_run_results(run_results)
        self.assertEqual(run_result_count, len(run_results))

        found_all = True
        not_found = []
        for bug in self._testproject_data['bugs']:
            # A configured bug is found if file, line, checker and hash
            # all match one stored result.
            found = False
            for run_res in run_results:
                found |= ((run_res.checkedFile.endswith(bug['file'])) and
                          (run_res.lastBugPosition.startLine ==
                           bug['line']) and
                          (run_res.checkerId == bug['checker']) and
                          (run_res.bugHash == bug['hash']))
            found_all &= found
            if not found:
                not_found.append(bug)

        print('Not found bugs:')
        for bug in not_found:
            print(bug)

        self.assertTrue(found_all)

    def test_get_source_file_content(self):
        # Also for testing Unicode support.
        """
        Get the stored source file content from the database and compare it
        with the original version; unicode encoding/decoding is tested
        during the storage and retrieval of the data.
        """
        runid = self._runid
        simple_filters = [ReportFilter(checkerId='*', filepath='*.c*')]
        run_result_count = self._cc_client.getRunResultCount(
            runid, simple_filters)
        self.assertTrue(run_result_count)

        run_results = self._cc_client.getRunResults(runid, run_result_count,
                                                    0, [], simple_filters)
        self.assertIsNotNone(run_results)

        for run_res in run_results:
            self.assertTrue(re.match(r'.*\.c(pp)?$', run_res.checkedFile))

            logging.debug('Getting the content of ' + run_res.checkedFile)

            file_data = self._cc_client.getSourceFileData(run_res.fileId,
                                                          True)
            self.assertIsNotNone(file_data)

            file_content1 = file_data.fileContent
            self.assertIsNotNone(file_content1)

            with open(run_res.checkedFile) as source_file:
                file_content2 = source_file.read()
            self.assertEqual(file_content1, file_content2)

        logging.debug('got ' + str(len(run_results)) + ' files')
        self.assertEqual(run_result_count, len(run_results))

    def test_zzzzz_get_run_results_checker_msg_filter_suppressed(self):
        """
        This test must run for the last, suppresses some results that could
        cause some other tests to fail which depend on the result count.
        """
        runid = self._runid
        logging.debug('Get all run results from the db for runid: ' +
                      str(runid))

        simple_filters = [ReportFilter(suppressed=False)]
        run_results = self._cc_client.getRunResults(runid, 50, 0, [],
                                                    simple_filters)
        self.assertIsNotNone(run_results)
        self.assertNotEqual(len(run_results), 0)

        suppress_msg = r'My beautiful Unicode comment.'
        bug = run_results[0]
        success = self._cc_client.suppressBug([runid], bug.reportId,
                                              suppress_msg)
        self.assertTrue(success)
        logging.debug('Bug suppressed successfully')

        simple_filters = [ReportFilter(suppressed=True)]
        run_results = self._cc_client.getRunResults(runid, 50, 0, [],
                                                    simple_filters)
        self.assertIsNotNone(run_results)
        self.assertNotEqual(len(run_results), 0)

        # List comprehension instead of filter() so len() works under both
        # Python 2 and Python 3 (filter() is a lazy iterator on Python 3).
        filtered_run_results = [
            result for result in run_results
            if result.reportId == bug.reportId and result.suppressed]
        self.assertEqual(len(filtered_run_results), 1)
        suppressed_bug = filtered_run_results[0]
        self.assertEqual(suppressed_bug.suppressComment, suppress_msg)

        success = self._cc_client.unSuppressBug([runid],
                                                suppressed_bug.reportId)
        self.assertTrue(success)
        logging.debug('Bug unsuppressed successfully')

        simple_filters = [ReportFilter(suppressed=False)]
        run_results = self._cc_client.getRunResults(runid, 50, 0, [],
                                                    simple_filters)
        self.assertIsNotNone(run_results)
        self.assertNotEqual(len(run_results), 0)

        filtered_run_results = [
            result for result in run_results
            if result.reportId == bug.reportId and not result.suppressed]
        self.assertEqual(len(filtered_run_results), 1)

        logging.debug('Done.\n')

    def test_get_run_results_severity_sort(self):
        """ Get the results and sort them by severity and filename. """
        runid = self._runid
        logging.debug('Get all run results from the db for runid: ' +
                      str(runid))
        sort_mode1 = SortMode(SortType.SEVERITY, Order.ASC)
        sort_mode2 = SortMode(SortType.FILENAME, Order.ASC)
        sort_types = [sort_mode1, sort_mode2]

        run_result_count = self._cc_client.getRunResultCount(runid, [])
        self.assertTrue(run_result_count)

        run_results = self._cc_client.getRunResults(runid, run_result_count,
                                                    0, sort_types, [])
        self.assertIsNotNone(run_results)

        for i in range(run_result_count - 1):
            bug1 = run_results[i]
            bug2 = run_results[i + 1]
            # Primary key: severity; secondary key: file name.
            self.assertTrue(bug1.severity <= bug2.severity)
            self.assertTrue((bug1.severity != bug2.severity) or
                            (bug1.checkedFile <= bug2.checkedFile))

        print_run_results(run_results)
        self.assertEqual(run_result_count, len(run_results))

    def test_get_run_results_sorted2(self):
        """ Get the results and sort them by filename and checkername. """
        runid = self._runid
        logging.debug('Get all run results from the db for runid: ' +
                      str(runid))
        sortMode1 = SortMode(SortType.FILENAME, Order.ASC)
        sortMode2 = SortMode(SortType.CHECKER_NAME, Order.ASC)
        sort_types = [sortMode1, sortMode2]

        run_result_count = self._cc_client.getRunResultCount(runid, [])
        self.assertTrue(run_result_count)

        run_results = self._cc_client.getRunResults(runid, run_result_count,
                                                    0, sort_types, [])
        self.assertIsNotNone(run_results)
        print_run_results(run_results)
        self.assertEqual(run_result_count, len(run_results))

        for i in range(run_result_count - 1):
            bug1 = run_results[i]
            bug2 = run_results[i + 1]
            # Primary key: file name; then start line and checker id.
            self.assertTrue(bug1.checkedFile <= bug2.checkedFile)
            self.assertTrue((bug1.checkedFile != bug2.checkedFile) or
                            (bug1.lastBugPosition.startLine <=
                             bug2.lastBugPosition.startLine) or
                            (bug1.checkerId <= bug2.checkerId))
def main():
    """Performance driver: store generated runs and time viewer queries.

    Relies on module-level globals set up elsewhere in this file:
    args (parsed CLI options), runNumber, fileNumber, bugPerFile,
    bugLength and fileContent.
    """
    # handle argument parsing
    parseArgs()

    #--- main part ----------------------------------------------------------

    reportTimeList = []
    getTimeList = []

    with CCReportHelper(args.address, args.check_port) as ccReporter, \
            CCViewerHelper(args.address, args.view_port, '/') as ccViewer, \
            open(args.output, 'r+') as outFile:
        # Start with an empty output file for this measurement session.
        outFile.truncate()

        # NOTE(review): statement grouping below is reconstructed from a
        # whitespace-collapsed source; the per-run measurements are taken
        # inside the run loop — confirm against the original layout.
        for runCount in range(runNumber):
            before = datetime.datetime.now()
            run_id = ccReporter.addCheckerRun(
                'command',
                'name_' + str(runCount) + '_' + str(uuid4()),
                'version', False)
            report_ids = []
            for fileCount in range(fileNumber):
                print('\nrun: ' + str(runCount + 1) + '/' + str(runNumber) +
                      '\nfile: ' + str(fileCount + 1) + '/' + str(fileNumber))
                file_id = ccReporter.needFileContent(
                    run_id, 'file_' + str(fileCount)).fileId
                ccReporter.addFileContent(file_id, fileContent)
                build_action_id = ccReporter.addBuildAction(
                    run_id,
                    'build_cmd_' + str(fileCount),
                    'check_cmd_' + str(fileCount),
                    'target_' + str(fileCount))
                ccReporter.finishBuildAction(build_action_id, '')
                for bugCount in range(bugPerFile):
                    bug_pathes = []
                    bug_events = []
                    for bugElementCount in range(bugLength):
                        # Each bug occupies bugLength consecutive lines.
                        line = bugCount * bugLength + bugElementCount + 1
                        bug_pathes.append(
                            BugPathPos(line, 1, line, 10, file_id))
                        bug_events.append(
                            BugPathEvent(line, 1, line, 10, 'event_msg',
                                         file_id))
                    report_id = ccReporter.addReport(
                        build_action_id, file_id,
                        'hash_' + str(run_id) + '_' + str(fileCount) +
                        '_' + str(bugCount),
                        1, 'checker_message', bug_pathes, bug_events,
                        'checker_name', 'checker_cat', 'bug_type', 1)
                    report_ids.append(report_id)
            #ccReporter.moduleToReport(run_id, 'module_id', report_ids)
            ccReporter.finishCheckerRun(run_id)
            after = datetime.datetime.now()
            # Wall-clock time of storing one complete run.
            time = (after - before).total_seconds()
            reportTimeList.append(time)

            # Time three read-side queries against the freshly stored run.
            before = datetime.datetime.now()
            runIDs = [rundata.runId for rundata in ccViewer.getRunData()]
            after = datetime.datetime.now()
            time = (after - before).total_seconds()
            getTimeList.append(time)

            before = datetime.datetime.now()
            res = ccViewer.getAllRunResults(runIDs[-1])
            after = datetime.datetime.now()
            time = (after - before).total_seconds()
            getTimeList.append(time)

            before = datetime.datetime.now()
            ccViewer.getReportDetails(res[-1].reportId)
            after = datetime.datetime.now()
            time = (after - before).total_seconds()
            getTimeList.append(time)

            # One CSV-ish line per run: run index; store time; the three
            # query times measured above.
            s = str(runCount) + ';' + str(reportTimeList[-1]) + ';' + \
                str(getTimeList[-3]) + ';' + str(getTimeList[-2]) + ';' + \
                str(getTimeList[-1])
            print(s)
            outFile.write(s + '\n')
class RunResults(unittest.TestCase):
    """Diff-related viewer API tests comparing a base run and a new run."""

    _ccClient = None

    # Selected runid for running the tests.
    _runid = None

    def setUp(self):
        host = 'localhost'
        port = int(os.environ['CC_TEST_VIEWER_PORT'])
        uri = '/'
        self._testproject_data = json.loads(os.environ['CC_TEST_PROJECT_INFO'])
        self.assertIsNotNone(self._testproject_data)

        self._cc_client = CCViewerHelper(host, port, uri)

    # -----------------------------------------------------
    def test_get_diff_res_count_new(self):
        """ Count the new results with no filter. """
        runs = self._cc_client.getRunData()
        self.assertIsNotNone(runs)
        self.assertNotEqual(len(runs), 0)
        self.assertGreaterEqual(len(runs), 2)

        base_run_id = runs[0].runId
        new_run_id = runs[1].runId

        diff_res = self._cc_client.getDiffResultCount(base_run_id,
                                                      new_run_id,
                                                      DiffType.NEW, [])
        # No new results are expected between the two stored runs.
        self.assertEqual(diff_res, 0)

    # -----------------------------------------------------
    def test_get_diff_res_count_resolved(self):
        """ Count the resolved results with no filter. """
        runs = self._cc_client.getRunData()
        self.assertIsNotNone(runs)
        self.assertNotEqual(len(runs), 0)
        self.assertGreaterEqual(len(runs), 2)

        base_run_id = runs[0].runId
        new_run_id = runs[1].runId

        diff_res = self._cc_client.getDiffResultCount(base_run_id,
                                                      new_run_id,
                                                      DiffType.RESOLVED, [])
        # Nothing was resolved between the two stored runs.
        self.assertEqual(diff_res, 0)

    # -----------------------------------------------------
    def test_get_diff_res_count_unresolved(self):
        """ Count the unresolved results with no filter. """
        runs = self._cc_client.getRunData()
        self.assertIsNotNone(runs)
        self.assertNotEqual(len(runs), 0)
        self.assertGreaterEqual(len(runs), 2)

        base_run_id = runs[0].runId
        new_run_id = runs[1].runId

        base_count = self._cc_client.getRunResultCount(base_run_id, [])
        logging.debug("Base run id: %d", base_run_id)
        logging.debug("Base count: %d", base_count)

        base_run_res = self._cc_client.getAllRunResults(base_run_id, [], [])
        print_run_results(base_run_res)

        new_count = self._cc_client.getRunResultCount(new_run_id, [])
        logging.debug("New run id: %d", new_run_id)
        logging.debug("New count: %d", new_count)

        new_run_res = self._cc_client.getAllRunResults(new_run_id, [], [])
        print_run_results(new_run_res)

        diff_res = self._cc_client.getDiffResultCount(base_run_id,
                                                      new_run_id,
                                                      DiffType.UNRESOLVED,
                                                      [])
        # Expected unresolved result count for the test project.
        self.assertEqual(diff_res, 23)

    # -----------------------------------------------------
    def test_get_diff_res_count_unresolved_filter(self):
        """
        This test assumes nothing has been resolved between the two checker
        runs. The same severity levels and numbers are used as in a simple
        filter test for only one run from the project config.
        """
        runs = self._cc_client.getRunData()
        self.assertIsNotNone(runs)
        self.assertNotEqual(len(runs), 0)
        self.assertGreaterEqual(len(runs), 2)

        base_run_id = runs[0].runId
        new_run_id = runs[1].runId

        # Severity levels used for filtering.
        filter_severity_levels = self._testproject_data[
            'filter_severity_levels']

        for level in filter_severity_levels:
            # items() instead of the Python-2-only iteritems() so the test
            # runs on both Python 2 and 3.
            for severity_level, test_result_count in level.items():
                simple_filters = []
                sev = get_severity_level(severity_level)
                simple_filter = ReportFilter(severity=sev)
                simple_filters.append(simple_filter)

                diff_result_count = self._cc_client.getDiffResultCount(
                    base_run_id, new_run_id, DiffType.UNRESOLVED,
                    simple_filters)

                self.assertEqual(test_result_count, diff_result_count)

    # -----------------------------------------------------
    def test_get_diff_res_types_new(self):
        """ Test diff result types for new results. """
        runs = self._cc_client.getRunData()
        self.assertIsNotNone(runs)
        self.assertNotEqual(len(runs), 0)
        self.assertGreaterEqual(len(runs), 2)

        base_run_id = runs[0].runId
        new_run_id = runs[1].runId

        diff_res = self._cc_client.getDiffResultTypes(base_run_id,
                                                      new_run_id,
                                                      DiffType.NEW, [])
        self.assertEqual(len(diff_res), 0)

    # -----------------------------------------------------
    def test_get_diff_res_types_resolved(self):
        """ Test diff result types for resolved results. """
        runs = self._cc_client.getRunData()
        self.assertIsNotNone(runs)
        self.assertNotEqual(len(runs), 0)
        self.assertGreaterEqual(len(runs), 2)

        base_run_id = runs[0].runId
        new_run_id = runs[1].runId

        diff_res = self._cc_client.getDiffResultTypes(base_run_id,
                                                      new_run_id,
                                                      DiffType.RESOLVED, [])
        self.assertEqual(len(diff_res), 0)

    # -----------------------------------------------------
    def test_get_diff_res_types_unresolved(self):
        """
        Test diff result types for unresolved results with no filter on
        the api.
        """
        runs = self._cc_client.getRunData()
        self.assertIsNotNone(runs)
        self.assertNotEqual(len(runs), 0)
        self.assertGreaterEqual(len(runs), 2)

        base_run_id = runs[0].runId
        new_run_id = runs[1].runId

        # (A redundant pre-loop getDiffResultTypes call whose result was
        # immediately discarded has been removed; the value is fetched
        # inside the loop below.)
        diff_res_types_filter = self._testproject_data[
            'diff_res_types_filter']

        for level in diff_res_types_filter:
            for checker_name, test_result_count in level.items():
                diff_res = self._cc_client.getDiffResultTypes(
                    base_run_id, new_run_id, DiffType.UNRESOLVED, [])
                res = [r for r in diff_res if r.checkerId == checker_name]
                # There should be only one result for each checker name.
                self.assertEqual(len(res), 1)
                self.assertEqual(test_result_count, res[0].count)
                self.assertEqual(checker_name, res[0].checkerId)

    # -----------------------------------------------------
    def test_get_diff_res_types_unresolved_filter(self):
        """
        Test diff result types for unresolved results with checker name
        filter on the api.
        """
        runs = self._cc_client.getRunData()
        self.assertIsNotNone(runs)
        self.assertNotEqual(len(runs), 0)
        self.assertGreaterEqual(len(runs), 2)

        base_run_id = runs[0].runId
        new_run_id = runs[1].runId

        diff_res_types_filter = self._testproject_data[
            'diff_res_types_filter']

        for level in diff_res_types_filter:
            for checker_name, test_result_count in level.items():
                simple_filters = []
                simple_filter = ReportFilter(checkerId=checker_name)
                simple_filters.append(simple_filter)

                diff_res = self._cc_client.getDiffResultTypes(
                    base_run_id, new_run_id, DiffType.UNRESOLVED,
                    simple_filters)
                # There should be only one result for each checker name.
                self.assertEqual(len(diff_res), 1)
                self.assertEqual(test_result_count, diff_res[0].count)
                self.assertEqual(checker_name, diff_res[0].checkerId)
class RunResults(unittest.TestCase):
    """Filter-related viewer API tests."""

    _ccClient = None

    # Selected runid for running the tests.
    _runid = None

    def _select_one_runid(self):
        """Return the id of the first available run."""
        runs = self._cc_client.getRunData()
        self.assertIsNotNone(runs)
        self.assertNotEqual(len(runs), 0)
        # Select one run; the first one is good enough.
        idx = 0
        return runs[idx].runId

    def setUp(self):
        host = 'localhost'
        port = int(os.environ['CC_TEST_VIEWER_PORT'])
        uri = '/'
        self._testproject_data = json.loads(os.environ['CC_TEST_PROJECT_INFO'])
        self.assertIsNotNone(self._testproject_data)

        self._cc_client = CCViewerHelper(host, port, uri)
        self._runid = self._select_one_runid()

    def test_filter_none(self):
        ''' Filter value is None should return all results'''
        runid = self._runid
        sort_types = None
        simple_filters = None

        run_result_count = self._cc_client.getRunResultCount(runid,
                                                             simple_filters)
        self.assertIsNotNone(run_result_count)

        run_results = self._cc_client.getRunResults(runid, run_result_count,
                                                    0, sort_types,
                                                    simple_filters)
        self.assertIsNotNone(run_results)
        self.assertEqual(run_result_count, len(run_results))

    def test_filter_empty(self):
        ''' Filter value is empty list should return all results'''
        runid = self._runid
        sort_types = None
        simple_filters = []

        run_result_count = self._cc_client.getRunResultCount(runid,
                                                             simple_filters)
        self.assertIsNotNone(run_result_count)

        run_results = self._cc_client.getRunResults(runid, run_result_count,
                                                    0, sort_types,
                                                    simple_filters)
        self.assertIsNotNone(run_results)
        self.assertEqual(run_result_count, len(run_results))

    def test_filter_severity(self):
        ''' Filter by severity levels'''
        runid = self._runid

        severity_test_data = self._testproject_data['filter_severity_levels']

        for level in severity_test_data:
            # items() instead of the Python-2-only iteritems() so the test
            # runs on both Python 2 and 3.
            for severity_level, test_result_count in level.items():
                logging.debug('Severity level filter ' + severity_level +
                              ' test result count: ' +
                              str(test_result_count))
                sort_types = None
                simple_filters = []
                sev = get_severity_level(severity_level)
                simple_filter = ReportFilter(severity=sev)
                simple_filters.append(simple_filter)

                run_result_count = self._cc_client.getRunResultCount(
                    runid, simple_filters)
                run_results = self._cc_client.getRunResults(
                    runid, run_result_count, 0, sort_types, simple_filters)

                self.assertIsNotNone(run_results)
                self.assertEqual(test_result_count, len(run_results))

    def test_filter_checker_id(self):
        ''' Filter by checker id'''
        runid = self._runid

        severity_test_data = self._testproject_data['filter_checker_id']

        for level in severity_test_data:
            for checker_id_filter, test_result_count in level.items():
                logging.debug('Checker id filter ' + checker_id_filter +
                              ' test result count: ' +
                              str(test_result_count))
                sort_types = None
                simple_filters = []
                simple_filter = ReportFilter(checkerId=checker_id_filter)
                simple_filters.append(simple_filter)

                run_result_count = self._cc_client.getRunResultCount(
                    runid, simple_filters)
                run_results = self._cc_client.getRunResults(
                    runid, run_result_count, 0, sort_types, simple_filters)

                self.assertIsNotNone(run_results)
                self.assertEqual(test_result_count, len(run_results))

    def test_filter_file_path(self):
        # Docstring was a copy-paste of the checker id test; this test
        # filters by file path.
        ''' Filter by file path'''
        runid = self._runid

        severity_test_data = self._testproject_data['filter_filepath']

        for level in severity_test_data:
            for filepath_filter, test_result_count in level.items():
                logging.debug('File path filter ' + filepath_filter +
                              ' test result count: ' +
                              str(test_result_count))
                sort_types = None
                simple_filters = []
                simple_filter = ReportFilter(filepath=filepath_filter)
                simple_filters.append(simple_filter)

                run_result_count = self._cc_client.getRunResultCount(
                    runid, simple_filters)
                run_results = self._cc_client.getRunResults(
                    runid, run_result_count, 0, sort_types, simple_filters)

                self.assertIsNotNone(run_results)
                self.assertEqual(test_result_count, len(run_results))

    def test_filter_case_insensitive_file_path(self):
        ''' Filter by file path case insensitive'''
        runid = self._runid

        filter_test_data = self._testproject_data[
            'filter_filepath_case_insensitive']

        for level in filter_test_data:
            for filepath_filter, test_result_count in level.items():
                logging.debug('File path filter ' + filepath_filter +
                              ' test result count: ' +
                              str(test_result_count))
                sort_types = None
                simple_filters = []
                simple_filter = ReportFilter(filepath=filepath_filter)
                simple_filters.append(simple_filter)

                run_result_count = self._cc_client.getRunResultCount(
                    runid, simple_filters)
                run_results = self._cc_client.getRunResults(
                    runid, run_result_count, 0, sort_types, simple_filters)

                self.assertIsNotNone(run_results)
                self.assertEqual(test_result_count, len(run_results))
class Suppress(unittest.TestCase):
    """ Test suppress functionality """

    _ccClient = None

    # Selected runid for running the tests.
    _runid = None

    def _select_one_runid(self):
        """ Select one run id for the test. """
        runs = self._cc_client.getRunData()
        self.assertIsNotNone(runs)
        self.assertNotEqual(len(runs), 0)
        # The first stored run is good enough.
        idx = 0
        return runs[idx].runId

    def setUp(self):
        """Create a viewer client, load project metadata, pick a run id."""
        host = 'localhost'
        port = int(os.environ['CC_TEST_VIEWER_PORT'])
        uri = '/'
        self._testproject_data = json.loads(os.environ['CC_TEST_PROJECT_INFO'])
        self.assertIsNotNone(self._testproject_data)

        self._cc_client = CCViewerHelper(host, port, uri)
        self._runid = self._select_one_runid()

    def test_double_suppress(self):
        """
        Suppressing the same bug for the second time should be successful.
        The second suppress should not overwrite the already stored
        suppress comment.
        """
        runid = self._runid
        logging.debug('Get all run results from the db for runid: ' +
                      str(runid))

        run_results = get_all_run_results(self._cc_client, runid)
        self.assertIsNotNone(run_results)
        self.assertNotEqual(len(run_results), 0)

        suppress_comment = r'First suppress msg'
        bug = run_results[0]
        success = self._cc_client.suppressBug([runid],
                                              bug.reportId,
                                              suppress_comment)
        self.assertTrue(success)
        logging.debug('Bug suppressed successfully')

        # Try to suppress the same bug again.
        second_suppress_msg = r'Second suppress msg'
        success = self._cc_client.suppressBug([runid],
                                              bug.reportId,
                                              second_suppress_msg)

        # The API call must still succeed for an already suppressed bug.
        self.assertTrue(success)
        logging.debug('Same bug suppressed successfully for the second time')

        simple_filters = [ReportFilter(suppressed=True)]
        run_results_suppressed = get_all_run_results(self._cc_client,
                                                     runid,
                                                     filters=simple_filters)
        self.assertIsNotNone(run_results_suppressed)
        self.assertEqual(len(run_results_suppressed), 1)

        # Build the expected suppressed report: the original bug with the
        # FIRST suppress comment (the second one must not overwrite it).
        bug_to_suppress = bug
        bug_to_suppress.suppressed = True
        bug_to_suppress.suppressComment = suppress_comment

        # The only one suppressed bug should be returned.
        self.assertEqual(bug_to_suppress, run_results_suppressed[0])

        success = self._cc_client.unSuppressBug([runid],
                                                bug_to_suppress.reportId)
        self.assertTrue(success)
        logging.debug('Bug unsuppressed successfully')

        simple_filters = [ReportFilter(suppressed=False)]
        run_results_unsuppressed = get_all_run_results(self._cc_client,
                                                       runid,
                                                       filters=simple_filters)
        self.assertIsNotNone(run_results_unsuppressed)
        # After unsuppressing, all originally visible results are back.
        self.assertEqual(len(run_results), len(run_results_unsuppressed))
class RunResults(unittest.TestCase):
    """Checks the stored run results of one run through the viewer API."""

    _ccClient = None

    # Selected runid for running the tests.
    _runid = None

    def _select_one_runid(self):
        """Return the id of the first available run."""
        runs = self._cc_client.getRunData()
        self.assertIsNotNone(runs)
        self.assertNotEqual(len(runs), 0)
        # Select one run; the first one is good enough for these tests.
        idx = 0
        return runs[idx].runId

    def setUp(self):
        host = 'localhost'
        port = int(os.environ['CC_TEST_VIEWER_PORT'])
        uri = '/'
        self._testproject_data = json.loads(os.environ['CC_TEST_PROJECT_INFO'])
        self.assertIsNotNone(self._testproject_data)

        self._cc_client = CCViewerHelper(host, port, uri)
        self._runid = self._select_one_runid()

    def test_get_run_results_no_filter(self):
        """
        Get the run results without filtering; just the result count is
        checked.
        """
        runid = self._runid
        logging.debug('Get all run results from the db for runid: ' +
                      str(runid))

        run_result_count = self._cc_client.getRunResultCount(runid, [])
        self.assertTrue(run_result_count)

        run_results = self._cc_client.getRunResults(runid, run_result_count,
                                                    0, [], [])
        self.assertIsNotNone(run_results)
        print_run_results(run_results)
        self.assertEqual(run_result_count, len(run_results))

    def test_get_run_results_checker_id_and_file_path(self):
        """
        Get all the run results and compare with the results in the project
        config.
        """
        runid = self._runid
        logging.debug('Get all run results from the db for runid: ' +
                      str(runid))

        run_result_count = self._cc_client.getRunResultCount(runid, [])
        self.assertTrue(run_result_count)

        run_results = self._cc_client.getRunResults(runid, run_result_count,
                                                    0, [], [])
        self.assertIsNotNone(run_results)
        print_run_results(run_results)
        self.assertEqual(run_result_count, len(run_results))

        found_all = True
        not_found = []
        for bug in self._testproject_data['bugs']:
            # A configured bug is found if file, line, checker and hash
            # all match one stored result.
            found = False
            for run_res in run_results:
                found |= ((run_res.checkedFile.endswith(bug['file'])) and
                          (run_res.lastBugPosition.startLine ==
                           bug['line']) and
                          (run_res.checkerId == bug['checker']) and
                          (run_res.bugHash == bug['hash']))
            found_all &= found
            if not found:
                not_found.append(bug)

        print('Not found bugs:')
        for bug in not_found:
            print(bug)

        self.assertTrue(found_all)

    def test_get_source_file_content(self):
        # Also for testing Unicode support.
        """
        Get the stored source file content from the database and compare it
        with the original version; unicode encoding/decoding is tested
        during the storage and retrieval of the data.
        """
        runid = self._runid
        simple_filters = [ReportFilter(checkerId='*', filepath='*.c*')]
        run_result_count = self._cc_client.getRunResultCount(
            runid, simple_filters)
        self.assertTrue(run_result_count)

        run_results = self._cc_client.getRunResults(runid, run_result_count,
                                                    0, [], simple_filters)
        self.assertIsNotNone(run_results)

        for run_res in run_results:
            self.assertTrue(re.match(r'.*\.c(pp)?$', run_res.checkedFile))

            logging.debug('Getting the content of ' + run_res.checkedFile)

            file_data = self._cc_client.getSourceFileData(run_res.fileId,
                                                          True)
            self.assertIsNotNone(file_data)

            file_content1 = file_data.fileContent
            self.assertIsNotNone(file_content1)

            with open(run_res.checkedFile) as source_file:
                file_content2 = source_file.read()
            self.assertEqual(file_content1, file_content2)

        logging.debug('got ' + str(len(run_results)) + ' files')
        self.assertEqual(run_result_count, len(run_results))

    def test_zzzzz_get_run_results_checker_msg_filter_suppressed(self):
        """
        This test must run for the last, suppresses some results that could
        cause some other tests to fail which depend on the result count.
        """
        runid = self._runid
        logging.debug('Get all run results from the db for runid: ' +
                      str(runid))

        simple_filters = [ReportFilter(suppressed=False)]
        run_results = self._cc_client.getRunResults(runid, 50, 0, [],
                                                    simple_filters)
        self.assertIsNotNone(run_results)
        self.assertNotEqual(len(run_results), 0)

        suppress_msg = r'My beautiful Unicode comment.'
        bug = run_results[0]
        success = self._cc_client.suppressBug([runid], bug.reportId,
                                              suppress_msg)
        self.assertTrue(success)
        logging.debug('Bug suppressed successfully')

        simple_filters = [ReportFilter(suppressed=True)]
        run_results = self._cc_client.getRunResults(runid, 50, 0, [],
                                                    simple_filters)
        self.assertIsNotNone(run_results)
        self.assertNotEqual(len(run_results), 0)

        # List comprehension instead of filter() so len() works under both
        # Python 2 and Python 3 (filter() is a lazy iterator on Python 3).
        filtered_run_results = [
            result for result in run_results
            if result.reportId == bug.reportId and result.suppressed]
        self.assertEqual(len(filtered_run_results), 1)
        suppressed_bug = filtered_run_results[0]
        self.assertEqual(suppressed_bug.suppressComment, suppress_msg)

        success = self._cc_client.unSuppressBug([runid],
                                                suppressed_bug.reportId)
        self.assertTrue(success)
        logging.debug('Bug unsuppressed successfully')

        simple_filters = [ReportFilter(suppressed=False)]
        run_results = self._cc_client.getRunResults(runid, 50, 0, [],
                                                    simple_filters)
        self.assertIsNotNone(run_results)
        self.assertNotEqual(len(run_results), 0)

        filtered_run_results = [
            result for result in run_results
            if result.reportId == bug.reportId and not result.suppressed]
        self.assertEqual(len(filtered_run_results), 1)

        logging.debug('Done.\n')

    def test_get_run_results_severity_sort(self):
        """ Get the results and sort them by severity and filename. """
        runid = self._runid
        logging.debug('Get all run results from the db for runid: ' +
                      str(runid))
        sort_mode1 = SortMode(SortType.SEVERITY, Order.ASC)
        sort_mode2 = SortMode(SortType.FILENAME, Order.ASC)
        sort_types = [sort_mode1, sort_mode2]

        run_result_count = self._cc_client.getRunResultCount(runid, [])
        self.assertTrue(run_result_count)

        run_results = self._cc_client.getRunResults(runid, run_result_count,
                                                    0, sort_types, [])
        self.assertIsNotNone(run_results)

        for i in range(run_result_count - 1):
            bug1 = run_results[i]
            bug2 = run_results[i + 1]
            # Primary key: severity; secondary key: file name.
            self.assertTrue(bug1.severity <= bug2.severity)
            self.assertTrue((bug1.severity != bug2.severity) or
                            (bug1.checkedFile <= bug2.checkedFile))

        print_run_results(run_results)
        self.assertEqual(run_result_count, len(run_results))

    def test_get_run_results_sorted2(self):
        """ Get the results and sort them by filename and checkername. """
        runid = self._runid
        logging.debug('Get all run results from the db for runid: ' +
                      str(runid))
        sortMode1 = SortMode(SortType.FILENAME, Order.ASC)
        sortMode2 = SortMode(SortType.CHECKER_NAME, Order.ASC)
        sort_types = [sortMode1, sortMode2]

        run_result_count = self._cc_client.getRunResultCount(runid, [])
        self.assertTrue(run_result_count)

        run_results = self._cc_client.getRunResults(runid, run_result_count,
                                                    0, sort_types, [])
        self.assertIsNotNone(run_results)
        print_run_results(run_results)
        self.assertEqual(run_result_count, len(run_results))

        for i in range(run_result_count - 1):
            bug1 = run_results[i]
            bug2 = run_results[i + 1]
            # Primary key: file name; then start line and checker id.
            self.assertTrue(bug1.checkedFile <= bug2.checkedFile)
            self.assertTrue((bug1.checkedFile != bug2.checkedFile) or
                            (bug1.lastBugPosition.startLine <=
                             bug2.lastBugPosition.startLine) or
                            (bug1.checkerId <= bug2.checkerId))
def measure(test_conf, performance_data, store_done, proc_done_counter,
            proc_counter_lock, keep):
    """Fill up a run with the configured values, then measure the viewer
    queries against the stored data.

    test_conf: test configuration dict ("number_of_runs",
        "file_line_size", "clean_after_fill", ...).
    performance_data: accumulator the measured timings are added to.
    store_done: event set when all processes finished storing; queries
        are only measured after it.
    proc_done_counter / proc_counter_lock: shared counter (with its lock)
        of processes still storing.
    keep: if true, the stored results are not deleted afterwards.
    """
    try:
        log.debug("Generating and storing results ...")
        number_of_runs = test_conf.get("number_of_runs", 1)

        # TODO: simulate append by using the same run name
        # in multiple threads.
        run_perf = {}
        for run_count in range(number_of_runs):
            run_name = 'name_' + str(run_count) + '_' + str(uuid4())
            file_line_size = test_conf.get("file_line_size")
            # zlib.compress requires bytes on Python 3; encoding the ASCII
            # payload is a no-op on Python 2, so this works on both.
            file_content = zlib.compress(
                ('\n'.join(['A' * 80] * file_line_size)).encode('utf-8'),
                zlib.Z_BEST_COMPRESSION)

            with Timer('store', run_perf):
                run_id = store(test_conf, run_name, file_content)
            log.debug("Storing results for run " + str(run_id) + " done.")

        with proc_counter_lock:
            proc_done_counter.value -= 1

        # Wait here for the other processes to finish storing results
        # before measuring the queries.
        store_done.wait()

        view_host, view_port = get_viewer_host_port(test_conf)
        with CCViewerHelper(view_host, view_port, '/') as viewer_client:

            with Timer('getRunData', run_perf):
                # Only the call is timed; the returned data is not needed.
                viewer_client.getRunData()

            with Timer('getAllRunResulst', run_perf):
                res = get_all_run_results(viewer_client, run_id)

            with Timer('getReportDetails', run_perf):
                viewer_client.getReportDetails(res[-1].reportId)

        add_measurement_data(run_id, performance_data, run_perf)

        clean_after_fill = test_conf.get("clean_after_fill", True)
        if clean_after_fill and not keep:
            delete_results(view_host, view_port, run_id, performance_data)

    except Exception as ex:
        log.error(ex)
        # NOTE(review): if the failure happens after the decrement in the
        # try block, the counter is decremented twice here -- confirm
        # against the consumer of proc_done_counter.
        with proc_counter_lock:
            proc_done_counter.value -= 1
        sys.exit(1)
class RunResults(unittest.TestCase):
    """Tests for the diff result count / diff result type viewer APIs.

    Every test compares the first two runs stored for the test project.
    """

    _ccClient = None

    # Selected runid for running the tests.
    _runid = None

    def setUp(self):
        host = 'localhost'
        port = int(os.environ['CC_TEST_VIEWER_PORT'])
        uri = '/'
        self._testproject_data = json.loads(
            os.environ['CC_TEST_PROJECT_INFO'])
        self.assertIsNotNone(self._testproject_data)
        self._cc_client = CCViewerHelper(host, port, uri)

    def _get_base_and_new_run_ids(self):
        """Return the (base, new) run id pair used by every diff test,
        sanity-checking that at least two runs exist."""
        runs = self._cc_client.getRunData()
        self.assertIsNotNone(runs)
        self.assertNotEqual(len(runs), 0)
        self.assertGreaterEqual(len(runs), 2)
        return runs[0].runId, runs[1].runId

    # -----------------------------------------------------
    def test_get_diff_res_count_new(self):
        """ Count the new results with no filter. """
        base_run_id, new_run_id = self._get_base_and_new_run_ids()

        diff_res = self._cc_client.getDiffResultCount(base_run_id,
                                                      new_run_id,
                                                      DiffType.NEW,
                                                      [])
        self.assertEqual(diff_res, 0)

    # -----------------------------------------------------
    def test_get_diff_res_count_resolved(self):
        """ Count the resolved results with no filter. """
        base_run_id, new_run_id = self._get_base_and_new_run_ids()

        diff_res = self._cc_client.getDiffResultCount(base_run_id,
                                                      new_run_id,
                                                      DiffType.RESOLVED,
                                                      [])
        self.assertEqual(diff_res, 0)

    # -----------------------------------------------------
    def test_get_diff_res_count_unresolved(self):
        """ Count the unresolved results with no filter. """
        base_run_id, new_run_id = self._get_base_and_new_run_ids()

        base_count = self._cc_client.getRunResultCount(base_run_id, [])
        logging.debug("Base run id: %d", base_run_id)
        logging.debug("Base count: %d", base_count)

        base_run_res = get_all_run_results(self._cc_client, base_run_id)
        print_run_results(base_run_res)

        new_count = self._cc_client.getRunResultCount(new_run_id, [])
        logging.debug("New run id: %d", new_run_id)
        logging.debug("New count: %d", new_count)

        new_run_res = get_all_run_results(self._cc_client, new_run_id)
        print_run_results(new_run_res)

        diff_res = self._cc_client.getDiffResultCount(base_run_id,
                                                      new_run_id,
                                                      DiffType.UNRESOLVED,
                                                      [])
        # Expected unresolved count for the test project.
        self.assertEqual(diff_res, 23)

    # -----------------------------------------------------
    def test_get_diff_res_count_unresolved_filter(self):
        """
        This test assumes nothing has been resolved between the two
        checker runs. The same severity levels and numbers are used as in
        a simple filter test for only one run from the project config.
        """
        base_run_id, new_run_id = self._get_base_and_new_run_ids()

        # Severity levels used for filtering.
        filter_severity_levels = self._testproject_data[
            'filter_severity_levels']

        for level in filter_severity_levels:
            for severity_level, test_result_count in level.items():
                sev = get_severity_level(severity_level)
                simple_filters = [ReportFilter(severity=sev)]

                diff_result_count = self._cc_client.getDiffResultCount(
                    base_run_id, new_run_id,
                    DiffType.UNRESOLVED, simple_filters)

                self.assertEqual(test_result_count, diff_result_count)

    # -----------------------------------------------------
    def test_get_diff_res_types_new(self):
        """ Test diff result types for new results. """
        base_run_id, new_run_id = self._get_base_and_new_run_ids()

        diff_res = self._cc_client.getDiffResultTypes(base_run_id,
                                                      new_run_id,
                                                      DiffType.NEW,
                                                      [])
        self.assertEqual(len(diff_res), 0)

    # -----------------------------------------------------
    def test_get_diff_res_types_resolved(self):
        """ Test diff result types for resolved results. """
        base_run_id, new_run_id = self._get_base_and_new_run_ids()

        diff_res = self._cc_client.getDiffResultTypes(base_run_id,
                                                      new_run_id,
                                                      DiffType.RESOLVED,
                                                      [])
        self.assertEqual(len(diff_res), 0)

    # -----------------------------------------------------
    def test_get_diff_res_types_unresolved(self):
        """
        Test diff result types for unresolved results with no filter
        on the api.
        """
        base_run_id, new_run_id = self._get_base_and_new_run_ids()

        diff_res_types_filter = self._testproject_data[
            'diff_res_types_filter']

        # The unfiltered query does not depend on the loop variables,
        # so fetch it once instead of once per expected checker.
        diff_res = self._cc_client.getDiffResultTypes(base_run_id,
                                                      new_run_id,
                                                      DiffType.UNRESOLVED,
                                                      [])

        for level in diff_res_types_filter:
            for checker_name, test_result_count in level.items():
                res = [r for r in diff_res if r.checkerId == checker_name]

                # There should be only one result for each checker name.
                self.assertEqual(len(res), 1)
                self.assertEqual(test_result_count, res[0].count)
                self.assertEqual(checker_name, res[0].checkerId)

    # -----------------------------------------------------
    def test_get_diff_res_types_unresolved_filter(self):
        """
        Test diff result types for unresolved results with checker name
        filter on the api.
        """
        base_run_id, new_run_id = self._get_base_and_new_run_ids()

        diff_res_types_filter = self._testproject_data[
            'diff_res_types_filter']

        for level in diff_res_types_filter:
            for checker_name, test_result_count in level.items():
                simple_filters = [ReportFilter(checkerId=checker_name)]

                diff_res = \
                    self._cc_client.getDiffResultTypes(base_run_id,
                                                       new_run_id,
                                                       DiffType.UNRESOLVED,
                                                       simple_filters)

                # There should be only one for each checker name.
                self.assertEqual(len(diff_res), 1)
                self.assertEqual(test_result_count, diff_res[0].count)
                self.assertEqual(checker_name, diff_res[0].checkerId)