def test_local_compare_res_html_output_unresolved(self):
    """Check that html files will be generated by using diff command."""
    html_reports = os.path.join(self._local_reports, "html_reports")
    get_diff_results(
        [self._run_names[0]], [self._local_reports], '--unresolved', 'html',
        ["--url", self._url, '-e', html_reports, "--verbose", "debug"])

    checked_files = set()
    for res in self.get_local_remote_diff(None, 'json'):
        checked_files.add(os.path.basename(res['file']['path']))

    # Check if the index.html file was generated.
    html_index = os.path.join(html_reports, "index.html")
    self.assertTrue(os.path.exists(html_index))

    html_statistics = os.path.join(html_reports, "statistics.html")
    self.assertTrue(os.path.exists(html_statistics))

    # Check that html files were generated for each report.
    for html_file_names in os.listdir(html_reports):
        suffix = html_file_names.rfind("_")
        file_name = html_file_names[:suffix] \
            if suffix != -1 else html_file_names

        if file_name in ["index.html", "statistics.html"]:
            continue

        self.assertIn(file_name, checked_files)
def test_diff_no_trim_codeclimate_output(self):
    """ Test codeclimate output when using diff and don't set env vars. """
    export_dir_path = os.path.join(self._local_reports, "export_dir")

    get_diff_results(
        [self._run_names[0]], [self._local_reports],
        '--unresolved', "codeclimate",
        ["-e", export_dir_path, "--url", self._url], self._env)

    issues_file_path = os.path.join(export_dir_path,
                                    'codeclimate_issues.json')
    self.assertTrue(os.path.exists(issues_file_path))

    with open(issues_file_path, 'r',
              encoding="utf-8", errors="ignore") as f:
        issues = json.load(f)

    malloc_issues = [i for i in issues if i["check_name"] == "unix.Malloc"]
    self.assertNotEqual(len(malloc_issues), 0)

    file_path = malloc_issues[0]["location"]["path"]
    self.assertTrue(os.path.isabs(file_path))
    self.assertTrue(file_path.endswith("/new_delete.cpp"))

    shutil.rmtree(export_dir_path, ignore_errors=True)
def test_newname_baseline_file_json(self):
    """ Get reports based on a baseline file given to the newname option. """
    baseline_file_path = create_baseline_file(self.new_reports)

    # Get new results.
    new_results, err, returncode = get_diff_results(
        [self.base_reports], [baseline_file_path], '--new', 'json')
    self.assertFalse(new_results)
    self.assertEqual(returncode, 2)
    self.assertIn(
        "Couldn't get local reports for the following baseline report "
        "hashes: ", err)

    # Get unresolved results.
    unresolved_results, err, returncode = get_diff_results(
        [self.base_reports], [baseline_file_path], '--unresolved', 'json')
    self.assertFalse(unresolved_results)
    self.assertEqual(returncode, 2)
    self.assertIn(
        "Couldn't get local reports for the following baseline report "
        "hashes: ", err)

    # Get resolved results.
    resolved_results, _, _ = get_diff_results(
        [self.base_reports], [baseline_file_path], '--resolved', 'json')
    for report in resolved_results:
        self.assertEqual(report['checker_name'], "core.CallAndMessage")
def test_diff_codeclimate_output(self):
    """ Test codeclimate output when using diff and set env vars. """
    export_dir = os.path.join(self._local_reports, "export_dir")
    env = self._env.copy()
    env["CC_REPO_DIR"] = self._local_test_project

    get_diff_results(
        [self._run_names[0]], [self._local_reports],
        '--unresolved', 'codeclimate',
        ["--url", self._url, "-e", export_dir], env)

    codeclimate_issues_file = os.path.join(export_dir,
                                           'codeclimate_issues.json')
    self.assertTrue(os.path.exists(codeclimate_issues_file))

    with open(codeclimate_issues_file, 'r',
              encoding="utf-8", errors="ignore") as rw_file:
        issues = json.load(rw_file)

    for issue in issues:
        self.assertEqual(issue["type"], "issue")
        self.assertTrue(issue["check_name"])
        self.assertEqual(issue["categories"], ["Bug Risk"])
        self.assertTrue(issue["fingerprint"])
        self.assertTrue(issue["location"]["path"])
        self.assertTrue(issue["location"]["lines"]["begin"])

    malloc_issues = [i for i in issues if i["check_name"] == "unix.Malloc"]

    self.assertEqual(
        malloc_issues,
        [{
            "type": "issue",
            "check_name": "unix.Malloc",
            "description": "Memory allocated by alloca() should not be "
                           "deallocated",
            "categories": ["Bug Risk"],
            "fingerprint": "c2132f78ef0e01bdb5eacf616048625f",
            "severity": "minor",
            "location": {
                "path": "new_delete.cpp",
                "lines": {
                    "begin": 31
                }
            }
        }])

    shutil.rmtree(export_dir, ignore_errors=True)
def test_diff_gerrit_output(self):
    """Test gerrit output.

    Every report should be in the gerrit review json.
    """
    export_dir = os.path.join(self._local_reports, "export_dir1")
    env = self._env.copy()
    env["CC_REPO_DIR"] = ''
    env["CC_CHANGED_FILES"] = ''

    get_diff_results(
        [self._run_names[0]], [self._local_reports], '--new', 'gerrit',
        ["--url", self._url, "-e", export_dir], env)

    gerrit_review_file = os.path.join(export_dir, 'gerrit_review.json')
    self.assertTrue(os.path.exists(gerrit_review_file))

    with open(gerrit_review_file, 'r',
              encoding="utf-8", errors="ignore") as rw_file:
        review_data = json.load(rw_file)

    lbls = review_data["labels"]
    self.assertEqual(lbls["Verified"], -1)
    self.assertEqual(lbls["Code-Review"], -1)
    self.assertEqual(review_data["message"],
                     "CodeChecker found 5 issue(s) in the code.")
    self.assertEqual(review_data["tag"], "jenkins")

    comments = review_data["comments"]
    self.assertEqual(len(comments), 1)

    file_path = next(iter(comments))
    reports = comments[file_path]
    self.assertEqual(len(reports), 5)

    for report in reports:
        self.assertIn("message", report)
        self.assertIn("range", report)

        report_range = report["range"]
        self.assertIn("start_line", report_range)
        self.assertIn("start_character", report_range)
        self.assertIn("end_line", report_range)
        self.assertIn("end_character", report_range)

    shutil.rmtree(export_dir, ignore_errors=True)
def test_source_line_content(self):
    """ Check that line / file contents are set properly for different
    output types.
    """
    base_run_name = self._test_runs[0].name
    new_run_name = self._test_runs[1].name

    html_reports = os.path.join(self.test_workspace, "html_reports")

    base_run_names = [base_run_name, new_run_name]
    new_run_names = [new_run_name, base_run_name]
    extra_args = [
        "--url", self._url,
        "--file", "*/divide_zero.cpp",
        "--checker-name", "core.DivideZero",
        "--output", "plaintext", "html",
        '--export-dir', html_reports]

    # Check plain text output.
    out, _, _ = get_diff_results(
        base_run_names, new_run_names, '--unresolved', None, extra_args)

    lines = out.split(os.linesep)
    for idx, line in enumerate(lines):
        if '[core.DivideZero]' in line:
            self.assertTrue(lines[idx + 1].strip(), "Invalid line content")

    # Check HTML output.
    for file_path in os.listdir(html_reports):
        with open(os.path.join(html_reports, file_path)) as f:
            self.assertNotIn(InvalidFileContentMsg, f.read())

    shutil.rmtree(html_reports, ignore_errors=True)
def test_local_to_remote_compare_count_new(self):
    """Count the new results with no filter in local compare mode."""
    out, _, _ = get_diff_results(
        [self._local_reports], [self._run_names[0]], '--new', None,
        ["--url", self._url])

    count = len(re.findall(r'\[core\.NullDereference\]', out))
    self.assertEqual(count, 4)
def test_filter_severity_high_low_text(self):
    """Get the high and low severity unresolved reports."""
    out, _, _ = get_diff_results(
        [self.base_reports], [self.new_reports], '--unresolved', None,
        ['--severity', 'high', 'low'])

    self.assertEqual(len(re.findall(r'\[HIGH\]', out)), 18)
    self.assertEqual(len(re.findall(r'\[LOW\]', out)), 6)
def test_diff_multiple_output(self):
    """ Test multiple output types for the diff command. """
    export_dir = os.path.join(self._local_reports, "export_dir3")

    env = self._env.copy()
    env["CC_REPO_DIR"] = ''
    env["CC_CHANGED_FILES"] = ''

    out, _, _ = get_diff_results(
        [self._run_names[0]], [self._local_reports], '--resolved', None,
        ["-o", "html", "gerrit", "plaintext",
         "-e", export_dir,
         "--url", self._url],
        env)
    print(out)

    # Check the plaintext output.
    count = len(re.findall(r'\[core\.NullDereference\]', out))
    self.assertEqual(count, 4)

    # Check that the gerrit output json file was generated.
    gerrit_review_file = os.path.join(export_dir, 'gerrit_review.json')
    self.assertTrue(os.path.exists(gerrit_review_file))

    # Check that the index.html output was generated.
    index_html = os.path.join(export_dir, 'index.html')
    self.assertTrue(os.path.exists(index_html))

    shutil.rmtree(export_dir, ignore_errors=True)
def test_multiple_dir(self):
    """ Get unresolved reports from multiple local directories. """
    unresolved_results, _, _ = get_diff_results(
        [self.base_reports, self.new_reports],
        [self.new_reports, self.base_reports],
        '--unresolved', 'json',
        ['--severity', 'high', 'low'])

    self.assertNotEqual(len(unresolved_results), 0)
def test_print_bug_steps(self):
    """ Test printing the steps the analyzers took. """
    out, _, ret = get_diff_results(
        [self.base_reports], [self.new_reports], '--unresolved', None,
        ['--print-steps'])

    self.assertIn("Steps:", out)
    self.assertIn("Report hash:", out)
    self.assertEqual(ret, 2)
def test_diff_between_literal_colon_in_name(self):
    """Count remote diff compared to a name which contains a literal ':'
    but does not refer to a tag.
    """
    # Name order matters from __init__!
    base_to_new = get_diff_results(
        [self._test_runs[0].name], [self._test_runs[1].name],
        '--new', 'json', ["--url", self._url])

    colon_base_name = self._test_runs[0].name + r"\:base"
    colon_new_name = self._test_runs[1].name + r"\:new"
    colon_base_to_new = get_diff_results(
        [colon_base_name], [colon_new_name],
        '--new', 'json', ["--url", self._url])

    self.assertEqual(len(base_to_new[0]), len(colon_base_to_new[0]))
def test_non_existent_reports_directory(self):
    """Handles a non-existent directory well.

    Displays detailed information about the base and new directories when
    any of them does not exist.
    """
    error_output = ''
    return_code = 0
    try:
        get_diff_results([self.base_reports], ['unexistent-dir-name'],
                         '--new')
    except subprocess.CalledProcessError as process_error:
        return_code = process_error.returncode
        error_output = process_error.stderr

    self.assertEqual(return_code, 1,
                     "Exit code should be 1 if directory does not exist.")
    self.assertIn("Failed to get remote runs from server", error_output)
def test_print_bug_steps(self):
    """ Test printing the steps the analyzers took. """
    out, _, ret = get_diff_results(
        [self._run_names[0]], [self._local_reports], '--resolved', None,
        ["--url", self._url, "--print-steps"])

    self.assertIn("Steps:", out)
    self.assertIn("Report hash:", out)
    self.assertEqual(ret, 2)
def test_filter_severity_high_json(self):
    """Get the high severity new reports.

    core.StackAddressEscape checker (high severity) was enabled in the new
    run, those reports should be listed.
    """
    high_severity_res, _, _ = get_diff_results(
        [self.base_reports], [self.new_reports], '--new', 'json',
        ['--severity', 'high'])
    self.assertEqual(len(high_severity_res), 4)
def test_basename_baseline_file_json(self):
    """ Get reports based on a baseline file given to the basename option. """
    baseline_file_path = create_baseline_file(self.base_reports)

    # Get new results.
    new_results, _, _ = get_diff_results(
        [baseline_file_path], [self.new_reports], '--new', 'json')
    print(new_results)
    for new_result in new_results:
        self.assertEqual(new_result['checker_name'], "core.NullDereference")

    # Get unresolved results.
    unresolved_results, _, _ = get_diff_results(
        [baseline_file_path], [self.new_reports], '--unresolved', 'json')
    print(unresolved_results)
    self.assertTrue(
        any(r for r in unresolved_results
            if r['checker_name'] == 'core.DivideZero'))
    self.assertFalse(
        any(r for r in unresolved_results
            if r['checker_name'] == 'core.NullDereference' or
            r['checker_name'] == 'core.CallAndMessage'))

    # Get resolved results.
    resolved_results, err, returncode = get_diff_results(
        [baseline_file_path], [self.new_reports], '--resolved', 'json')
    self.assertFalse(resolved_results)
    self.assertEqual(returncode, 2)
    self.assertIn(
        "Couldn't get local reports for the following baseline report "
        "hashes: ", err)
def test_filter_severity_high_text(self):
    """Get the high severity new reports.

    core.StackAddressEscape checker (high severity) was enabled in the new
    run, those reports should be listed.
    """
    out, _, _ = get_diff_results(
        [self.base_reports], [self.new_reports], '--new', None,
        ['--severity', 'high'])
    print(out)

    self.assertEqual(len(re.findall(r'\[HIGH\]', out)), 4)
    self.assertEqual(len(re.findall(r'\[LOW\]', out)), 0)
def test_multiple_runs(self):
    """ Count the unresolved results in multiple runs without filter. """
    base_run_name = self._test_runs[0].name
    new_run_name = self._test_runs[1].name

    unresolved_results = get_diff_results(
        [base_run_name, new_run_name], [new_run_name, base_run_name],
        '--unresolved', 'json', ["--url", self._url])

    self.assertNotEqual(len(unresolved_results[0]), 0)
def test_local_to_remote_with_baseline_file(self):
    """ Get reports based on a baseline file given to the basename option. """
    baseline_file_path = create_baseline_file(self._local_reports)

    # Get new reports.
    new_results, _, returncode = get_diff_results(
        [baseline_file_path], [self._run_names[0]], '--new', 'json',
        ["--url", self._url])
    print(new_results)
    for report in new_results:
        self.assertEqual(report['checker_name'], "core.NullDereference")
    self.assertEqual(returncode, 2)

    # Get unresolved reports.
    unresolved_results, err, returncode = get_diff_results(
        [baseline_file_path], [self._run_names[0]], '--unresolved', 'json',
        ["--url", self._url])
    print(unresolved_results)
    self.assertTrue(unresolved_results)
    self.assertFalse(
        any(r for r in unresolved_results
            if r['checker_name'] == 'core.CallAndMessage'))
    self.assertEqual(returncode, 2)

    # Get resolved reports.
    resolved_results, err, returncode = get_diff_results(
        [baseline_file_path], [self._run_names[0]], '--resolved', 'json',
        ["--url", self._url])
    print(resolved_results)
    self.assertFalse(resolved_results)
    self.assertEqual(returncode, 2)
    self.assertIn(
        "Couldn't get local reports for the following baseline report "
        "hashes: ", err)
def test_print_bug_steps(self):
    """ Test printing the steps the analyzers took. """
    base_run_name = self._test_runs[0].name
    new_run_name = self._test_runs[1].name

    out, _, ret = get_diff_results(
        [base_run_name], [new_run_name], '--resolved', None,
        ["--url", self._url, "--print-steps"])

    self.assertIn("Steps:", out)
    self.assertIn("Report hash:", out)
    self.assertEqual(ret, 2)
def test_new_json(self):
    """Get the new reports.

    core.StackAddressEscape checker was enabled in the new run, those
    reports should be listed as new.
    """
    new_results, _, _ = get_diff_results(
        [self.base_reports], [self.new_reports], '--new', 'json')
    print(new_results)
    for new_result in new_results:
        self.assertEqual(new_result['checkerId'], "core.NullDereference")
def test_filter_severity_low_json(self):
    """Get the low severity new reports.

    core.StackAddressEscape checker was enabled in the new run, those
    reports should be listed as new.
    """
    low_severity_res, _, _ = get_diff_results(
        [self.base_reports], [self.new_reports], '--new', 'json',
        ['--severity', 'low'])
    print(low_severity_res)
    self.assertEqual(len(low_severity_res), 0)
def test_diff_to_tag(self):
    """Count remote diff compared to tag."""
    report_dir = os.path.join(
        self._testproject_data['project_path_update'], 'reports')
    run_name = self._test_runs[2].name

    out = get_diff_results([f'{run_name}:t1'], [report_dir], '--new',
                           'json', ["--url", self._url])
    self.assertEqual(len(out[0]), 5)

    out = get_diff_results([f'{run_name}:t2'], [report_dir], '--new',
                           'json', ["--url", self._url])
    self.assertEqual(len(out[0]), 0)

    out = get_diff_results([f'{run_name}:t1'], [report_dir], '--unresolved',
                           'json', ["--url", self._url])
    self.assertEqual(len(out[0]), 26)

    out = get_diff_results([f'{run_name}:t2'], [report_dir], '--unresolved',
                           'json', ["--url", self._url])
    self.assertEqual(len(out[0]), 31)

    out = get_diff_results([f'{run_name}:t1'], [report_dir], '--resolved',
                           'json', ["--url", self._url])
    self.assertEqual(len(out[0]), 0)

    out = get_diff_results([f'{run_name}:t2'], [report_dir], '--resolved',
                           'json', ["--url", self._url])
    self.assertEqual(len(out[0]), 0)
def test_local_compare_res_html_output_unresolved(self):
    """Check that html files will be generated by using diff command."""
    html_reports = os.path.join(self._local_reports, "html_reports")
    get_diff_results(
        [self._run_names[0]], [self._local_reports], '--unresolved', 'html',
        ["--url", self._url, '-e', html_reports, "--verbose", "debug"])

    checked_files = set()
    for res in self.get_local_remote_diff(None, 'json'):
        checked_files.add(os.path.basename(res['file']['path']))

    # Check if the index.html file was generated.
    html_index = os.path.join(html_reports, "index.html")
    self.assertTrue(os.path.exists(html_index))

    html_statistics = os.path.join(html_reports, "statistics.html")
    self.assertTrue(os.path.exists(html_statistics))

    # Check that html files were generated for each report.
    for html_file_names in os.listdir(html_reports):
        suffix = html_file_names.rfind("_")
        file_name = html_file_names[:suffix] \
            if suffix != -1 else html_file_names

        if file_name in ["index.html", "statistics.html"]:
            continue

        self.assertIn(file_name, checked_files)

    # Check reports in the index.html file.
    index_html = os.path.join(html_reports, 'index.html')
    divide_zero_count = 0
    with open(index_html, 'r', encoding="utf-8", errors="ignore") as f:
        for line in f:
            if re.search("core.DivideZero", line):
                divide_zero_count += 1

    self.assertEqual(divide_zero_count, 10)
def test_resolved_json(self):
    """Get the resolved reports.

    core.CallAndMessage checker was disabled in the new run, those reports
    should be listed as resolved.
    """
    resolved_results, _, _ = get_diff_results(
        [self.base_reports], [self.new_reports], '--resolved', 'json')
    print(resolved_results)
    for resolved in resolved_results:
        self.assertEqual(resolved['checkerId'], "core.CallAndMessage")
def test_non_existent_reports_directory(self):
    """Handles a non-existent directory well.

    Displays detailed information about the base and new directories when
    any of them does not exist.
    """
    _, error_output, return_code = get_diff_results(
        [self.base_reports], ['unexistent-dir-name'], '--new',
        extra_args=['--url', f"localhost:{env.get_free_port()}/Default"])

    self.assertEqual(return_code, 1,
                     "Exit code should be 1 if directory does not exist.")
    self.assertIn("Failed to get remote runs from server", error_output)
def test_cmd_compare_remote_res_count_new_rgx(self):
    """Count the new results with no filter, use regex in the run name."""
    base_run_name = self._test_runs[0].name
    new_run_name = self._test_runs[1].name

    # Change test_files_blablabla to test_*_blablabla
    new_run_name = new_run_name.replace('files', '*')

    out = get_diff_results(
        [base_run_name], [new_run_name], '--resolved', None,
        ["--url", self._url], self._env)

    # 4 disappeared core.CallAndMessage issues.
    count = len(re.findall(r'\[core\.CallAndMessage\]', out[0]))
    self.assertEqual(count, 4)
def test_remote_to_local_compare_count_new(self):
    """Count the new results with no filter."""
    out, _, _ = get_diff_results(
        [self._run_names[0]], [self._local_reports], '--new', None,
        ["--url", self._url])

    # 5 new core.CallAndMessage issues.
    # 1 is suppressed in code.
    count = len(re.findall(r'\[core\.CallAndMessage\]', out))
    self.assertEqual(count, 4)

    # core.NullDereference was disabled in the remote analysis,
    # so no results are new compared to the local analysis.
    count = len(re.findall(r'\[core\.NullDereference\]', out))
    self.assertEqual(count, 0)
def test_multiple_baseline_file_json(self):
    """ Test multiple baseline files for the basename option. """
    baseline_file_paths = [
        create_baseline_file(self.base_reports),
        create_baseline_file(self.new_reports)]

    # Get new results.
    new_results, _, returncode = get_diff_results(
        baseline_file_paths, [self.new_reports], '--new', 'json')
    print(new_results)
    self.assertFalse(new_results)
    self.assertFalse(returncode)

    # Get unresolved results.
    unresolved_results, _, returncode = get_diff_results(
        baseline_file_paths, [self.new_reports], '--unresolved', 'json')
    print(unresolved_results)
    self.assertTrue(
        any(r for r in unresolved_results
            if r['checker_name'] == 'core.DivideZero'))

    # Get resolved results.
    resolved_results, err, returncode = get_diff_results(
        baseline_file_paths, [self.new_reports], '--resolved', 'json')
    print(resolved_results)
    self.assertFalse(resolved_results)
    self.assertEqual(returncode, 2)
    self.assertIn(
        "Couldn't get local reports for the following baseline report "
        "hashes: ", err)
def get_local_remote_diff(self, extra_args=None, format_type=None):
    """Return the unresolved results comparing local reports to a remote run.

    Returns the output of the diff command comparing the local reports to
    a remote run in the database.

    extra_args: a list of additional arguments for the diff command, e.g.
    filter arguments or arguments changing the output format.
    """
    if not extra_args:
        extra_args = []

    return get_diff_results(
        [self._local_reports], [self._run_names[0]], '--unresolved',
        format_type, ['--url', self._url, *extra_args])[0]
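# A minimal usage sketch for the helper above (hypothetical, not part of the
# original suite). It only relies on arguments already used elsewhere in these
# tests: the helper supplies '--url' itself, so callers pass only filters and
# an optional output format.
#
#     json_results = self.get_local_remote_diff(['--severity', 'high'], 'json')
#     text_output = self.get_local_remote_diff(
#         ['--checker-name', 'core.DivideZero'])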