def test_invalid_baseline_file_extension(self):
    """ Test invalid baseline file extension for parse.

    'parse -e baseline' must reject an output file whose extension is
    not '.baseline' and must reject an output path that is a directory,
    both with exit status 1.
    """
    output_path = self.test_workspaces['OUTPUT']
    out_file_path = os.path.join(output_path, "cc_reports.invalid")

    # Analyze the first project.
    test_project_notes = os.path.join(self.test_workspaces['NORMAL'],
                                      "test_files", "notes")

    # Try to create baseline file with invalid extension.
    parse_cmd = [
        "CodeChecker", "parse", "-e", "baseline", "-o", out_file_path,
        test_project_notes
    ]
    _, err, result = call_command(parse_cmd, cwd=self.test_dir,
                                  env=self.env)
    self.assertEqual(result, 1)
    self.assertIn("Baseline files must have '.baseline' extensions", err)

    # Try to create baseline file in a directory which exists.
    # NOTE(review): assumes the OUTPUT workspace directory does not exist
    # yet (os.makedirs would raise otherwise) -- confirm fixture setup.
    os.makedirs(output_path)
    parse_cmd = [
        "CodeChecker", "parse", "-e", "baseline", "-o", output_path,
        test_project_notes
    ]
    _, err, result = call_command(parse_cmd, cwd=self.test_dir,
                                  env=self.env)
    self.assertEqual(result, 1)
    self.assertIn("Please provide a file path instead of a directory", err)
def test_stats_use(self):
    """ Use the already collected statistics for the analysis. """
    if not self.stats_capable:
        self.skipTest(NO_STATISTICS_MESSAGE)

    project_dir = self._testproject_data['project_path']
    statistics_dir = os.path.join(project_dir, 'stats')
    analyze_msg = "Starting static analysis"

    # Collecting statistics must not trigger the real analysis run.
    collect_cmd = [self._codechecker_cmd, 'analyze',
                   '--stats-collect', statistics_dir,
                   'compile_command.json', '-o', 'reports']
    collect_out, collect_err = call_command(collect_cmd,
                                            cwd=project_dir,
                                            env=self.env)
    print(collect_out)
    print(collect_err)
    self.assertNotIn(analyze_msg, collect_out)

    # Re-run the analysis consuming the collected statistics.
    use_cmd = [self._codechecker_cmd, 'analyze',
               '--stats-use', statistics_dir,
               'compile_command.json', '-o', 'reports']
    use_out, use_err = call_command(use_cmd, cwd=project_dir, env=self.env)
    print(use_out)
    print(use_err)
    self.assertIn(analyze_msg, use_out)

    # The collection phase must have produced both statistics files.
    collected = os.listdir(statistics_dir)
    for expected in ('SpecialReturn.yaml', 'UncheckedReturn.yaml'):
        self.assertIn(expected, collected)
def __do_ctu_collect(self):
    """ Execute CTU collect phase. """
    collect_cmd = [
        self._codechecker_cmd, 'analyze',
        '-o', self.report_dir,
        '--analyzers', 'clangsa',
        '--ctu-collect',
        self.buildlog]
    call_command(collect_cmd, cwd=self.test_dir, env=self.env)
def test_html_export_exit_code(self):
    """ Test exit code while HTML output generation.

    Covers three cases: missing output directory (exit 1), successful
    export with findings (exit 2), and export with every file skipped
    (exit 0).
    """
    test_project_macros = os.path.join(self.test_workspaces['NORMAL'],
                                       "test_files", "macros")

    # HTML export without an output directory must fail.
    extract_cmd = ['CodeChecker', 'parse', '--export', 'html',
                   test_project_macros]
    out, _, result = call_command(extract_cmd, cwd=self.test_dir,
                                  env=self.env)
    self.assertEqual(result, 1, "HTML parsing requires output directory.")
    self.assertTrue("export not allowed without argument" in out)

    # With an output directory the export succeeds and reports findings.
    output_path = os.path.join(self.test_workspaces['OUTPUT'], 'html')
    extract_cmd.extend(['--output', output_path])
    out, _, result = call_command(extract_cmd, cwd=self.test_dir,
                                  env=self.env)
    self.assertEqual(result, 2, "Parsing not found any issue.")
    self.assertTrue('Html file was generated' in out)
    self.assertTrue('Summary' in out)
    self.assertTrue('Statistics' in out)

    # Skipping every file yields a clean exit status.
    skip_file_path = os.path.join(self.test_dir, 'skipall.txt')
    extract_cmd.extend(["--skip", skip_file_path])
    out, _, result = call_command(extract_cmd, cwd=self.test_dir,
                                  env=self.env)
    self.assertEqual(result, 0, "Parsing should not found any issue.")
def test_stats_use(self):
    """ Use the already collected statistics for the analysis. """
    if not self.stats_capable:
        self.skipTest(NO_STATISTICS_MESSAGE)

    proj_dir = self._testproject_data['project_path']
    stats_path = os.path.join(proj_dir, 'stats')
    start_msg = "Starting static analysis"

    def run_analyze(flag):
        # Run 'analyze' with the given statistics flag, echo its output
        # and return what was written to stdout.
        res = call_command(
            [self._codechecker_cmd, 'analyze', flag, stats_path,
             'compile_command.json', '-o', 'reports'],
            cwd=proj_dir, env=self.env)
        print(res[0])
        print(res[1])
        return res[0]

    # Collecting statistics must not start the actual analysis.
    self.assertNotIn(start_msg, run_analyze('--stats-collect'))
    # Using the collected statistics must run the analysis.
    self.assertIn(start_msg, run_analyze('--stats-use'))

    produced = os.listdir(stats_path)
    self.assertIn('SpecialReturn.yaml', produced)
    self.assertIn('UncheckedReturn.yaml', produced)
def __do_ctu_collect(self):
    """ Execute CTU collect phase. """
    call_command(
        [self._codechecker_cmd, 'analyze',
         '-o', self.report_dir,
         '--analyzers', 'clangsa',
         '--ctu-collect',
         self.buildlog],
        cwd=self.test_dir, env=self.env)
def __do_ctu_collect(self, on_demand):
    """ Execute CTU collect phase.

    :param on_demand: when True, request the 'parse-on-demand' AST mode.
    """
    mode_args = ['--ctu-ast-mode=parse-on-demand'] if on_demand else []
    collect_cmd = (
        [self._codechecker_cmd, 'analyze', '-o', self.report_dir,
         '--analyzers', 'clangsa', '--ctu-collect']
        + mode_args
        + [self.buildlog])
    call_command(collect_cmd, cwd=self.test_dir, env=self.env)
def __do_ctu_collect(self, on_demand):
    """ Execute CTU collect phase.

    :param on_demand: selects 'parse-on-demand' (True) or 'load-from-pch'
        (False) AST mode; the flag is only emitted at all when the
        ON_DEMAND_ATTR attribute is set truthy on the test instance.
    """
    cmd = [self._codechecker_cmd, 'analyze',
           '-o', self.report_dir,
           '--analyzers', 'clangsa',
           '--ctu-collect']
    # NOTE(review): getattr raises AttributeError when the attribute is
    # missing -- presumably ON_DEMAND_ATTR is always set by the test
    # parameterization; confirm against the class setup.
    if getattr(self, ON_DEMAND_ATTR):
        cmd.extend(['--ctu-ast-mode',
                    'parse-on-demand' if on_demand else 'load-from-pch'])
    cmd.append(self.buildlog)
    call_command(cmd, cwd=self.test_dir, env=self.env)
def setUp(self):
    """ Prepare the workspace, detect statistics capability and create
    the compilation database used by the statistics tests. """
    # TEST_WORKSPACE is automatically set by test package __init__.py .
    test_workspace = os.environ['TEST_WORKSPACE']

    test_class = self.__class__.__name__
    print('Running ' + test_class + ' tests in ' + test_workspace)

    # Get the test project configuration from the prepared test workspace.
    self._testproject_data = env.setup_test_proj_cfg(test_workspace)
    self.assertIsNotNone(self._testproject_data)

    # Get the CodeChecker cmd if needed for the tests.
    self._codechecker_cmd = env.codechecker_cmd()
    self.env = env.codechecker_env()

    # Get if the package is able to collect statistics or not: probe the
    # 'analyze' help text for the '--stats' flag.
    cmd = [self._codechecker_cmd, 'analyze', '-h']
    output, _ = call_command(cmd, cwd=test_workspace, env=self.env)
    self.stats_capable = '--stats' in output
    print("'analyze' reported statistics collector-compatibility? " +
          str(self.stats_capable))

    # The environment may force-enable statistics tests even when the
    # help text does not advertise the flag.
    if not self.stats_capable:
        try:
            self.stats_capable = bool(
                util.strtobool(os.environ['CC_TEST_FORCE_STATS_CAPABLE']))
        except (ValueError, KeyError):
            # Unset or unparsable value: keep the detected capability.
            pass

    test_project_path = self._testproject_data['project_path']
    test_project_build = shlex.split(self._testproject_data['build_cmd'])
    test_project_clean = shlex.split(self._testproject_data['clean_cmd'])

    # Clean the test project before logging the compiler commands.
    output, err = call_command(test_project_clean,
                               cwd=test_project_path,
                               env=self.env)
    print(output)
    print(err)

    # Create compilation log used in the tests.
    log_cmd = [
        self._codechecker_cmd, 'log', '-o', 'compile_command.json', '-b'
    ]
    log_cmd.extend(test_project_build)
    output, err = call_command(log_cmd,
                               cwd=test_project_path,
                               env=self.env)
    print(output)
    print(err)
def test_parse_exit_code(self):
    """ Test exit code of parsing. """
    notes_dir = os.path.join(self.test_workspaces['NORMAL'],
                             "test_files", "notes")

    def run_parse(cmd):
        # Only the exit status is relevant for this test.
        _, _, status = call_command(cmd, cwd=self.test_dir, env=self.env)
        return status

    parse_cmd = ['CodeChecker', 'parse', notes_dir,
                 '--trim-path-prefix', notes_dir]
    # Findings present: exit status 2.
    self.assertEqual(run_parse(parse_cmd), 2,
                     "Parsing not found any issue.")

    # Skipping every file: clean exit status 0.
    parse_cmd.extend(["--skip", os.path.join(self.test_dir, 'skipall.txt')])
    self.assertEqual(run_parse(parse_cmd), 0,
                     "Parsing should not found any issue.")
def setUp(self): """ Set up workspace.""" # TEST_WORKSPACE is automatically set by test package __init__.py . self.test_workspace = os.environ['TEST_WORKSPACE'] test_class = self.__class__.__name__ print('Running ' + test_class + ' tests in ' + self.test_workspace) # Get the CodeChecker cmd if needed for the tests. self._codechecker_cmd = env.codechecker_cmd() self.env = env.codechecker_env() self.report_dir = os.path.join(self.test_workspace, 'reports') os.makedirs(self.report_dir) self.test_dir = os.path.join(os.path.dirname(__file__), 'test_files') # Get if clang is CTU-capable or not. cmd = [self._codechecker_cmd, 'analyze', '-h'] output, _ = call_command(cmd, cwd=self.test_dir, env=self.env) self.ctu_capable = '--ctu-' in output print("'analyze' reported CTU-compatibility? " + str(self.ctu_capable)) # Fix the "template" build JSONs to contain a proper directory # so the tests work. raw_buildlog = os.path.join(self.test_dir, 'buildlog.json') with open(raw_buildlog) as log_file: build_json = json.load(log_file) for command in build_json: command['directory'] = self.test_dir self.__old_pwd = os.getcwd() os.chdir(self.test_workspace) self.buildlog = os.path.join(self.test_workspace, 'buildlog.json') with open(self.buildlog, 'w') as log_file: json.dump(build_json, log_file)
def test_stats_collect_params(self):
    """ Testing collection parameters.

    Runs 'analyze --stats-collect' with explicit sample-count and
    relevance-threshold parameters and checks that the statistics files
    are produced and contain the expected symbol.
    """
    if not self.stats_capable:
        self.skipTest(NO_STATISTICS_MESSAGE)
    test_project_path = self._testproject_data['project_path']
    stats_dir = os.path.join(test_project_path, 'stats')
    cmd = [self._codechecker_cmd, 'analyze', '--stats-collect', stats_dir,
           'compile_command.json',
           '--stats-min-sample-count', '10',
           '--stats-relevance-threshold', '0.8',
           '-o', 'reports']
    output, err = call_command(cmd, cwd=test_project_path, env=self.env)
    print(output)
    print(err)

    # The collection phase must not start the actual analysis.
    analyze_msg = "Starting static analysis"
    self.assertNotIn(analyze_msg, output)
    stat_files = os.listdir(stats_dir)
    print(stat_files)
    self.assertIn('SpecialReturn.yaml', stat_files)
    self.assertIn('UncheckedReturn.yaml', stat_files)

    # Fix: open with an explicit encoding instead of the locale default,
    # consistent with the other file accesses in this module.
    with open(os.path.join(stats_dir, 'UncheckedReturn.yaml'), 'r',
              encoding="utf-8", errors="ignore") as statfile:
        unchecked_stats = statfile.read()
    self.assertIn("c:@F@readFromFile#*1C#*C#", unchecked_stats)
def setUp(self): """ Set up workspace.""" # TEST_WORKSPACE is automatically set by test package __init__.py . self.test_workspace = os.environ['TEST_WORKSPACE'] test_class = self.__class__.__name__ print('Running ' + test_class + ' tests in ' + self.test_workspace) # Get the CodeChecker cmd if needed for the tests. self._codechecker_cmd = env.codechecker_cmd() self.env = env.codechecker_env() self.report_dir = os.path.join(self.test_workspace, 'reports') os.makedirs(self.report_dir) self.test_dir = os.path.join(os.path.dirname(__file__), 'test_files') # Get if clang is CTU-capable or not. cmd = [self._codechecker_cmd, 'analyze', '-h'] output, _ = call_command(cmd, cwd=self.test_dir, env=self.env) self.ctu_capable = '--ctu-' in output print("'analyze' reported CTU-compatibility? " + str(self.ctu_capable)) # Fix the "template" build JSONs to contain a proper directory # so the tests work. raw_buildlog = os.path.join(self.test_dir, 'buildlog.json') with open(raw_buildlog) as log_file: build_json = json.load(log_file) for command in build_json: command['directory'] = self.test_dir self.__old_pwd = os.getcwd() os.chdir(self.test_workspace) self.buildlog = os.path.join(self.test_workspace, 'buildlog.json') with open(self.buildlog, 'w') as log_file: json.dump(build_json, log_file)
def __set_up_test_dir(self, project_path): self.test_dir = project.path(project_path) # Get if clang is CTU-capable or not. cmd = [self._codechecker_cmd, 'analyze', '-h'] output, _ = call_command(cmd, cwd=self.test_dir, env=self.env) self.ctu_capable = '--ctu-' in output print("'analyze' reported CTU-compatibility? " + str(self.ctu_capable)) self.ctu_has_analyzer_display_ctu_progress = \ host_check.has_analyzer_feature(self.__getClangSaPath(), '-analyzer-display-ctu-progress') print("Has -analyzer-display-ctu-progress? " + str(self.ctu_has_analyzer_display_ctu_progress)) # Fix the "template" build JSONs to contain a proper directory # so the tests work. raw_buildlog = os.path.join(self.test_dir, 'buildlog.json') with open(raw_buildlog) as log_file: build_json = json.load(log_file) for command in build_json: command['directory'] = self.test_dir self.__old_pwd = os.getcwd() os.chdir(self.test_workspace) self.buildlog = os.path.join(self.test_workspace, 'buildlog.json') with open(self.buildlog, 'w') as log_file: json.dump(build_json, log_file)
def test_json_output_for_macros(self):
    """ Test parse json output for macros. """
    macros_dir = os.path.join(self.test_workspaces['NORMAL'],
                              "test_files", "macros")

    parse_cmd = ['CodeChecker', 'parse', "-e", "json", macros_dir]
    out, _, status = call_command(parse_cmd, cwd=self.test_dir,
                                  env=self.env)
    self.assertEqual(status, 2, "Parsing not found any issue.")

    reports = json.loads(out)
    self.assertEqual(len(reports), 1)

    # The single report must carry the macro expansion information.
    report = reports[0]
    for key in ('check_name', 'issue_hash_content_of_line_in_context',
                'files', 'path', 'macro_expansions'):
        self.assertIn(key, report)
    self.assertEqual(len(report['files']), 1)
    self.assertTrue(report['path'])
    self.assertTrue(report['macro_expansions'])
def __check_ctu_analyze(self, output):
    """ Check artifacts of CTU analyze phase.

    :param output: console output of the 'analyze' run to inspect.
    """
    # Both translation units must have been analyzed without failure.
    self.assertNotIn("Failed to analyze", output)
    self.assertIn("analyzed lib.c successfully", output)
    self.assertIn("analyzed main.c successfully", output)

    # Parse the produced reports; exit status 2 means findings exist.
    cmd = [self._codechecker_cmd, 'parse', self.report_dir]
    output, _, result = call_command(cmd, cwd=self.test_dir, env=self.env)
    self.assertEqual(result, 2, "Parsing could not found the expected bug.")
    self.assertIn("defect(s) in lib.c", output)
    self.assertIn("no defects in main.c", output)
    self.assertIn("lib.c:3:", output)
    self.assertIn("[core.NullDereference]", output)

    # We assume that only main.c has been analyzed with CTU and it involves
    # lib.c during its analysis.
    connections_dir = os.path.join(self.report_dir, 'ctu_connections')
    connections_files = os.listdir(connections_dir)
    self.assertEqual(len(connections_files), 1)
    connections_file = connections_files[0]
    self.assertTrue(connections_file.startswith('main.c'))
    with open(os.path.join(connections_dir, connections_file)) as f:
        self.assertTrue(f.readline().endswith('lib.c'))
def test_stats_collect_params(self):
    """ Testing collection parameters.

    Runs 'analyze --stats-collect' with explicit sample-count and
    relevance-threshold parameters and checks that the statistics files
    are produced and contain the expected symbol.
    """
    if not self.stats_capable:
        self.skipTest(NO_STATISTICS_MESSAGE)
    test_project_path = self._testproject_data['project_path']
    stats_dir = os.path.join(test_project_path, 'stats')
    cmd = [
        self._codechecker_cmd, 'analyze', '--stats-collect', stats_dir,
        'compile_command.json',
        '--stats-min-sample-count', '10',
        '--stats-relevance-threshold', '0.8',
        '-o', 'reports'
    ]
    output, err = call_command(cmd, cwd=test_project_path, env=self.env)
    print(output)
    print(err)

    # The collection phase must not start the actual analysis.
    analyze_msg = "Starting static analysis"
    self.assertNotIn(analyze_msg, output)
    stat_files = os.listdir(stats_dir)
    print(stat_files)
    self.assertIn('SpecialReturn.yaml', stat_files)
    self.assertIn('UncheckedReturn.yaml', stat_files)

    # Fix: open with an explicit encoding instead of the locale default,
    # consistent with the other file accesses in this module.
    with open(os.path.join(stats_dir, 'UncheckedReturn.yaml'), 'r',
              encoding="utf-8", errors="ignore") as statfile:
        unchecked_stats = statfile.read()
    self.assertIn("c:@F@readFromFile#*1C#*C#", unchecked_stats)
def test_json_output_for_notes(self):
    """ Test parse json output for notes. """
    notes_dir = os.path.join(self.test_workspaces['NORMAL'],
                             "test_files", "notes")

    parse_cmd = ['CodeChecker', 'parse', "-e", "json", notes_dir]
    out, _, status = call_command(parse_cmd, cwd=self.test_dir,
                                  env=self.env)
    self.assertEqual(status, 2, "Parsing not found any issue.")

    reports = json.loads(out)["reports"]
    self.assertEqual(len(reports), 1)

    # The single report must carry the note information.
    report = reports[0]
    for key in ('checker_name', 'report_hash', 'file',
                'bug_path_events', 'notes'):
        self.assertIn(key, report)
    self.assertTrue(report['bug_path_events'])
    self.assertTrue(report['notes'])
def test_codeclimate_output(self):
    """ Test parse codeclimate output. """
    notes_dir = os.path.join(self.test_workspaces['NORMAL'],
                             "test_files", "notes")
    parse_cmd = ['CodeChecker', 'parse', "-e", "codeclimate", notes_dir,
                 '--trim-path-prefix', notes_dir]
    out, _ = call_command(parse_cmd, cwd=self.test_dir, env=self.env)

    # Exactly one issue in the expected Code Climate schema.
    expected = [{
        'type': 'issue',
        'check_name': 'alpha.clone.CloneChecker',
        'description': 'Duplicate code detected',
        'categories': ['Bug Risk'],
        'fingerprint': '3d15184f38c5fa57e479b744fe3f5035',
        'location': {
            'path': 'notes.cpp',
            'lines': {
                'begin': 3
            }
        }
    }]
    self.assertEqual(json.loads(out), expected)
def test_json_output_for_notes(self):
    """ Test parse json output for notes. """
    notes_dir = os.path.join(self.test_workspaces['NORMAL'],
                             "test_files", "notes")
    parse_cmd = ['CodeChecker', 'parse', "-e", "json", notes_dir]
    out, _ = call_command(parse_cmd, cwd=self.test_dir, env=self.env)

    parsed = json.loads(out)
    self.assertEqual(len(parsed), 1)

    # The single report must carry the note information.
    report = parsed[0]
    for key in ('check_name', 'issue_hash_content_of_line_in_context',
                'files', 'path', 'notes'):
        self.assertIn(key, report)
    self.assertEqual(len(report['files']), 1)
    self.assertTrue(report['path'])
    self.assertTrue(report['notes'])
def test_ctu_makefile_generation(self):
    """ Test makefile generation in CTU mode.

    Generates a Makefile with 'analyze --ctu --makefile', executes it
    with 'make', then parses the reports and checks for the expected
    null-dereference finding in lib.c.
    """
    cmd = [self._codechecker_cmd, 'analyze', '-o', self.report_dir,
           '--analyzers', 'clangsa', '--ctu', '--makefile']
    cmd.append(self.buildlog)
    # NOTE(review): the exit statuses of these two commands are not
    # checked here; the parse assertions below fail if either step broke.
    call_command(cmd, cwd=self.test_dir, env=self.env)
    call_command(["make"], cwd=self.report_dir, env=self.env)

    # Check the output.
    cmd = [self._codechecker_cmd, 'parse', self.report_dir]
    output, _ = call_command(cmd, cwd=self.test_dir, env=self.env)
    self.assertIn("defect(s) in lib.c", output)
    self.assertIn("lib.c:3:", output)
    self.assertIn("[core.NullDereference]", output)
def __set_up_test_dir(self, project_path): self.test_dir = project.path(project_path) # Get if clang is CTU-capable or not. cmd = [self._codechecker_cmd, 'analyze', '-h'] output, _ = call_command(cmd, cwd=self.test_dir, env=self.env) self.ctu_capable = '--ctu-' in output print("'analyze' reported CTU-compatibility? " + str(self.ctu_capable)) self.ctu_has_analyzer_display_ctu_progress = \ host_check.has_analyzer_config_option(self.__getClangSaPath(), 'display-ctu-progress', self.env) print("Has display-ctu-progress=true? " + str(self.ctu_has_analyzer_display_ctu_progress)) # Fix the "template" build JSONs to contain a proper directory # so the tests work. raw_buildlog = os.path.join(self.test_dir, 'buildlog.json') with open(raw_buildlog, encoding="utf-8", errors="ignore") as log_file: build_json = json.load(log_file) for command in build_json: command['directory'] = self.test_dir self.__old_pwd = os.getcwd() os.chdir(self.test_workspace) self.buildlog = os.path.join(self.test_workspace, 'buildlog.json') with open(self.buildlog, 'w', encoding="utf-8", errors="ignore") as log_file: json.dump(build_json, log_file)
def test_html_checker_url(self):
    """ Test whether checker documentation urls are generated properly.

    Copies two plist reports into a temporary directory, renames one
    checker to an unknown name, exports HTML and checks that known
    checkers get a documentation link while unknown ones do not.
    """
    with tempfile.TemporaryDirectory() as tmp_dir:
        notes_plist = os.path.join(self.test_workspaces['NORMAL'],
                                   "test_files", "notes", "notes.plist")
        macros_plist = os.path.join(self.test_workspaces['NORMAL'],
                                    "test_files", "macros", "macros.plist")
        shutil.copy(notes_plist, tmp_dir)
        shutil.copy(macros_plist, tmp_dir)

        # Rewrite the copied macros report so it refers to a checker
        # name that has no documentation url.
        macros_plist = os.path.join(tmp_dir, 'macros.plist')
        with open(macros_plist, 'r+',
                  encoding="utf-8", errors="ignore") as f:
            content = f.read()
            new_content = content.replace("core.NullDereference",
                                          "UNKNOWN CHECKER NAME")
            f.seek(0)
            f.truncate()
            f.write(new_content)

        output_path = os.path.join(tmp_dir, 'html')
        extract_cmd = [
            'CodeChecker', 'parse', '-e', 'html', '-o', output_path,
            tmp_dir
        ]
        _, err, result = call_command(extract_cmd, cwd=self.test_dir,
                                      env=self.env)
        self.assertEqual(result, 2, "Parsing not found any issue.")
        self.assertFalse(err)

        # Test whether documentation urls are set properly for known
        # checkers in the index.html file.
        index_html = os.path.join(output_path, "index.html")
        with open(index_html, 'r', encoding="utf-8", errors="ignore") as f:
            content = f.read()
        self.assertTrue(
            re.search('<a href=".*>alpha.clone.CloneChecker', content))
        self.assertFalse(
            re.search('<a href=".*>UNKNOWN CHECKER NAME', content))
        self.assertTrue(re.search('UNKNOWN CHECKER NAME', content))

        # Test whether documentation urls are set properly for known
        # checkers in the generated HTML report file.
        report_html = os.path.join(output_path, "notes.plist.html")
        with open(report_html, 'r',
                  encoding="utf-8", errors="ignore") as f:
            content = f.read()
        self.assertTrue(re.search('"url": ".+"', content))

        # Test whether documentation urls are not set for unknown checkers
        # in the generated HTML report file.
        report_html = os.path.join(output_path, "macros.plist.html")
        with open(report_html, 'r',
                  encoding="utf-8", errors="ignore") as f:
            content = f.read()
        self.assertTrue(re.search('"url": null', content))
def __do_ctu_analyze(self):
    """ Execute CTU analyze phase and return its console output. """
    analyze_cmd = [
        self._codechecker_cmd, 'analyze',
        '-o', self.report_dir,
        '--analyzers', 'clangsa',
        '--ctu-analyze',
        self.buildlog]
    output, _ = call_command(analyze_cmd, cwd=self.test_dir, env=self.env)
    return output
def __getClangSaPath(self):
    """ Return the install path of the 'clangsa' analyzer.

    Queries the available analyzers in JSON format and looks up the
    entry named 'clangsa'. Returns None when no such analyzer exists.
    """
    cmd = [self._codechecker_cmd, 'analyzers', '--details', '-o', 'json']
    output, _ = call_command(cmd, cwd=self.test_dir, env=self.env)
    # Fix: the original only inspected the first two list entries and
    # raised IndexError when fewer than two analyzers were reported;
    # scan the whole list instead.
    for analyzer in json.loads(output):
        if analyzer["name"] == "clangsa":
            return analyzer["path"]
    return None
def __do_ctu_analyze(self):
    """ Execute CTU analyze phase and return its console output. """
    result, _ = call_command(
        [self._codechecker_cmd, 'analyze',
         '-o', self.report_dir,
         '--analyzers', 'clangsa',
         '--ctu-analyze',
         self.buildlog],
        cwd=self.test_dir, env=self.env)
    return result
def __getClangSaPath(self):
    """ Return the install path of the 'clangsa' analyzer.

    Queries the available analyzers in JSON format and looks up the
    entry named 'clangsa'. Returns None when no such analyzer exists.
    """
    cmd = [self._codechecker_cmd, 'analyzers', '--details', '-o', 'json']
    output, _ = call_command(cmd, cwd=self.test_dir, env=self.env)
    # Fix: the original only inspected the first two list entries and
    # raised IndexError when fewer than two analyzers were reported;
    # scan the whole list instead.
    for analyzer in json.loads(output):
        if analyzer["name"] == "clangsa":
            return analyzer["path"]
    return None
def test_ctu_ondemand_yaml_format(self):
    """ Test the generated YAML used in CTU on-demand mode. The YAML file
    should not contain newlines in individual entries in the generated
    textual format. """
    # Copy test files to a directory which file path will be longer than
    # 128 chars to test the yaml parser.
    test_dir = os.path.join(
        self.test_workspace,
        os.path.join(
            *[''.join('0' for _ in range(43)) for _ in range(0, 3)]))
    shutil.copytree(self.test_dir, test_dir)

    complex_buildlog = os.path.join(test_dir, 'complex_buildlog.json')
    shutil.copy(self.complex_buildlog, complex_buildlog)
    env.adjust_buildlog('complex_buildlog.json', test_dir, test_dir)

    cmd = [
        self._codechecker_cmd, 'analyze',
        '-o', self.report_dir,
        '--analyzers', 'clangsa',
        '--ctu-collect',  # ctu-directory is needed, and it remains
                          # intact only if a single ctu-phase is
                          # specified
        '--ctu-ast-mode', 'parse-on-demand',
        complex_buildlog
    ]
    _, _, result = call_command(cmd, cwd=test_dir, env=self.env)
    self.assertEqual(result, 0, "Analyzing failed.")

    ctu_dir = os.path.join(self.report_dir, 'ctu-dir')

    # In order to be architecture-invariant, ctu directory is searched for
    # invocation list files.
    invocation_list_paths = list(
        glob.glob(os.path.join(ctu_dir, '*', 'invocation-list.yml')))

    # At least one invocation list should exist.
    self.assertGreaterEqual(len(invocation_list_paths), 1)

    # Assert that every line begins with either - or / to approximate that
    # the line is not a line-broken list entry. If there is no newline in
    # the textual representation, then every line either starts with a /
    # (if it is an absolute path posing as a key) or - (if it is a list
    # entry). This requirement of format is a workaround for the LLVM YAML
    # parser.
    def assert_no_linebreak(invocation_list_file: IO):
        # Every line of the invocation list must match '^ *[-/]'.
        invocation_lines = invocation_list_file.readlines()
        for line in invocation_lines:
            self.assertRegex(line, '^ *[-/]')

    for invocation_list_path in invocation_list_paths:
        with open(invocation_list_path) as invocation_list_file:
            assert_no_linebreak(invocation_list_file)
def __getClangSaPath(self):
    """ Return the install path of the 'clangsa' analyzer.

    Queries the available analyzers in JSON format and looks up the
    entry named 'clangsa'. Returns None when no such analyzer exists.
    """
    cmd = [self._codechecker_cmd, 'analyzers', '--details', '-o', 'json']
    output, _, result = call_command(cmd, cwd=self.test_workspace,
                                     env=self.env)
    self.assertEqual(result, 0, "Failed to run analyzer.")
    # Fix: the original only inspected the first two list entries and
    # raised IndexError when fewer than two analyzers were reported;
    # scan the whole list instead.
    for analyzer in json.loads(output):
        if analyzer["name"] == "clangsa":
            return analyzer["path"]
    return None
def test_baseline_output(self):
    """ Test parse baseline output.

    Exports a baseline file for one project, then exports a second
    project into the same file and checks that the report hashes are
    merged rather than overwritten.
    """
    output_path = self.test_workspaces['OUTPUT']
    out_file_path = os.path.join(output_path, "reports.baseline")

    # Analyze the first project.
    test_project_notes = os.path.join(self.test_workspaces['NORMAL'],
                                      "test_files", "notes")
    extract_cmd = [
        'CodeChecker', 'parse', "-e", "baseline", "-o", out_file_path,
        test_project_notes, '--trim-path-prefix', test_project_notes
    ]
    _, _, result = call_command(extract_cmd, cwd=self.test_dir,
                                env=self.env)
    self.assertEqual(result, 2, "Parsing not found any issue.")
    report_hashes = baseline.get_report_hashes([out_file_path])
    self.assertEqual(report_hashes, {'3d15184f38c5fa57e479b744fe3f5035'})

    # Analyze the second project and see whether the baseline file is
    # merged.
    test_project_macros = os.path.join(self.test_workspaces['NORMAL'],
                                       "test_files", "macros")
    extract_cmd = [
        'CodeChecker', 'parse', "-e", "baseline", "-o", out_file_path,
        test_project_macros, '--trim-path-prefix', test_project_macros
    ]
    _, _, result = call_command(extract_cmd, cwd=self.test_dir,
                                env=self.env)
    self.assertEqual(result, 2, "Parsing not found any issue.")
    report_hashes = baseline.get_report_hashes([out_file_path])
    self.assertSetEqual(report_hashes, {
        '3d15184f38c5fa57e479b744fe3f5035',
        'f8fbc46cc5afbb056d92bd3d3d702781'
    })
def test_ctu_makefile_generation(self):
    """ Test makefile generation in CTU mode.

    Generates a Makefile with 'analyze --ctu --makefile', runs it via
    'make', then parses the reports and checks for the expected
    null-dereference finding in lib.c.
    """
    cmd = [self._codechecker_cmd, 'analyze', '-o', self.report_dir,
           '--analyzers', 'clangsa', '--ctu', '--makefile']
    cmd.append(self.buildlog)
    _, _, result = call_command(cmd, cwd=self.test_dir, env=self.env)
    self.assertEqual(result, 0, "Analyzing failed.")

    # Execute the generated Makefile to run the actual analysis.
    _, _, result = call_command(["make"], cwd=self.report_dir,
                                env=self.env)
    self.assertEqual(result, 0, "Performing generated Makefile failed.")

    # Check the output; exit status 2 means findings were reported.
    cmd = [self._codechecker_cmd, 'parse', self.report_dir]
    output, _, result = call_command(cmd, cwd=self.test_dir, env=self.env)
    self.assertEqual(result, 2, "Parsing could not found the expected bug.")
    self.assertIn("defect(s) in lib.c", output)
    self.assertIn("lib.c:3:", output)
    self.assertIn("[core.NullDereference]", output)
def test_codeclimate_export_exit_code(self):
    """ Test exporting codeclimate output into the filesystem. """
    notes_dir = os.path.join(self.test_workspaces['NORMAL'],
                             "test_files", "notes")
    skip_file = os.path.join(self.test_dir, 'skipall.txt')

    # Every report is skipped, so the export must exit cleanly.
    parse_cmd = ['CodeChecker', 'parse', "--export", "codeclimate",
                 notes_dir, "--skip", skip_file,
                 '--trim-path-prefix', notes_dir]
    _, _, status = call_command(parse_cmd, cwd=self.test_dir,
                                env=self.env)
    self.assertEqual(status, 0, "Parsing should not found any issue.")
def __do_ctu_all(self, on_demand):
    """ Execute a full CTU run and return its console output.

    :param on_demand: when True, request the 'parse-on-demand' AST mode.
    """
    mode_args = ['--ctu-ast-mode=parse-on-demand'] if on_demand else []
    run_cmd = (
        [self._codechecker_cmd, 'analyze', '-o', self.report_dir,
         '--analyzers', 'clangsa', '--ctu-all']
        + mode_args
        + [self.buildlog])
    output, _ = call_command(run_cmd, cwd=self.test_dir, env=self.env)
    return output
def test_gerrit_output(self):
    """ Test gerrit output of the parse command.

    Drives the gerrit exporter through the CC_REPORT_URL,
    CC_CHANGED_FILES and CC_REPO_DIR environment variables and checks
    the produced review JSON.
    """
    env = self.env.copy()
    report_url = "localhost:8080/index.html"
    env["CC_REPORT_URL"] = report_url

    changed_file_path = os.path.join(self.test_dir, 'files_changed')
    with open(changed_file_path, 'w',
              encoding="utf-8", errors="ignore") as changed_file:
        # Print some garbage value to the file.
        changed_file.write(")]}'\n")
        changed_files = {
            "/COMMIT_MSG": {},
            "macros.cpp": {}}
        changed_file.write(json.dumps(changed_files))
    env["CC_CHANGED_FILES"] = changed_file_path

    test_project_macros = os.path.join(self.test_workspaces['NORMAL'],
                                       "test_files", "macros")
    env["CC_REPO_DIR"] = test_project_macros

    extract_cmd = ['CodeChecker', 'parse', test_project_macros,
                   '-e', 'gerrit']
    print(" ".join(extract_cmd))
    out, _, result = call_command(extract_cmd, cwd=self.test_dir, env=env)
    self.assertEqual(result, 2, "Parsing not found any issue.")
    print(out)

    review_data = json.loads(out)
    # A finding must mark the change as failed review.
    lbls = review_data["labels"]
    self.assertEqual(lbls["Verified"], -1)
    self.assertEqual(lbls["Code-Review"], -1)
    self.assertEqual(review_data["message"],
                     "CodeChecker found 1 issue(s) in the code. "
                     "See: '{0}'".format(report_url))
    self.assertEqual(review_data["tag"], "jenkins")

    # Because the CC_CHANGED_FILES is set we will see reports only for
    # the macro.cpp file.
    comments = review_data["comments"]
    self.assertEqual(len(comments), 1)
    reports = comments["macros.cpp"]
    self.assertEqual(len(reports), 1)

    # Clean up the temporary changed-files list.
    os.remove(changed_file_path)
def __check_ctu_analyze(self, output):
    """ Check artifacts of CTU analyze phase.

    :param output: console output of the 'analyze' run to inspect.
    """
    # Both translation units must have been analyzed without failure.
    self.assertNotIn("Failed to analyze", output)
    self.assertIn("analyzed lib.c successfully", output)
    self.assertIn("analyzed main.c successfully", output)

    # Parse the produced reports and check the expected CTU finding.
    cmd = [self._codechecker_cmd, 'parse', self.report_dir]
    output, _ = call_command(cmd, cwd=self.test_dir, env=self.env)
    self.assertIn("defect(s) in lib.c", output)
    self.assertIn("no defects in main.c", output)
    self.assertIn("lib.c:3:", output)
    self.assertIn("[core.NullDereference]", output)
def __do_ctu_all(self, on_demand):
    """ Execute a full CTU run and return its console output.

    :param on_demand: selects 'parse-on-demand' (True) or 'load-from-pch'
        (False) AST mode; the flag is only emitted at all when the
        ON_DEMAND_ATTR attribute is set truthy on the test instance.
    """
    cmd = [self._codechecker_cmd, 'analyze', '-o', self.report_dir,
           '--analyzers', 'clangsa', '--ctu-all']
    # NOTE(review): getattr raises AttributeError when the attribute is
    # missing -- presumably ON_DEMAND_ATTR is always set by the test
    # parameterization; confirm against the class setup.
    if getattr(self, ON_DEMAND_ATTR):
        cmd.extend(['--ctu-ast-mode',
                    'parse-on-demand' if on_demand else 'load-from-pch'])
    cmd.append(self.buildlog)
    out, _ = call_command(cmd, cwd=self.test_dir, env=self.env)
    return out
def __check_ctu_analyze(self, output):
    """ Check artifacts of CTU analyze phase. """
    # Both translation units must have been analyzed without failure.
    self.assertNotIn("Failed to analyze", output)
    self.assertIn("analyzed lib.c successfully", output)
    self.assertIn("analyzed main.c successfully", output)

    # Parse the produced reports and verify the expected CTU finding.
    parse_cmd = [self._codechecker_cmd, 'parse', self.report_dir]
    parse_out, _ = call_command(parse_cmd, cwd=self.test_dir,
                                env=self.env)
    for fragment in ("defect(s) in lib.c",
                     "no defects in main.c",
                     "lib.c:3:",
                     "[core.NullDereference]"):
        self.assertIn(fragment, parse_out)
def test_invalid_plist_file(self):
    """ Test parsing invalid plist file. """
    plist_path = os.path.join(self.test_workspaces['NORMAL'],
                              "test_files", "invalid.plist")

    # Create a file that is not a valid plist document.
    with open(plist_path, "w+", encoding="utf-8",
              errors="ignore") as plist_file:
        plist_file.write("Invalid plist file.")

    parse_cmd = ['CodeChecker', 'parse', plist_path]
    out, _ = call_command(parse_cmd, cwd=self.test_dir, env=self.env)
    self.assertIn("Invalid plist file", out)
def setUp(self):
    """ Prepare the workspace, detect statistics capability and create
    the compilation database used by the statistics tests. """
    # TEST_WORKSPACE is automatically set by test package __init__.py .
    test_workspace = os.environ['TEST_WORKSPACE']

    test_class = self.__class__.__name__
    print('Running ' + test_class + ' tests in ' + test_workspace)

    # Get the test project configuration from the prepared test workspace.
    self._testproject_data = env.setup_test_proj_cfg(test_workspace)
    self.assertIsNotNone(self._testproject_data)

    # Get the CodeChecker cmd if needed for the tests.
    self._codechecker_cmd = env.codechecker_cmd()
    self.env = env.codechecker_env()

    # Get if the package is able to collect statistics or not: probe the
    # 'analyze' help text for the '--stats' flag.
    cmd = [self._codechecker_cmd, 'analyze', '-h']
    output, _ = call_command(cmd, cwd=test_workspace, env=self.env)
    self.stats_capable = '--stats' in output
    print("'analyze' reported statistics collector-compatibility? " +
          str(self.stats_capable))

    test_project_path = self._testproject_data['project_path']
    test_project_build = shlex.split(self._testproject_data['build_cmd'])

    # Create compilation log used in the tests.
    log_cmd = [self._codechecker_cmd, 'log', '-o',
               'compile_command.json', '-b']
    log_cmd.extend(test_project_build)
    output, err = call_command(log_cmd, cwd=test_project_path,
                               env=self.env)
    print(output)
    print(err)
def __do_ctu_all(self, reparse, extra_args=None):
    """ Execute a full CTU run and return its console output.

    @param extra_args: list of additional arguments
    """
    analyze_cmd = [self._codechecker_cmd, 'analyze',
                   '-o', self.report_dir,
                   '--analyzers', 'clangsa',
                   '--ctu-all']
    if reparse:
        analyze_cmd.append('--ctu-on-the-fly')
    # Forward any caller-supplied extra flags before the build log.
    analyze_cmd.extend(extra_args if extra_args is not None else [])
    analyze_cmd.append(self.buildlog)
    out, _ = call_command(analyze_cmd, cwd=self.test_dir, env=self.env)
    return out
def test_stats(self):
    """ Enable statistics collection for the analysis. """
    if not self.stats_capable:
        self.skipTest(NO_STATISTICS_MESSAGE)

    project_dir = self._testproject_data['project_path']
    stats_cmd = [self._codechecker_cmd, 'analyze', '-o', 'reports',
                 '--stats', 'compile_command.json']
    out, err = call_command(stats_cmd, cwd=project_dir, env=self.env)
    print(out)
    print(err)

    # The statistics collection phase must be announced on stdout.
    self.assertIn("Collecting data for statistical analysis.", out)