def test_file_existence(self):
    """Check that sources and headers of the compilation database end up
    in the TU zip.

    The compilation database is read from the test project, its directory
    entries are rewritten to the current checkout, and the resulting zip
    must contain both 'main.cpp' and the '<vector>' standard header.
    """
    compile_json = os.path.join(self._test_proj_dir, 'compile_command.json')

    with open(compile_json, mode='rb') as cmpjson:
        compile_cmd_data = json.load(cmpjson)

    # Overwrite the directory paths.
    # This is needed because the tests run on different machines
    # so the directory path changes in each case.
    for cmp in compile_cmd_data:
        cmp['directory'] = self._test_proj_dir

    # mkstemp() also returns an open OS-level file descriptor; close it
    # immediately so the descriptor is not leaked (only the path is used).
    fd, zip_file_name = tempfile.mkstemp(suffix='.zip')
    os.close(fd)

    try:
        tu_collector.zip_tu_files(zip_file_name, compile_cmd_data)

        with zipfile.ZipFile(zip_file_name) as archive:
            files = archive.namelist()
    finally:
        # Remove the temp file even if zipping failed.
        os.remove(zip_file_name)

    self.assertTrue(any(
        path.endswith(os.path.join('/', 'main.cpp')) for path in files))
    self.assertTrue(any(
        path.endswith(os.path.join('/', 'vector')) for path in files))
def test_file_existence(self):
    """Check TU collection when the build log is produced on the fly.

    A compilation database is generated with 'CodeChecker log' for a g++
    build of 'main.cpp'; the resulting zip must contain both the source
    file and the '<vector>' standard header.
    """
    source_file = os.path.join(self._test_proj_dir, 'main.cpp')

    # mkstemp() also returns an open OS-level file descriptor; close it
    # immediately so the descriptor is not leaked (only the path is used).
    fd, build_json = tempfile.mkstemp('.json')
    os.close(fd)

    proc = subprocess.Popen([
        self._codechecker_cmd, 'log',
        '-b', 'g++ -o /dev/null ' + source_file,
        '-o', build_json])
    proc.communicate()

    fd, zip_file_name = tempfile.mkstemp(suffix='.zip')
    os.close(fd)

    try:
        tu_collector.zip_tu_files(zip_file_name, build_json)

        with zipfile.ZipFile(zip_file_name) as archive:
            files = archive.namelist()
    finally:
        # Clean up the temp files even if zipping failed.
        os.remove(build_json)
        os.remove(zip_file_name)

    self.assertTrue(any(
        path.endswith(os.path.join('/', 'main.cpp')) for path in files))
    self.assertTrue(any(
        path.endswith(os.path.join('/', 'vector')) for path in files))
def test_ctu_collection(self):
    """The zip of a CTU-analyzed action must also carry its dependencies.

    A fake CTU dependency file is written for 'ctu.cpp' naming 'zero.cpp'
    as a dependent TU; the produced zip is then expected to contain the
    main source, the dependent source, its header and '<vector>'.
    """
    with tempfile.TemporaryDirectory() as ctu_deps_dir:
        ctu_action = next(ba for ba in self.compile_cmd_data
                          if ba['file'] == 'ctu.cpp')

        # '__analyzer_action_hash' starts with a double underscore, so a
        # direct attribute access here would be name-mangled by the class
        # body; fetch it through getmembers() instead.
        members = dict(inspect.getmembers(tu_collector))
        hash_fun = members['__analyzer_action_hash']

        dep_file_path = os.path.join(ctu_deps_dir, hash_fun(ctu_action))
        with open(dep_file_path, 'w') as dep_file:
            dep_file.write(os.path.join(self._test_proj_dir, 'zero.cpp'))

        with tempfile.NamedTemporaryFile(suffix='.zip') as zip_file:
            tu_collector.zip_tu_files(
                zip_file.name, self.compile_cmd_data,
                file_filter='ctu.cpp', ctu_deps_dir=ctu_deps_dir)

            with zipfile.ZipFile(zip_file.name) as archive:
                files = archive.namelist()

        for expected in ('vector', 'ctu.cpp', 'zero.cpp', 'zero.h'):
            suffix = os.path.join('/', expected)
            self.assertTrue(any(p.endswith(suffix) for p in files))
def test_file_existence(self):
    """Check that every source of the fixture compilation database, the
    standard headers they pull in, and the database itself end up in the
    TU zip.
    """
    # mkstemp() also returns an open OS-level file descriptor; close it
    # immediately so the descriptor is not leaked (only the path is used).
    fd, zip_file_name = tempfile.mkstemp(suffix='.zip')
    os.close(fd)

    try:
        tu_collector.zip_tu_files(zip_file_name, self.compile_cmd_data)

        with zipfile.ZipFile(zip_file_name) as archive:
            files = archive.namelist()
    finally:
        # Remove the temp file even if zipping failed.
        os.remove(zip_file_name)

    self.assertTrue(any(
        path.endswith(os.path.join('/', 'main.c')) for path in files))
    self.assertTrue(any(
        path.endswith(os.path.join('/', 'main.cpp')) for path in files))
    self.assertTrue(any(
        path.endswith(os.path.join('/', 'vector')) for path in files))
    self.assertTrue(any(
        path.endswith(os.path.join('/', 'hello.c')) for path in files))

    self.assertIn('compilation_database.json', files)
def handle_failure(source_analyzer, rh, zip_file, result_base, actions_map):
    """
    If the analysis fails a debug zip is packed together which contains
    build, analysis information and source files to be able to reproduce
    the failed analysis.
    """
    action = rh.buildaction

    # Gather extra files the analyzer mentioned on stdout/stderr; this is
    # best-effort only, so any failure is merely logged.
    other_files = set()
    try:
        LOG.debug("Fetching other dependent files from analyzer "
                  "output...")
        for output in (rh.analyzer_stdout, rh.analyzer_stderr):
            other_files.update(
                source_analyzer.get_analyzer_mentioned_files(output))
    except Exception as ex:
        LOG.debug("Couldn't generate list of other files "
                  "from analyzer output:")
        LOG.debug(str(ex))

    LOG.debug("Collecting debug data")

    # The failed action itself, plus the build action of every mentioned
    # file we can find in the action map.
    buildactions = [{
        'file': action.source,
        'command': action.original_command,
        'directory': action.directory
    }]

    for dep in other_files:
        mentioned_file = os.path.abspath(
            os.path.join(action.directory, dep))
        key = mentioned_file, action.target
        dep_action = actions_map.get(key)
        if dep_action is None:
            LOG.debug("Could not find %s in build actions.", key)
            continue
        buildactions.append({
            'file': dep_action.source,
            'command': dep_action.original_command,
            'directory': dep_action.directory
        })

    tu_collector.zip_tu_files(zip_file, buildactions)

    # TODO: What about the dependencies of the other_files?
    tu_collector.add_sources_to_zip(
        zip_file,
        [os.path.join(action.directory, path) for path in other_files])

    with zipfile.ZipFile(zip_file, 'a') as archive:
        LOG.debug("[ZIP] Writing analyzer STDOUT to /stdout")
        archive.writestr("stdout", rh.analyzer_stdout)

        LOG.debug("[ZIP] Writing analyzer STDERR to /stderr")
        archive.writestr("stderr", rh.analyzer_stderr)

        LOG.debug("[ZIP] Writing extra information...")
        archive.writestr("build-action", action.original_command)
        archive.writestr("analyzer-command", ' '.join(rh.analyzer_cmd))
        archive.writestr("return-code", str(rh.analyzer_returncode))

        toolchain = gcc_toolchain.toolchain_in_args(
            shlex.split(action.original_command))
        if toolchain:
            archive.writestr("gcc-toolchain-path", toolchain)

    LOG.debug("ZIP file written at '%s'", zip_file)

    # Remove files that successfully analyzed earlier on.
    plist_file = result_base + ".plist"
    if os.path.exists(plist_file):
        os.remove(plist_file)
def handle_failure(source_analyzer, rh, zip_file, result_base, actions_map):
    """
    If the analysis fails a debug zip is packed together which contains
    build, analysis information and source files to be able to reproduce
    the failed analysis.
    """
    other_files = set()
    action = rh.buildaction

    # Collect additional files the analyzer referenced in its output.
    # This is best-effort: any failure here is only logged, the debug
    # zip is still produced.
    try:
        LOG.debug("Fetching other dependent files from analyzer "
                  "output...")
        other_files.update(
            source_analyzer.get_analyzer_mentioned_files(
                rh.analyzer_stdout))

        other_files.update(
            source_analyzer.get_analyzer_mentioned_files(
                rh.analyzer_stderr))
    except Exception as ex:
        LOG.debug("Couldn't generate list of other files "
                  "from analyzer output:")
        LOG.debug(str(ex))

    LOG.debug("Collecting debug data")

    # Start with the failing action itself; the build actions of any
    # mentioned files that can be found in the action map are added too.
    buildactions = [{
        'file': action.source,
        'command': action.original_command,
        'directory': action.directory}]

    for of in other_files:
        mentioned_file = os.path.abspath(
            os.path.join(action.directory, of))
        # Actions are keyed by (absolute source path, per-language
        # target) in actions_map.
        key = mentioned_file, action.target[action.lang]
        mentioned_file_action = actions_map.get(key)
        if mentioned_file_action is not None:
            buildactions.append({
                'file': mentioned_file_action.source,
                'command': mentioned_file_action.original_command,
                'directory': mentioned_file_action.directory})
        else:
            LOG.debug("Could not find %s in build actions.", key)

    # Imported locally; presumably to avoid a module-level import cycle
    # — TODO confirm.
    from tu_collector import tu_collector
    tu_collector.zip_tu_files(zip_file, buildactions)

    # TODO: What about the dependencies of the other_files?
    tu_collector.add_sources_to_zip(
        zip_file,
        [os.path.join(action.directory, path) for path in other_files])

    # Append the analyzer's output and invocation details to the zip so
    # the failure can be reproduced and inspected.
    with zipfile.ZipFile(zip_file, 'a') as archive:
        LOG.debug("[ZIP] Writing analyzer STDOUT to /stdout")
        archive.writestr("stdout", rh.analyzer_stdout)

        LOG.debug("[ZIP] Writing analyzer STDERR to /stderr")
        archive.writestr("stderr", rh.analyzer_stderr)

        LOG.debug("[ZIP] Writing extra information...")
        archive.writestr("build-action", action.original_command)
        archive.writestr("analyzer-command", ' '.join(rh.analyzer_cmd))
        archive.writestr("return-code", str(rh.analyzer_returncode))

        # Record a custom GCC toolchain path if one was set in the
        # original build command.
        toolchain = gcc_toolchain.toolchain_in_args(
            shlex.split(action.original_command))
        if toolchain:
            archive.writestr("gcc-toolchain-path", toolchain)

    LOG.debug("ZIP file written at '%s'", zip_file)

    # In case of compiler errors the error message still needs to be collected
    # from the standard output by this postprocess phase so we can present them
    # as CodeChecker reports.
    checks = source_analyzer.config_handler.checks()
    state = checks.get('clang-diagnostic-error',
                       (CheckerState.default, ''))[0]
    if state != CheckerState.disabled:
        rh.postprocess_result()

    # Remove files that successfully analyzed earlier on.
    plist_file = result_base + ".plist"
    if os.path.exists(plist_file):
        os.remove(plist_file)