def construct_config_handler(cls, args, context):
    """
    Build and return a ClangSAConfigHandler from the command line
    arguments and the package context.

    args -- argparse.Namespace of the analysis command; optional
            attributes are probed with 'in args' / AttributeError.
    context -- package context holding analyzer binaries, plugin dirs
               and environment extensions.
    """
    environ = env.extend(context.path_env_extra,
                         context.ld_lib_path_extra)
    handler = config_handler.ClangSAConfigHandler(environ)
    handler.analyzer_plugins_dir = context.checker_plugin
    handler.analyzer_binary = context.analyzer_binaries.get(
        cls.ANALYZER_NAME)
    handler.compiler_resource_dir = \
        host_check.get_resource_dir(handler.analyzer_binary, context)
    handler.report_hash = args.report_hash \
        if 'report_hash' in args else None
    handler.enable_z3 = 'enable_z3' in args and args.enable_z3 == 'on'
    handler.enable_z3_refutation = 'enable_z3_refutation' in args and \
        args.enable_z3_refutation == 'on'
    if 'ctu_phases' in args:
        handler.ctu_dir = os.path.join(args.output_path, args.ctu_dir)
    handler.log_file = args.logfile
    handler.path_env_extra = context.path_env_extra
    handler.ld_lib_path_extra = context.ld_lib_path_extra
    try:
        # BUG FIX: the file was previously opened in binary mode ('rb'),
        # but re.sub() below is called with a str pattern; mixing bytes
        # and str raises TypeError on Python 3. Open in text mode, as the
        # newer ClangSA handler in this file already does.
        with open(args.clangsa_args_cfg_file, 'r',
                  encoding='utf-8', errors='ignore') as sa_cfg:
            # Expand $(VAR) style references using the environment.
            handler.analyzer_extra_arguments = \
                re.sub(r'\$\((.*?)\)',
                       env.replace_env_var(args.clangsa_args_cfg_file),
                       sa_cfg.read().strip())
            handler.analyzer_extra_arguments = \
                shlex.split(handler.analyzer_extra_arguments)
    except IOError as ioerr:
        LOG.debug_analyzer(ioerr)
    except AttributeError as aerr:
        # No clangsa arguments file was given in the command line.
        LOG.debug_analyzer(aerr)
    checkers = ClangSA.get_analyzer_checkers(handler, environ)
    # Read clang-sa checkers from the config file.
    clang_sa_checkers = context.checker_config.get(cls.ANALYZER_NAME +
                                                   '_checkers')
    try:
        cmdline_checkers = args.ordered_checkers
    except AttributeError:
        LOG.debug_analyzer(
            'No checkers were defined in '
            'the command line for %s', cls.ANALYZER_NAME)
        cmdline_checkers = None
    handler.initialize_checkers(context.available_profiles,
                                context.package_root,
                                checkers,
                                clang_sa_checkers,
                                cmdline_checkers,
                                'enable_all' in args and args.enable_all)
    return handler
def pre_analyze(params):
    """
    Run the pre-analysis steps (CTU AST/function-map generation and
    statistics collection) for one build action.

    params -- tuple of (action, context, analyzer_config_map,
              skip_handler, ctu_data, statistics_data); designed to be
              called from a worker pool with a single argument.

    Only ClangSA actions are processed; skipped sources return early.
    """
    action, context, analyzer_config_map, skip_handler, \
        ctu_data, statistics_data = params
    analyzer_environment = env.extend(context.path_env_extra,
                                      context.ld_lib_path_extra)
    # NOTE: progress_checked_num / progress_actions are shared counters
    # (module-level, presumably multiprocessing values — confirm against
    # the pool setup) used only for progress reporting.
    progress_checked_num.value += 1
    if skip_handler and skip_handler.should_skip(action.source):
        return
    if action.analyzer_type != ClangSA.ANALYZER_NAME:
        return
    _, source_filename = os.path.split(action.source)
    LOG.info("[%d/%d] %s", progress_checked_num.value,
             progress_actions.value, source_filename)
    config = analyzer_config_map.get(ClangSA.ANALYZER_NAME)
    try:
        if ctu_data:
            LOG.debug("running CTU pre analysis")
            ctu_temp_fnmap_folder = ctu_data.get('ctu_temp_fnmap_folder')
            ctu_func_map_cmd = ctu_data.get('ctu_func_map_cmd')
            # The triple arch selects the per-architecture CTU subdir.
            triple_arch = \
                ctu_triple_arch.get_triple_arch(action, action.source,
                                                config,
                                                analyzer_environment)
            ctu_manager.generate_ast(triple_arch, action, action.source,
                                     config, analyzer_environment)
            ctu_manager.map_functions(triple_arch, action, action.source,
                                      config, analyzer_environment,
                                      ctu_func_map_cmd,
                                      ctu_temp_fnmap_folder)
    except Exception as ex:
        # Log and re-raise so the caller sees the pre-analysis failure.
        LOG.debug_analyzer(str(ex))
        traceback.print_exc(file=sys.stdout)
        raise
    try:
        if statistics_data:
            LOG.debug("running statistics pre analysis")
            collect_statistics(action, action.source, config,
                               analyzer_environment, statistics_data)
    except Exception as ex:
        # Log and re-raise so the caller sees the pre-analysis failure.
        LOG.debug_analyzer(str(ex))
        traceback.print_exc(file=sys.stdout)
        raise
def is_z3_capable(context):
    """ Detects if the current clang is Z3 compatible. """
    check_supported_analyzers([ClangSA.ANALYZER_NAME], context)
    environment = env.extend(context.path_env_extra,
                             context.ld_lib_path_extra)
    clang_binary = context.analyzer_binaries.get(ClangSA.ANALYZER_NAME)
    z3_flags = ['-Xclang', '-analyzer-constraints=z3']
    return host_check.has_analyzer_option(clang_binary, z3_flags,
                                          environment)
def __print_checker_config(args: argparse.Namespace):
    """
    Print checker config options according to the command line arguments
    to the standard output. The set of config options comes from the
    analyzers.

    Exits with status 1 if any working analyzer fails to report its
    checker configuration.
    """
    # 'custom' is rendered with the generic row formatter.
    if args.output_format == 'custom':
        args.output_format = 'rows'
    context = analyzer_context.get_context()
    working_analyzers, errored = analyzer_types.check_supported_analyzers(
        args.analyzers, context)
    analyzer_types.check_available_analyzers(working_analyzers, errored)
    analyzer_environment = env.extend(context.path_env_extra,
                                      context.ld_lib_path_extra)
    analyzer_config_map = analyzer_types.build_config_handlers(
        args, context, working_analyzers)
    if 'details' in args:
        header = ['Option', 'Description']
    else:
        header = ['Option']
    # Machine-readable formats use lowercase, underscore headers.
    if args.output_format in ['csv', 'json']:
        header = list(map(__uglify, header))
    rows = []
    analyzer_failures = []
    for analyzer in working_analyzers:
        config_handler = analyzer_config_map.get(analyzer)
        analyzer_class = analyzer_types.supported_analyzers[analyzer]
        configs = analyzer_class.get_checker_config(config_handler,
                                                    analyzer_environment)
        # An empty config list means the analyzer could not report its
        # options; remember it and keep going with the others.
        if not configs:
            analyzer_failures.append(analyzer)
            continue
        # Each row is "analyzer:option" plus the description when
        # --details was requested.
        rows.extend(
            (':'.join((analyzer, c[0])), c[1]) if 'details' in args
            else (':'.join((analyzer, c[0])), ) for c in configs)
    if rows:
        print(twodim.to_str(args.output_format, header, rows))
    analyzer_types.print_unsupported_analyzers(errored)
    if analyzer_failures:
        LOG.error(
            "Failed to get checker configuration options for '%s' "
            "analyzer(s)! Please try to upgrade your analyzer "
            "version to use this feature.", ', '.join(analyzer_failures))
        sys.exit(1)
def get_ctu_dir(self):
    """ Returns the path of the ctu directory (containing the triple). """
    cfg = self.config_handler
    environment = env.extend(cfg.path_env_extra, cfg.ld_lib_path_extra)
    arch = ctu_triple_arch.get_triple_arch(self.buildaction,
                                           self.source_file,
                                           cfg, environment)
    return os.path.join(cfg.ctu_dir, arch)
def is_z3_refutation_capable(context):
    """ Detects if the current clang is Z3 refutation compatible. """
    check_supported_analyzers([ClangSA.ANALYZER_NAME], context)
    environment = env.extend(context.path_env_extra,
                             context.ld_lib_path_extra)
    binary = context.analyzer_binaries.get(ClangSA.ANALYZER_NAME)
    return host_check.has_analyzer_config_option(
        binary, 'crosscheck-with-z3', environment)
def is_statistics_capable(context):
    """ Detects if the current clang is Statistics compatible. """
    # Resolve potentially missing binaries.
    check_supported_analyzers([ClangSA.ANALYZER_NAME], context)
    sa_config = ClangSA.construct_config_handler([], context)
    environment = env.extend(context.path_env_extra,
                             context.ld_lib_path_extra)
    checker_list = ClangSA.get_analyzer_checkers(sa_config, environment)
    # Statistics support is indicated by a statisticscollector checker
    # being present in the checker list.
    pattern = re.compile(r'.+statisticscollector.+')
    return any(pattern.match(name) for name, _ in checker_list)
def is_z3_refutation_capable(context):
    """ Detects if the current clang is Z3 refutation compatible. """
    # This function basically checks whether the corresponding analyzer
    # config option exists i.e. it is visible on the analyzer config
    # option help page. However, that alone doesn't mean Clang itself is
    # compiled with Z3, so require general Z3 capability first.
    if not is_z3_capable(context):
        return False
    check_supported_analyzers([ClangSA.ANALYZER_NAME], context)
    environment = env.extend(context.path_env_extra,
                             context.ld_lib_path_extra)
    binary = context.analyzer_binaries.get(ClangSA.ANALYZER_NAME)
    return host_check.has_analyzer_config_option(
        binary, 'crosscheck-with-z3', environment)
def construct_config_handler(cls, args, context):
    """
    Build and return a ClangTidyConfigHandler from the command line
    arguments and the package context.

    args -- argparse.Namespace; optional attributes (tidy args file,
            tidy config file, ordered checkers) are probed via
            AttributeError.
    context -- package context holding analyzer binaries and
               environment extensions.
    """
    handler = config_handler.ClangTidyConfigHandler()
    handler.analyzer_binary = context.analyzer_binaries.get(
        cls.ANALYZER_NAME)
    # FIXME We cannot get the resource dir from the clang-tidy binary,
    # therefore we get a sibling clang binary which of clang-tidy.
    # TODO Support "clang-tidy -print-resource-dir" .
    check_env = env.extend(context.path_env_extra,
                           context.ld_lib_path_extra)
    # Overwrite PATH to contain only the parent of the clang binary.
    if os.path.isabs(handler.analyzer_binary):
        check_env['PATH'] = os.path.dirname(handler.analyzer_binary)
    clang_bin = ClangSA.resolve_missing_binary('clang', check_env)
    handler.compiler_resource_dir = \
        host_check.get_resource_dir(clang_bin, context)
    try:
        # BUG FIX: this file was previously opened in binary mode ('rb'),
        # but re.sub() is called with a str pattern, which raises
        # TypeError on Python 3 when given bytes. Open in text mode, as
        # the newer ClangTidy handler in this file already does.
        with open(args.tidy_args_cfg_file, 'r',
                  encoding='utf-8', errors='ignore') as tidy_cfg:
            handler.analyzer_extra_arguments = \
                re.sub(r'\$\((.*?)\)', env.replace_env_var,
                       tidy_cfg.read().strip())
            handler.analyzer_extra_arguments = \
                shlex.split(handler.analyzer_extra_arguments)
    except IOError as ioerr:
        LOG.debug_analyzer(ioerr)
    except AttributeError as aerr:
        # No clang tidy arguments file was given in the command line.
        LOG.debug_analyzer(aerr)
    try:
        # The config file dumped by clang-tidy contains "..." at the end.
        # This has to be omitted, otherwise the -config flag of
        # clang-tidy cannot consume it.
        # BUG FIX: was opened in 'rb' while the lines below are compared
        # to the str literal '...\n' and joined with a str separator;
        # text mode makes these operate on str consistently.
        with open(args.tidy_config, 'r',
                  encoding='utf-8', errors='ignore') as tidy_config:
            lines = tidy_config.readlines()
            lines = filter(lambda x: x != '...\n', lines)
            handler.checker_config = ''.join(lines)
    except IOError as ioerr:
        LOG.debug_analyzer(ioerr)
    except AttributeError as aerr:
        # No clang tidy config file was given in the command line.
        LOG.debug_analyzer(aerr)
    check_env = env.extend(context.path_env_extra,
                           context.ld_lib_path_extra)
    checkers = ClangTidy.get_analyzer_checkers(handler, check_env)
    # Read clang-tidy checkers from the config file.
    clang_tidy_checkers = context.checker_config.get(cls.ANALYZER_NAME +
                                                     '_checkers')
    try:
        cmdline_checkers = args.ordered_checkers
    except AttributeError:
        LOG.debug_analyzer(
            'No checkers were defined in '
            'the command line for %s', cls.ANALYZER_NAME)
        cmdline_checkers = None
    handler.initialize_checkers(context.available_profiles,
                                context.package_root,
                                checkers,
                                clang_tidy_checkers,
                                cmdline_checkers,
                                'enable_all' in args and args.enable_all)
    return handler
def construct_config_handler(cls, args, context):
    """
    Build and return a ClangTidyConfigHandler from the command line
    arguments and the package context, merging --analyzer-config /
    --checker-config values into a clang-tidy -config JSON blob.
    """
    handler = config_handler.ClangTidyConfigHandler()
    handler.analyzer_binary = context.analyzer_binaries.get(
        cls.ANALYZER_NAME)
    handler.report_hash = args.report_hash \
        if 'report_hash' in args else None
    # FIXME We cannot get the resource dir from the clang-tidy binary,
    # therefore we get a sibling clang binary which of clang-tidy.
    # TODO Support "clang-tidy -print-resource-dir" .
    check_env = env.extend(context.path_env_extra,
                           context.ld_lib_path_extra)
    # Overwrite PATH to contain only the parent of the clang binary.
    if os.path.isabs(handler.analyzer_binary):
        check_env['PATH'] = os.path.dirname(handler.analyzer_binary)
    clang_bin = ClangSA.resolve_missing_binary('clang', check_env)
    handler.compiler_resource_dir = \
        host_check.get_resource_dir(clang_bin, context)
    try:
        with open(args.tidy_args_cfg_file, 'r',
                  encoding='utf-8', errors='ignore') as tidy_cfg:
            # Expand $(VAR) style environment references.
            handler.analyzer_extra_arguments = \
                re.sub(r'\$\((.*?)\)', env.replace_env_var,
                       tidy_cfg.read().strip())
            handler.analyzer_extra_arguments = \
                shlex.split(handler.analyzer_extra_arguments)
    except IOError as ioerr:
        LOG.debug_analyzer(ioerr)
    except AttributeError as aerr:
        # No clang tidy arguments file was given in the command line.
        LOG.debug_analyzer(aerr)
    analyzer_config = {}
    # TODO: This extra "isinstance" check is needed for
    # CodeChecker analyzers --analyzer-config. This command also
    # runs this function in order to construct a config handler.
    if 'analyzer_config' in args and \
            isinstance(args.analyzer_config, list):
        r = re.compile(r'(?P<analyzer>.+?):(?P<key>.+?)=(?P<value>.+)')
        for cfg in args.analyzer_config:
            m = re.search(r, cfg)
            # Only keep options addressed to this analyzer.
            if m.group('analyzer') == cls.ANALYZER_NAME:
                analyzer_config[m.group('key')] = m.group('value')
    # TODO: This extra "isinstance" check is needed for
    # CodeChecker checkers --checker-config. This command also
    # runs this function in order to construct a config handler.
    if 'checker_config' in args and \
            isinstance(args.checker_config, list):
        r = re.compile(r'(?P<analyzer>.+?):(?P<key>.+?)=(?P<value>.+)')
        check_options = []
        for cfg in args.checker_config:
            m = re.search(r, cfg)
            if m.group('analyzer') == cls.ANALYZER_NAME:
                check_options.append({
                    'key': m.group('key'),
                    'value': m.group('value')
                })
        analyzer_config['CheckOptions'] = check_options
    else:
        # No per-checker options on the command line: fall back to the
        # tidy config file, if one was given.
        try:
            with open(args.tidy_config, 'r',
                      encoding='utf-8', errors='ignore') as tidy_config:
                handler.checker_config = tidy_config.read()
        except IOError as ioerr:
            LOG.debug_analyzer(ioerr)
        except AttributeError as aerr:
            # No clang tidy config file was given in the command line.
            LOG.debug_analyzer(aerr)
    # 'take-config-from-directory' is a special option which let the user
    # to use the '.clang-tidy' config files. It will disable analyzer and
    # checker configuration options.
    if not handler.checker_config and \
            analyzer_config.get('take-config-from-directory') != 'true':
        handler.checker_config = json.dumps(analyzer_config)
    check_env = env.extend(context.path_env_extra,
                           context.ld_lib_path_extra)
    checkers = ClangTidy.get_analyzer_checkers(handler, check_env)
    # Read clang-tidy checkers from the config file.
    clang_tidy_checkers = context.checker_config.get(cls.ANALYZER_NAME +
                                                     '_checkers')
    try:
        cmdline_checkers = args.ordered_checkers
    except AttributeError:
        LOG.debug_analyzer(
            'No checkers were defined in '
            'the command line for %s', cls.ANALYZER_NAME)
        cmdline_checkers = None
    handler.initialize_checkers(context.available_profiles,
                                context.package_root,
                                checkers,
                                clang_tidy_checkers,
                                cmdline_checkers,
                                'enable_all' in args and args.enable_all)
    return handler
def construct_config_handler(cls, args, context):
    """
    Build and return a ClangTidyConfigHandler from the command line
    arguments and the package context. Merges --analyzer-config values
    with a -config argument given via --tidyargs (the latter wins).
    """
    handler = config_handler.ClangTidyConfigHandler()
    handler.analyzer_binary = context.analyzer_binaries.get(
        cls.ANALYZER_NAME)
    handler.report_hash = args.report_hash \
        if 'report_hash' in args else None
    # FIXME We cannot get the resource dir from the clang-tidy binary,
    # therefore we get a sibling clang binary which of clang-tidy.
    # TODO Support "clang-tidy -print-resource-dir" .
    check_env = env.extend(context.path_env_extra,
                           context.ld_lib_path_extra)
    # Overwrite PATH to contain only the parent of the clang binary.
    if os.path.isabs(handler.analyzer_binary):
        check_env['PATH'] = os.path.dirname(handler.analyzer_binary)
    try:
        with open(args.tidy_args_cfg_file, 'r',
                  encoding='utf-8', errors='ignore') as tidy_cfg:
            # Expand $(VAR) style environment references.
            handler.analyzer_extra_arguments = \
                re.sub(r'\$\((.*?)\)', env.replace_env_var,
                       tidy_cfg.read().strip())
            handler.analyzer_extra_arguments = \
                shlex.split(handler.analyzer_extra_arguments)
    except IOError as ioerr:
        LOG.debug_analyzer(ioerr)
    except AttributeError as aerr:
        # No clang tidy arguments file was given in the command line.
        LOG.debug_analyzer(aerr)
    analyzer_config = {}
    # TODO: This extra "isinstance" check is needed for
    # CodeChecker analyzers --analyzer-config. This command also
    # runs this function in order to construct a config handler.
    if 'analyzer_config' in args and \
            isinstance(args.analyzer_config, list):
        r = re.compile(r'(?P<analyzer>.+?):(?P<key>.+?)=(?P<value>.+)')
        for cfg in args.analyzer_config:
            m = re.search(r, cfg)
            # Only keep options addressed to this analyzer.
            if m.group('analyzer') == cls.ANALYZER_NAME:
                analyzer_config[m.group('key')] = m.group('value')
    # If both --analyzer-config and -config (in --tidyargs) is given then
    # these need to be merged. Since "HeaderFilterRegex" has a default
    # value in --analyzer-config, we take --tidyargs stronger so user can
    # overwrite its value.
    for i, extra_arg in enumerate(handler.analyzer_extra_arguments):
        if not extra_arg.startswith('-config'):
            continue
        # -config flag can be together or separate from its argument:
        # "-config blabla" vs. "-config=blabla"
        if extra_arg == '-config':
            arg = handler.analyzer_extra_arguments[i + 1]
            arg_num = 2
        else:
            arg = extra_arg[len('-config='):]
            arg_num = 1
        analyzer_config.update(json.loads(arg))
        # Remove the consumed -config flag (and its separate argument,
        # if any) from the extra arguments; the break keeps the
        # mutation-while-iterating safe.
        del handler.analyzer_extra_arguments[i:i + arg_num]
        break
    # TODO: This extra "isinstance" check is needed for
    # CodeChecker checkers --checker-config. This command also
    # runs this function in order to construct a config handler.
    if 'checker_config' in args and isinstance(args.checker_config, list):
        r = re.compile(r'(?P<analyzer>.+?):(?P<key>.+?)=(?P<value>.+)')
        check_options = []
        for cfg in args.checker_config:
            m = re.search(r, cfg)
            if m.group('analyzer') == cls.ANALYZER_NAME:
                check_options.append({'key': m.group('key'),
                                      'value': m.group('value')})
        analyzer_config['CheckOptions'] = check_options
    else:
        # No per-checker options on the command line: fall back to the
        # tidy config file, if one was given.
        try:
            with open(args.tidy_config, 'r',
                      encoding='utf-8', errors='ignore') as tidy_config:
                handler.checker_config = tidy_config.read()
        except IOError as ioerr:
            LOG.debug_analyzer(ioerr)
        except AttributeError as aerr:
            # No clang tidy config file was given in the command line.
            LOG.debug_analyzer(aerr)
    # 'take-config-from-directory' is a special option which let the user
    # to use the '.clang-tidy' config files. It will disable analyzer and
    # checker configuration options.
    if not handler.checker_config and \
            analyzer_config.get('take-config-from-directory') != 'true':
        handler.checker_config = json.dumps(analyzer_config)
    check_env = env.extend(context.path_env_extra,
                           context.ld_lib_path_extra)
    checkers = ClangTidy.get_analyzer_checkers(handler, check_env)
    try:
        cmdline_checkers = args.ordered_checkers
    except AttributeError:
        LOG.debug_analyzer('No checkers were defined in '
                           'the command line for %s', cls.ANALYZER_NAME)
        cmdline_checkers = []
    handler.initialize_checkers(
        context,
        checkers,
        cmdline_checkers,
        'enable_all' in args and args.enable_all)
    return handler
def main(args):
    """
    Perform analysis on the given logfiles and store the results in a
    machine-readable format.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)
    check_config_file(args)
    if not os.path.exists(args.logfile):
        LOG.error("The specified logfile '%s' does not exist!",
                  args.logfile)
        sys.exit(1)
    args.output_path = os.path.abspath(args.output_path)
    if os.path.exists(args.output_path) and \
            not os.path.isdir(args.output_path):
        LOG.error("The given output path is not a directory: " +
                  args.output_path)
        sys.exit(1)
    if 'enable_all' in args:
        LOG.info("'--enable-all' was supplied for this analysis.")
    # We clear the output directory in the following cases.
    ctu_dir = os.path.join(args.output_path, 'ctu-dir')
    if 'ctu_phases' in args and args.ctu_phases[0] and \
            os.path.isdir(ctu_dir):
        # Clear the CTU-dir if the user turned on the collection phase.
        LOG.debug("Previous CTU contents have been deleted.")
        shutil.rmtree(ctu_dir)
    if 'clean' in args and os.path.isdir(args.output_path):
        LOG.info(
            "Previous analysis results in '%s' have been removed, "
            "overwriting with current result", args.output_path)
        shutil.rmtree(args.output_path)
    if not os.path.exists(args.output_path):
        os.makedirs(args.output_path)
    LOG.debug("args: " + str(args))
    LOG.debug("Output will be stored to: '" + args.output_path + "'")
    # Options must look like "<analyzer>:<key>=<value>".
    config_option_re = re.compile(r'^({}):.+=.+$'.format(
        '|'.join(analyzer_types.supported_analyzers)))
    # Check the format of analyzer options.
    if 'analyzer_config' in args:
        for config in args.analyzer_config:
            if not re.match(config_option_re, config):
                LOG.error("Analyzer option in wrong format: %s", config)
                sys.exit(1)
    # Check the format of checker options.
    if 'checker_config' in args:
        for config in args.checker_config:
            if not re.match(config_option_re, config):
                LOG.error("Checker option in wrong format: %s", config)
                sys.exit(1)
    compile_commands = load_json_or_empty(args.logfile, default={})
    # Process the skip list if present.
    skip_handler = __get_skip_handler(args, compile_commands)
    # Enable alpha uniqueing by default if ctu analysis is used.
    if 'none' in args.compile_uniqueing and 'ctu_phases' in args:
        args.compile_uniqueing = "alpha"
    compiler_info_file = None
    if 'compiler_info_file' in args:
        LOG.debug("Compiler info is read from: %s",
                  args.compiler_info_file)
        if not os.path.exists(args.compiler_info_file):
            LOG.error("Compiler info file %s does not exist",
                      args.compiler_info_file)
            sys.exit(1)
        compiler_info_file = args.compiler_info_file
    ctu_or_stats_enabled = False
    # Skip list is applied only in pre-analysis
    # if --ctu-collect was called explicitly.
    pre_analysis_skip_handler = None
    if 'ctu_phases' in args:
        ctu_collect = args.ctu_phases[0]
        ctu_analyze = args.ctu_phases[1]
        if ctu_collect and not ctu_analyze:
            pre_analysis_skip_handler = skip_handler
        if ctu_collect or ctu_analyze:
            ctu_or_stats_enabled = True
    # Skip list is applied only in pre-analysis
    # if --stats-collect was called explicitly.
    if 'stats_output' in args and args.stats_output:
        pre_analysis_skip_handler = skip_handler
        ctu_or_stats_enabled = True
    if 'stats_enabled' in args and args.stats_enabled:
        ctu_or_stats_enabled = True
    context = analyzer_context.get_context()
    analyzer_env = env.extend(context.path_env_extra,
                              context.ld_lib_path_extra)
    # Number of all the compilation commands in the parsed log files,
    # logged by the logger.
    all_cmp_cmd_count = len(compile_commands)
    actions, skipped_cmp_cmd_count = log_parser.parse_unique_log(
        compile_commands,
        args.output_path,
        args.compile_uniqueing,
        compiler_info_file,
        args.keep_gcc_include_fixed,
        args.keep_gcc_intrin,
        skip_handler,
        pre_analysis_skip_handler,
        ctu_or_stats_enabled,
        analyzer_env)
    if not actions:
        LOG.info("No analysis is required.\nThere were no compilation "
                 "commands in the provided compilation database or "
                 "all of them were skipped.")
        sys.exit(0)
    uniqued_compilation_db_file = os.path.join(
        args.output_path, "unique_compile_commands.json")
    with open(uniqued_compilation_db_file, 'w',
              encoding="utf-8", errors="ignore") as f:
        json.dump(actions, f, cls=log_parser.CompileCommandEncoder)
    # Metadata about this run; written to metadata.json at the end.
    metadata = {
        'version': 2,
        'tools': [{
            'name': 'codechecker',
            'action_num': len(actions),
            'command': sys.argv,
            'version': "{0} ({1})".format(context.package_git_tag,
                                          context.package_git_hash),
            'working_directory': os.getcwd(),
            'output_path': args.output_path,
            'result_source_files': {},
            'analyzers': {}
        }]
    }
    metadata_tool = metadata['tools'][0]
    if 'name' in args:
        metadata_tool['run_name'] = args.name
    # Update metadata dictionary with old values.
    metadata_file = os.path.join(args.output_path, 'metadata.json')
    metadata_prev = None
    if os.path.exists(metadata_file):
        metadata_prev = load_json_or_empty(metadata_file)
        metadata_tool['result_source_files'] = \
            __get_result_source_files(metadata_prev)
    CompileCmdParseCount = \
        collections.namedtuple('CompileCmdParseCount',
                               'total, analyze, skipped, '
                               'removed_by_uniqueing')
    cmp_cmd_to_be_uniqued = all_cmp_cmd_count - skipped_cmp_cmd_count
    # Number of compile commands removed during uniqueing.
    removed_during_uniqueing = cmp_cmd_to_be_uniqued - len(actions)
    all_to_be_analyzed = cmp_cmd_to_be_uniqued - removed_during_uniqueing
    compile_cmd_count = CompileCmdParseCount(
        total=all_cmp_cmd_count,
        analyze=all_to_be_analyzed,
        skipped=skipped_cmp_cmd_count,
        removed_by_uniqueing=removed_during_uniqueing)
    LOG.debug_analyzer(
        "Total number of compile commands without "
        "skipping or uniqueing: %d", compile_cmd_count.total)
    LOG.debug_analyzer("Compile commands removed by uniqueing: %d",
                       compile_cmd_count.removed_by_uniqueing)
    LOG.debug_analyzer("Compile commands skipped during log processing: "
                       "%d", compile_cmd_count.skipped)
    LOG.debug_analyzer("Compile commands forwarded for analysis: %d",
                       compile_cmd_count.analyze)
    analyzer.perform_analysis(args, skip_handler, context, actions,
                              metadata_tool, compile_cmd_count)
    __update_skip_file(args)
    __cleanup_metadata(metadata_prev, metadata)
    LOG.debug("Analysis metadata write to '%s'", metadata_file)
    with open(metadata_file, 'w',
              encoding="utf-8", errors="ignore") as metafile:
        json.dump(metadata, metafile)
    # WARN: store command will search for this file!!!!
    compile_cmd_json = os.path.join(args.output_path, 'compile_cmd.json')
    try:
        source = os.path.abspath(args.logfile)
        target = os.path.abspath(compile_cmd_json)
        if source != target:
            shutil.copyfile(source, target)
    except shutil.Error:
        LOG.debug("Compilation database JSON file is the same.")
    except Exception:
        LOG.debug("Copying compilation database JSON file failed.")
    try:
        # Best-effort usage statistics collection; failures are ignored.
        # pylint: disable=no-name-in-module
        from codechecker_analyzer import analyzer_statistics
        analyzer_statistics.collect(metadata, "analyze")
    except Exception:
        pass
def main(args):
    """
    List the analyzers' basic information supported by CodeChecker.
    """
    # If the given output format is not 'table', redirect logger's output
    # to the stderr.
    stream = None
    if 'output_format' in args and args.output_format != 'table':
        stream = 'stderr'
    logger.setup_logger(args.verbose if 'verbose' in args else None,
                        stream)
    context = analyzer_context.get_context()
    working_analyzers, errored = \
        analyzer_types.check_supported_analyzers(
            analyzer_types.supported_analyzers, context)
    if args.dump_config:
        binary = context.analyzer_binaries.get(args.dump_config)
        if args.dump_config == 'clang-tidy':
            subprocess.call([binary, '-dump-config', '-checks=*'],
                            encoding="utf-8", errors="ignore")
        elif args.dump_config == 'clangsa':
            ret = subprocess.call([binary, '-cc1',
                                   '-analyzer-checker-option-help',
                                   '-analyzer-checker-option-help-alpha'],
                                  stderr=subprocess.PIPE,
                                  encoding="utf-8", errors="ignore")
            if ret:
                # This flag is supported from Clang 9.
                LOG.warning("'--dump-config clangsa' is not supported "
                            "yet. Please make sure that you are using "
                            "Clang 9 or newer.")
        return
    analyzer_environment = env.extend(context.path_env_extra,
                                      context.ld_lib_path_extra)
    analyzer_config_map = analyzer_types.build_config_handlers(
        args, context, working_analyzers)

    def uglify(text):
        """
        csv and json format output contain this non human readable header
        string: no CamelCase and no space.
        """
        return text.lower().replace(' ', '_')

    # --analyzer-config mode: list config options of one analyzer.
    if 'analyzer_config' in args:
        if 'details' in args:
            header = ['Option', 'Description']
        else:
            header = ['Option']
        if args.output_format in ['csv', 'json']:
            header = list(map(uglify, header))
        analyzer = args.analyzer_config
        config_handler = analyzer_config_map.get(analyzer)
        analyzer_class = analyzer_types.supported_analyzers[analyzer]
        configs = analyzer_class.get_analyzer_config(config_handler,
                                                     analyzer_environment)
        rows = [(':'.join((analyzer, c[0])), c[1]) if 'details' in args
                else (':'.join((analyzer, c[0])),) for c in configs]
        print(output_formatters.twodim_to_str(args.output_format,
                                              header, rows))
        return
    if 'details' in args:
        header = ['Name', 'Path', 'Version']
    else:
        header = ['Name']
    if args.output_format in ['csv', 'json']:
        header = list(map(uglify, header))
    rows = []
    for analyzer in working_analyzers:
        if 'details' not in args:
            rows.append([analyzer])
        else:
            binary = context.analyzer_binaries.get(analyzer)
            try:
                version = subprocess.check_output(
                    [binary, '--version'],
                    encoding="utf-8", errors="ignore")
            except (subprocess.CalledProcessError, OSError):
                version = 'ERROR'
            rows.append([analyzer, binary, version])
    # With --all, also list analyzers that failed the support check,
    # with the failure reason in details mode.
    if 'all' in args:
        for analyzer, err_reason in errored:
            if 'details' not in args:
                rows.append([analyzer])
            else:
                rows.append([analyzer,
                             context.analyzer_binaries.get(analyzer),
                             err_reason])
    if rows:
        print(output_formatters.twodim_to_str(args.output_format,
                                              header, rows))
def main(args):
    """
    List the checkers available in the specified (or all supported)
    analyzers alongside with their description or enabled status in
    various formats.
    """
    # If the given output format is not 'table', redirect logger's output
    # to the stderr.
    logger.setup_logger(args.verbose if 'verbose' in args else None,
                        None if args.output_format == 'table'
                        else 'stderr')
    context = analyzer_context.get_context()
    working_analyzers, errored = analyzer_types.check_supported_analyzers(
        args.analyzers, context)
    analyzer_environment = env.extend(context.path_env_extra,
                                      context.ld_lib_path_extra)
    analyzer_config_map = analyzer_types.build_config_handlers(
        args, context, working_analyzers)

    def uglify(text):
        """
        csv and json format output contain this non human readable header
        string: no CamelCase and no space.
        """
        return text.lower().replace(' ', '_')

    def match_guideline(checker_name, selected_guidelines):
        """
        Returns True if checker_name gives reports related to any of the
        selected guideline rule.
        checker_name -- A full checker name.
        selected_guidelines -- A list of guideline names or guideline
                               rule IDs.
        """
        guideline = context.guideline_map.get(checker_name, {})
        # Collect both the guideline names and all of their rule IDs.
        guideline_set = set(guideline)
        for value in guideline.values():
            guideline_set |= set(value)
        return any(g in guideline_set for g in selected_guidelines)

    def format_guideline(guideline):
        """
        Convert guideline rules to human-readable format.
        guideline -- Dictionary in the following format:
                     {"guideline_1": ["rule_1", "rule_2"]}
        """
        return ' '.join('Related {} rules: {}'.format(g, ', '.join(r))
                        for g, r in guideline.items())

    # List available checker profiles.
    if 'profile' in args and args.profile == 'list':
        if 'details' in args:
            header = ['Profile name', 'Description']
            rows = context.available_profiles.items()
        else:
            header = ['Profile name']
            rows = [(key, "") for key in
                    context.available_profiles.keys()]
        if args.output_format in ['csv', 'json']:
            header = list(map(uglify, header))
        print(output_formatters.twodim_to_str(args.output_format,
                                              header, rows))
        return
    # List checker config options.
    if 'checker_config' in args:
        if 'details' in args:
            header = ['Option', 'Description']
        else:
            header = ['Option']
        if args.output_format in ['csv', 'json']:
            header = list(map(uglify, header))
        rows = []
        for analyzer in working_analyzers:
            config_handler = analyzer_config_map.get(analyzer)
            analyzer_class = analyzer_types.supported_analyzers[analyzer]
            configs = analyzer_class.get_checker_config(
                config_handler, analyzer_environment)
            rows.extend(
                (':'.join((analyzer, c[0])), c[1]) if 'details' in args
                else (':'.join((analyzer, c[0])), ) for c in configs)
        print(output_formatters.twodim_to_str(args.output_format,
                                              header, rows))
        return
    # --guideline without arguments: list every known guideline with its
    # rules.
    if args.guideline is not None and len(args.guideline) == 0:
        result = defaultdict(set)
        for _, guidelines in context.guideline_map.items():
            for guideline, rules in guidelines.items():
                result[guideline] |= set(rules)
        header = ['Guideline', 'Rules']
        if args.output_format in ['csv', 'json']:
            header = list(map(uglify, header))
        if args.output_format == 'json':
            rows = [(g, sorted(list(r))) for g, r in result.items()]
        else:
            rows = [(g, ', '.join(sorted(r))) for g, r in result.items()]
        if args.output_format == 'rows':
            for row in rows:
                print('Guideline: {}'.format(row[0]))
                print('Rules: {}'.format(row[1]))
        else:
            print(output_formatters.twodim_to_str(args.output_format,
                                                  header, rows))
        return
    # List available checkers.
    if 'details' in args:
        header = ['Enabled', 'Name', 'Analyzer', 'Severity', 'Guideline',
                  'Description']
    else:
        header = ['Name']
    if args.output_format in ['csv', 'json']:
        header = list(map(uglify, header))
    rows = []
    for analyzer in working_analyzers:
        config_handler = analyzer_config_map.get(analyzer)
        analyzer_class = analyzer_types.supported_analyzers[analyzer]
        checkers = analyzer_class.get_analyzer_checkers(
            config_handler, analyzer_environment)
        default_checker_cfg = context.checker_config.get(
            analyzer + '_checkers')
        profile_checkers = None
        if 'profile' in args:
            if args.profile not in context.available_profiles:
                LOG.error("Checker profile '%s' does not exist!",
                          args.profile)
                LOG.error("To list available profiles, use '--profile list'.")
                sys.exit(1)
            profile_checkers = [(args.profile, True)]
        config_handler.initialize_checkers(context.available_profiles,
                                           context.package_root,
                                           checkers,
                                           default_checker_cfg,
                                           profile_checkers)
        for checker_name, value in config_handler.checks().items():
            state, description = value
            # Filter rows by profile / only-enabled / only-disabled
            # options before formatting the state column.
            if state != CheckerState.enabled and 'profile' in args:
                continue
            if state == CheckerState.enabled and 'only_disabled' in args:
                continue
            elif state != CheckerState.enabled and 'only_enabled' in args:
                continue
            if args.output_format == 'json':
                state = state == CheckerState.enabled
            else:
                state = '+' if state == CheckerState.enabled else '-'
            if args.guideline is not None:
                if not match_guideline(checker_name, args.guideline):
                    continue
            if 'details' in args:
                severity = context.severity_map.get(checker_name)
                guideline = context.guideline_map.get(checker_name, {})
                if args.output_format != 'json':
                    guideline = format_guideline(guideline)
                rows.append([state, checker_name, analyzer, severity,
                             guideline, description])
            else:
                rows.append([checker_name])
        # Optionally list compiler warnings as clang-diagnostic-*
        # pseudo-checkers.
        if 'show_warnings' in args:
            severity = context.severity_map.get('clang-diagnostic-')
            for warning in get_warnings(analyzer_environment):
                # Transform "-Wfoo" into "clang-diagnostic-foo".
                warning = 'clang-diagnostic-' + warning[2:]
                if 'guideline' in args:
                    if not match_guideline(warning, args.guideline):
                        continue
                guideline = context.guideline_map.get(warning, {})
                if args.output_format != 'json':
                    guideline = format_guideline(guideline)
                if 'details' in args:
                    rows.append(['', warning, '-', severity, guideline,
                                 '-'])
                else:
                    rows.append([warning])
    if rows:
        print(output_formatters.twodim_to_str(args.output_format,
                                              header, rows))
    for analyzer_binary, reason in errored:
        LOG.error(
            "Failed to get checkers for '%s'!"
            "The error reason was: '%s'", analyzer_binary, reason)
        LOG.error("Please check your installation and the "
                  "'config/package_layout.json' file!")
def construct_config_handler(cls, args, context):
    """
    Build a ClangSAConfigHandler for the Clang Static Analyzer from the
    parsed command line arguments and the package context.

    The handler collects the analyzer binary location, resource directory,
    version info, CTU/Z3 switches, extra analyzer arguments read from
    ``--saargs`` and the selected checker/analyzer configuration options.

    :param args:    parsed argparse.Namespace of the analyze command.
    :param context: package context with binaries, env extras and configs.
    :return: the fully initialized configuration handler.
    """
    environ = env.extend(context.path_env_extra,
                         context.ld_lib_path_extra)

    handler = config_handler.ClangSAConfigHandler(environ)
    handler.analyzer_plugins_dir = context.checker_plugin
    handler.analyzer_binary = context.analyzer_binaries.get(
        cls.ANALYZER_NAME)
    handler.compiler_resource_dir = \
        host_check.get_resource_dir(handler.analyzer_binary, context)
    handler.version_info = version.get(handler.analyzer_binary, environ)

    handler.report_hash = args.report_hash \
        if 'report_hash' in args else None

    # Z3 flags are 'on'/'off' strings; only the explicit 'on' enables them.
    handler.enable_z3 = 'enable_z3' in args and args.enable_z3 == 'on'
    handler.enable_z3_refutation = 'enable_z3_refutation' in args and \
        args.enable_z3_refutation == 'on'

    if 'ctu_phases' in args:
        handler.ctu_dir = os.path.join(args.output_path, args.ctu_dir)

    handler.log_file = args.logfile
    handler.path_env_extra = context.path_env_extra
    handler.ld_lib_path_extra = context.ld_lib_path_extra

    try:
        with open(args.clangsa_args_cfg_file, 'r', encoding='utf8',
                  errors='ignore') as sa_cfg:
            # Expand $(ENV_VAR) references inside the extra arguments file,
            # then split the result shell-style into an argument list.
            handler.analyzer_extra_arguments = \
                re.sub(r'\$\((.*?)\)',
                       env.replace_env_var(args.clangsa_args_cfg_file),
                       sa_cfg.read().strip())
            handler.analyzer_extra_arguments = \
                shlex.split(handler.analyzer_extra_arguments)
    except IOError as ioerr:
        LOG.debug_analyzer(ioerr)
    except AttributeError as aerr:
        # No clangsa arguments file was given in the command line.
        LOG.debug_analyzer(aerr)

    checkers = ClangSA.get_analyzer_checkers(handler, environ)

    # Read clang-sa checkers from the config file.
    clang_sa_checkers = context.checker_config.get(cls.ANALYZER_NAME +
                                                   '_checkers')

    try:
        cmdline_checkers = args.ordered_checkers
    except AttributeError:
        LOG.debug_analyzer('No checkers were defined in '
                           'the command line for %s', cls.ANALYZER_NAME)
        cmdline_checkers = None

    handler.initialize_checkers(
        context.available_profiles,
        context.package_root,
        checkers,
        clang_sa_checkers,
        cmdline_checkers,
        'enable_all' in args and args.enable_all)

    handler.checker_config = []

    # Options arrive as "<analyzer>:<key>=<value>"; only the entries that
    # address this analyzer are kept, stripped down to "<key>=<value>".
    option_rgx = re.compile(r'(?P<analyzer>.+?):(?P<key>.+?)=(?P<value>.+)')

    def collect_config_options(options):
        """Append this analyzer's "<key>=<value>" entries to the handler."""
        for cfg in options:
            m = option_rgx.search(cfg)
            # FIX: a malformed option used to crash with AttributeError on
            # 'm.group' when the regex did not match; skip such entries.
            if m and m.group('analyzer') == cls.ANALYZER_NAME:
                handler.checker_config.append(
                    m.group('key') + '=' + m.group('value'))

    # TODO: This extra "isinstance" check is needed for
    # CodeChecker checkers --checker-config. This command also runs
    # this function in order to construct a config handler.
    if 'checker_config' in args and isinstance(args.checker_config, list):
        collect_config_options(args.checker_config)

    # TODO: This extra "isinstance" check is needed for
    # CodeChecker analyzers --analyzer-config. This command also runs
    # this function in order to construct a config handler.
    # NOTE(review): --analyzer-config values are appended to
    # handler.checker_config as well — confirm this is intentional and not
    # meant for a separate analyzer_config attribute.
    if 'analyzer_config' in args and isinstance(args.analyzer_config, list):
        collect_config_options(args.analyzer_config)

    return handler
def main(args):
    """
    List the checkers available in the specified (or all supported)
    analyzers alongside with their description or enabled status in various
    formats.

    Depending on the given flags this either lists checker profiles
    (--profile list), checker configuration options (--checker-config), or
    the checkers themselves; output goes to stdout via the selected
    formatter, errors to the logger.
    """

    # If the given output format is not 'table', redirect logger's output to
    # the stderr.
    logger.setup_logger(args.verbose if 'verbose' in args else None,
                        None if args.output_format == 'table' else 'stderr')

    context = analyzer_context.get_context()
    # NOTE(review): 'args.analyzers' is read unconditionally here; if the
    # parser registers it with default=argparse.SUPPRESS this raises
    # AttributeError when the flag is omitted — confirm against the parser
    # setup used with this version of main().
    working_analyzers, errored = analyzer_types.check_supported_analyzers(
        args.analyzers, context)

    analyzer_environment = env.extend(context.path_env_extra,
                                      context.ld_lib_path_extra)

    analyzer_config_map = analyzer_types.build_config_handlers(
        args, context, working_analyzers)

    def uglify(text):
        """
        csv and json format output contain this non human readable header
        string: no CamelCase and no space.
        """
        return text.lower().replace(' ', '_')

    # List available checker profiles.
    if 'profile' in args and args.profile == 'list':
        if 'details' in args:
            header = ['Profile name', 'Description']
            rows = context.available_profiles.items()
        else:
            header = ['Profile name']
            # Descriptions are dropped; keep the two-column shape with
            # empty strings.
            rows = [(key, "") for key in context.available_profiles.keys()]

        if args.output_format in ['csv', 'json']:
            header = list(map(uglify, header))

        print(output_formatters.twodim_to_str(args.output_format, header,
                                              rows))
        return

    # List checker config options.
    if 'checker_config' in args:
        if 'details' in args:
            header = ['Option', 'Description']
        else:
            header = ['Option']

        if args.output_format in ['csv', 'json']:
            header = list(map(uglify, header))

        rows = []
        for analyzer in working_analyzers:
            config_handler = analyzer_config_map.get(analyzer)
            analyzer_class = analyzer_types.supported_analyzers[analyzer]

            configs = analyzer_class.get_checker_config(
                config_handler, analyzer_environment)
            # With --details each row is (option, description), otherwise a
            # one-element tuple holding only the option name.
            rows.extend(
                (':'.join((analyzer, c[0])), c[1]) if 'details' in args
                else (':'.join((analyzer, c[0])), ) for c in configs)

        print(output_formatters.twodim_to_str(args.output_format, header,
                                              rows))
        return

    # List available checkers.
    if 'details' in args:
        header = ['Enabled', 'Name', 'Analyzer', 'Severity', 'Description']
    else:
        header = ['Name']

    if args.output_format in ['csv', 'json']:
        header = list(map(uglify, header))

    rows = []
    for analyzer in working_analyzers:
        config_handler = analyzer_config_map.get(analyzer)
        analyzer_class = analyzer_types.supported_analyzers[analyzer]

        checkers = analyzer_class.get_analyzer_checkers(
            config_handler, analyzer_environment)
        default_checker_cfg = context.checker_config.get(analyzer +
                                                         '_checkers')

        profile_checkers = None
        if 'profile' in args:
            if args.profile not in context.available_profiles:
                LOG.error("Checker profile '%s' does not exist!",
                          args.profile)
                LOG.error("To list available profiles, use '--profile list'.")
                sys.exit(1)

            profile_checkers = [(args.profile, True)]

        config_handler.initialize_checkers(context.available_profiles,
                                           context.package_root,
                                           checkers,
                                           default_checker_cfg,
                                           profile_checkers)

        for checker_name, value in config_handler.checks().items():
            state, description = value

            # When a profile filter is active, show only the checkers the
            # profile enables.
            if state != CheckerState.enabled and 'profile' in args:
                continue

            if state == CheckerState.enabled and 'only_disabled' in args:
                continue
            elif state != CheckerState.enabled and 'only_enabled' in args:
                continue

            # json output gets a boolean; the table formats get '+'/'-'.
            if args.output_format == 'json':
                state = state == CheckerState.enabled
            else:
                state = '+' if state == CheckerState.enabled else '-'

            if 'details' in args:
                severity = context.severity_map.get(checker_name)
                rows.append([state, checker_name, analyzer, severity,
                             description])
            else:
                rows.append([checker_name])

    # Compiler warning flags, shown as pseudo 'clang-diagnostic-*' checkers.
    if 'show_warnings' in args:
        severity = context.severity_map.get('clang-diagnostic-')
        for warning in get_warnings(analyzer_environment):
            if 'details' in args:
                rows.append(['', warning, '-', severity, '-'])
            else:
                rows.append([warning])

    if rows:
        print(output_formatters.twodim_to_str(args.output_format, header,
                                              rows))

    for analyzer_binary, reason in errored:
        LOG.error("Failed to get checkers for '%s'!"
                  "The error reason was: '%s'", analyzer_binary, reason)
        LOG.error("Please check your installation and the "
                  "'config/package_layout.json' file!")
def start_workers(actions_map, actions, context, analyzer_config_map, jobs, output_path, skip_handler, metadata_tool, quiet_analyze, capture_analysis_output, timeout, ctu_reanalyze_on_failure, statistics_data, manager, compile_cmd_count): """ Start the workers in the process pool. For every build action there is worker which makes the analysis. """ # Handle SIGINT to stop this script running. def signal_handler(signum, frame): try: pool.terminate() manager.shutdown() finally: sys.exit(128 + signum) signal.signal(signal.SIGINT, signal_handler) actions, skipped_actions = skip_cpp(actions, skip_handler) # Start checking parallel. checked_var = multiprocessing.Value('i', 1) actions_num = multiprocessing.Value('i', len(actions)) pool = multiprocessing.Pool(jobs, initializer=init_worker, initargs=(checked_var, actions_num)) failed_dir = os.path.join(output_path, "failed") # If the analysis has failed, we help debugging. if not os.path.exists(failed_dir): os.makedirs(failed_dir) success_dir = os.path.join(output_path, "success") # Analysis was successful processing results. if not os.path.exists(success_dir): os.makedirs(success_dir) output_dirs = {'success': success_dir, 'failed': failed_dir} # Construct analyzer env. analyzer_environment = env.extend(context.path_env_extra, context.ld_lib_path_extra) analyzed_actions = [ (actions_map, build_action, context, analyzer_config_map.get(build_action.analyzer_type), output_path, skip_handler, quiet_analyze, capture_analysis_output, timeout, analyzer_environment, ctu_reanalyze_on_failure, output_dirs, statistics_data) for build_action in actions ] if analyzed_actions: try: # Workaround, equivalent of map. # The main script does not get signal # while map or map_async function is running. # It is a python bug, this does not happen if a timeout is # specified, then receive the interrupt immediately. 
# FIXME: Ensure all shared data structures are wrapped in manager # proxy objects before passing them to other processes via # map_async. # Note that even deep-copying is known to be insufficient. pool.map_async(check, analyzed_actions, 1, callback=lambda results: worker_result_handler( results, metadata_tool, output_path, context. analyzer_binaries)).get(31557600) pool.close() except Exception: pool.terminate() raise finally: pool.join() else: pool.close() pool.join() LOG.info("----==== Summary ====----") for skp in skipped_actions: LOG.debug_analyzer("%s is skipped", skp.source) LOG.info("Total analyzed compilation commands: %d", compile_cmd_count.analyze) # Some compile commands are skipped during log processing, if nothing # is skipped there for pre-analysis step, files will be skipped only # during analysis. if skipped_actions or compile_cmd_count.skipped: LOG.info("Skipped compilation commands: %d", compile_cmd_count.skipped + len(skipped_actions)) LOG.info("----=================----") if not os.listdir(success_dir): shutil.rmtree(success_dir) if not os.listdir(failed_dir): shutil.rmtree(failed_dir)
def add_arguments_to_parser(parser):
    """
    Register this subcommand's command line options on the given
    argparse.ArgumentParser and wire up its entry point.
    """
    supported = analyzer_types.supported_analyzers

    parser.add_argument('--analyzers',
                        nargs='+',
                        dest='analyzers',
                        metavar='ANALYZER',
                        required=False,
                        choices=supported,
                        default=argparse.SUPPRESS,
                        help="Show checkers only from the analyzers "
                             "specified. Currently supported analyzers are: "
                             + ', '.join(supported) + ".")

    # Listing compiler warnings requires diagtool, which ships only with
    # some clang binaries, so the flag is offered conditionally.
    ctx = analyzer_context.get_context()
    check_env = env.extend(ctx.path_env_extra, ctx.ld_lib_path_extra)
    if get_diagtool_bin(check_env):
        parser.add_argument('-w', '--warnings',
                            dest='show_warnings',
                            default=argparse.SUPPRESS,
                            action='store_true',
                            required=False,
                            help="Show available warning flags.")

    parser.add_argument('--details',
                        dest='details',
                        default=argparse.SUPPRESS,
                        action='store_true',
                        required=False,
                        help="Show details about the checker, such as "
                             "description, if available.")

    parser.add_argument('--profile',
                        dest='profile',
                        metavar='PROFILE/list',
                        required=False,
                        default=argparse.SUPPRESS,
                        help="List checkers enabled by the selected profile. "
                             "'list' is a special option showing details "
                             "about profiles collectively.")

    # Enabled-state filters exclude each other.
    state_filter = parser.add_mutually_exclusive_group(required=False)
    state_filter.add_argument('--only-enabled',
                              dest='only_enabled',
                              default=argparse.SUPPRESS,
                              action='store_true',
                              help="Show only the enabled checkers.")
    state_filter.add_argument('--only-disabled',
                              dest='only_disabled',
                              default=argparse.SUPPRESS,
                              action='store_true',
                              help="Show only the disabled checkers.")

    parser.add_argument('-o', '--output',
                        dest='output_format',
                        required=False,
                        default='rows',
                        choices=output_formatters.USER_FORMATS,
                        help="The format to list the applicable checkers as.")

    logger.add_verbose_arguments(parser)
    parser.set_defaults(func=main)
def main(args):
    """
    List the checkers available in the specified (or all supported)
    analyzers alongside with their description or enabled status in various
    formats.

    Handles two modes: profile listing (--profile list) and checker
    listing; rows are printed to stdout via the selected formatter.
    """

    # If the given output format is not 'table', redirect logger's output to
    # the stderr.
    stream = None
    if 'output_format' in args and args.output_format != 'table':
        stream = 'stderr'
    logger.setup_logger(args.verbose if 'verbose' in args else None, stream)

    # If nothing is set, list checkers for all supported analyzers.
    analyzers = args.analyzers \
        if 'analyzers' in args \
        else analyzer_types.supported_analyzers

    context = analyzer_context.get_context()
    working, errored = analyzer_types.check_supported_analyzers(analyzers,
                                                                context)

    analyzer_environment = env.extend(context.path_env_extra,
                                      context.ld_lib_path_extra)

    analyzer_config_map = analyzer_types.build_config_handlers(args,
                                                               context,
                                                               working)

    # List available checker profiles.
    if 'profile' in args and args.profile == 'list':
        # Header style depends on the output format: human readable for
        # tables, snake_case for csv/json.
        if 'details' not in args:
            if args.output_format not in ['csv', 'json']:
                header = ['Profile name']
            else:
                header = ['profile_name']
        else:
            if args.output_format not in ['csv', 'json']:
                header = ['Profile name', 'Description']
            else:
                header = ['profile_name', 'description']

        rows = []
        for (profile, description) in context.available_profiles.items():
            if 'details' not in args:
                rows.append([profile])
            else:
                rows.append([profile, description])

        print(output_formatters.twodim_to_str(args.output_format, header,
                                              rows))
        return

    # Use good looking different headers based on format.
    if 'details' not in args:
        if args.output_format not in ['csv', 'json']:
            header = ['Name']
        else:
            header = ['name']
    else:
        if args.output_format not in ['csv', 'json']:
            header = ['', 'Name', 'Analyzer', 'Severity', 'Description']
        else:
            header = ['enabled', 'name', 'analyzer', 'severity',
                      'description']

    rows = []
    for analyzer in working:
        config_handler = analyzer_config_map.get(analyzer)
        analyzer_class = \
            analyzer_types.supported_analyzers[analyzer]

        checkers = analyzer_class.get_analyzer_checkers(config_handler,
                                                        analyzer_environment)
        default_checker_cfg = context.checker_config.get(
            analyzer + '_checkers')

        profile_checkers = None
        if 'profile' in args:
            if args.profile not in context.available_profiles:
                LOG.error("Checker profile '%s' does not exist!",
                          args.profile)
                LOG.error("To list available profiles, use '--profile list'.")
                sys.exit(1)

            profile_checkers = [(args.profile, True)]

        config_handler.initialize_checkers(context.available_profiles,
                                           context.package_root,
                                           checkers,
                                           default_checker_cfg,
                                           profile_checkers)

        for checker_name, value in config_handler.checks().items():
            enabled, description = value

            # When a profile filter is active, show only the checkers the
            # profile enables.
            if not enabled and 'profile' in args:
                continue

            if enabled and 'only_disabled' in args:
                continue
            elif not enabled and 'only_enabled' in args:
                continue

            # json keeps the boolean; other formats show '+'/'-'.
            if args.output_format != 'json':
                enabled = '+' if enabled else '-'

            if 'details' not in args:
                rows.append([checker_name])
            else:
                severity = context.severity_map.get(checker_name)
                rows.append([enabled, checker_name, analyzer, severity,
                             description])

    # Compiler warning flags, appended as extra pseudo-checker rows.
    show_warnings = True if 'show_warnings' in args and \
        args.show_warnings else False
    if show_warnings:
        severity = context.severity_map.get('clang-diagnostic-')
        for warning in get_warnings(analyzer_environment):
            if 'details' not in args:
                rows.append([warning])
            else:
                rows.append(['', warning, '-', severity, '-'])

    if rows:
        print(output_formatters.twodim_to_str(args.output_format, header,
                                              rows))

    for analyzer_binary, reason in errored:
        LOG.error("Failed to get checkers for '%s'!"
                  "The error reason was: '%s'", analyzer_binary, reason)
        LOG.error("Please check your installation and the "
                  "'config/package_layout.json' file!")
def pre_analyze(params):
    """
    Pool-worker entry point for the pre-analysis of one build action.

    Runs the CTU pre-analysis (AST dump or on-demand invocation list
    generation plus function mapping) and/or the statistics collection for
    the given ClangSA action; other analyzer types and skipped sources
    return immediately. Exceptions are logged with a traceback and
    re-raised so the pool sees the failure.

    :param params: tuple of (action, context, clangsa_config, skip_handler,
                   ctu_data, statistics_data).
    """
    action, context, clangsa_config, skip_handler, \
        ctu_data, statistics_data = params

    analyzer_environment = env.extend(context.path_env_extra,
                                      context.ld_lib_path_extra)

    # Shared progress counter set up by the pool initializer; incremented
    # even for actions that end up being skipped below.
    progress_checked_num.value += 1

    if skip_handler and skip_handler.should_skip(action.source):
        return
    if action.analyzer_type != ClangSA.ANALYZER_NAME:
        return

    _, source_filename = os.path.split(action.source)

    LOG.info("[%d/%d] %s",
             progress_checked_num.value,
             progress_actions.value, source_filename)

    try:
        if ctu_data:
            LOG.debug("running CTU pre analysis")
            ctu_temp_fnmap_folder = ctu_data.get('ctu_temp_fnmap_folder')
            ctu_func_map_cmd = ctu_data.get('ctu_func_map_cmd')

            triple_arch = \
                ctu_triple_arch.get_triple_arch(action, action.source,
                                                clangsa_config,
                                                analyzer_environment)

            # TODO: reorganize the various ctu modes parameters
            # Dump-based analysis requires serialized ASTs.
            if clangsa_config.ctu_on_demand:
                ctu_manager.generate_invocation_list(triple_arch, action,
                                                     action.source,
                                                     clangsa_config,
                                                     analyzer_environment)
            else:
                ctu_manager.generate_ast(triple_arch, action, action.source,
                                         clangsa_config,
                                         analyzer_environment)
            # On-demand analysis does not require AST-dumps.
            # We map the function names to corresponding sources of ASTs.
            # In case of On-demand analysis this source is the original
            # source code. In case of AST-dump based analysis these sources
            # are the generated AST-dumps.
            ctu_manager.map_functions(triple_arch, action, action.source,
                                      clangsa_config, analyzer_environment,
                                      ctu_func_map_cmd,
                                      ctu_temp_fnmap_folder)
    except Exception as ex:
        LOG.debug_analyzer(str(ex))
        traceback.print_exc(file=sys.stdout)
        raise

    try:
        if statistics_data:
            LOG.debug("running statistics pre analysis")
            collect_statistics(action,
                               action.source,
                               clangsa_config,
                               analyzer_environment,
                               statistics_data)
    except Exception as ex:
        LOG.debug_analyzer(str(ex))
        traceback.print_exc(file=sys.stdout)
        raise
def check_supported_analyzers(analyzers, context):
    """
    Filter the requested analyzer names down to the ones that are both
    supported by CodeChecker and actually runnable in this environment.

    A non-absolute analyzer binary coming from a stale configuration is
    resolved against the PATH, and context.analyzer_binaries is updated in
    place with the resolved location.

    :return: (enabled, failed) where enabled is a set of analyzer names and
             failed is a set of (analyzer, reason) tuples.
    """
    check_env = env.extend(context.path_env_extra,
                           context.ld_lib_path_extra)

    enabled = set()
    failed = set()

    for name in analyzers:
        if name not in supported_analyzers:
            failed.add((name, "Analyzer unsupported by CodeChecker!"))
            continue

        # Get the compiler binary to check if it can run.
        binary = context.analyzer_binaries.get(name)
        if not binary:
            failed.add((name, "Failed to detect analyzer binary!"))
            continue

        if not os.path.isabs(binary):
            # A relative binary name in the configuration: look for an
            # executable with that name on the PATH instead.
            resolved = supported_analyzers[name].\
                resolve_missing_binary(binary, check_env)

            # Only treat the PATH hit as a substitution when it differs
            # from the configured name; if calling the configured name
            # would have reached the same executable anyway, it is NOT a
            # "not found" case.
            if resolved and os.path.basename(resolved) != binary:
                LOG.debug("Configured binary '%s' for analyzer '%s' was "
                          "not found, but environment PATH contains '%s'.",
                          binary, name, resolved)
                context.analyzer_binaries[name] = \
                    os.path.realpath(resolved)
                binary = resolved

        if not binary or \
                not host_check.check_clang(binary, check_env):
            # Analyzers unavailable under absolute paths are deliberately a
            # configuration problem.
            failed.add((name, "Cannot execute analyzer binary!"))
            continue

        enabled.add(name)

    return enabled, failed
def __get_detailed_checker_info(args: argparse.Namespace,
                                cl: CheckerLabels) -> Dict[str, list]:
    """
    Returns a dictionary which maps analyzer names to the collection of
    their supported checkers. Checker information is described with tuples
    of this information:
    (status, checker name, analyzer name, description, labels).
    """
    context = analyzer_context.get_context()

    working_analyzers, _ = analyzer_types.check_supported_analyzers(
        analyzer_types.supported_analyzers, context)

    analyzer_config_map = analyzer_types.build_config_handlers(
        args, context, working_analyzers)

    analyzer_environment = env.extend(context.path_env_extra,
                                      context.ld_lib_path_extra)

    checker_info = defaultdict(list)

    for analyzer in working_analyzers:
        config_handler = analyzer_config_map.get(analyzer)
        analyzer_class = analyzer_types.supported_analyzers[analyzer]

        checkers = analyzer_class.get_analyzer_checkers(
            config_handler, analyzer_environment)

        # Translate the filtering command line options into (label, enabled)
        # pairs consumed by initialize_checkers().
        profile_checkers = []
        if 'profile' in args:
            available_profiles = cl.get_description('profile')
            if args.profile not in available_profiles:
                LOG.error("Checker profile '%s' does not exist!",
                          args.profile)
                LOG.error("To list available profiles, use '--profile list'.")
                sys.exit(1)

            profile_checkers.append((f'profile:{args.profile}', True))

        if 'label' in args:
            profile_checkers.extend((label, True) for label in args.label)

        if 'severity' in args:
            profile_checkers.append((f'severity:{args.severity}', True))

        if 'guideline' in args:
            profile_checkers.append((__guideline_to_label(args, cl), True))

        config_handler.initialize_checkers(context, checkers,
                                           profile_checkers)

        for checker, (state, description) in config_handler.checks().items():
            # severity = cl.severity(checker)
            # guideline = guideline_rules_for_checker(checker, context)
            # checker_info[analyzer].append(
            #     (state, checker, analyzer, severity, guideline,
            #      description))
            checker_info[analyzer].append(
                (state, checker, analyzer, description,
                 sorted(cl.labels_of_checker(checker, analyzer))))

    if 'show_warnings' in args:
        for warning in get_warnings(analyzer_environment):
            # NOTE(review): other listings in this module strip a leading
            # '-W' (warning[2:]) before adding the prefix — confirm the
            # format get_warnings() returns here.
            warning = 'clang-diagnostic-' + warning

            # guideline = guideline_rules_for_checker(warning, context)
            # checker_info[ClangTidy.ANALYZER_NAME].append(
            #     (CheckerState.default, warning, ClangTidy.ANALYZER_NAME,
            #      'MEDIUM', guideline, ''))
            checker_info[ClangTidy.ANALYZER_NAME].append(
                (CheckerState.default, warning, ClangTidy.ANALYZER_NAME, '',
                 sorted(cl.labels_of_checker(warning,
                                             ClangTidy.ANALYZER_NAME))))

    return checker_info