def get_argparser_ctor_args():
    """
    This method returns a dict containing the kwargs for constructing an
    argparse.ArgumentParser (either directly or as a subparser).
    """
    return {
        'prog': 'CodeChecker checkers',
        'formatter_class': argparse.ArgumentDefaultsHelpFormatter,

        # Description is shown when the command's help is queried directly.
        'description': "Get the list of checkers available and their enabled "
                       "status in the supported analyzers. Currently "
                       "supported analyzers are: " +
                       ', '.join(analyzer_types.supported_analyzers) + ".",

        # Epilogue is shown after the arguments when the help is queried
        # directly.
        # FIX: "enabled of disabled" -> "enabled or disabled" (typo in the
        # user-facing help text).
        'epilog': "The list of checkers that are enabled or disabled by "
                  "default can be edited by editing the file '" +
                  os.path.join(analyzer_context.get_context()
                               .package_root, 'config', 'config.json') +
                  "'.",

        # Help is shown when the "parent" CodeChecker command lists the
        # individual subcommands.
        'help': "List the checkers available for code analysis."
    }
def get_argparser_ctor_args():
    """
    This method returns a dict containing the kwargs for constructing an
    argparse.ArgumentParser (either directly or as a subparser).
    """
    severity_map_path = os.path.join(
        analyzer_context.get_context().package_root,
        'config', 'checker_severity_map.json')

    # Shown when this command's help is queried directly.
    description = """
Parse and pretty-print the summary and results from one or more
'codechecker-analyze' result files. Bugs which are commented by using
"false_positive", "suppress" and "intentional" source code comments will not be
printed by the `parse` command."""

    epilog = """
environment variables:
  CC_SEVERITY_MAP_FILE   Path of the checker-severity mapping config file.
                         Default: {}
""".format(severity_map_path)

    return {
        'prog': 'CodeChecker parse',
        'formatter_class': arg.RawDescriptionDefaultHelpFormatter,
        'description': description,
        'epilog': epilog,
        # Shown when the "parent" CodeChecker command lists the individual
        # subcommands.
        'help': "Print analysis summary and results in a human-readable "
                "format."
    }
def get_argparser_ctor_args():
    """
    This method returns a dict containing the kwargs for constructing an
    argparse.ArgumentParser (either directly or as a subparser).
    """
    return {
        'prog': 'CodeChecker checkers',
        'formatter_class': argparse.ArgumentDefaultsHelpFormatter,

        # Description is shown when the command's help is queried directly.
        'description': "Get the list of checkers available and their enabled "
                       "status in the supported analyzers.",

        # Epilogue is shown after the arguments when the help is queried
        # directly.
        # FIX: "enabled of disabled" -> "enabled or disabled" (typo in the
        # user-facing help text).
        'epilog': "The list of checkers that are enabled or disabled by "
                  "default can be edited by editing the file '" +
                  os.path.join(analyzer_context.get_context()
                               .package_root, 'config', 'config.json') +
                  "'.",

        # Help is shown when the "parent" CodeChecker command lists the
        # individual subcommands.
        'help': "List the checkers available for code analysis."
    }
def __init__(self):
    # Snapshot the package/version metadata from the analyzer context so
    # it can be read without re-querying the context later.
    ctx = analyzer_context.get_context()

    self.version = ctx.version
    self.build_date = ctx.package_build_date
    self.git_hash = ctx.package_git_hash
    self.git_tag = ctx.package_git_tag
def main(args):
    """
    Entry point for the command handling automatic fixes.

    TODO: Currently clang-tidy is the only tool which supports the dumping
    of fixit replacements. In this script we assume that the replacement
    dump .yaml files are in the format so clang-apply-replacement Clang
    tool can consume them.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    # Without clang-apply-replacements there is nothing we can apply.
    if not analyzer_context.get_context().replacer_binary:
        LOG.error("clang-apply-replacements tool is not found")
        return

    stdin_is_tty = sys.stdin.isatty()

    # Reports piped on stdin and interactive prompts are mutually exclusive.
    if args.interactive and not stdin_is_tty:
        LOG.error("Interactive mode can't be used in case of JSON data "
                  "arriving from standard input.")
        sys.exit(1)

    reports = None
    if not stdin_is_tty:
        try:
            reports = json.loads(sys.stdin.read())
        except json.decoder.JSONDecodeError as err:
            LOG.error("JSON format error on standard input: %s", err)
            sys.exit(1)

    # Interactive mode implies applying the selected fixits.
    handler = apply_fixits if 'apply' in args or args.interactive \
        else list_fixits
    handler(args.input, args.checker_name, args.file, args.interactive,
            reports)
def get_diagtool_bin(env=None):
    """
    Return full path of diagtool.

    Select clang binary, check for a 'diagtool' binary next to the selected
    clang binary and return full path of this binary if it exists.

    env -- Optional mapping whose 'PATH' entry is searched when the
           configured clang binary is not an absolute path.
    """
    context = analyzer_context.get_context()
    analyzer_binary = context.analyzer_binaries.get(ClangSA.ANALYZER_NAME)

    # FIX: guard against a missing ClangSA binary; previously
    # os.path.isabs(None) raised a TypeError.
    if not analyzer_binary:
        LOG.debug("No binary is configured for the '%s' analyzer!",
                  ClangSA.ANALYZER_NAME)
        return None

    clang_bin = analyzer_binary
    if not os.path.isabs(clang_bin):
        clang_bin = find_executable(analyzer_binary,
                                    env['PATH'] if env else None)
        if not clang_bin:
            return None

    # Resolve symlink.
    clang_bin = os.path.realpath(clang_bin)

    # Find diagtool next to the clang binary.
    diagtool_bin = os.path.join(os.path.dirname(clang_bin), 'diagtool')
    if os.path.exists(diagtool_bin):
        return diagtool_bin

    LOG.debug("'diagtool' can not be found next to the clang binary (%s)!",
              clang_bin)
    return None
def main(args):
    """
    Generates a build log by running the original build command.
    No analysis is done.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    args.logfile = os.path.realpath(args.logfile)

    # The build may produce no compiler command at all (e.g. an empty build
    # command), in which case nothing would write the log.  Pre-seed the
    # file with an empty JSON array so the result is always valid JSON.
    with open(args.logfile, 'w', encoding="utf-8",
              errors="ignore") as logfile:
        logfile.write("[\n]")

    verbose = args.verbose if 'verbose' in args else None

    build_manager.perform_build_command(args.logfile,
                                        args.command,
                                        analyzer_context.get_context(),
                                        'keep_link' in args,
                                        silent='quiet' in args,
                                        verbose=verbose)
def main(args):
    """
    List the analyzers' basic information supported by CodeChecker.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    context = analyzer_context.get_context()
    working, errored = \
        analyzer_types.check_supported_analyzers(
            analyzer_types.supported_analyzers, context)

    if args.dump_config:
        binary = context.analyzer_binaries.get(args.dump_config)

        if args.dump_config == 'clang-tidy':
            subprocess.call([binary, '-dump-config', '-checks=*'])
        elif args.dump_config == 'clangsa':
            # TODO: Not supported by ClangSA yet!
            LOG.warning("'--dump-config clangsa' is not supported yet.")

        return

    # Machine-readable formats use lowercase, snake_case column names.
    if args.output_format not in ['csv', 'json']:
        if 'details' not in args:
            header = ['Name']
        else:
            header = ['Name', 'Path', 'Version']
    else:
        if 'details' not in args:
            header = ['name']
        else:
            header = ['name', 'path', 'version_string']

    rows = []
    for analyzer in working:
        if 'details' not in args:
            rows.append([analyzer])
        else:
            binary = context.analyzer_binaries.get(analyzer)
            try:
                # FIX: decode the subprocess output; without 'encoding' the
                # call returns bytes on Python 3 and the table would show
                # b'...' in the Version column.
                version = subprocess.check_output(
                    [binary, '--version'],
                    encoding="utf-8", errors="ignore")
            except (subprocess.CalledProcessError, OSError):
                version = 'ERROR'

            rows.append([analyzer, binary, version])

    if 'all' in args:
        for analyzer, err_reason in errored:
            if 'details' not in args:
                rows.append([analyzer])
            else:
                rows.append([
                    analyzer,
                    context.analyzer_binaries.get(analyzer),
                    err_reason
                ])

    if rows:
        print(output_formatters.twodim_to_str(args.output_format,
                                              header, rows))
def get_argparser_ctor_args():
    """
    This method returns a dict containing the kwargs for constructing an
    argparse.ArgumentParser (either directly or as a subparser).
    """
    data_files_dir_path = analyzer_context.get_context().data_files_dir_path
    labels_dir_path = os.path.join(data_files_dir_path, 'config', 'labels')

    return {
        'prog': 'CodeChecker checkers',
        'formatter_class': arg.RawDescriptionDefaultHelpFormatter,

        # Description is shown when the command's help is queried directly.
        'description': "Get the list of checkers available and their enabled "
                       "status in the supported analyzers.",

        # Epilogue is shown after the arguments when the help is queried
        # directly.
        # FIX: the "--label" example used "str-34-c" while every other line
        # spells the rule "str34-c"; made them consistent.
        'epilog': """
The list of checkers that are enabled or disabled by default can be edited by
editing "profile:default" labels in the directory '{}'.

Example scenario: List checkers by labels
-----------------------------------------
List checkers in "sensitive" profile:
    CodeChecker checkers --label profile:sensitive
    CodeChecker checkers --profile sensitive

List checkers in "HIGH" severity:
    CodeChecker checkers --label severity:HIGH
    CodeChecker checkers --severity HIGH

List checkers covering str34-c SEI-CERT rule:
    CodeChecker checkers --label sei-cert:str34-c
    CodeChecker checkers --guideline sei-cert:str34-c

List checkers covering all SEI-CERT rules:
    CodeChecker checkers --label guideline:sei-cert
    CodeChecker checkers --guideline sei-cert

List available profiles, guidelines and severities:
    CodeChecker checkers --profile
    CodeChecker checkers --guideline
    CodeChecker checkers --severity

List labels and their available values:
    CodeChecker checkers --label
    CodeChecker checkers --label severity
""".format(labels_dir_path),
        # FIX: dropped the redundant single-argument os.path.join() around
        # labels_dir_path -- it was a no-op.

        # Help is shown when the "parent" CodeChecker command lists the
        # individual subcommands.
        'help': "List the checkers available for code analysis."
    }
def add_arguments_to_parser(parser):
    """
    Add the subcommand's arguments to the given argparse.ArgumentParser.
    """
    context = analyzer_context.get_context()
    working_analyzers, _ = analyzer_types.check_supported_analyzers(
        analyzer_types.supported_analyzers, context)

    # argparse.SUPPRESS keeps the attribute off the namespace when the flag
    # is absent, so handlers can test membership with "'all' in args".
    parser.add_argument('--all',
                        dest="all",
                        action='store_true',
                        default=argparse.SUPPRESS,
                        required=False,
                        help="Show all supported analyzers, not just the "
                             "available ones.")

    parser.add_argument('--details',
                        dest="details",
                        action='store_true',
                        default=argparse.SUPPRESS,
                        required=False,
                        help="Show details about the analyzers, not just "
                             "their names.")

    # Choices are restricted to the analyzers actually usable on this
    # machine, detected above.
    parser.add_argument('--dump-config',
                        dest='dump_config',
                        required=False,
                        choices=working_analyzers,
                        help="Dump the available checker options for the "
                             "given analyzer to the standard output. "
                             "Currently only clang-tidy supports this option. "
                             "The output can be redirected to a file named "
                             ".clang-tidy. If this file is placed to the "
                             "project directory then the options are applied "
                             "to the files under that directory. This config "
                             "file can also be provided via "
                             "'CodeChecker analyze' and 'CodeChecker check' "
                             "commands.")

    parser.add_argument('--analyzer-config',
                        dest='analyzer_config',
                        required=False,
                        default=argparse.SUPPRESS,
                        choices=working_analyzers,
                        help="Show analyzer configuration options. These can "
                             "be given to 'CodeChecker analyze "
                             "--analyzer-options'.")

    parser.add_argument('-o', '--output',
                        dest='output_format',
                        required=False,
                        default='rows',
                        choices=output_formatters.USER_FORMATS,
                        help="Specify the format of the output list.")

    logger.add_verbose_arguments(parser)
    # Dispatch to this module's main() when the subcommand is invoked.
    parser.set_defaults(func=main)
def apply_fixits(inputs, checker_names, file_paths, interactive, reports):
    """
    This function applies the replacements from the .yaml files.

    inputs -- A list of report directories which contains the fixit dumps
              in a subdirectory named "fixit".
    checker_names -- Checker names used for filtering the fixits.
    file_paths -- File paths used for filtering the fixits.
    interactive -- Whether fixits should be confirmed interactively.
    reports -- Optional report data used by the filter (may be None).
    """
    # Files collected while filtering: targets that exist, that are missing,
    # and that changed on disk since the analysis produced the fixit.
    not_existing_files = set()
    existing_files = set()
    modified_files = set()

    for i in inputs:
        fixit_dir = os.path.join(i, 'fixit')

        if not os.path.isdir(fixit_dir):
            LOG.info('No fixits in %s', i)
            continue

        # The surviving (filtered) fixit files are written to a temporary
        # directory which is handed to clang-apply-replacements below.
        with tempfile.TemporaryDirectory() as out_dir:
            for fixit_file in os.listdir(fixit_dir):
                with open(os.path.join(fixit_dir, fixit_file),
                          encoding='utf-8', errors='ignore') as f:
                    # BaseLoader keeps every scalar as a plain string.
                    content = yaml.load(f, Loader=yaml.BaseLoader)
                    fixit_mtime = get_last_mod_time(
                        os.path.join(fixit_dir, fixit_file))

                    # Filters 'content' and reports which target files
                    # exist / are missing / were modified since analysis.
                    existing, not_existing, modified = \
                        clang_tidy_fixit_filter(
                            content, checker_names, file_paths, reports,
                            fixit_mtime, interactive)

                existing_files.update(existing)
                not_existing_files.update(not_existing)
                modified_files.update(modified)

                # Only dump files that still carry at least one diagnostic
                # after filtering.
                if len(content['Diagnostics']) != 0:
                    with open(os.path.join(out_dir, fixit_file), 'w',
                              encoding='utf-8', errors='ignore') as out:
                        yaml.dump(content, out)

            # Apply all surviving replacements with
            # clang-apply-replacements.
            proc = subprocess.Popen([
                analyzer_context.get_context().replacer_binary,
                out_dir])
            proc.communicate()

    # Summaries go to stderr so stdout stays clean for machine consumers.
    if existing_files:
        print("Updated files:\n{}".format(
            '\n'.join(sorted(existing_files))), file=sys.stderr)
    if not_existing_files:
        print("Not existing files:\n{}".format(
            '\n'.join(sorted(not_existing_files))), file=sys.stderr)
    if modified_files:
        print("Skipped files due to modification since last analysis:\n{}"
              .format('\n'.join(sorted(modified_files))), file=sys.stderr)
def __print_checker_config(args: argparse.Namespace):
    """
    Print checker config options according to the command line arguments to
    the standard output. The set of config options comes from the analyzers.
    """
    # NOTE(review): 'custom' is silently remapped to the plain 'rows'
    # format here -- presumably the downstream table writer has no 'custom'
    # renderer; confirm against twodim.to_str.
    if args.output_format == 'custom':
        args.output_format = 'rows'

    context = analyzer_context.get_context()
    working_analyzers, errored = analyzer_types.check_supported_analyzers(
        args.analyzers, context)
    analyzer_types.check_available_analyzers(working_analyzers, errored)

    analyzer_environment = env.extend(context.path_env_extra,
                                      context.ld_lib_path_extra)

    analyzer_config_map = analyzer_types.build_config_handlers(
        args, context, working_analyzers)

    if 'details' in args:
        header = ['Option', 'Description']
    else:
        header = ['Option']

    # Machine-readable formats get lowercase, snake_case column names.
    if args.output_format in ['csv', 'json']:
        header = list(map(__uglify, header))

    rows = []
    analyzer_failures = []
    for analyzer in working_analyzers:
        config_handler = analyzer_config_map.get(analyzer)
        analyzer_class = analyzer_types.supported_analyzers[analyzer]

        configs = analyzer_class.get_checker_config(config_handler,
                                                    analyzer_environment)
        # An empty config list is treated as a failure of that analyzer
        # (e.g. a too-old analyzer version), not as "no options".
        if not configs:
            analyzer_failures.append(analyzer)
            continue

        rows.extend(
            (':'.join((analyzer, c[0])), c[1]) if 'details' in args
            else (':'.join((analyzer, c[0])), ) for c in configs)

    if rows:
        print(twodim.to_str(args.output_format, header, rows))

    analyzer_types.print_unsupported_analyzers(errored)

    if analyzer_failures:
        LOG.error(
            "Failed to get checker configuration options for '%s' "
            "analyzer(s)! Please try to upgrade your analyzer "
            "version to use this feature.", ', '.join(analyzer_failures))
        sys.exit(1)
def add_arguments_to_parser(parser):
    """
    Add the subcommand's arguments to the given argparse.ArgumentParser.
    """
    context = analyzer_context.get_context()
    working, _ = analyzer_types.check_supported_analyzers(
        analyzer_types.supported_analyzers, context)

    # argparse.SUPPRESS keeps the attribute off the namespace when the flag
    # is absent, so handlers can test membership with "'all' in args".
    parser.add_argument('--all',
                        dest="all",
                        action='store_true',
                        default=argparse.SUPPRESS,
                        required=False,
                        help="Show all supported analyzers, not just the "
                             "available ones.")

    parser.add_argument('--details',
                        dest="details",
                        action='store_true',
                        default=argparse.SUPPRESS,
                        required=False,
                        help="Show details about the analyzers, not just "
                             "their names.")

    # Choices are restricted to the analyzers actually usable on this
    # machine, detected above.
    parser.add_argument('--dump-config',
                        dest='dump_config',
                        required=False,
                        choices=list(working),
                        help="Dump the available checker options for the "
                             "given analyzer to the standard output. "
                             "Currently only clang-tidy supports this option. "
                             "The output can be redirected to a file named "
                             ".clang-tidy. If this file is placed to the "
                             "project directory then the options are applied "
                             "to the files under that directory. This config "
                             "file can also be provided via "
                             "'CodeChecker analyze' and 'CodeChecker check' "
                             "commands.")

    parser.add_argument('-o', '--output',
                        dest='output_format',
                        required=False,
                        default='rows',
                        choices=output_formatters.USER_FORMATS,
                        help="Specify the format of the output list.")

    logger.add_verbose_arguments(parser)
    # Dispatch to this module's main() when the subcommand is invoked.
    parser.set_defaults(func=main)
def main(args):
    """
    Generates a build log by running the original build command.
    No analysis is done.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    args.logfile = os.path.realpath(args.logfile)

    # Start from a clean slate: drop any log file left over from a
    # previous run before the build writes a fresh one.
    if os.path.exists(args.logfile):
        os.remove(args.logfile)

    build_manager.perform_build_command(args.logfile,
                                        args.command,
                                        analyzer_context.get_context(),
                                        'keep_link' in args,
                                        silent='quiet' in args)
def get_argparser_ctor_args():
    """
    This method returns a dict containing the kwargs for constructing an
    argparse.ArgumentParser (either directly or as a subparser).
    """
    package_root = analyzer_context.get_context().package_root

    return {
        'prog': 'CodeChecker checkers',
        'formatter_class': arg.RawDescriptionDefaultHelpFormatter,

        # Description is shown when the command's help is queried directly.
        'description': "Get the list of checkers available and their enabled "
                       "status in the supported analyzers.",

        # Epilogue is shown after the arguments when the help is queried
        # directly.
        # NOTE(review): the first placeholder ("the file '{}'") is filled
        # with checker_profile_map.json -- the same file as the
        # CC_PROFILE_MAP_FILE default below; confirm this is intentional.
        'epilog': """
The list of checkers that are enabled or disabled by default can be edited by
editing the file '{}'.

Environment variables
------------------------------------------------
  CC_SEVERITY_MAP_FILE   Path of the checker-severity mapping config file.
                         Default: '{}'
  CC_GUIDELINE_MAP_FILE  Path of the checker-guideline mapping config file.
                         Default: '{}'
  CC_PROFILE_MAP_FILE    Path of the checker-profile mapping config file.
                         Default: '{}'
""".format(os.path.join(package_root, 'config',
                        'checker_profile_map.json'),
           os.path.join(package_root, 'config',
                        'checker_severity_map.json'),
           os.path.join(package_root, 'config',
                        'checker_guideline_map.json'),
           os.path.join(package_root, 'config',
                        'checker_profile_map.json')),

        # Help is shown when the "parent" CodeChecker command lists the
        # individual subcommands.
        'help': "List the checkers available for code analysis."
    }
def print_version(output_format=None):
    """
    Print analyzer version information in the given format.
    """
    ctx = analyzer_context.get_context()

    # Insertion order determines the row order in the table output.
    version_info = {
        "Base package version": ctx.version,
        "Package build date": ctx.package_build_date,
        "Git commit ID (hash)": ctx.package_git_hash,
        "Git tag information": ctx.package_git_tag}

    if output_format == "json":
        # Use a special JSON format here, instead of
        # [ {"kind": "something", "version": "0.0.0"}, {"kind": "foo", ...} ]
        # do
        # { "something": "0.0.0", "foo": ... }
        print(json.dumps(version_info))
    else:
        print(twodim.to_str(output_format, ["Kind", "Version"],
                            list(version_info.items())))
def main(args):
    """
    List the checkers available in the specified (or all supported) analyzers
    alongside with their description or enabled status in various formats.
    """
    # If the given output format is not 'table', redirect logger's output to
    # the stderr.
    logger.setup_logger(args.verbose if 'verbose' in args else None,
                        None if args.output_format == 'table' else 'stderr')

    cl = analyzer_context.get_context().checker_labels

    # A label-selector flag given without a value means "list the available
    # values of that attribute"; otherwise fall through to checker listing.
    if 'profile' in args and not args.profile:
        __print_profiles(args, cl)
    elif 'severity' in args and not args.severity:
        __print_severities(args, cl)
    elif 'guideline' in args and not args.guideline:
        __print_guidelines(args, cl)
    elif 'label' in args and not args.label:
        __print_labels(args, cl)
    elif 'label' in args and ':' not in args.label:
        __print_label_values(args, cl)
    elif 'checker_config' in args:
        __print_checker_config(args)
    else:
        __print_checkers(args, cl)
def main(args):
    """
    List the analyzers' basic information supported by CodeChecker.
    """
    # If the given output format is not 'table', redirect logger's output to
    # the stderr.
    stream = None
    if 'output_format' in args and args.output_format != 'table':
        stream = 'stderr'

    logger.setup_logger(args.verbose if 'verbose' in args else None, stream)

    context = analyzer_context.get_context()
    working_analyzers, errored = \
        analyzer_types.check_supported_analyzers(
            analyzer_types.supported_analyzers, context)

    if args.dump_config:
        binary = context.analyzer_binaries.get(args.dump_config)

        if args.dump_config == 'clang-tidy':
            subprocess.call([binary, '-dump-config', '-checks=*'],
                            encoding="utf-8", errors="ignore")
        elif args.dump_config == 'clangsa':
            ret = subprocess.call([binary,
                                   '-cc1',
                                   '-analyzer-checker-option-help',
                                   '-analyzer-checker-option-help-alpha'],
                                  stderr=subprocess.PIPE,
                                  encoding="utf-8", errors="ignore")

            if ret:
                # This flag is supported from Clang 9.
                LOG.warning("'--dump-config clangsa' is not supported yet. "
                            "Please make sure that you are using Clang 9 "
                            "or newer.")

        return

    analyzer_environment = env.extend(context.path_env_extra,
                                      context.ld_lib_path_extra)

    analyzer_config_map = analyzer_types.build_config_handlers(
        args, context, working_analyzers)

    def uglify(text):
        """
        csv and json format output contain this non human readable header
        string: no CamelCase and no space.
        """
        return text.lower().replace(' ', '_')

    # '--analyzer-config <analyzer>': list that analyzer's config options.
    if 'analyzer_config' in args:
        if 'details' in args:
            header = ['Option', 'Description']
        else:
            header = ['Option']

        if args.output_format in ['csv', 'json']:
            header = list(map(uglify, header))

        analyzer = args.analyzer_config
        config_handler = analyzer_config_map.get(analyzer)
        analyzer_class = analyzer_types.supported_analyzers[analyzer]

        configs = analyzer_class.get_analyzer_config(config_handler,
                                                     analyzer_environment)
        # Options are printed as "<analyzer>:<option>".
        rows = [(':'.join((analyzer, c[0])), c[1]) if 'details' in args
                else (':'.join((analyzer, c[0])),) for c in configs]

        print(output_formatters.twodim_to_str(args.output_format,
                                              header, rows))
        return

    # Default mode: list the analyzers themselves.
    if 'details' in args:
        header = ['Name', 'Path', 'Version']
    else:
        header = ['Name']

    if args.output_format in ['csv', 'json']:
        header = list(map(uglify, header))

    rows = []
    for analyzer in working_analyzers:
        if 'details' not in args:
            rows.append([analyzer])
        else:
            binary = context.analyzer_binaries.get(analyzer)
            try:
                version = subprocess.check_output(
                    [binary, '--version'],
                    encoding="utf-8", errors="ignore")
            except (subprocess.CalledProcessError, OSError):
                version = 'ERROR'

            rows.append([analyzer, binary, version])

    # '--all': also show analyzers that were detected but are unusable,
    # with the failure reason in place of the version.
    if 'all' in args:
        for analyzer, err_reason in errored:
            if 'details' not in args:
                rows.append([analyzer])
            else:
                rows.append([analyzer,
                             context.analyzer_binaries.get(analyzer),
                             err_reason])

    if rows:
        print(output_formatters.twodim_to_str(args.output_format,
                                              header, rows))
def main(args):
    """
    List the checkers available in the specified (or all supported) analyzers
    alongside with their description or enabled status in various formats.
    """
    # If the given output format is not 'table', redirect logger's output to
    # the stderr.
    logger.setup_logger(args.verbose if 'verbose' in args else None,
                        None if args.output_format == 'table' else 'stderr')

    context = analyzer_context.get_context()
    working_analyzers, errored = analyzer_types.check_supported_analyzers(
        args.analyzers, context)

    analyzer_environment = env.extend(context.path_env_extra,
                                      context.ld_lib_path_extra)

    analyzer_config_map = analyzer_types.build_config_handlers(
        args, context, working_analyzers)

    def uglify(text):
        """
        csv and json format output contain this non human readable header
        string: no CamelCase and no space.
        """
        return text.lower().replace(' ', '_')

    def match_guideline(checker_name, selected_guidelines):
        """
        Returns True if checker_name gives reports related to any of the
        selected guideline rule.
        checker_name -- A full checker name.
        selected_guidelines -- A list of guideline names or guideline rule
                               IDs.
        """
        guideline = context.guideline_map.get(checker_name, {})
        # Match both guideline names (dict keys) and rule IDs (values).
        guideline_set = set(guideline)
        for value in guideline.values():
            guideline_set |= set(value)

        return any(g in guideline_set for g in selected_guidelines)

    def format_guideline(guideline):
        """
        Convert guideline rules to human-readable format.
        guideline -- Dictionary in the following format:
                     {"guideline_1": ["rule_1", "rule_2"]}
        """
        return ' '.join('Related {} rules: {}'.format(g, ', '.join(r))
                        for g, r in guideline.items())

    # List available checker profiles.
    if 'profile' in args and args.profile == 'list':
        if 'details' in args:
            header = ['Profile name', 'Description']
            rows = context.available_profiles.items()
        else:
            header = ['Profile name']
            rows = [(key, "") for key in context.available_profiles.keys()]

        if args.output_format in ['csv', 'json']:
            header = list(map(uglify, header))

        print(output_formatters.twodim_to_str(args.output_format, header,
                                              rows))
        return

    # List checker config options.
    if 'checker_config' in args:
        if 'details' in args:
            header = ['Option', 'Description']
        else:
            header = ['Option']

        if args.output_format in ['csv', 'json']:
            header = list(map(uglify, header))

        rows = []
        for analyzer in working_analyzers:
            config_handler = analyzer_config_map.get(analyzer)
            analyzer_class = analyzer_types.supported_analyzers[analyzer]

            configs = analyzer_class.get_checker_config(
                config_handler, analyzer_environment)
            # Options are printed as "<analyzer>:<option>".
            rows.extend(
                (':'.join((analyzer, c[0])), c[1]) if 'details' in args
                else (':'.join((analyzer, c[0])), ) for c in configs)

        print(output_formatters.twodim_to_str(args.output_format, header,
                                              rows))
        return

    # '--guideline' with no values: list all guidelines and their rules.
    if args.guideline is not None and len(args.guideline) == 0:
        result = defaultdict(set)

        for _, guidelines in context.guideline_map.items():
            for guideline, rules in guidelines.items():
                result[guideline] |= set(rules)

        header = ['Guideline', 'Rules']
        if args.output_format in ['csv', 'json']:
            header = list(map(uglify, header))

        if args.output_format == 'json':
            rows = [(g, sorted(list(r))) for g, r in result.items()]
        else:
            rows = [(g, ', '.join(sorted(r))) for g, r in result.items()]

        if args.output_format == 'rows':
            for row in rows:
                print('Guideline: {}'.format(row[0]))
                print('Rules: {}'.format(row[1]))
        else:
            print(
                output_formatters.twodim_to_str(args.output_format, header,
                                                rows))
        return

    # List available checkers.
    if 'details' in args:
        header = ['Enabled', 'Name', 'Analyzer', 'Severity', 'Guideline',
                  'Description']
    else:
        header = ['Name']

    if args.output_format in ['csv', 'json']:
        header = list(map(uglify, header))

    rows = []
    for analyzer in working_analyzers:
        config_handler = analyzer_config_map.get(analyzer)
        analyzer_class = analyzer_types.supported_analyzers[analyzer]

        checkers = analyzer_class.get_analyzer_checkers(
            config_handler, analyzer_environment)

        default_checker_cfg = context.checker_config.get(
            analyzer + '_checkers')

        profile_checkers = None
        if 'profile' in args:
            if args.profile not in context.available_profiles:
                LOG.error("Checker profile '%s' does not exist!",
                          args.profile)
                LOG.error("To list available profiles, use '--profile "
                          "list'.")
                sys.exit(1)

            profile_checkers = [(args.profile, True)]

        # Resolve the final enabled/disabled state of every checker.
        config_handler.initialize_checkers(context.available_profiles,
                                           context.package_root,
                                           checkers,
                                           default_checker_cfg,
                                           profile_checkers)

        for checker_name, value in config_handler.checks().items():
            state, description = value

            # With '--profile' only the checkers enabled by that profile
            # are listed.
            if state != CheckerState.enabled and 'profile' in args:
                continue

            if state == CheckerState.enabled and 'only_disabled' in args:
                continue
            elif state != CheckerState.enabled and 'only_enabled' in args:
                continue

            # JSON gets a boolean; human-readable formats get '+' / '-'.
            if args.output_format == 'json':
                state = state == CheckerState.enabled
            else:
                state = '+' if state == CheckerState.enabled else '-'

            if args.guideline is not None:
                if not match_guideline(checker_name, args.guideline):
                    continue

            if 'details' in args:
                severity = context.severity_map.get(checker_name)
                guideline = context.guideline_map.get(checker_name, {})
                if args.output_format != 'json':
                    guideline = format_guideline(guideline)
                rows.append([state, checker_name, analyzer, severity,
                             guideline, description])
            else:
                rows.append([checker_name])

    # NOTE(review): compiler warnings are appended after the analyzer loop,
    # all mapped to "clang-diagnostic-*" names -- confirm placement outside
    # the per-analyzer loop against the upstream history.
    if 'show_warnings' in args:
        severity = context.severity_map.get('clang-diagnostic-')
        for warning in get_warnings(analyzer_environment):
            # "-Wfoo" -> "clang-diagnostic-foo" (strip the leading "-W").
            warning = 'clang-diagnostic-' + warning[2:]

            if 'guideline' in args:
                if not match_guideline(warning, args.guideline):
                    continue

            guideline = context.guideline_map.get(warning, {})
            if args.output_format != 'json':
                guideline = format_guideline(guideline)

            if 'details' in args:
                rows.append(['', warning, '-', severity, guideline, '-'])
            else:
                rows.append([warning])

    if rows:
        print(output_formatters.twodim_to_str(args.output_format,
                                              header, rows))

    for analyzer_binary, reason in errored:
        LOG.error(
            "Failed to get checkers for '%s'!"
            "The error reason was: '%s'", analyzer_binary, reason)
        LOG.error("Please check your installation and the "
                  "'config/package_layout.json' file!")
def add_arguments_to_parser(parser):
    """
    Add the subcommand's arguments to the given argparse.ArgumentParser.

    Populates the parser with output, logging, analyzer, checker and
    result-presentation option groups. The Z3, CTU and statistics groups are
    only added when the detected Clang reports the matching capability
    (checked via analyzer_types.is_*_capable()).
    """
    parser.add_argument('-o', '--output',
                        type=str,
                        dest="output_dir",
                        required=False,
                        default=argparse.SUPPRESS,
                        help="Store the analysis output in the given folder. "
                             "If it is not given then the results go into a "
                             "temporary directory which will be removed after "
                             "the analysis.")

    parser.add_argument('-t', '--type', '--output-format',
                        dest="output_format",
                        required=False,
                        choices=['plist'],
                        default='plist',
                        help="Specify the format the analysis results "
                             "should use.")

    parser.add_argument('-q', '--quiet',
                        dest="quiet",
                        action='store_true',
                        required=False,
                        default=argparse.SUPPRESS,
                        help="If specified, the build tool's and the "
                             "analyzers' output will not be printed to the "
                             "standard output.")

    parser.add_argument('--keep-gcc-include-fixed',
                        dest="keep_gcc_include_fixed",
                        required=False,
                        action='store_true',
                        default=False,
                        help="There are some implicit include paths which are "
                             "only used by GCC (include-fixed). This flag "
                             "determines whether these should be kept among "
                             "the implicit include paths.")

    parser.add_argument('--keep-gcc-intrin',
                        dest="keep_gcc_intrin",
                        required=False,
                        action='store_true',
                        default=False,
                        help="There are some implicit include paths which "
                             "contain GCC-specific header files (those "
                             "which end with intrin.h). This flag determines "
                             "whether these should be kept among the implicit "
                             "include paths. Use this flag if Clang analysis "
                             "fails with error message related to __builtin "
                             "symbols.")

    # Exactly one way of obtaining the build information must be given,
    # hence the required mutually exclusive subgroup.
    log_group = parser.add_argument_group(
        "log arguments",
        """
Specify how the build information database should be obtained. You need to
specify either an already existing log file, or a build command which will be
used to generate a log file on the fly.""")

    log_args = log_group.add_mutually_exclusive_group(required=True)

    log_args.add_argument('-b', '--build',
                          type=str,
                          dest="command",
                          default=argparse.SUPPRESS,
                          help="Execute and record a build command. Build "
                               "commands can be simple calls to 'g++' or "
                               "'clang++' or 'make', but a more complex "
                               "command, or the call of a custom script file "
                               "is also supported.")

    log_args.add_argument('-l', '--logfile',
                          type=str,
                          dest="logfile",
                          default=argparse.SUPPRESS,
                          help="Use an already existing JSON compilation "
                               "command database file specified at this "
                               "path.")

    analyzer_opts = parser.add_argument_group("analyzer arguments")

    analyzer_opts.add_argument('-j', '--jobs',
                               type=int,
                               dest="jobs",
                               required=False,
                               default=1,
                               help="Number of threads to use in analysis. "
                                    "More threads mean faster analysis at "
                                    "the cost of using more memory.")

    analyzer_opts.add_argument('-c', '--clean',
                               dest="clean",
                               required=False,
                               action='store_true',
                               default=argparse.SUPPRESS,
                               help="Delete analysis reports stored in the "
                                    "output directory. (By default, "
                                    "CodeChecker would keep reports and "
                                    "overwrites only those files that were "
                                    "updated by the current build command).")

    parser.add_argument('--compile-uniqueing',
                        type=str,
                        dest="compile_uniqueing",
                        default="none",
                        required=False,
                        help="Specify the method the compilation "
                             "actions in the compilation database are "
                             "uniqued before analysis. "
                             "CTU analysis works properly only if "
                             "there is exactly one "
                             "compilation action per source file. "
                             "none(default in non CTU mode): "
                             "no uniqueing is done. "
                             "strict: no uniqueing is done, "
                             "and an error is given if "
                             "there is more than one compilation "
                             "action for a source file. "
                             "alpha(default in CTU mode): If there is more "
                             "than one compilation action for a source "
                             "file, only the one is kept that belongs to the "
                             "alphabetically first "
                             "compilation target. "
                             "If none of the above given, "
                             "this parameter should "
                             "be a python regular expression. "
                             "If there is more than one compilation action "
                             "for a source, "
                             "only the one is kept which matches the "
                             "given python regex. If more than one "
                             "matches an error is given. "
                             "The whole compilation "
                             "action text is searched for match.")

    analyzer_opts.add_argument('--report-hash',
                               dest="report_hash",
                               default=argparse.SUPPRESS,
                               required=False,
                               choices=['context-free', 'context-free-v2'],
                               help="R|Specify the hash calculation method "
                                    "for reports. By default the calculation "
                                    "method for Clang Static Analyzer is "
                                    "context sensitive and for Clang Tidy it "
                                    "is context insensitive.\nYou can use the "
                                    "following calculation methods:\n"
                                    "- context-free: there was a bug and for "
                                    "Clang Tidy not the context free hash "
                                    "was generated (kept for backward "
                                    "compatibility).\n"
                                    "- context-free-v2: context free hash is "
                                    "used for ClangSA and Clang Tidy.\n"
                                    "See the 'issue hashes' section of the "
                                    "help message of this command below for "
                                    "more information.\n"
                                    "USE WISELY AND AT YOUR OWN RISK!")

    # A skip file and an explicit file list are mutually exclusive ways of
    # narrowing the set of analyzed sources.
    skip_mode = analyzer_opts.add_mutually_exclusive_group()
    skip_mode.add_argument('-i', '--ignore', '--skip',
                           dest="skipfile",
                           required=False,
                           default=argparse.SUPPRESS,
                           help="Path to the Skipfile dictating which project "
                                "files should be omitted from analysis. "
                                "Please consult the User guide on how a "
                                "Skipfile should be laid out.")

    skip_mode.add_argument('--file',
                           nargs='+',
                           dest="files",
                           metavar='FILE',
                           required=False,
                           default=argparse.SUPPRESS,
                           help="Analyze only the given file(s) not the whole "
                                "compilation database. Absolute directory "
                                "paths should start with '/', relative "
                                "directory paths should start with '*' and "
                                "it can contain path glob pattern. "
                                "Example: '/path/to/main.cpp', 'lib/*.cpp', "
                                "*/test*'.")

    analyzer_opts.add_argument('--analyzers',
                               nargs='+',
                               dest='analyzers',
                               metavar='ANALYZER',
                               required=False,
                               choices=analyzer_types.supported_analyzers,
                               default=argparse.SUPPRESS,
                               help="Run analysis only with the analyzers "
                                    "specified. Currently supported analyzers "
                                    "are: " +
                                    ', '.join(analyzer_types.
                                              supported_analyzers) + ".")

    analyzer_opts.add_argument('--capture-analysis-output',
                               dest='capture_analysis_output',
                               action='store_true',
                               default=argparse.SUPPRESS,
                               required=False,
                               help="Store standard output and standard error "
                                    "of successful analyzer invocations "
                                    "into the '<OUTPUT_DIR>/success' "
                                    "directory.")

    analyzer_opts.add_argument('--config',
                               dest='config_file',
                               required=False,
                               help="R|Allow the configuration from an "
                                    "explicit JSON based configuration file. "
                                    "The value of the 'analyzer' key in the "
                                    "config file will be emplaced as command "
                                    "line arguments. The format of "
                                    "configuration file is:\n"
                                    "{\n"
                                    "  \"analyzer\": [\n"
                                    "    \"--enable=core.DivideZero\",\n"
                                    "    \"--enable=core.CallAndMessage\",\n"
                                    "    \"--report-hash=context-free-v2\",\n"
                                    "    \"--verbose=debug\",\n"
                                    "    \"--skip=$HOME/project/skip.txt\",\n"
                                    "    \"--clean\"\n"
                                    "  ]\n"
                                    "}.\n"
                                    "You can use any environment variable "
                                    "inside this file and it will be "
                                    "expanded.")

    # TODO: One day, get rid of these. See Issue #36, #427.
    analyzer_opts.add_argument('--saargs',
                               dest="clangsa_args_cfg_file",
                               required=False,
                               default=argparse.SUPPRESS,
                               help="File containing argument which will be "
                                    "forwarded verbatim for the Clang Static "
                                    "analyzer.")

    analyzer_opts.add_argument('--tidyargs',
                               dest="tidy_args_cfg_file",
                               required=False,
                               default=argparse.SUPPRESS,
                               help="File containing argument which will be "
                                    "forwarded verbatim for the Clang-Tidy "
                                    "analyzer.")

    analyzer_opts.add_argument('--tidy-config',
                               dest='tidy_config',
                               required=False,
                               default=argparse.SUPPRESS,
                               help="A file in YAML format containing the "
                                    "configuration of clang-tidy checkers. "
                                    "The file can be dumped by "
                                    "'CodeChecker analyzers --dump-config "
                                    "clang-tidy' command.")

    analyzer_opts.add_argument('--analyzer-config',
                               dest='analyzer_config',
                               nargs='*',
                               default=["clang-tidy:HeaderFilterRegex=.*"],
                               help="Analyzer configuration options in the "
                                    "following format: analyzer:key=value. "
                                    "The collection of the options can be "
                                    "printed with "
                                    "'CodeChecker analyzers "
                                    "--analyzer-config'. To disable the "
                                    "default behaviour of this option you can "
                                    "use the "
                                    "'clang-tidy:take-config-from-directory="
                                    "true' option. If the file at --tidyargs "
                                    "contains a -config flag then those "
                                    "options extend these and override "
                                    "\"HeaderFilterRegex\" if any.")

    analyzer_opts.add_argument('--checker-config',
                               dest='checker_config',
                               nargs='*',
                               default=argparse.SUPPRESS,
                               help="Checker configuration options in the "
                                    "following format: analyzer:key=value. "
                                    "The collection of the options can be "
                                    "printed with "
                                    "'CodeChecker checkers --checker-config'.")

    analyzer_opts.add_argument('--timeout',
                               type=int,
                               dest='timeout',
                               required=False,
                               default=argparse.SUPPRESS,
                               help="The amount of time (in seconds) that "
                                    "each analyzer can spend, individually, "
                                    "to analyze the project. If the analysis "
                                    "of a particular file takes longer than "
                                    "this time, the analyzer is killed and "
                                    "the analysis is considered as a failed "
                                    "one.")

    context = analyzer_context.get_context()

    # The Z3 options are only offered when the bundled Clang can use the
    # Z3 solver.
    clang_has_z3 = analyzer_types.is_z3_capable(context)
    if clang_has_z3:
        analyzer_opts.add_argument('--z3',
                                   dest='enable_z3',
                                   choices=['on', 'off'],
                                   default='off',
                                   help="Enable the z3 solver backend. This "
                                        "allows reasoning over more complex "
                                        "queries, but performance is worse "
                                        "than the default range-based "
                                        "constraint solver.")

    clang_has_z3_refutation = analyzer_types.is_z3_refutation_capable(context)
    if clang_has_z3_refutation:
        # This branch only runs when refutation is available, so the
        # default is always 'on' here.
        analyzer_opts.add_argument('--z3-refutation',
                                   dest='enable_z3_refutation',
                                   choices=['on', 'off'],
                                   default='on',
                                   help="Switch on/off the Z3 SMT Solver "
                                        "backend to "
                                        "reduce false positives. The results "
                                        "of the ranged based constraint "
                                        "solver in the Clang Static Analyzer "
                                        "will be cross checked with the Z3 "
                                        "SMT solver. This should not cause "
                                        "that much of a slowdown compared to "
                                        "using the Z3 solver only.")

    if analyzer_types.is_ctu_capable(context):
        ctu_opts = parser.add_argument_group(
            "cross translation unit analysis arguments",
            """
These arguments are only available if the Clang Static Analyzer supports
Cross-TU analysis. By default, no CTU analysis is run when
'CodeChecker check' is called.""")

        ctu_modes = ctu_opts.add_mutually_exclusive_group()

        # 'ctu_phases' is a [collect, analyze] pair of booleans.
        ctu_modes.add_argument('--ctu', '--ctu-all',
                               action='store_const',
                               const=[True, True],
                               dest='ctu_phases',
                               default=argparse.SUPPRESS,
                               help="Perform Cross Translation Unit (CTU) "
                                    "analysis, both 'collect' and 'analyze' "
                                    "phases. In this mode, the extra files "
                                    "created by 'collect' are cleaned up "
                                    "after the analysis.")

        ctu_modes.add_argument('--ctu-collect',
                               action='store_const',
                               const=[True, False],
                               dest='ctu_phases',
                               default=argparse.SUPPRESS,
                               help="Perform the first, 'collect' phase of "
                                    "Cross-TU analysis. This phase generates "
                                    "extra files needed by CTU analysis, and "
                                    "puts them into '<OUTPUT_DIR>/ctu-dir'. "
                                    "NOTE: If this argument is present, "
                                    "CodeChecker will NOT execute the "
                                    "analyzers!")

        ctu_modes.add_argument('--ctu-analyze',
                               action='store_const',
                               const=[False, True],
                               dest='ctu_phases',
                               default=argparse.SUPPRESS,
                               help="Perform the second, 'analyze' phase of "
                                    "Cross-TU analysis, using already "
                                    "available extra files in "
                                    "'<OUTPUT_DIR>/ctu-dir'. (These files "
                                    "will not be cleaned up in this mode.)")

        ctu_opts.add_argument('--ctu-reanalyze-on-failure',
                              action='store_true',
                              dest='ctu_reanalyze_on_failure',
                              default=argparse.SUPPRESS,
                              help="DEPRECATED. The flag will be removed. "
                                   "If Cross-TU analysis is enabled and "
                                   "fails for some reason, try to re analyze "
                                   "the same translation unit without "
                                   "Cross-TU enabled.")

        # Only check for AST loading modes if CTU is available.
        if analyzer_types.is_ctu_on_demand_available(context):
            ctu_opts.add_argument('--ctu-ast-mode',
                                  action='store',
                                  dest='ctu_ast_mode',
                                  choices=['load-from-pch', 'parse-on-demand'],
                                  default=argparse.SUPPRESS,
                                  help="Choose the way ASTs are loaded during "
                                       "CTU analysis. Only available if CTU "
                                       "mode is enabled. Mode 'load-from-pch' "
                                       "generates PCH format serialized ASTs "
                                       "during the 'collect' phase. Mode "
                                       "'parse-on-demand' only generates the "
                                       "invocations needed to parse the ASTs. "
                                       "Mode 'load-from-pch' can use "
                                       "significant disk-space for the "
                                       "serialized ASTs, while mode "
                                       "'parse-on-demand' can incur some "
                                       "runtime CPU overhead in the second "
                                       "phase of the analysis. (default: "
                                       "parse-on-demand)")

    if analyzer_types.is_statistics_capable(context):
        stat_opts = parser.add_argument_group(
            "Statistics analysis feature arguments",
            """
These arguments are only available if the Clang Static Analyzer supports
Statistics-based analysis (e.g. statisticsCollector.ReturnValueCheck,
statisticsCollector.SpecialReturnValue checkers are available).""")

        stat_opts.add_argument('--stats-collect',
                               action='store',
                               default=argparse.SUPPRESS,
                               dest='stats_output',
                               help="Perform the first, 'collect' phase of "
                                    "Statistical analysis. This phase "
                                    "generates extra files needed by "
                                    "statistics analysis, and "
                                    "puts them into "
                                    "'<STATS_OUTPUT>'."
                                    " NOTE: If this argument is present, "
                                    "CodeChecker will NOT execute the "
                                    "analyzers!")

        stat_opts.add_argument('--stats-use',
                               action='store',
                               default=argparse.SUPPRESS,
                               dest='stats_dir',
                               help="Use the previously generated statistics "
                                    "results for the analysis from the given "
                                    "'<STATS_DIR>'.")

        stat_opts.add_argument('--stats',
                               action='store_true',
                               default=argparse.SUPPRESS,
                               dest='stats_enabled',
                               help="Perform both phases of "
                                    "Statistical analysis. This phase "
                                    "generates extra files needed by "
                                    "statistics analysis and enables "
                                    "the statistical checkers. "
                                    "No need to enable them explicitly.")

        stat_opts.add_argument('--stats-min-sample-count',
                               action='store',
                               default=10,
                               type=int,
                               dest='stats_min_sample_count',
                               help="Minimum number of samples (function call"
                                    " occurrences) to be collected"
                                    " for a statistics to be relevant.")

        stat_opts.add_argument('--stats-relevance-threshold',
                               action='store',
                               default=0.85,
                               type=float,
                               dest='stats_relevance_threshold',
                               help="The minimum ratio of calls of function "
                                    "f that must have a certain property "
                                    "property to consider it true for that "
                                    "function (calculated as calls "
                                    "with a property/all calls)."
                                    " CodeChecker will warn for"
                                    " calls of f do not have that property.")

    checkers_opts = parser.add_argument_group(
        "checker configuration",
        """
Checkers
------------------------------------------------
The analyzer performs checks that are categorized into families or "checkers".
See 'CodeChecker checkers' for the list of available checkers. You can
fine-tune which checkers to use in the analysis by setting the enabled and
disabled flags starting from the bigger groups and going inwards, e.g.
'-e core -d core.uninitialized -e core.uninitialized.Assign' will enable every
'core' checker, but only 'core.uninitialized.Assign' from the
'core.uninitialized' group. Please consult the manual for details. Disabling
certain checkers - such as the 'core' group - is unsupported by the LLVM/Clang
community, and thus discouraged.

Compiler warnings and errors
------------------------------------------------
Compiler warnings are diagnostic messages that report constructions that are
not inherently erroneous but that are risky or suggest there may have been an
error. Compiler warnings are named 'clang-diagnostic-<warning-option>', e.g.
Clang warning controlled by '-Wliteral-conversion' will be reported with check
name 'clang-diagnostic-literal-conversion'. You can fine-tune which warnings
to use in the analysis by setting the enabled and disabled flags starting from
the bigger groups and going inwards, e.g. '-e Wunused -d Wno-unused-parameter'
will enable every 'unused' warnings except 'unused-parameter'. These flags
should start with a capital 'W' or 'Wno-' prefix followed by the warning name
(E.g.: '-e Wliteral-conversion', '-d Wno-literal-conversion'). By default
'-Wall' and '-Wextra' warnings are enabled. For more information see:
https://clang.llvm.org/docs/DiagnosticsReference.html. Sometimes GCC is more
permissive than Clang, so it is possible that a specific construction doesn't
compile with Clang but compiles with GCC. These compiler errors are also
collected as CodeChecker reports as 'clang-diagnostic-error'. Note that
compiler errors and warnings are captured by CodeChecker only if it was
emitted by clang-tidy.

Profiles
------------------------------------------------
In CodeChecker there is a manual grouping of checkers. These groups are called
profiles. The collection of profiles is found in
config/checker_profile_map.json file. The goal of these profile is that you
can enable or disable checkers by these profiles. See the output of
"CodeChecker checkers --profile list" command.

Guidelines
------------------------------------------------
There are several coding guidelines like CppCoreGuideline, SEI-CERT, etc.
These are collections of best programming practices to avoid common
programming errors. Some checkers cover the rules of these guidelines. In
CodeChecker there is a mapping between guidelines and checkers. This way you
can list and enable those checkers which check the fulfillment of certain
guideline rules. See the output of "CodeChecker checkers --guideline"
command.""")

    checkers_opts.add_argument('-e', '--enable',
                               dest="enable",
                               metavar='checker/group/profile',
                               default=argparse.SUPPRESS,
                               action=OrderedCheckersAction,
                               help="Set a checker (or checker group), "
                                    "profile or guideline "
                                    "to BE USED in the analysis. In case of "
                                    "ambiguity the priority order is profile, "
                                    "guideline, checker name (e.g. security "
                                    "means the profile, not the checker "
                                    "group). Profiles and guidelines can be "
                                    "labeled: 'profile:security' or "
                                    "'guideline:sei-cert'.")

    checkers_opts.add_argument('-d', '--disable',
                               dest="disable",
                               metavar='checker/group/profile',
                               default=argparse.SUPPRESS,
                               action=OrderedCheckersAction,
                               help="Set a checker (or checker group), "
                                    "profile or guideline "
                                    "to BE PROHIBITED from use in the "
                                    "analysis. In case of "
                                    "ambiguity the priority order is profile, "
                                    "guideline, checker name (e.g. security "
                                    "means the profile, not the checker "
                                    "group). Profiles and guidelines can be "
                                    "labeled: 'profile:security' or "
                                    "'guideline:sei-cert'.")

    checkers_opts.add_argument('--enable-all',
                               dest="enable_all",
                               action='store_true',
                               required=False,
                               default=argparse.SUPPRESS,
                               help="Force the running analyzers to use "
                                    "almost every checker available. The "
                                    "checker groups 'alpha.', 'debug.', "
                                    "'osx.', 'abseil-', 'android-', "
                                    "'darwin-', 'objc-', "
                                    "'cppcoreguidelines-', 'fuchsia.', "
                                    "'fuchsia-', 'hicpp-', 'llvm-', "
                                    "'llvmlibc-', 'google-', 'zircon-', "
                                    "'osx.' (on Linux) are NOT enabled "
                                    "automatically and must be EXPLICITLY "
                                    "specified. WARNING! Enabling all "
                                    "checkers might result in the analysis "
                                    "losing precision and stability, and "
                                    "could even result in a total failure of "
                                    "the analysis. USE WISELY AND AT YOUR "
                                    "OWN RISK!")

    output_opts = parser.add_argument_group("output arguments")

    output_opts.add_argument('--print-steps',
                             dest="print_steps",
                             action="store_true",
                             required=False,
                             default=argparse.SUPPRESS,
                             help="Print the steps the analyzers took in "
                                  "finding the reported defect.")

    output_opts.add_argument('--suppress',
                             type=str,
                             dest="suppress",
                             default=argparse.SUPPRESS,
                             required=False,
                             help="Path of the suppress file to use. Records "
                                  "in the suppress file are used to suppress "
                                  "the display of certain results when "
                                  "parsing the analyses' report. (Reports to "
                                  "an analysis result can also be suppressed "
                                  "in the source code -- please consult the "
                                  "manual on how to do so.) NOTE: The "
                                  "suppress file relies on the "
                                  "\"bug identifier\" generated by the "
                                  "analyzers which is experimental, take "
                                  "care when relying on it.")

    parser.add_argument('--review-status',
                        nargs='*',
                        dest="review_status",
                        metavar='REVIEW_STATUS',
                        choices=REVIEW_STATUS_VALUES,
                        default=["confirmed", "unreviewed"],
                        help="Filter results by review statuses. Valid "
                             "values are: {0}".format(
                                 ', '.join(REVIEW_STATUS_VALUES)))

    logger.add_verbose_arguments(parser)
    parser.set_defaults(func=main)
def main(args):
    """
    List the checkers available in the specified (or all supported) analyzers
    alongside with their description or enabled status in various formats.

    When '--profile list' is given, only the available checker profiles are
    printed and the function returns early.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    # If nothing is set, list checkers for all supported analyzers.
    analyzers = args.analyzers \
        if 'analyzers' in args \
        else analyzer_types.supported_analyzers

    context = analyzer_context.get_context()
    working, errored = analyzer_types.check_supported_analyzers(analyzers,
                                                                context)

    analyzer_environment = get_check_env(context.path_env_extra,
                                         context.ld_lib_path_extra)

    analyzer_config_map = analyzer_types.build_config_handlers(args,
                                                               context,
                                                               working)

    # List available checker profiles and return early.
    if 'profile' in args and args.profile == 'list':
        # Machine-readable formats (csv, json) use snake_case headers.
        if args.output_format in ['csv', 'json']:
            header = ['profile_name'] if 'details' not in args \
                else ['profile_name', 'description']
        else:
            header = ['Profile name'] if 'details' not in args \
                else ['Profile name', 'Description']

        rows = []
        for profile, description in context.available_profiles.items():
            rows.append([profile] if 'details' not in args
                        else [profile, description])

        print(output_formatters.twodim_to_str(args.output_format,
                                              header, rows))
        return

    # Use good looking different headers based on format.
    if args.output_format in ['csv', 'json']:
        header = ['name'] if 'details' not in args \
            else ['enabled', 'name', 'analyzer', 'severity', 'description']
    else:
        header = ['Name'] if 'details' not in args \
            else ['', 'Name', 'Analyzer', 'Severity', 'Description']

    rows = []
    for analyzer in working:
        config_handler = analyzer_config_map.get(analyzer)
        analyzer_class = analyzer_types.supported_analyzers[analyzer]

        checkers = analyzer_class.get_analyzer_checkers(config_handler,
                                                        analyzer_environment)
        default_checker_cfg = context.checker_config.get(
            analyzer + '_checkers')

        profile_checkers = None
        if 'profile' in args:
            if args.profile not in context.available_profiles:
                LOG.error("Checker profile '%s' does not exist!",
                          args.profile)
                LOG.error("To list available profiles, use '--profile "
                          "list'.")
                sys.exit(1)

            profile_checkers = [(args.profile, True)]

        config_handler.initialize_checkers(context.available_profiles,
                                           context.package_root,
                                           checkers,
                                           default_checker_cfg,
                                           profile_checkers)

        for checker_name, value in config_handler.checks().items():
            enabled, description = value

            # With an explicit profile only the enabled checkers are shown;
            # --only-disabled / --only-enabled filter the rest.
            if not enabled and 'profile' in args:
                continue
            if enabled and 'only_disabled' in args:
                continue
            elif not enabled and 'only_enabled' in args:
                continue

            if args.output_format != 'json':
                enabled = '+' if enabled else '-'

            if 'details' not in args:
                rows.append([checker_name])
            else:
                severity = context.severity_map.get(checker_name)
                rows.append([enabled, checker_name, analyzer,
                             severity, description])

    if rows:
        print(output_formatters.twodim_to_str(args.output_format,
                                              header, rows))

    for analyzer_binary, reason in errored:
        # Missing space between the two literals was fixed here.
        LOG.error("Failed to get checkers for '%s'! "
                  "The error reason was: '%s'", analyzer_binary, reason)
        LOG.error("Please check your installation and the "
                  "'config/package_layout.json' file!")
import re import shutil import sys from codechecker_analyzer import analyzer, analyzer_context, env from codechecker_analyzer.analyzers import analyzer_types, clangsa from codechecker_analyzer.arg import OrderedCheckersAction from codechecker_analyzer.buildlog import log_parser from codechecker_common import arg, logger, skiplist_handler, cmd_config from codechecker_common.util import load_json_or_empty LOG = logger.get_logger('system') _data_files_dir_path = analyzer_context.get_context().data_files_dir_path _severity_map_file = os.path.join(_data_files_dir_path, 'config', 'checker_severity_map.json') epilog_env_var = f""" CC_ANALYZERS_FROM_PATH Set to `yes` or `1` to enforce taking the analyzers from the `PATH` instead of the given binaries. CC_CLANGSA_PLUGIN_DIR If the CC_ANALYZERS_FROM_PATH environment variable is set you can configure the plugin directory of the Clang Static Analyzer by using this environment variable. CC_SEVERITY_MAP_FILE Path of the checker-severity mapping config file. Default: {_severity_map_file} """ epilog_issue_hashes = """
def main(args):
    """
    Perform analysis on the given logfiles and store the results in a
    machine-readable format.

    Validates the arguments, prepares the output directory, parses and
    uniques the compilation database, runs the analyzers, and finally writes
    the run metadata ('metadata.json') and a copy of the compilation
    database ('compile_cmd.json') next to the results.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    check_config_file(args)

    if not os.path.exists(args.logfile):
        LOG.error("The specified logfile '%s' does not exist!", args.logfile)
        sys.exit(1)

    args.output_path = os.path.abspath(args.output_path)
    if os.path.exists(args.output_path) and \
            not os.path.isdir(args.output_path):
        LOG.error("The given output path is not a directory: %s",
                  args.output_path)
        sys.exit(1)

    if 'enable_all' in args:
        LOG.info("'--enable-all' was supplied for this analysis.")

    # We clear the output directory in the following cases.
    ctu_dir = os.path.join(args.output_path, 'ctu-dir')
    if 'ctu_phases' in args and args.ctu_phases[0] and \
            os.path.isdir(ctu_dir):
        # Clear the CTU-dir if the user turned on the collection phase.
        LOG.debug("Previous CTU contents have been deleted.")
        shutil.rmtree(ctu_dir)

    if 'clean' in args and os.path.isdir(args.output_path):
        LOG.info("Previous analysis results in '%s' have been removed, "
                 "overwriting with current result", args.output_path)
        shutil.rmtree(args.output_path)

    if not os.path.exists(args.output_path):
        os.makedirs(args.output_path)

    # Use lazy %-style log arguments instead of eager concatenation.
    LOG.debug("args: %s", args)
    LOG.debug("Output will be stored to: '%s'", args.output_path)

    config_option_re = re.compile(r'^({}):.+=.+$'.format(
        '|'.join(analyzer_types.supported_analyzers)))

    # Check the format of analyzer options.
    if 'analyzer_config' in args:
        for config in args.analyzer_config:
            if not config_option_re.match(config):
                LOG.error("Analyzer option in wrong format: %s", config)
                sys.exit(1)

    # Check the format of checker options.
    if 'checker_config' in args:
        for config in args.checker_config:
            if not config_option_re.match(config):
                LOG.error("Checker option in wrong format: %s", config)
                sys.exit(1)

    compile_commands = load_json_or_empty(args.logfile, default={})

    # Process the skip list if present.
    skip_handler = __get_skip_handler(args, compile_commands)

    # Enable alpha uniqueing by default if ctu analysis is used.
    if 'none' in args.compile_uniqueing and 'ctu_phases' in args:
        args.compile_uniqueing = "alpha"

    compiler_info_file = None
    if 'compiler_info_file' in args:
        LOG.debug("Compiler info is read from: %s", args.compiler_info_file)
        if not os.path.exists(args.compiler_info_file):
            LOG.error("Compiler info file %s does not exist",
                      args.compiler_info_file)
            sys.exit(1)
        compiler_info_file = args.compiler_info_file

    ctu_or_stats_enabled = False
    # Skip list is applied only in pre-analysis
    # if --ctu-collect was called explicitly.
    pre_analysis_skip_handler = None
    if 'ctu_phases' in args:
        ctu_collect = args.ctu_phases[0]
        ctu_analyze = args.ctu_phases[1]
        if ctu_collect and not ctu_analyze:
            pre_analysis_skip_handler = skip_handler

        if ctu_collect or ctu_analyze:
            ctu_or_stats_enabled = True

    # Skip list is applied only in pre-analysis
    # if --stats-collect was called explicitly.
    if 'stats_output' in args and args.stats_output:
        pre_analysis_skip_handler = skip_handler
        ctu_or_stats_enabled = True

    if 'stats_enabled' in args and args.stats_enabled:
        ctu_or_stats_enabled = True

    context = analyzer_context.get_context()
    analyzer_env = env.extend(context.path_env_extra,
                              context.ld_lib_path_extra)

    # Number of all the compilation commands in the parsed log files,
    # logged by the logger.
    all_cmp_cmd_count = len(compile_commands)

    actions, skipped_cmp_cmd_count = log_parser.parse_unique_log(
        compile_commands,
        args.output_path,
        args.compile_uniqueing,
        compiler_info_file,
        args.keep_gcc_include_fixed,
        args.keep_gcc_intrin,
        skip_handler,
        pre_analysis_skip_handler,
        ctu_or_stats_enabled,
        analyzer_env)

    if not actions:
        LOG.info("No analysis is required.\nThere were no compilation "
                 "commands in the provided compilation database or "
                 "all of them were skipped.")
        sys.exit(0)

    uniqued_compilation_db_file = os.path.join(
        args.output_path, "unique_compile_commands.json")
    with open(uniqued_compilation_db_file, 'w',
              encoding="utf-8", errors="ignore") as f:
        json.dump(actions, f, cls=log_parser.CompileCommandEncoder)

    metadata = {
        'version': 2,
        'tools': [{
            'name': 'codechecker',
            'action_num': len(actions),
            'command': sys.argv,
            'version': "{0} ({1})".format(context.package_git_tag,
                                          context.package_git_hash),
            'working_directory': os.getcwd(),
            'output_path': args.output_path,
            'result_source_files': {},
            'analyzers': {}
        }]
    }
    metadata_tool = metadata['tools'][0]

    if 'name' in args:
        metadata_tool['run_name'] = args.name

    # Update metadata dictionary with old values.
    metadata_file = os.path.join(args.output_path, 'metadata.json')
    metadata_prev = None
    if os.path.exists(metadata_file):
        metadata_prev = load_json_or_empty(metadata_file)
        metadata_tool['result_source_files'] = \
            __get_result_source_files(metadata_prev)

    CompileCmdParseCount = \
        collections.namedtuple('CompileCmdParseCount',
                               'total, analyze, skipped, '
                               'removed_by_uniqueing')
    cmp_cmd_to_be_uniqued = all_cmp_cmd_count - skipped_cmp_cmd_count

    # Number of compile commands removed during uniqueing.
    removed_during_uniqueing = cmp_cmd_to_be_uniqued - len(actions)

    all_to_be_analyzed = cmp_cmd_to_be_uniqued - removed_during_uniqueing

    compile_cmd_count = CompileCmdParseCount(
        total=all_cmp_cmd_count,
        analyze=all_to_be_analyzed,
        skipped=skipped_cmp_cmd_count,
        removed_by_uniqueing=removed_during_uniqueing)

    LOG.debug_analyzer("Total number of compile commands without "
                       "skipping or uniqueing: %d", compile_cmd_count.total)
    LOG.debug_analyzer("Compile commands removed by uniqueing: %d",
                       compile_cmd_count.removed_by_uniqueing)
    LOG.debug_analyzer("Compile commands skipped during log processing: %d",
                       compile_cmd_count.skipped)
    LOG.debug_analyzer("Compile commands forwarded for analysis: %d",
                       compile_cmd_count.analyze)

    analyzer.perform_analysis(args, skip_handler, context, actions,
                              metadata_tool, compile_cmd_count)

    __update_skip_file(args)
    __cleanup_metadata(metadata_prev, metadata)

    LOG.debug("Analysis metadata write to '%s'", metadata_file)
    with open(metadata_file, 'w',
              encoding="utf-8", errors="ignore") as metafile:
        json.dump(metadata, metafile)

    # WARN: store command will search for this file!!!!
    compile_cmd_json = os.path.join(args.output_path, 'compile_cmd.json')
    try:
        source = os.path.abspath(args.logfile)
        target = os.path.abspath(compile_cmd_json)

        if source != target:
            shutil.copyfile(source, target)
    except shutil.Error:
        LOG.debug("Compilation database JSON file is the same.")
    except Exception:
        LOG.debug("Copying compilation database JSON file failed.")

    try:
        # Usage statistics collection is best-effort and optional; a missing
        # module or a failure here must never break the analysis itself.
        # pylint: disable=no-name-in-module
        from codechecker_analyzer import analyzer_statistics
        analyzer_statistics.collect(metadata, "analyze")
    except Exception:
        pass
def main(args):
    """
    Entry point for parsing some analysis results and printing them to the
    stdout in a human-readable format.

    Exits with 1 on configuration/input errors and with 2 when reports were
    found (or when '--export-source-suppress' is given without '--suppress').
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    # Validate the optional analyzer configuration file before doing any work.
    try:
        cmd_config.check_config_file(args)
    except FileNotFoundError as fnerr:
        LOG.error(fnerr)
        sys.exit(1)

    # 'export' is only present when the user asked for a converted output
    # format; HTML export additionally requires an output directory.
    export = args.export if 'export' in args else None
    if export == 'html' and 'output_path' not in args:
        LOG.error("Argument --export not allowed without argument --output "
                  "when exporting to HTML.")
        sys.exit(1)

    # Gerrit output is configured purely through environment variables.
    if export == 'gerrit' and not gerrit.mandatory_env_var_is_set():
        sys.exit(1)

    context = analyzer_context.get_context()

    # To ensure the help message prints the default folder properly,
    # the 'default' for 'args.input' is a string, not a list.
    # But we need lists for the foreach here to work.
    if isinstance(args.input, str):
        args.input = [args.input]

    # Parsing may chdir into each result directory's recorded working
    # directory; remember where we started so we can return between inputs.
    original_cwd = os.getcwd()

    src_comment_status_filter = args.review_status

    # Build a suppress handler when a suppress file was given, creating the
    # file first if '--export-source-suppress' asks us to write into it.
    suppr_handler = None
    if 'suppress' in args:
        __make_handler = False
        if not os.path.isfile(args.suppress):
            if 'create_suppress' in args:
                with open(args.suppress, 'w',
                          encoding='utf-8', errors='ignore') as _:
                    # Just create the file.
                    __make_handler = True
                    LOG.info("Will write source-code suppressions to "
                             "suppress file: %s", args.suppress)
            else:
                LOG.warning("Suppress file '%s' given, but it does not exist"
                            " -- will not suppress anything.", args.suppress)
        else:
            __make_handler = True

        if __make_handler:
            suppr_handler = suppress_handler.\
                GenericSuppressHandler(args.suppress,
                                       'create_suppress' in args,
                                       src_comment_status_filter)
    elif 'create_suppress' in args:
        LOG.error("Can't use '--export-source-suppress' unless '--suppress "
                  "SUPPRESS_FILE' is also given.")
        sys.exit(2)

    # Path hashes of already-printed reports; used to deduplicate reports
    # that occur in more than one plist file.
    processed_path_hashes = set()

    skip_handler = None
    if 'skipfile' in args:
        with open(args.skipfile, 'r',
                  encoding='utf-8', errors='ignore') as skip_file:
            skip_handler = SkipListHandler(skip_file.read())

    trim_path_prefixes = args.trim_path_prefix if \
        'trim_path_prefix' in args else None

    if export:
        if export not in EXPORT_TYPES:
            LOG.error(f"Unknown export format: {export}")
            return

        # The HTML part will be handled separately below.
        if export != 'html':
            try:
                res = parse_convert_reports(args.input,
                                            export,
                                            context.severity_map,
                                            trim_path_prefixes)
                if 'output_path' in args:
                    output_path = os.path.abspath(args.output_path)

                    if not os.path.exists(output_path):
                        os.mkdir(output_path)

                    reports_json = os.path.join(output_path, 'reports.json')
                    with open(reports_json,
                              mode='w',
                              encoding='utf-8',
                              errors="ignore") as output_f:
                        output_f.write(json.dumps(res))

                # The converted result also goes to the standard output.
                return print(json.dumps(res))
            except Exception as ex:
                LOG.error(ex)
                sys.exit(1)

    def trim_path_prefixes_handler(source_file):
        """
        Callback to util.trim_path_prefixes to prevent module dependency
        of plist_to_html
        """
        return util.trim_path_prefixes(source_file, trim_path_prefixes)

    html_builder = None

    def skip_html_report_data_handler(report_hash, source_file, report_line,
                                      checker_name, diag, files):
        """
        Report handler which skips bugs which were suppressed by source code
        comments. This function will return a tuple. The first element
        will decide whether the report should be skipped or not and the second
        element will be a list of source code comments related to the actual
        report.
        """
        files_dict = {k: v for k, v in enumerate(files)}
        report = Report({'check_name': checker_name},
                        diag['path'],
                        files_dict,
                        metadata=None)
        path_hash = get_report_path_hash(report)
        if path_hash in processed_path_hashes:
            LOG.debug("Skip report because it is a deduplication of an "
                      "already processed report!")
            LOG.debug("Path hash: %s", path_hash)
            LOG.debug(diag)
            return True, []

        skip, source_code_comments = skip_report(report_hash,
                                                 source_file,
                                                 report_line,
                                                 checker_name,
                                                 suppr_handler,
                                                 src_comment_status_filter)

        if skip_handler:
            skip |= skip_handler.should_skip(source_file)

        if not skip:
            processed_path_hashes.add(path_hash)

        return skip, source_code_comments

    # Aggregated over every input path.
    file_change = set()
    severity_stats = defaultdict(int)
    file_stats = defaultdict(int)
    report_count = 0

    for input_path in args.input:
        input_path = os.path.abspath(input_path)
        # Reset the working directory: a previous iteration may have
        # chdir'd into a result directory's recorded working directory.
        os.chdir(original_cwd)
        LOG.debug("Parsing input argument: '%s'", input_path)

        if export == 'html':
            output_path = os.path.abspath(args.output_path)

            # The HtmlBuilder is shared across inputs so index/statistics
            # pages can be generated once at the end.
            if not html_builder:
                html_builder = \
                    PlistToHtml.HtmlBuilder(context.path_plist_to_html_dist,
                                            context.severity_map)

            LOG.info("Generating html output files:")
            PlistToHtml.parse(input_path,
                              output_path,
                              context.path_plist_to_html_dist,
                              skip_html_report_data_handler,
                              html_builder,
                              trim_path_prefixes_handler)
            continue

        # Collect the plist files to process: either a single file, or
        # every file directly inside the given result directory.
        files = []
        metadata_dict = {}
        if os.path.isfile(input_path):
            files.append(input_path)
        elif os.path.isdir(input_path):
            metadata_file = os.path.join(input_path, "metadata.json")
            if os.path.exists(metadata_file):
                metadata_dict = util.load_json_or_empty(metadata_file)
                LOG.debug(metadata_dict)

                if 'working_directory' in metadata_dict:
                    working_dir = metadata_dict['working_directory']
                    try:
                        os.chdir(working_dir)
                    except OSError as oerr:
                        LOG.debug(oerr)
                        LOG.error("Working directory %s is missing.\n"
                                  "Can not parse reports safely.", working_dir)
                        sys.exit(1)

            _, _, file_names = next(os.walk(input_path), ([], [], []))
            files = [os.path.join(input_path, file_name)
                     for file_name in file_names]

        file_report_map = defaultdict(list)

        plist_pltf = PlistToPlaintextFormatter(suppr_handler,
                                               skip_handler,
                                               context.severity_map,
                                               processed_path_hashes,
                                               trim_path_prefixes,
                                               src_comment_status_filter)
        plist_pltf.print_steps = 'print_steps' in args

        for file_path in files:
            # Parse a plist file, collecting which source files changed
            # since the analysis was run.
            f_change = parse_with_plt_formatter(file_path,
                                                metadata_dict,
                                                plist_pltf,
                                                file_report_map)
            file_change = file_change.union(f_change)

        # Fold this input's statistics into the run-wide totals.
        report_stats = plist_pltf.write(file_report_map)
        sev_stats = report_stats.get('severity')
        for severity in sev_stats:
            severity_stats[severity] += sev_stats[severity]

        f_stats = report_stats.get('files')
        for file_path in f_stats:
            file_stats[file_path] += f_stats[file_path]

        rep_stats = report_stats.get('reports')
        report_count += rep_stats.get("report_count", 0)

    # Create index.html and statistics.html for the generated html files.
    if html_builder:
        html_builder.create_index_html(args.output_path)
        html_builder.create_statistics_html(args.output_path)

        print('\nTo view statistics in a browser run:\n> firefox {0}'.format(
            os.path.join(args.output_path, 'statistics.html')))

        print('\nTo view the results in a browser run:\n> firefox {0}'.format(
            os.path.join(args.output_path, 'index.html')))
    else:
        # Plain-text mode: print per-file and per-severity summary tables.
        print("\n----==== Summary ====----")
        if file_stats:
            vals = [[os.path.basename(k), v] for k, v
                    in dict(file_stats).items()]
            vals.sort(key=itemgetter(0))
            keys = ['Filename', 'Report count']
            table = twodim.to_str('table', keys, vals, 1, True)
            print(table)

        if severity_stats:
            vals = [[k, v] for k, v in dict(severity_stats).items()]
            vals.sort(key=itemgetter(0))
            keys = ['Severity', 'Report count']
            table = twodim.to_str('table', keys, vals, 1, True)
            print(table)

        print("----=================----")
        print("Total number of reports: {}".format(report_count))
        print("----=================----")

    if file_change:
        changed_files = '\n'.join([' - ' + f for f in file_change])
        LOG.warning("The following source file contents changed since the "
                    "latest analysis:\n%s\nMultiple reports were not "
                    "shown and skipped from the statistics. Please "
                    "analyze your project again to update the "
                    "reports!", changed_files)

    os.chdir(original_cwd)

    # Non-zero exit when any report was printed, so 'parse' can gate CI jobs.
    if report_count != 0:
        sys.exit(2)
def add_arguments_to_parser(parser):
    """
    Add the subcommand's arguments to the given argparse.ArgumentParser.

    Registers the positional 'logfile' argument, the general analysis
    options, the per-analyzer option group, the (capability-gated) CTU and
    statistics groups and the checker enable/disable group, then wires the
    parser to this module's main() via set_defaults.
    """

    parser.add_argument('logfile',
                        type=str,
                        nargs='+',
                        help="Path to the JSON compilation command database "
                             "files which were created during the build. "
                             "The analyzers will check only the files "
                             "registered in these build databases.")

    parser.add_argument('-j', '--jobs',
                        type=int,
                        dest="jobs",
                        required=False,
                        default=1,
                        help="Number of threads to use in analysis. More "
                             "threads mean faster analysis at the cost of "
                             "using more memory.")

    parser.add_argument('-i', '--ignore', '--skip',
                        dest="skipfile",
                        required=False,
                        default=argparse.SUPPRESS,
                        help="Path to the Skipfile dictating which project "
                             "files should be omitted from analysis. Please "
                             "consult the User guide on how a Skipfile "
                             "should be laid out.")

    parser.add_argument('-o', '--output',
                        dest="output_path",
                        required=True,
                        default=argparse.SUPPRESS,
                        help="Store the analysis output in the given folder.")

    parser.add_argument('--compiler-info-file',
                        dest="compiler_info_file",
                        required=False,
                        default=argparse.SUPPRESS,
                        help="Read the compiler includes and target from the "
                             "specified file rather than invoke the compiler "
                             "executable.")

    parser.add_argument('-t', '--type', '--output-format',
                        dest="output_format",
                        required=False,
                        choices=['plist'],
                        default='plist',
                        help="Specify the format the analysis results should "
                             "use.")

    parser.add_argument('-q', '--quiet',
                        dest="quiet",
                        action='store_true',
                        default=argparse.SUPPRESS,
                        required=False,
                        help="Do not print the output or error of the "
                             "analyzers to the standard output of "
                             "CodeChecker.")

    parser.add_argument('-c', '--clean',
                        dest="clean",
                        required=False,
                        action='store_true',
                        default=argparse.SUPPRESS,
                        help="Delete analysis reports stored in the output "
                             "directory. (By default, CodeChecker would keep "
                             "reports and overwrites only those files that "
                             "were updated by the current build command).")

    parser.add_argument('--compile-uniqueing',
                        type=str,
                        dest="compile_uniqueing",
                        default="none",
                        required=False,
                        help="Specify the method the compilation "
                             "actions in the compilation database are "
                             "uniqued before analysis. "
                             "CTU analysis works properly only if "
                             "there is exactly one "
                             "compilation action per source file. "
                             "none(default in non CTU mode): "
                             "no uniqueing is done. "
                             "strict: no uniqueing is done, "
                             "and an error is given if "
                             "there is more than one compilation "
                             "action for a source file. "
                             "alpha(default in CTU mode): If there is more "
                             "than one compilation action for a source "
                             "file, only the one is kept that belongs to the "
                             "alphabetically first "
                             "compilation target. "
                             "If none of the above given, "
                             "this parameter should "
                             "be a python regular expression."
                             "If there is more than one compilation action "
                             "for a source, "
                             "only the one is kept which matches the "
                             "given python regex. If more than one "
                             "matches an error is given. "
                             "The whole compilation "
                             "action text is searched for match.")

    parser.add_argument('--report-hash',
                        dest="report_hash",
                        default=argparse.SUPPRESS,
                        required=False,
                        choices=['context-free'],
                        help="EXPERIMENTAL feature. "
                             "Specify the hash calculation method for "
                             "reports. If this option is not set, the default "
                             "calculation method for Clang Static Analyzer "
                             "will be context sensitive and for Clang Tidy it "
                             "will be context insensitive. If this option is "
                             "set to 'context-free' bugs will be identified "
                             "with the CodeChecker generated context free "
                             "hash for every analyzers. USE WISELY AND AT "
                             "YOUR OWN RISK!")

    parser.add_argument('-n', '--name',
                        dest="name",
                        required=False,
                        default=argparse.SUPPRESS,
                        help="Annotate the run analysis with a custom name in "
                             "the created metadata file.")

    analyzer_opts = parser.add_argument_group("analyzer arguments")

    analyzer_opts.add_argument('--analyzers',
                               nargs='+',
                               dest='analyzers',
                               metavar='ANALYZER',
                               required=False,
                               choices=analyzer_types.supported_analyzers,
                               default=argparse.SUPPRESS,
                               help="Run analysis only with the analyzers "
                                    "specified. Currently supported analyzers "
                                    "are: " +
                                    ', '.join(analyzer_types.
                                              supported_analyzers) + ".")

    analyzer_opts.add_argument('--add-compiler-defaults',
                               action='store_true',
                               required=False,
                               default=argparse.SUPPRESS,
                               help="DEPRECATED. Always True. Retrieve "
                                    "compiler-specific configuration "
                                    "from the compilers themselves, and use "
                                    "them with Clang. This is used when the "
                                    "compiler on the system is special, e.g. "
                                    "when doing cross-compilation.")

    analyzer_opts.add_argument('--capture-analysis-output',
                               dest='capture_analysis_output',
                               action='store_true',
                               default=argparse.SUPPRESS,
                               required=False,
                               help="Store standard output and standard error "
                                    "of successful analyzer invocations "
                                    "into the '<OUTPUT_DIR>/success' "
                                    "directory.")

    analyzer_opts.add_argument('--saargs',
                               dest="clangsa_args_cfg_file",
                               required=False,
                               default=argparse.SUPPRESS,
                               help="File containing argument which will be "
                                    "forwarded verbatim for the Clang Static "
                                    "Analyzer.")

    analyzer_opts.add_argument('--tidyargs',
                               dest="tidy_args_cfg_file",
                               required=False,
                               default=argparse.SUPPRESS,
                               help="File containing argument which will be "
                                    "forwarded verbatim for Clang-Tidy.")

    analyzer_opts.add_argument('--tidy-config',
                               dest='tidy_config',
                               required=False,
                               default=argparse.SUPPRESS,
                               help="A file in YAML format containing the "
                                    "configuration of clang-tidy checkers. "
                                    "The file can be dumped by "
                                    "'CodeChecker analyzers --dump-config "
                                    "clang-tidy' command.")

    analyzer_opts.add_argument('--timeout',
                               type=int,
                               dest='timeout',
                               required=False,
                               default=argparse.SUPPRESS,
                               help="The amount of time (in seconds) that "
                                    "each analyzer can spend, individually, "
                                    "to analyze the project. If the analysis "
                                    "of a particular file takes longer than "
                                    "this time, the analyzer is killed and "
                                    "the analysis is considered as a failed "
                                    "one.")

    context = analyzer_context.get_context()

    # CTU options are only offered when the available Clang supports
    # Cross Translation Unit analysis.
    if analyzer_types.is_ctu_capable(context):
        ctu_opts = parser.add_argument_group(
            "cross translation unit analysis arguments",
            """
These arguments are only available if the Clang Static Analyzer supports
Cross-TU analysis. By default, no CTU analysis is run when 'CodeChecker
analyze' is called.""")

        ctu_modes = ctu_opts.add_mutually_exclusive_group()
        ctu_modes.add_argument('--ctu', '--ctu-all',
                               action='store_const',
                               const=[True, True],
                               dest='ctu_phases',
                               default=argparse.SUPPRESS,
                               help="Perform Cross Translation Unit (CTU) "
                                    "analysis, both 'collect' and 'analyze' "
                                    "phases. In this mode, the extra files "
                                    "created by 'collect' are cleaned up "
                                    "after the analysis.")

        ctu_modes.add_argument('--ctu-collect',
                               action='store_const',
                               const=[True, False],
                               dest='ctu_phases',
                               default=argparse.SUPPRESS,
                               help="Perform the first, 'collect' phase of "
                                    "Cross-TU analysis. This phase generates "
                                    "extra files needed by CTU analysis, and "
                                    "puts them into '<OUTPUT_DIR>/ctu-dir'. "
                                    "NOTE: If this argument is present, "
                                    "CodeChecker will NOT execute the "
                                    "analyzers!")

        ctu_modes.add_argument('--ctu-analyze',
                               action='store_const',
                               const=[False, True],
                               dest='ctu_phases',
                               default=argparse.SUPPRESS,
                               help="Perform the second, 'analyze' phase of "
                                    "Cross-TU analysis, using already "
                                    "available extra files in "
                                    "'<OUTPUT_DIR>/ctu-dir'. (These files "
                                    "will not be cleaned up in this mode.)")

        ctu_opts.add_argument('--ctu-reanalyze-on-failure',
                              action='store_true',
                              dest='ctu_reanalyze_on_failure',
                              default=argparse.SUPPRESS,
                              help="If Cross-TU analysis is enabled and fails "
                                   "for some reason, try to re analyze the "
                                   "same translation unit without "
                                   "Cross-TU enabled.")

    # Statistics options are only offered when the statistics-collector
    # checkers are available in the analyzer package.
    if analyzer_types.is_statistics_capable(context):
        stat_opts = parser.add_argument_group(
            "EXPERIMENTAL statistics analysis feature arguments",
            """
These arguments are only available if the Clang Static Analyzer supports
Statistics-based analysis (e.g. statisticsCollector.ReturnValueCheck,
statisticsCollector.SpecialReturnValue checkers are available).""")

        # NOTE: the option strings were previously duplicated
        # ('--stats-collect', '--stats-collect') which registered the same
        # flag twice; each flag is now listed once.
        stat_opts.add_argument('--stats-collect',
                               action='store',
                               default=argparse.SUPPRESS,
                               dest='stats_output',
                               help="EXPERIMENTAL feature. "
                                    "Perform the first, 'collect' phase of "
                                    "Statistical analysis. This phase "
                                    "generates extra files needed by "
                                    "statistics analysis, and "
                                    "puts them into "
                                    "'<STATS_OUTPUT>'."
                                    " NOTE: If this argument is present, "
                                    "CodeChecker will NOT execute the "
                                    "analyzers!")

        stat_opts.add_argument('--stats-use',
                               action='store',
                               default=argparse.SUPPRESS,
                               dest='stats_dir',
                               help="EXPERIMENTAL feature. "
                                    "Use the previously generated statistics "
                                    "results for the analysis from the given "
                                    "'<STATS_DIR>'.")

        stat_opts.add_argument('--stats',
                               action='store_true',
                               default=argparse.SUPPRESS,
                               dest='stats_enabled',
                               help="EXPERIMENTAL feature. "
                                    "Perform both phases of "
                                    "Statistical analysis. This phase "
                                    "generates extra files needed by "
                                    "statistics analysis and enables "
                                    "the statistical checkers. "
                                    "No need to enable them explicitly.")

        stat_opts.add_argument('--stats-min-sample-count',
                               action='store',
                               default="10",
                               type=int,
                               dest='stats_min_sample_count',
                               help="EXPERIMENTAL feature. "
                                    "Minimum number of samples (function call"
                                    " occurrences) to be collected"
                                    " for a statistics to be relevant "
                                    "'<MIN-SAMPLE-COUNT>'.")

        stat_opts.add_argument('--stats-relevance-threshold',
                               action='store',
                               default="0.85",
                               type=float,
                               dest='stats_relevance_threshold',
                               help="EXPERIMENTAL feature. "
                                    "The minimum ratio of calls of function "
                                    "f that must have a certain property "
                                    "property to consider it true for that "
                                    "function (calculated as calls "
                                    "with a property/all calls)."
                                    " CodeChecker will warn for"
                                    " calls of f do not have that property."
                                    "'<RELEVANCE_THRESHOLD>'.")

    checkers_opts = parser.add_argument_group(
        "checker configuration",
        """
Checkers
------------------------------------------------
The analyzer performs checks that are categorized into families or "checkers".
See 'CodeChecker checkers' for the list of available checkers. You can
fine-tune which checkers to use in the analysis by setting the enabled and
disabled flags starting from the bigger groups and going inwards, e.g.
'-e core -d core.uninitialized -e core.uninitialized.Assign' will enable every
'core' checker, but only 'core.uninitialized.Assign' from the
'core.uninitialized' group. Please consult the manual for details. Disabling
certain checkers - such as the 'core' group - is unsupported by the LLVM/Clang
community, and thus discouraged.

Compiler warnings
------------------------------------------------
Compiler warnings are diagnostic messages that report constructions that are
not inherently erroneous but that are risky or suggest there may have been an
error. Compiler warnings are named 'clang-diagnostic-<warning-option>', e.g.
Clang warning controlled by '-Wliteral-conversion' will be reported with check
name 'clang-diagnostic-literal-conversion'. You can fine-tune which warnings to
use in the analysis by setting the enabled and disabled flags starting from the
bigger groups and going inwards, e.g. '-e Wunused -d Wno-unused-parameter' will
enable every 'unused' warnings except 'unused-parameter'. These flags should
start with a capital 'W' or 'Wno-' prefix followed by the warning name (E.g.:
'-e Wliteral-conversion', '-d Wno-literal-conversion'). By default '-Wall' and
'-Wextra' warnings are enabled. For more information see:
https://clang.llvm.org/docs/DiagnosticsReference.html.""")

    checkers_opts.add_argument('-e', '--enable',
                               dest="enable",
                               metavar='checker/group/profile',
                               default=argparse.SUPPRESS,
                               action=OrderedCheckersAction,
                               help="Set a checker (or checker group) "
                                    "to BE USED in the analysis.")

    checkers_opts.add_argument('-d', '--disable',
                               dest="disable",
                               metavar='checker/group/profile',
                               default=argparse.SUPPRESS,
                               action=OrderedCheckersAction,
                               help="Set a checker (or checker group) "
                                    "to BE PROHIBITED from use in the "
                                    "analysis.")

    checkers_opts.add_argument('--enable-all',
                               dest="enable_all",
                               action='store_true',
                               required=False,
                               default=argparse.SUPPRESS,
                               help="Force the running analyzers to use "
                                    "almost every checker available. The "
                                    "checker groups 'alpha.', 'debug.' and "
                                    "'osx.' (on Linux) are NOT enabled "
                                    "automatically and must be EXPLICITLY "
                                    "specified. WARNING! Enabling all "
                                    "checkers might result in the analysis "
                                    "losing precision and stability, and "
                                    "could even result in a total failure of "
                                    "the analysis. USE WISELY AND AT YOUR "
                                    "OWN RISK!")

    logger.add_verbose_arguments(parser)
    parser.set_defaults(func=main)
def main(args):
    """
    Perform analysis on the given logfiles and store the results in a machine-
    readable format.

    Reads the compilation databases in args.logfile, runs the analyzers on
    the resulting actions and writes the results plus a 'metadata.json' and
    a 'compile_cmd.json' copy into args.output_path. Exits with 1 on invalid
    input or when no compilation command could be parsed.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    if len(args.logfile) != 1:
        LOG.warning("Only one log file can be processed right now!")
        sys.exit(1)

    args.output_path = os.path.abspath(args.output_path)
    if os.path.exists(args.output_path) and \
            not os.path.isdir(args.output_path):
        LOG.error("The given output path is not a directory: " +
                  args.output_path)
        sys.exit(1)

    if 'enable_all' in args:
        LOG.info("'--enable-all' was supplied for this analysis.")

    # We clear the output directory in the following cases.
    ctu_dir = os.path.join(args.output_path, 'ctu-dir')
    if 'ctu_phases' in args and args.ctu_phases[0] and \
            os.path.isdir(ctu_dir):
        # Clear the CTU-dir if the user turned on the collection phase.
        LOG.debug("Previous CTU contents have been deleted.")
        shutil.rmtree(ctu_dir)

    if 'clean' in args and os.path.isdir(args.output_path):
        LOG.info("Previous analysis results in '%s' have been removed, "
                 "overwriting with current result", args.output_path)
        shutil.rmtree(args.output_path)

    if not os.path.exists(args.output_path):
        os.makedirs(args.output_path)

    LOG.debug("args: " + str(args))
    LOG.debug("Output will be stored to: '" + args.output_path + "'")

    # Process the skip list if present.
    skip_handler = __get_skip_handler(args)

    # Enable alpha uniqueing by default if ctu analysis is used.
    if 'none' in args.compile_uniqueing and 'ctu_phases' in args:
        args.compile_uniqueing = "alpha"

    compiler_info_file = None
    if 'compiler_info_file' in args:
        LOG.debug("Compiler info is read from: %s", args.compiler_info_file)
        if not os.path.exists(args.compiler_info_file):
            LOG.error("Compiler info file %s does not exist",
                      args.compiler_info_file)
            sys.exit(1)
        compiler_info_file = args.compiler_info_file

    report_dir = args.output_path

    # Parse the JSON CCDBs and retrieve the compile commands.
    actions = []
    for log_file in args.logfile:
        if not os.path.exists(log_file):
            LOG.error("The specified logfile '%s' does not exist!", log_file)
            continue

        actions += log_parser.parse_unique_log(
            load_json_or_empty(log_file),
            report_dir,
            args.compile_uniqueing,
            skip_handler,
            compiler_info_file
        )

    if not actions:
        LOG.info("None of the specified build log files contained "
                 "valid compilation commands. No analysis needed...")
        sys.exit(1)

    uniqued_compilation_db_file = os.path.join(
        args.output_path, "unique_compile_commands.json")
    # FIX: write with an explicit encoding so the output does not depend on
    # the platform's default locale.
    with open(uniqued_compilation_db_file, 'w',
              encoding="utf-8", errors="ignore") as f:
        json.dump(actions, f, cls=log_parser.CompileCommandEncoder)

    context = analyzer_context.get_context()
    metadata = {'action_num': len(actions),
                'command': sys.argv,
                'versions': {
                    'codechecker': "{0} ({1})".format(
                        context.package_git_tag,
                        context.package_git_hash)},
                'working_directory': os.getcwd(),
                'output_path': args.output_path,
                'result_source_files': {}}

    if 'name' in args:
        metadata['name'] = args.name

    # Update metadata dictionary with old values.
    metadata_file = os.path.join(args.output_path, 'metadata.json')
    if os.path.exists(metadata_file):
        metadata_prev = load_json_or_empty(metadata_file)
        # FIX: an older or partially written metadata.json may not contain
        # the 'result_source_files' key -- fall back to an empty mapping
        # instead of raising KeyError.
        metadata['result_source_files'] = \
            metadata_prev.get('result_source_files', {})

    analyzer.perform_analysis(args, skip_handler, context, actions,
                              metadata)

    __update_skip_file(args)

    LOG.debug("Analysis metadata write to '%s'", metadata_file)
    with open(metadata_file, 'w',
              encoding="utf-8", errors="ignore") as metafile:
        json.dump(metadata, metafile)

    # WARN: store command will search for this file!!!!
    compile_cmd_json = os.path.join(args.output_path, 'compile_cmd.json')
    try:
        source = os.path.abspath(args.logfile[0])
        target = os.path.abspath(compile_cmd_json)

        # Only copy when the log file is not already the target file.
        if source != target:
            shutil.copyfile(source, target)
    except shutil.Error:
        LOG.debug("Compilation database JSON file is the same.")
    except Exception:
        LOG.debug("Copying compilation database JSON file failed.")

    # Usage statistics collection is strictly best-effort; never fail the
    # analysis because of it.
    try:
        from codechecker_analyzer import analyzer_statistics
        analyzer_statistics.collect(metadata, "analyze")
    except Exception:
        pass
def main(args):
    """
    Entry point for parsing some analysis results and printing them to the
    stdout in a human-readable format.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    context = analyzer_context.get_context()

    # To ensure the help message prints the default folder properly,
    # the 'default' for 'args.input' is a string, not a list.
    # But we need lists for the foreach here to work.
    if isinstance(args.input, str):
        args.input = [args.input]

    # Parsing may chdir into each result directory's recorded working
    # directory; remember where we started so we can return between inputs.
    original_cwd = os.getcwd()

    # Build a suppress handler when a suppress file was given, creating the
    # file first if '--export-source-suppress' asks us to write into it.
    suppr_handler = None
    if 'suppress' in args:
        __make_handler = False
        if not os.path.isfile(args.suppress):
            if 'create_suppress' in args:
                with open(args.suppress, 'w') as _:
                    # Just create the file.
                    __make_handler = True
                    LOG.info("Will write source-code suppressions to "
                             "suppress file.")
            else:
                LOG.warning("Suppress file '%s' given, but it does not exist"
                            " -- will not suppress anything.", args.suppress)
        else:
            __make_handler = True

        if __make_handler:
            suppr_handler = suppress_handler.\
                GenericSuppressHandler(args.suppress,
                                       'create_suppress' in args)
    elif 'create_suppress' in args:
        LOG.error("Can't use '--export-source-suppress' unless '--suppress "
                  "SUPPRESS_FILE' is also given.")
        sys.exit(2)

    # Path hashes of already-printed reports; used to deduplicate reports
    # that occur in more than one plist file.
    processed_path_hashes = set()

    skip_handler = None
    if 'skipfile' in args:
        with open(args.skipfile, 'r') as skip_file:
            skip_handler = SkipListHandler(skip_file.read())

    trim_path_prefixes = args.trim_path_prefix if \
        'trim_path_prefix' in args else None

    def trim_path_prefixes_handler(source_file):
        """
        Callback to util.trim_path_prefixes to prevent module dependency
        of plist_to_html
        """
        return util.trim_path_prefixes(source_file, trim_path_prefixes)

    html_builder = None

    def skip_html_report_data_handler(report_hash, source_file, report_line,
                                      checker_name, diag, files):
        """
        Report handler which skips bugs which were suppressed by source code
        comments.
        """
        report = Report(None, diag['path'], files)
        path_hash = get_report_path_hash(report, files)
        if path_hash in processed_path_hashes:
            LOG.debug("Skip report because it is a deduplication of an "
                      "already processed report!")
            LOG.debug("Path hash: %s", path_hash)
            LOG.debug(diag)
            return True

        skip = plist_parser.skip_report(report_hash,
                                        source_file,
                                        report_line,
                                        checker_name,
                                        suppr_handler)
        if skip_handler:
            skip |= skip_handler.should_skip(source_file)

        if not skip:
            processed_path_hashes.add(path_hash)

        return skip

    for input_path in args.input:
        input_path = os.path.abspath(input_path)
        # Reset the working directory: a previous iteration may have
        # chdir'd into a result directory's recorded working directory.
        os.chdir(original_cwd)
        LOG.debug("Parsing input argument: '%s'", input_path)

        export = args.export if 'export' in args else None
        if export is not None and export == 'html':
            output_path = os.path.abspath(args.output_path)

            # The HtmlBuilder is shared across inputs so index/statistics
            # pages can be generated once at the end.
            if not html_builder:
                html_builder = \
                    PlistToHtml.HtmlBuilder(context.path_plist_to_html_dist,
                                            context.severity_map)

            LOG.info("Generating html output files:")
            PlistToHtml.parse(input_path,
                              output_path,
                              context.path_plist_to_html_dist,
                              skip_html_report_data_handler,
                              html_builder,
                              trim_path_prefixes_handler)
            continue

        # Collect the plist files to process: either a single file, or
        # every file directly inside the given result directory.
        files = []
        metadata_dict = {}
        if os.path.isfile(input_path):
            files.append(input_path)
        elif os.path.isdir(input_path):
            metadata_file = os.path.join(input_path, "metadata.json")
            if os.path.exists(metadata_file):
                metadata_dict = util.load_json_or_empty(metadata_file)
                LOG.debug(metadata_dict)

                if 'working_directory' in metadata_dict:
                    working_dir = metadata_dict['working_directory']
                    try:
                        os.chdir(working_dir)
                    except OSError as oerr:
                        LOG.debug(oerr)
                        LOG.error("Working directory %s is missing.\n"
                                  "Can not parse reports safely.", working_dir)
                        sys.exit(1)

            _, _, file_names = next(os.walk(input_path), ([], [], []))
            files = [os.path.join(input_path, file_name)
                     for file_name in file_names]

        file_change = set()
        file_report_map = defaultdict(list)

        rh = plist_parser.PlistToPlaintextFormatter(suppr_handler,
                                                    skip_handler,
                                                    context.severity_map,
                                                    processed_path_hashes,
                                                    trim_path_prefixes)
        rh.print_steps = 'print_steps' in args

        for file_path in files:
            # Parse a plist file, collecting which source files changed
            # since the analysis was run.
            f_change = parse(file_path, metadata_dict, rh, file_report_map)
            file_change = file_change.union(f_change)

        # Print a per-input summary: per-file and per-severity tables.
        report_stats = rh.write(file_report_map)
        severity_stats = report_stats.get('severity')
        file_stats = report_stats.get('files')
        reports_stats = report_stats.get('reports')

        print("\n----==== Summary ====----")
        if file_stats:
            vals = [[os.path.basename(k), v] for k, v
                    in dict(file_stats).items()]
            keys = ['Filename', 'Report count']
            table = twodim_to_str('table', keys, vals, 1, True)
            print(table)

        if severity_stats:
            vals = [[k, v] for k, v in dict(severity_stats).items()]
            keys = ['Severity', 'Report count']
            table = twodim_to_str('table', keys, vals, 1, True)
            print(table)

        report_count = reports_stats.get("report_count", 0)
        print("----=================----")
        print("Total number of reports: {}".format(report_count))
        print("----=================----")

        if file_change:
            changed_files = '\n'.join([' - ' + f for f in file_change])
            LOG.warning("The following source file contents changed since "
                        "the "
                        "latest analysis:\n%s\nMultiple reports were not "
                        "shown and skipped from the statistics. Please "
                        "analyze your project again to update the "
                        "reports!", changed_files)

    os.chdir(original_cwd)

    # Create index.html and statistics.html for the generated html files.
    if html_builder:
        html_builder.create_index_html(args.output_path)
        html_builder.create_statistics_html(args.output_path)

        print('\nTo view statistics in a browser run:\n> firefox {0}'.format(
            os.path.join(args.output_path, 'statistics.html')))

        print('\nTo view the results in a browser run:\n> firefox {0}'.format(
            os.path.join(args.output_path, 'index.html')))
def add_arguments_to_parser(parser):
    """
    Add the subcommand's arguments to the given argparse.ArgumentParser.

    Arguments are organized into thematic groups (log, analyzer, checker
    configuration, output). Options that depend on the capabilities of the
    installed Clang (Z3, Z3 refutation, CTU, statistics) are only registered
    when the corresponding capability check succeeds.
    """
    parser.add_argument('-o', '--output',
                        type=str,
                        dest="output_dir",
                        required=False,
                        default=argparse.SUPPRESS,
                        help="Store the analysis output in the given folder. "
                             "If it is not given then the results go into a "
                             "temporary directory which will be removed after "
                             "the analysis.")

    parser.add_argument('-t', '--type', '--output-format',
                        dest="output_format",
                        required=False,
                        choices=['plist'],
                        default='plist',
                        help="Specify the format the analysis results "
                             "should use.")

    parser.add_argument('-q', '--quiet',
                        dest="quiet",
                        action='store_true',
                        required=False,
                        default=argparse.SUPPRESS,
                        help="If specified, the build tool's and the "
                             "analyzers' output will not be printed to the "
                             "standard output.")

    parser.add_argument('-f', '--force',
                        dest="force",
                        default=argparse.SUPPRESS,
                        action='store_true',
                        required=False,
                        help="DEPRECATED. Delete analysis results stored in "
                             "the database for the current analysis run's "
                             "name and store only the results reported in the "
                             "'input' files. (By default, CodeChecker would "
                             "keep reports that were coming from files not "
                             "affected by the analysis, and only "
                             "incrementally update defect reports for source "
                             "files that were analysed.)")

    log_args = parser.add_argument_group(
        "log arguments",
        """
Specify how the build information database should be obtained. You need to
specify either an already existing log file, or a build command which will be
used to generate a log file on the fly.""")

    # Exactly one of '--build' or '--logfile' must be given.
    log_args = log_args.add_mutually_exclusive_group(required=True)

    log_args.add_argument('-b', '--build',
                          type=str,
                          dest="command",
                          default=argparse.SUPPRESS,
                          help="Execute and record a build command. Build "
                               "commands can be simple calls to 'g++' or "
                               "'clang++' or 'make', but a more complex "
                               "command, or the call of a custom script file "
                               "is also supported.")

    log_args.add_argument('-l', '--logfile',
                          type=str,
                          dest="logfile",
                          default=argparse.SUPPRESS,
                          help="Use an already existing JSON compilation "
                               "command database file specified at this "
                               "path.")

    analyzer_opts = parser.add_argument_group("analyzer arguments")
    analyzer_opts.add_argument('-j', '--jobs',
                               type=int,
                               dest="jobs",
                               required=False,
                               default=1,
                               help="Number of threads to use in analysis. "
                                    "More threads mean faster analysis at "
                                    "the cost of using more memory.")

    analyzer_opts.add_argument('-c', '--clean',
                               dest="clean",
                               required=False,
                               action='store_true',
                               default=argparse.SUPPRESS,
                               # FIX: "were update by" -> "were updated by".
                               help="Delete analysis reports stored in the "
                                    "output directory. (By default, "
                                    "CodeChecker would keep reports and "
                                    "overwrites only those files that were "
                                    "updated by the current build command).")

    # NOTE: registered on the top-level parser, not on 'analyzer_opts'.
    parser.add_argument('--compile-uniqueing',
                        type=str,
                        dest="compile_uniqueing",
                        default="none",
                        required=False,
                        # FIX: a space was missing between
                        # "...regular expression." and "If there is more...".
                        help="Specify the method the compilation "
                             "actions in the compilation database are "
                             "uniqued before analysis. "
                             "CTU analysis works properly only if "
                             "there is exactly one "
                             "compilation action per source file. "
                             "none(default in non CTU mode): "
                             "no uniqueing is done. "
                             "strict: no uniqueing is done, "
                             "and an error is given if "
                             "there is more than one compilation "
                             "action for a source file. "
                             "alpha(default in CTU mode): If there is more "
                             "than one compilation action for a source "
                             "file, only the one is kept that belongs to the "
                             "alphabetically first "
                             "compilation target. "
                             "If none of the above given, "
                             "this parameter should "
                             "be a python regular expression. "
                             "If there is more than one compilation action "
                             "for a source, "
                             "only the one is kept which matches the "
                             "given python regex. If more than one "
                             "matches an error is given. "
                             "The whole compilation "
                             "action text is searched for match.")

    analyzer_opts.add_argument('--report-hash',
                               dest="report_hash",
                               default=argparse.SUPPRESS,
                               required=False,
                               choices=['context-free'],
                               # FIX: "for every analyzers" ->
                               # "for every analyzer".
                               help="EXPERIMENTAL feature. "
                                    "Specify the hash calculation method for "
                                    "reports. If this option is not set, the "
                                    "default calculation method for Clang "
                                    "Static Analyzer will be context "
                                    "sensitive and for Clang Tidy it will be "
                                    "context insensitive. If this option is "
                                    "set to 'context-free' bugs will be "
                                    "identified with the CodeChecker "
                                    "generated context free hash for every "
                                    "analyzer. USE WISELY AND AT YOUR OWN "
                                    "RISK!")

    analyzer_opts.add_argument('-i', '--ignore', '--skip',
                               dest="skipfile",
                               required=False,
                               default=argparse.SUPPRESS,
                               help="Path to the Skipfile dictating which "
                                    "project files should be omitted from "
                                    "analysis. Please consult the User guide "
                                    "on how a Skipfile should be laid out.")

    analyzer_opts.add_argument('--analyzers',
                               nargs='+',
                               dest='analyzers',
                               metavar='ANALYZER',
                               required=False,
                               choices=analyzer_types.supported_analyzers,
                               default=argparse.SUPPRESS,
                               help="Run analysis only with the analyzers "
                                    "specified. Currently supported "
                                    "analyzers are: " +
                                    ', '.join(analyzer_types.
                                              supported_analyzers) + ".")

    analyzer_opts.add_argument('--add-compiler-defaults',
                               action='store_true',
                               required=False,
                               default=argparse.SUPPRESS,
                               # FIX: removed accidental double space after
                               # "Retrieve".
                               help="DEPRECATED. Always True. Retrieve "
                                    "compiler-specific configuration "
                                    "from the analyzers themselves, and use "
                                    "them with Clang. This is used when the "
                                    "compiler on the system is special, e.g. "
                                    "when doing cross-compilation.")

    analyzer_opts.add_argument('--capture-analysis-output',
                               dest='capture_analysis_output',
                               action='store_true',
                               default=argparse.SUPPRESS,
                               required=False,
                               help="Store standard output and standard error "
                                    "of successful analyzer invocations "
                                    "into the '<OUTPUT_DIR>/success' "
                                    "directory.")

    # TODO: One day, get rid of these. See Issue #36, #427.
    analyzer_opts.add_argument('--saargs',
                               dest="clangsa_args_cfg_file",
                               required=False,
                               default=argparse.SUPPRESS,
                               help="File containing argument which will be "
                                    "forwarded verbatim for the Clang Static "
                                    "analyzer.")

    analyzer_opts.add_argument('--tidyargs',
                               dest="tidy_args_cfg_file",
                               required=False,
                               default=argparse.SUPPRESS,
                               help="File containing argument which will be "
                                    "forwarded verbatim for the Clang-Tidy "
                                    "analyzer.")

    analyzer_opts.add_argument('--tidy-config',
                               dest='tidy_config',
                               required=False,
                               default=argparse.SUPPRESS,
                               help="A file in YAML format containing the "
                                    "configuration of clang-tidy checkers. "
                                    "The file can be dumped by "
                                    "'CodeChecker analyzers --dump-config "
                                    "clang-tidy' command.")

    analyzer_opts.add_argument('--timeout',
                               type=int,
                               dest='timeout',
                               required=False,
                               default=argparse.SUPPRESS,
                               help="The amount of time (in seconds) that "
                                    "each analyzer can spend, individually, "
                                    "to analyze the project. If the analysis "
                                    "of a particular file takes longer than "
                                    "this time, the analyzer is killed and "
                                    "the analysis is considered as a failed "
                                    "one.")

    # Capability-dependent options: only offered when the installed Clang
    # actually supports the feature.
    context = analyzer_context.get_context()
    clang_has_z3 = analyzer_types.is_z3_capable(context)

    if clang_has_z3:
        analyzer_opts.add_argument('--z3',
                                   dest='enable_z3',
                                   choices=['on', 'off'],
                                   default='off',
                                   help="Enable the z3 solver backend. This "
                                        "allows reasoning over more complex "
                                        "queries, but performance is worse "
                                        "than the default range-based "
                                        "constraint solver.")

    clang_has_z3_refutation = analyzer_types.is_z3_refutation_capable(context)

    if clang_has_z3_refutation:
        analyzer_opts.add_argument('--z3-refutation',
                                   dest='enable_z3_refutation',
                                   choices=['on', 'off'],
                                   default='on' if clang_has_z3_refutation
                                   else 'off',
                                   help="Switch on/off the Z3 SMT Solver "
                                        "backend to "
                                        "reduce false positives. The results "
                                        "of the ranged based constraint "
                                        "solver in the Clang Static Analyzer "
                                        "will be cross checked with the Z3 "
                                        "SMT solver. This should not cause "
                                        "that much of a slowdown compared to "
                                        "using the Z3 solver only.")

    if analyzer_types.is_ctu_capable(context):
        ctu_opts = parser.add_argument_group(
            "cross translation unit analysis arguments",
            """
These arguments are only available if the Clang Static Analyzer supports
Cross-TU analysis. By default, no CTU analysis is run when
'CodeChecker check' is called.""")

        ctu_modes = ctu_opts.add_mutually_exclusive_group()

        ctu_modes.add_argument('--ctu', '--ctu-all',
                               action='store_const',
                               const=[True, True],
                               dest='ctu_phases',
                               default=argparse.SUPPRESS,
                               help="Perform Cross Translation Unit (CTU) "
                                    "analysis, both 'collect' and 'analyze' "
                                    "phases. In this mode, the extra files "
                                    "created by 'collect' are cleaned up "
                                    "after the analysis.")

        ctu_modes.add_argument('--ctu-collect',
                               action='store_const',
                               const=[True, False],
                               dest='ctu_phases',
                               default=argparse.SUPPRESS,
                               help="Perform the first, 'collect' phase of "
                                    "Cross-TU analysis. This phase generates "
                                    "extra files needed by CTU analysis, and "
                                    "puts them into "
                                    "'<OUTPUT_DIR>/ctu-dir'. "
                                    "NOTE: If this argument is present, "
                                    "CodeChecker will NOT execute the "
                                    "analyzers!")

        ctu_modes.add_argument('--ctu-analyze',
                               action='store_const',
                               const=[False, True],
                               dest='ctu_phases',
                               default=argparse.SUPPRESS,
                               help="Perform the second, 'analyze' phase of "
                                    "Cross-TU analysis, using already "
                                    "available extra files in "
                                    "'<OUTPUT_DIR>/ctu-dir'. (These files "
                                    "will not be cleaned up in this mode.)")

    if analyzer_types.is_statistics_capable(context):
        stat_opts = parser.add_argument_group(
            "EXPERIMENTAL statistics analysis feature arguments",
            """
These arguments are only available if the Clang Static Analyzer supports
Statistics-based analysis
(e.g. statisticsCollector.ReturnValueCheck,
statisticsCollector.SpecialReturnValue checkers are available).""")

        # FIX: the option string '--stats-collect' was listed twice in the
        # same add_argument() call; the duplicate is redundant.
        stat_opts.add_argument('--stats-collect',
                               action='store',
                               default=argparse.SUPPRESS,
                               dest='stats_output',
                               help="EXPERIMENTAL feature. "
                                    "Perform the first, 'collect' phase of "
                                    "Statistical analysis. This phase "
                                    "generates extra files needed by "
                                    "statistics analysis, and "
                                    "puts them into "
                                    "'<STATS_OUTPUT>'."
                                    " NOTE: If this argument is present, "
                                    "CodeChecker will NOT execute the "
                                    "analyzers!")

        # FIX: same duplicated option string removed for '--stats-use'.
        stat_opts.add_argument('--stats-use',
                               action='store',
                               default=argparse.SUPPRESS,
                               dest='stats_dir',
                               help="EXPERIMENTAL feature. "
                                    "Use the previously generated statistics "
                                    "results for the analysis from the given "
                                    "'<STATS_DIR>'.")

        stat_opts.add_argument('--stats',
                               action='store_true',
                               default=argparse.SUPPRESS,
                               dest='stats_enabled',
                               help="EXPERIMENTAL feature. "
                                    "Perform both phases of "
                                    "Statistical analysis. This phase "
                                    "generates extra files needed by "
                                    "statistics analysis and enables "
                                    "the statistical checkers. "
                                    "No need to enable them explicitly.")

        stat_opts.add_argument('--stats-min-sample-count',
                               action='store',
                               default="10",
                               type=int,
                               dest='stats_min_sample_count',
                               help="EXPERIMENTAL feature. "
                                    "Minimum number of samples (function call"
                                    " occurrences) to be collected"
                                    " for a statistics to be relevant.")

        stat_opts.add_argument('--stats-relevance-threshold',
                               action='store',
                               default="0.85",
                               type=float,
                               dest='stats_relevance_threshold',
                               # FIX: duplicated word "property property" and
                               # missing "that" in "calls of f that do not".
                               help="EXPERIMENTAL feature. "
                                    "The minimum ratio of calls of function "
                                    "f that must have a certain property "
                                    "to consider it true for that "
                                    "function (calculated as calls "
                                    "with a property/all calls)."
                                    " CodeChecker will warn for"
                                    " calls of f that do not have that "
                                    "property.")

    checkers_opts = parser.add_argument_group(
        "checker configuration",
        # FIX: "the waning name" -> "the warning name".
        """
Checkers
------------------------------------------------
The analyzer performs checks that are categorized into families or "checkers".
See 'CodeChecker checkers' for the list of available checkers. You can
fine-tune which checkers to use in the analysis by setting the enabled and
disabled flags starting from the bigger groups and going inwards, e.g.
'-e core -d core.uninitialized -e core.uninitialized.Assign' will enable every
'core' checker, but only 'core.uninitialized.Assign' from the
'core.uninitialized' group. Please consult the manual for details.

Disabling certain checkers - such as the 'core' group - is unsupported by the
LLVM/Clang community, and thus discouraged.

Compiler warnings
------------------------------------------------
Compiler warnings are diagnostic messages that report constructions that are
not inherently erroneous but that are risky or suggest there may have been an
error. Compiler warnings are named 'clang-diagnostic-<warning-option>', e.g.
Clang warning controlled by '-Wliteral-conversion' will be reported with check
name 'clang-diagnostic-literal-conversion'. You can fine-tune which warnings
to use in the analysis by setting the enabled and disabled flags starting from
the bigger groups and going inwards, e.g. '-e Wunused -d Wno-unused-parameter'
will enable every 'unused' warnings except 'unused-parameter'. These flags
should start with a capital 'W' or 'Wno-' prefix followed by the warning name
(E.g.: '-e Wliteral-conversion', '-d Wno-literal-conversion'). By default
'-Wall' and '-Wextra' warnings are enabled. For more information see:
https://clang.llvm.org/docs/DiagnosticsReference.html.""")

    checkers_opts.add_argument('-e', '--enable',
                               dest="enable",
                               metavar='checker/group/profile',
                               default=argparse.SUPPRESS,
                               action=arg.OrderedCheckersAction,
                               help="Set a checker (or checker group) "
                                    "to BE USED in the analysis.")

    checkers_opts.add_argument('-d', '--disable',
                               dest="disable",
                               metavar='checker/group/profile',
                               default=argparse.SUPPRESS,
                               action=arg.OrderedCheckersAction,
                               help="Set a checker (or checker group) "
                                    "to BE PROHIBITED from use in the "
                                    "analysis.")

    checkers_opts.add_argument('--enable-all',
                               dest="enable_all",
                               action='store_true',
                               required=False,
                               default=argparse.SUPPRESS,
                               help="Force the running analyzers to use "
                                    "almost every checker available. The "
                                    "checker groups 'alpha.', 'debug.' and "
                                    "'osx.' (on Linux) are NOT enabled "
                                    "automatically and must be EXPLICITLY "
                                    "specified. WARNING! Enabling all "
                                    "checkers might result in the analysis "
                                    "losing precision and stability, and "
                                    "could even result in a total failure of "
                                    "the analysis. USE WISELY AND AT YOUR "
                                    "OWN RISK!")

    output_opts = parser.add_argument_group("output arguments")

    output_opts.add_argument('--print-steps',
                             dest="print_steps",
                             action="store_true",
                             required=False,
                             default=argparse.SUPPRESS,
                             help="Print the steps the analyzers took in "
                                  "finding the reported defect.")

    logger.add_verbose_arguments(parser)
    parser.set_defaults(func=main)
def get_argparser_ctor_args():
    """
    Build the keyword arguments used to construct this subcommand's
    argparse.ArgumentParser (either directly or as a subparser).
    """
    severity_map_file = os.path.join(
        analyzer_context.get_context().package_root,
        'config', 'checker_severity_map.json')

    description = """
Run analysis for a project with printing results immediately on the standard
output. Check only needs a build command or an already existing logfile and
performs every step of doing the analysis in batch."""

    # Epilogue shown after the arguments when the help is queried directly.
    epilog = f"""
Environment variables
------------------------------------------------

  CC_ANALYZERS_FROM_PATH   Set to `yes` or `1` to enforce taking the
                           analyzers from the `PATH` instead of the given
                           binaries.
  CC_CLANGSA_PLUGIN_DIR    If the CC_ANALYZERS_FROM_PATH environment
                           variable is set you can configure the plugin
                           directory of the Clang Static Analyzer by using
                           this environment variable.
  CC_SEVERITY_MAP_FILE     Path of the checker-severity mapping config file.
                           Default: {severity_map_file}

Issue hashes
------------------------------------------------
- By default the issue hash calculation method for 'Clang Static Analyzer' is
context sensitive. It means the hash will be generated based on the following
information:
  * signature of the enclosing function declaration, type declaration or
    namespace.
  * content of the line where the bug is.
  * unique name of the checker.
  * position (column) within the line.

- By default the issue hash calculation method for 'Clang Tidy' is context
insensitive. It means the hash will be generated based on the following
information:
  * 'file name' from the main diag section.
  * 'checker name'.
  * 'checker message'.
  * 'line content' from the source file if can be read up.
  * 'column numbers' from the main diag section.
  * 'range column numbers' only from the control diag sections if column
    number in the range is not the same as the previous control diag section
    number in the bug path. If there are no control sections event section
    column numbers are used.

- context-free: there was a bug and for Clang Tidy the default hash was
generated and not the context free hash (kept for backward compatibility). Use
'context-free-v2' instead of this.

- context-free-v2:
  * 'file name' from the main diag section.
  * 'checker message'.
  * 'line content' from the source file if can be read up. All the whitespaces
    from the source content are removed.
  * 'column numbers' from the main diag sections location.

OUR RECOMMENDATION: we recommend you to use 'context-free-v2' hash because the
hash will not be changed so easily for example on code indentation or when a
checker is renamed.

Exit status
------------------------------------------------
0 - Successful analysis and no new reports
1 - CodeChecker error
2 - At least one report emitted by an analyzer and there is no analyzer
failure
3 - Analysis of at least one translation unit failed
128+signum - Terminating on a fatal signal whose number is signum

If you wish to reuse the logfile resulting from executing the build, see
'CodeChecker log'. To keep analysis results for later, see and use
'CodeChecker analyze'. To print human-readable output from previously saved
analysis results, see 'CodeChecker parse'. 'CodeChecker check' exposes a
wrapper calling these three commands in succession. Please make sure your
build command actually builds the files -- it is advised to execute builds on
empty trees, aka. after a 'make clean', as CodeChecker only analyzes files
that had been used by the build system.
"""

    return {
        'prog': 'CodeChecker check',
        'formatter_class': arg.RawDescriptionDefaultHelpFormatter,
        # Description is shown when the command's help is queried directly.
        'description': description,
        'epilog': epilog,
        # Help is shown when the "parent" CodeChecker command lists the
        # individual subcommands.
        'help': "Perform analysis on a project and print results to standard "
                "output."
    }
def main(args):
    """
    List the analyzers' basic information supported by CodeChecker.

    With '--dump-config' only prints the analyzer's configuration and
    returns. Otherwise prints one row per working analyzer; with
    '--details' the binary path and version are included, and with
    '--all' the non-working analyzers and their failure reason are
    appended. Header capitalization follows the output format.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    context = analyzer_context.get_context()
    working, errored = \
        analyzer_types.check_supported_analyzers(
            analyzer_types.supported_analyzers, context)

    if args.dump_config:
        binary = context.analyzer_binaries.get(args.dump_config)

        if args.dump_config == 'clang-tidy':
            subprocess.call([binary, '-dump-config', '-checks=*'])
        elif args.dump_config == 'clangsa':
            # TODO: Not supported by ClangSA yet!
            LOG.warning("'--dump-config clangsa' is not supported yet.")

        return

    # Machine-readable formats ('csv', 'json') use lowercase field names;
    # the plain table uses capitalized column headers.
    if args.output_format not in ['csv', 'json']:
        if 'details' not in args:
            header = ['Name']
        else:
            header = ['Name', 'Path', 'Version']
    else:
        if 'details' not in args:
            header = ['name']
        else:
            header = ['name', 'path', 'version_string']

    rows = []
    for analyzer in working:
        if 'details' not in args:
            rows.append([analyzer])
        else:
            binary = context.analyzer_binaries.get(analyzer)
            try:
                # FIX: check_output() returns bytes on Python 3; decode it
                # so the table shows the version text instead of a b'...'
                # repr, consistent with the 'ERROR' str fallback below.
                version = subprocess.check_output(
                    [binary, '--version']).decode(errors='ignore').strip()
            except (subprocess.CalledProcessError, OSError):
                version = 'ERROR'

            rows.append([analyzer, binary, version])

    if 'all' in args:
        for analyzer, err_reason in errored:
            if 'details' not in args:
                rows.append([analyzer])
            else:
                rows.append([analyzer,
                             context.analyzer_binaries.get(analyzer),
                             err_reason])

    if rows:
        print(output_formatters.twodim_to_str(args.output_format,
                                              header, rows))
def __get_detailed_checker_info(args: argparse.Namespace,
                                cl: CheckerLabels) -> Dict[str, list]:
    """
    Returns a dictionary which maps analyzer names to the collection of
    their supported checkers. Checker information is described with tuples
    of this information: (status, checker name, analyzer name, description,
    labels).
    """
    context = analyzer_context.get_context()

    working_analyzers, _ = analyzer_types.check_supported_analyzers(
        analyzer_types.supported_analyzers, context)

    config_handler_map = analyzer_types.build_config_handlers(
        args, context, working_analyzers)

    environ = env.extend(context.path_env_extra,
                         context.ld_lib_path_extra)

    def requested_labels():
        """Collect the (label, enabled) pairs selected on the command line."""
        labels = []

        if 'profile' in args:
            if args.profile not in cl.get_description('profile'):
                LOG.error("Checker profile '%s' does not exist!",
                          args.profile)
                LOG.error("To list available profiles, use "
                          "'--profile list'.")
                sys.exit(1)
            labels.append((f'profile:{args.profile}', True))

        if 'label' in args:
            labels.extend((label, True) for label in args.label)

        if 'severity' in args:
            labels.append((f'severity:{args.severity}', True))

        if 'guideline' in args:
            labels.append((__guideline_to_label(args, cl), True))

        return labels

    checker_info = defaultdict(list)

    for analyzer_name in working_analyzers:
        handler = config_handler_map.get(analyzer_name)
        analyzer_class = analyzer_types.supported_analyzers[analyzer_name]
        checkers = analyzer_class.get_analyzer_checkers(handler, environ)

        handler.initialize_checkers(context, checkers, requested_labels())

        for checker, (state, description) in handler.checks().items():
            checker_info[analyzer_name].append(
                (state, checker, analyzer_name, description,
                 sorted(cl.labels_of_checker(checker, analyzer_name))))

    # Compiler warnings are reported under Clang Tidy with a
    # 'clang-diagnostic-' prefix and an empty description.
    if 'show_warnings' in args:
        tidy = ClangTidy.ANALYZER_NAME
        for warning in get_warnings(environ):
            warning = 'clang-diagnostic-' + warning
            checker_info[tidy].append(
                (CheckerState.default, warning, tidy, '',
                 sorted(cl.labels_of_checker(warning, tidy))))

    return checker_info
from codechecker_common import arg, logger, plist_parser, util, cmd_config
from codechecker_common.output import json as out_json, twodim, \
    codeclimate, gerrit
from codechecker_common.skiplist_handler import SkipListHandler
from codechecker_common.source_code_comment_handler import \
    REVIEW_STATUS_VALUES, SourceCodeCommentHandler, SpellException
from codechecker_common.report import Report
from codechecker_report_hash.hash import get_report_path_hash

# Module level logger.
LOG = logger.get_logger('system')

# Output formats accepted by the '--export' option of this subcommand.
EXPORT_TYPES = ['html', 'json', 'codeclimate', 'gerrit']

# Default checker-severity mapping config file shipped inside the package;
# its path is interpolated into the help epilogue below.
_package_root = analyzer_context.get_context().package_root
_severity_map_file = os.path.join(_package_root, 'config',
                                  'checker_severity_map.json')

# Description of the environment variables honoured by this subcommand,
# shown in the --help epilogue.
epilog_env_var = f"""
CC_CHANGED_FILES       Path of changed files json from Gerrit. Use it when
                       generating gerrit output.
CC_REPO_DIR            Root directory of the sources, i.e. the directory
                       where the repository was cloned. Use it when
                       generating gerrit output.
CC_REPORT_URL          URL where the report can be found. Use it when
                       generating gerrit output.
CC_SEVERITY_MAP_FILE   Path of the checker-severity mapping config file.
                       Default: {_severity_map_file}
"""
def main(args):
    """
    Entry point for parsing some analysis results and printing them to the
    stdout in a human-readable format.

    Iterates over every path in 'args.input' (plist files or result
    directories), prints the reports either as plain text or as HTML
    (with '--export html'), and finally prints a per-file and per-severity
    summary table. May call sys.exit() on fatal errors.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    context = analyzer_context.get_context()

    # To ensure the help message prints the default folder properly,
    # the 'default' for 'args.input' is a string, not a list.
    # But we need lists for the foreach here to work.
    if isinstance(args.input, str):
        args.input = [args.input]

    # Remember where we started: the loop below may chdir into the metadata
    # 'working_directory' of each input, and we restore this at the end.
    original_cwd = os.getcwd()

    suppr_handler = None
    if 'suppress' in args:
        __make_handler = False
        if not os.path.isfile(args.suppress):
            if 'create_suppress' in args:
                with open(args.suppress, 'w') as _:
                    # Just create the file.
                    __make_handler = True
                    LOG.info("Will write source-code suppressions to "
                             "suppress file: %s", args.suppress)
            else:
                LOG.warning("Suppress file '%s' given, but it does not exist"
                            " -- will not suppress anything.", args.suppress)
        else:
            __make_handler = True

        if __make_handler:
            suppr_handler = suppress_handler.\
                GenericSuppressHandler(args.suppress,
                                       'create_suppress' in args)
    elif 'create_suppress' in args:
        # '--export-source-suppress' only makes sense together with a
        # suppress file to write into.
        LOG.error("Can't use '--export-source-suppress' unless '--suppress "
                  "SUPPRESS_FILE' is also given.")
        sys.exit(2)

    # Path hashes of reports already printed; used for deduplication across
    # all input files.
    processed_path_hashes = set()

    skip_handler = None
    if 'skipfile' in args:
        with open(args.skipfile, 'r') as skip_file:
            skip_handler = SkipListHandler(skip_file.read())

    trim_path_prefixes = args.trim_path_prefix if \
        'trim_path_prefix' in args else None

    def trim_path_prefixes_handler(source_file):
        """
        Callback to util.trim_path_prefixes to prevent module dependency
        of plist_to_html
        """
        return util.trim_path_prefixes(source_file, trim_path_prefixes)

    html_builder = None

    def skip_html_report_data_handler(report_hash, source_file, report_line,
                                      checker_name, diag, files):
        """
        Report handler which skips bugs which were suppressed by source code
        comments. Also skips duplicates of already processed reports
        (matched by report path hash).
        """
        report = Report(None, diag['path'], files)
        path_hash = get_report_path_hash(report, files)
        if path_hash in processed_path_hashes:
            LOG.debug("Skip report because it is a deduplication of an "
                      "already processed report!")
            LOG.debug("Path hash: %s", path_hash)
            LOG.debug(diag)
            return True

        # NOTE(review): 'skip_report' is defined elsewhere in this module.
        skip = skip_report(report_hash, source_file, report_line,
                           checker_name, suppr_handler)
        if skip_handler:
            skip |= skip_handler.should_skip(source_file)

        if not skip:
            processed_path_hashes.add(path_hash)

        return skip

    # Aggregated statistics over every input path.
    file_change = set()
    severity_stats = defaultdict(int)
    file_stats = defaultdict(int)
    report_count = 0

    for input_path in args.input:
        input_path = os.path.abspath(input_path)
        # Undo any chdir done for the previous input's working directory.
        os.chdir(original_cwd)
        LOG.debug("Parsing input argument: '%s'", input_path)

        export = args.export if 'export' in args else None
        if export is not None and export == 'html':
            output_path = os.path.abspath(args.output_path)

            if not html_builder:
                html_builder = \
                    PlistToHtml.HtmlBuilder(context.path_plist_to_html_dist,
                                            context.severity_map)

            LOG.info("Generating html output files:")
            PlistToHtml.parse(input_path,
                              output_path,
                              context.path_plist_to_html_dist,
                              skip_html_report_data_handler,
                              html_builder,
                              trim_path_prefixes_handler)
            # HTML export handles this input completely; skip text output.
            continue

        files = []
        metadata_dict = {}
        if os.path.isfile(input_path):
            files.append(input_path)
        elif os.path.isdir(input_path):
            metadata_file = os.path.join(input_path, "metadata.json")
            if os.path.exists(metadata_file):
                metadata_dict = util.load_json_or_empty(metadata_file)
                LOG.debug(metadata_dict)

                if 'working_directory' in metadata_dict:
                    working_dir = metadata_dict['working_directory']
                    try:
                        # Relative paths in the reports are resolved against
                        # the analysis-time working directory.
                        os.chdir(working_dir)
                    except OSError as oerr:
                        LOG.debug(oerr)
                        LOG.error("Working directory %s is missing.\n"
                                  "Can not parse reports safely.",
                                  working_dir)
                        sys.exit(1)

            _, _, file_names = next(os.walk(input_path), ([], [], []))
            files = [os.path.join(input_path, file_name) for file_name
                     in file_names]

        file_report_map = defaultdict(list)

        # NOTE(review): 'PlistToPlaintextFormatter' is imported elsewhere
        # in this module.
        rh = PlistToPlaintextFormatter(suppr_handler,
                                       skip_handler,
                                       context.severity_map,
                                       processed_path_hashes,
                                       trim_path_prefixes)
        rh.print_steps = 'print_steps' in args

        for file_path in files:
            f_change = parse(file_path, metadata_dict, rh, file_report_map)
            file_change = file_change.union(f_change)

        report_stats = rh.write(file_report_map)

        # Merge this input's statistics into the global accumulators.
        sev_stats = report_stats.get('severity')
        for severity in sev_stats:
            severity_stats[severity] += sev_stats[severity]

        f_stats = report_stats.get('files')
        for file_path in f_stats:
            file_stats[file_path] += f_stats[file_path]

        rep_stats = report_stats.get('reports')
        report_count += rep_stats.get("report_count", 0)

    print("\n----==== Summary ====----")
    if file_stats:
        vals = [[os.path.basename(k), v] for k, v in
                dict(file_stats).items()]
        keys = ['Filename', 'Report count']
        table = twodim_to_str('table', keys, vals, 1, True)
        print(table)

    if severity_stats:
        vals = [[k, v] for k, v in dict(severity_stats).items()]
        keys = ['Severity', 'Report count']
        table = twodim_to_str('table', keys, vals, 1, True)
        print(table)

    print("----=================----")
    print("Total number of reports: {}".format(report_count))
    print("----=================----")

    if file_change:
        changed_files = '\n'.join([' - ' + f for f in file_change])
        LOG.warning("The following source file contents changed since the "
                    "latest analysis:\n%s\nMultiple reports were not "
                    "shown and skipped from the statistics. Please "
                    "analyze your project again to update the "
                    "reports!", changed_files)

    # Restore the caller's working directory.
    os.chdir(original_cwd)

    # Create index.html and statistics.html for the generated html files.
    if html_builder:
        html_builder.create_index_html(args.output_path)
        html_builder.create_statistics_html(args.output_path)

        print('\nTo view statistics in a browser run:\n> firefox {0}'.format(
            os.path.join(args.output_path, 'statistics.html')))

        print('\nTo view the results in a browser run:\n> firefox {0}'.format(
            os.path.join(args.output_path, 'index.html')))
def add_arguments_to_parser(parser):
    """
    Add the 'CodeChecker analyze' subcommand's arguments to the given
    argparse.ArgumentParser.

    Groups: generic analysis options, analyzer invocation options,
    (conditionally) Z3 / CTU / statistics options when the detected Clang
    supports them, and checker enable/disable options.
    """
    # NOTE: 'logfile' must accept one or more paths: main() iterates over
    # args.logfile and checks len(args.logfile) — without nargs='+' the value
    # would be a plain string and len() would be its character count.
    parser.add_argument('logfile',
                        type=str,
                        nargs='+',
                        help="Path to the JSON compilation command database "
                             "files which were created during the build. "
                             "The analyzers will check only the files "
                             "registered in these build databases.")

    parser.add_argument('-j', '--jobs',
                        type=int,
                        dest="jobs",
                        required=False,
                        default=1,
                        help="Number of threads to use in analysis. More "
                             "threads mean faster analysis at the cost of "
                             "using more memory.")

    # Skipping files and analyzing only explicitly given files are mutually
    # exclusive ways of restricting the analysis scope.
    skip_mode = parser.add_mutually_exclusive_group()
    skip_mode.add_argument('-i', '--ignore', '--skip',
                           dest="skipfile",
                           required=False,
                           default=argparse.SUPPRESS,
                           help="Path to the Skipfile dictating which project "
                                "files should be omitted from analysis. "
                                "Please consult the User guide on how a "
                                "Skipfile should be laid out.")

    skip_mode.add_argument('--file',
                           nargs='+',
                           dest="files",
                           metavar='FILE',
                           required=False,
                           default=argparse.SUPPRESS,
                           help="Analyze only the given file(s) not the whole "
                                "compilation database. Absolute directory "
                                "paths should start with '/', relative "
                                "directory paths should start with '*' and "
                                "it can contain path glob pattern. "
                                "Example: '/path/to/main.cpp', 'lib/*.cpp', "
                                "*/test*'.")

    parser.add_argument('-o', '--output',
                        dest="output_path",
                        required=True,
                        default=argparse.SUPPRESS,
                        help="Store the analysis output in the given folder.")

    parser.add_argument('--compiler-info-file',
                        dest="compiler_info_file",
                        required=False,
                        default=argparse.SUPPRESS,
                        help="Read the compiler includes and target from the "
                             "specified file rather than invoke the compiler "
                             "executable.")

    parser.add_argument('--keep-gcc-include-fixed',
                        dest="keep_gcc_include_fixed",
                        required=False,
                        action='store_true',
                        default=False,
                        help="There are some implicit include paths which are "
                             "only used by GCC (include-fixed). This flag "
                             "determines whether these should be kept among "
                             "the implicit include paths.")

    parser.add_argument('--keep-gcc-intrin',
                        dest="keep_gcc_intrin",
                        required=False,
                        action='store_true',
                        default=False,
                        help="There are some implicit include paths which "
                             "contain GCC-specific header files (those "
                             "which end with intrin.h). This flag determines "
                             "whether these should be kept among the implicit "
                             "include paths. Use this flag if Clang analysis "
                             "fails with error message related to __builtin "
                             "symbols.")

    parser.add_argument('-t', '--type', '--output-format',
                        dest="output_format",
                        required=False,
                        choices=['plist'],
                        default='plist',
                        help="Specify the format the analysis results should "
                             "use.")

    parser.add_argument('-q', '--quiet',
                        dest="quiet",
                        action='store_true',
                        default=argparse.SUPPRESS,
                        required=False,
                        help="Do not print the output or error of the "
                             "analyzers to the standard output of "
                             "CodeChecker.")

    parser.add_argument('-c', '--clean',
                        dest="clean",
                        required=False,
                        action='store_true',
                        default=argparse.SUPPRESS,
                        help="Delete analysis reports stored in the output "
                             "directory. (By default, CodeChecker would keep "
                             "reports and overwrites only those files that "
                             "were update by the current build command).")

    parser.add_argument('--compile-uniqueing',
                        type=str,
                        dest="compile_uniqueing",
                        default="none",
                        required=False,
                        help="Specify the method the compilation "
                             "actions in the compilation database are "
                             "uniqued before analysis. "
                             "CTU analysis works properly only if "
                             "there is exactly one "
                             "compilation action per source file. "
                             "none(default in non CTU mode): "
                             "no uniqueing is done. "
                             "strict: no uniqueing is done, "
                             "and an error is given if "
                             "there is more than one compilation "
                             "action for a source file. "
                             "alpha(default in CTU mode): If there is more "
                             "than one compilation action for a source "
                             "file, only the one is kept that belongs to the "
                             "alphabetically first "
                             "compilation target. "
                             "If none of the above given, "
                             "this parameter should "
                             "be a python regular expression."
                             "If there is more than one compilation action "
                             "for a source, "
                             "only the one is kept which matches the "
                             "given python regex. If more than one "
                             "matches an error is given. "
                             "The whole compilation "
                             "action text is searched for match.")

    parser.add_argument('--report-hash',
                        dest="report_hash",
                        default=argparse.SUPPRESS,
                        required=False,
                        choices=['context-free', 'context-free-v2'],
                        help="R|Specify the hash calculation method for "
                             "reports. By default the calculation method for "
                             "Clang Static Analyzer is context sensitive and "
                             "for Clang Tidy it is context insensitive.\n"
                             "You can use the following calculation methods:\n"
                             "- context-free: there was a bug and for Clang "
                             "Tidy not the context free hash was generated "
                             "(kept for backward compatibility).\n"
                             "- context-free-v2: context free hash is used "
                             "for ClangSA and Clang Tidy.\n"
                             "See the 'issue hashes' section of the help "
                             "message of this command below for more "
                             "information.\n"
                             "USE WISELY AND AT YOUR OWN RISK!")

    parser.add_argument('-n', '--name',
                        dest="name",
                        required=False,
                        default=argparse.SUPPRESS,
                        help="Annotate the run analysis with a custom name in "
                             "the created metadata file.")

    analyzer_opts = parser.add_argument_group("analyzer arguments")

    analyzer_opts.add_argument('--analyzers',
                               nargs='+',
                               dest='analyzers',
                               metavar='ANALYZER',
                               required=False,
                               choices=analyzer_types.supported_analyzers,
                               default=argparse.SUPPRESS,
                               help="Run analysis only with the analyzers "
                                    "specified. Currently supported analyzers "
                                    "are: " +
                                    ', '.join(analyzer_types.
                                              supported_analyzers) + ".")

    analyzer_opts.add_argument('--capture-analysis-output',
                               dest='capture_analysis_output',
                               action='store_true',
                               default=argparse.SUPPRESS,
                               required=False,
                               help="Store standard output and standard error "
                                    "of successful analyzer invocations "
                                    "into the '<OUTPUT_DIR>/success' "
                                    "directory.")

    analyzer_opts.add_argument('--config',
                               dest='config_file',
                               required=False,
                               help="Allow the configuration from an explicit "
                                    "JSON based configuration file. The "
                                    "value of the 'analyzer' key in the "
                                    "config file will be emplaced as command "
                                    "line arguments. The format of "
                                    "configuration file is: "
                                    "{"
                                    "  \"analyzer\": ["
                                    "    \"--enable=core.DivideZero\","
                                    "    \"--enable=core.CallAndMessage\","
                                    "    \"--clean\""
                                    "  ]"
                                    "}.")

    analyzer_opts.add_argument('--saargs',
                               dest="clangsa_args_cfg_file",
                               required=False,
                               default=argparse.SUPPRESS,
                               help="File containing argument which will be "
                                    "forwarded verbatim for the Clang Static "
                                    "Analyzer.")

    analyzer_opts.add_argument('--tidyargs',
                               dest="tidy_args_cfg_file",
                               required=False,
                               default=argparse.SUPPRESS,
                               help="File containing argument which will be "
                                    "forwarded verbatim for Clang-Tidy.")

    analyzer_opts.add_argument('--tidy-config',
                               dest='tidy_config',
                               required=False,
                               default=argparse.SUPPRESS,
                               help="A file in YAML format containing the "
                                    "configuration of clang-tidy checkers. "
                                    "The file can be dumped by "
                                    "'CodeChecker analyzers --dump-config "
                                    "clang-tidy' command.")

    analyzer_opts.add_argument('--analyzer-config',
                               dest='analyzer_config',
                               nargs='*',
                               default=["clang-tidy:HeaderFilterRegex=.*"],
                               help="Analyzer configuration options in the "
                                    "following format: analyzer:key=value. "
                                    "The collection of the options can be "
                                    "printed with "
                                    "'CodeChecker analyzers "
                                    "--analyzer-config'. To disable the "
                                    "default behaviour of this option you can "
                                    "use the "
                                    "'clang-tidy:take-config-from-directory="
                                    "true' option.")

    analyzer_opts.add_argument('--checker-config',
                               dest='checker_config',
                               nargs='*',
                               default=argparse.SUPPRESS,
                               help="Checker configuration options in the "
                                    "following format: analyzer:key=value. "
                                    "The collection of the options can be "
                                    "printed with "
                                    "'CodeChecker checkers --checker-config'.")

    analyzer_opts.add_argument('--timeout',
                               type=int,
                               dest='timeout',
                               required=False,
                               default=argparse.SUPPRESS,
                               help="The amount of time (in seconds) that "
                                    "each analyzer can spend, individually, "
                                    "to analyze the project. If the analysis "
                                    "of a particular file takes longer than "
                                    "this time, the analyzer is killed and "
                                    "the analysis is considered as a failed "
                                    "one.")

    # The following option groups are only offered when the detected Clang
    # supports the corresponding feature.
    context = analyzer_context.get_context()
    clang_has_z3 = analyzer_types.is_z3_capable(context)

    if clang_has_z3:
        analyzer_opts.add_argument('--z3',
                                   dest='enable_z3',
                                   choices=['on', 'off'],
                                   default='off',
                                   help="Enable the z3 solver backend. This "
                                        "allows reasoning over more complex "
                                        "queries, but performance is worse "
                                        "than the default range-based "
                                        "constraint solver.")

    clang_has_z3_refutation = analyzer_types.is_z3_refutation_capable(context)

    if clang_has_z3_refutation:
        analyzer_opts.add_argument(
            '--z3-refutation',
            dest='enable_z3_refutation',
            choices=['on', 'off'],
            default='on' if clang_has_z3_refutation else 'off',
            help="Switch on/off the Z3 SMT Solver "
                 "backend to "
                 "reduce false positives. The results "
                 "of the ranged based constraint "
                 "solver in the Clang Static Analyzer "
                 "will be cross checked with the Z3 "
                 "SMT solver. This should not cause "
                 "that much of a slowdown compared to "
                 "using the Z3 solver only.")

    if analyzer_types.is_ctu_capable(context):
        ctu_opts = parser.add_argument_group(
            "cross translation unit analysis arguments",
            """
These arguments are only available if the Clang Static Analyzer supports
Cross-TU analysis. By default, no CTU analysis is run when
'CodeChecker analyze' is called.""")

        ctu_modes = ctu_opts.add_mutually_exclusive_group()
        ctu_modes.add_argument('--ctu', '--ctu-all',
                               action='store_const',
                               const=[True, True],
                               dest='ctu_phases',
                               default=argparse.SUPPRESS,
                               help="Perform Cross Translation Unit (CTU) "
                                    "analysis, both 'collect' and 'analyze' "
                                    "phases. In this mode, the extra files "
                                    "created by 'collect' are cleaned up "
                                    "after the analysis.")

        ctu_modes.add_argument('--ctu-collect',
                               action='store_const',
                               const=[True, False],
                               dest='ctu_phases',
                               default=argparse.SUPPRESS,
                               help="Perform the first, 'collect' phase of "
                                    "Cross-TU analysis. This phase generates "
                                    "extra files needed by CTU analysis, and "
                                    "puts them into '<OUTPUT_DIR>/ctu-dir'. "
                                    "NOTE: If this argument is present, "
                                    "CodeChecker will NOT execute the "
                                    "analyzers!")

        ctu_modes.add_argument('--ctu-analyze',
                               action='store_const',
                               const=[False, True],
                               dest='ctu_phases',
                               default=argparse.SUPPRESS,
                               help="Perform the second, 'analyze' phase of "
                                    "Cross-TU analysis, using already "
                                    "available extra files in "
                                    "'<OUTPUT_DIR>/ctu-dir'. (These files "
                                    "will not be cleaned up in this mode.)")

        ctu_opts.add_argument('--ctu-reanalyze-on-failure',
                              action='store_true',
                              dest='ctu_reanalyze_on_failure',
                              default=argparse.SUPPRESS,
                              help="If Cross-TU analysis is enabled and fails "
                                   "for some reason, try to re analyze the "
                                   "same translation unit without "
                                   "Cross-TU enabled.")

    if analyzer_types.is_statistics_capable(context):
        stat_opts = parser.add_argument_group(
            "Statistics analysis feature arguments",
            """
These arguments are only available if the Clang Static Analyzer supports
Statistics-based analysis (e.g. statisticsCollector.ReturnValueCheck,
statisticsCollector.SpecialReturnValue checkers are available).""")

        # The option strings were accidentally duplicated in each of the next
        # two add_argument() calls ('--stats-collect', '--stats-collect');
        # a single occurrence registers the same flag.
        stat_opts.add_argument('--stats-collect',
                               action='store',
                               default=argparse.SUPPRESS,
                               dest='stats_output',
                               help="Perform the first, 'collect' phase of "
                                    "Statistical analysis. This phase "
                                    "generates extra files needed by "
                                    "statistics analysis, and "
                                    "puts them into "
                                    "'<STATS_OUTPUT>'."
                                    " NOTE: If this argument is present, "
                                    "CodeChecker will NOT execute the "
                                    "analyzers!")

        stat_opts.add_argument('--stats-use',
                               action='store',
                               default=argparse.SUPPRESS,
                               dest='stats_dir',
                               help="Use the previously generated statistics "
                                    "results for the analysis from the given "
                                    "'<STATS_DIR>'.")

        stat_opts.add_argument('--stats',
                               action='store_true',
                               default=argparse.SUPPRESS,
                               dest='stats_enabled',
                               help="Perform both phases of "
                                    "Statistical analysis. This phase "
                                    "generates extra files needed by "
                                    "statistics analysis and enables "
                                    "the statistical checkers. "
                                    "No need to enable them explicitly.")

        # Native numeric defaults instead of strings ("10", "0.85"): argparse
        # would coerce string defaults through type=, but literals are direct.
        stat_opts.add_argument('--stats-min-sample-count',
                               action='store',
                               default=10,
                               type=int,
                               dest='stats_min_sample_count',
                               help="Minimum number of samples (function call"
                                    " occurrences) to be collected"
                                    " for a statistics to be relevant "
                                    "'<MIN-SAMPLE-COUNT>'.")

        stat_opts.add_argument('--stats-relevance-threshold',
                               action='store',
                               default=0.85,
                               type=float,
                               dest='stats_relevance_threshold',
                               help="The minimum ratio of calls of function "
                                    "f that must have a certain property "
                                    "property to consider it true for that "
                                    "function (calculated as calls "
                                    "with a property/all calls)."
                                    " CodeChecker will warn for"
                                    " calls of f do not have that property."
                                    "'<RELEVANCE_THRESHOLD>'.")

    checkers_opts = parser.add_argument_group(
        "checker configuration",
        """
Checkers
------------------------------------------------
The analyzer performs checks that are categorized into families or "checkers".
See 'CodeChecker checkers' for the list of available checkers. You can
fine-tune which checkers to use in the analysis by setting the enabled and
disabled flags starting from the bigger groups and going inwards, e.g.
'-e core -d core.uninitialized -e core.uninitialized.Assign' will enable every
'core' checker, but only 'core.uninitialized.Assign' from the
'core.uninitialized' group. Please consult the manual for details. Disabling
certain checkers - such as the 'core' group - is unsupported by the LLVM/Clang
community, and thus discouraged.

Compiler warnings and errors
------------------------------------------------
Compiler warnings are diagnostic messages that report constructions that are
not inherently erroneous but that are risky or suggest there may have been an
error. Compiler warnings are named 'clang-diagnostic-<warning-option>', e.g.
Clang warning controlled by '-Wliteral-conversion' will be reported with check
name 'clang-diagnostic-literal-conversion'. You can fine-tune which warnings to
use in the analysis by setting the enabled and disabled flags starting from the
bigger groups and going inwards, e.g. '-e Wunused -d Wno-unused-parameter' will
enable every 'unused' warnings except 'unused-parameter'. These flags should
start with a capital 'W' or 'Wno-' prefix followed by the waning name (E.g.:
'-e Wliteral-conversion', '-d Wno-literal-conversion'). By default '-Wall' and
'-Wextra' warnings are enabled. For more information see:
https://clang.llvm.org/docs/DiagnosticsReference.html.
Sometimes GCC is more permissive than Clang, so it is possible that a specific
construction doesn't compile with Clang but compiles with GCC. These
compiler errors are also collected as CodeChecker reports as
'clang-diagnostic-error'.
Note that compiler errors and warnings are captured by CodeChecker only if it
was emitted by clang-tidy.""")

    checkers_opts.add_argument('-e', '--enable',
                               dest="enable",
                               metavar='checker/group/profile',
                               default=argparse.SUPPRESS,
                               action=OrderedCheckersAction,
                               help="Set a checker (or checker group) "
                                    "to BE USED in the analysis.")

    checkers_opts.add_argument('-d', '--disable',
                               dest="disable",
                               metavar='checker/group/profile',
                               default=argparse.SUPPRESS,
                               action=OrderedCheckersAction,
                               help="Set a checker (or checker group) "
                                    "to BE PROHIBITED from use in the "
                                    "analysis.")

    checkers_opts.add_argument('--enable-all',
                               dest="enable_all",
                               action='store_true',
                               required=False,
                               default=argparse.SUPPRESS,
                               help="Force the running analyzers to use "
                                    "almost every checker available. The "
                                    "checker groups 'alpha.', 'debug.' and "
                                    "'osx.' (on Linux) are NOT enabled "
                                    "automatically and must be EXPLICITLY "
                                    "specified. WARNING! Enabling all "
                                    "checkers might result in the analysis "
                                    "losing precision and stability, and "
                                    "could even result in a total failure of "
                                    "the analysis. USE WISELY AND AT YOUR "
                                    "OWN RISK!")

    logger.add_verbose_arguments(parser)
    parser.set_defaults(func=main,
                        func_process_config_file=process_config_file)
def main(args):
    """
    List the checkers available in the specified (or all supported) analyzers
    alongside with their description or enabled status in various formats.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    # If nothing is set, list checkers for all supported analyzers.
    analyzers = args.analyzers \
        if 'analyzers' in args \
        else analyzer_types.supported_analyzers

    context = analyzer_context.get_context()
    working, errored = analyzer_types.check_supported_analyzers(analyzers,
                                                                context)

    analyzer_environment = get_check_env(context.path_env_extra,
                                         context.ld_lib_path_extra)

    analyzer_config_map = analyzer_types.build_config_handlers(args,
                                                               context,
                                                               working)
    # List available checker profiles.
    if 'profile' in args and args.profile == 'list':
        if 'details' not in args:
            if args.output_format not in ['csv', 'json']:
                header = ['Profile name']
            else:
                header = ['profile_name']
        else:
            if args.output_format not in ['csv', 'json']:
                header = ['Profile name', 'Description']
            else:
                header = ['profile_name', 'description']

        rows = []
        for (profile, description) in context.available_profiles.items():
            if 'details' not in args:
                rows.append([profile])
            else:
                rows.append([profile, description])

        print(output_formatters.twodim_to_str(args.output_format,
                                              header, rows))
        return

    # Use good looking different headers based on format.
    if 'details' not in args:
        if args.output_format not in ['csv', 'json']:
            header = ['Name']
        else:
            header = ['name']
    else:
        if args.output_format not in ['csv', 'json']:
            header = ['', 'Name', 'Analyzer', 'Severity', 'Description']
        else:
            header = ['enabled', 'name', 'analyzer', 'severity', 'description']

    rows = []
    for analyzer in working:
        config_handler = analyzer_config_map.get(analyzer)
        analyzer_class = \
            analyzer_types.supported_analyzers[analyzer]

        checkers = analyzer_class.get_analyzer_checkers(config_handler,
                                                        analyzer_environment)
        default_checker_cfg = context.checker_config.get(
            analyzer + '_checkers')

        profile_checkers = None
        if 'profile' in args:
            if args.profile not in context.available_profiles:
                LOG.error("Checker profile '%s' does not exist!",
                          args.profile)
                LOG.error("To list available profiles, use '--profile list'.")
                return

            profile_checkers = [(args.profile, True)]

        config_handler.initialize_checkers(context.available_profiles,
                                           context.package_root,
                                           checkers,
                                           default_checker_cfg,
                                           profile_checkers)

        for checker_name, value in config_handler.checks().items():
            enabled, description = value

            # Filter the rows by the requested enabled/disabled state.
            if not enabled and 'profile' in args:
                continue

            if enabled and 'only_disabled' in args:
                continue
            elif not enabled and 'only_enabled' in args:
                continue

            if args.output_format != 'json':
                enabled = '+' if enabled else '-'

            if 'details' not in args:
                rows.append([checker_name])
            else:
                severity = context.severity_map.get(checker_name)
                rows.append([enabled, checker_name, analyzer,
                             severity, description])

    if len(rows) > 0:
        print(output_formatters.twodim_to_str(args.output_format,
                                              header, rows))

    for analyzer_binary, reason in errored:
        # BUG FIX: the two literals were concatenated without a separator,
        # producing "...'%s'!The error reason...". Add the missing space.
        LOG.error("Failed to get checkers for '%s'! "
                  "The error reason was: '%s'", analyzer_binary, reason)
        LOG.error("Please check your installation and the "
                  "'config/package_layout.json' file!")
def main(args):
    """
    List the analyzers' basic information supported by CodeChecker.
    """
    # If the given output format is not 'table', redirect logger's output to
    # the stderr.
    stream = None
    if 'output_format' in args and args.output_format != 'table':
        stream = 'stderr'

    logger.setup_logger(args.verbose if 'verbose' in args else None, stream)

    context = analyzer_context.get_context()
    working, errored = \
        analyzer_types.check_supported_analyzers(
            analyzer_types.supported_analyzers,
            context)

    if args.dump_config:
        binary = context.analyzer_binaries.get(args.dump_config)

        if args.dump_config == 'clang-tidy':
            subprocess.call([binary, '-dump-config', '-checks=*'])
        elif args.dump_config == 'clangsa':
            ret = subprocess.call([binary,
                                   '-cc1',
                                   '-analyzer-checker-option-help',
                                   '-analyzer-checker-option-help-alpha'],
                                  stderr=subprocess.PIPE)

            if ret:
                # This flag is supported from Clang 9.
                LOG.warning("'--dump-config clangsa' is not supported yet. "
                            "Please make sure that you are using Clang 9 or "
                            "newer.")

        return

    if args.output_format not in ['csv', 'json']:
        if 'details' not in args:
            header = ['Name']
        else:
            header = ['Name', 'Path', 'Version']
    else:
        if 'details' not in args:
            header = ['name']
        else:
            header = ['name', 'path', 'version_string']

    rows = []
    for analyzer in working:
        if 'details' not in args:
            rows.append([analyzer])
        else:
            binary = context.analyzer_binaries.get(analyzer)
            try:
                # BUG FIX: without text mode check_output() returns bytes on
                # Python 3 and the table would show "b'...'"; request str.
                version = subprocess.check_output([binary, '--version'],
                                                  universal_newlines=True)
            except (subprocess.CalledProcessError, OSError):
                version = 'ERROR'

            rows.append([analyzer, binary, version])

    if 'all' in args:
        for analyzer, err_reason in errored:
            if 'details' not in args:
                rows.append([analyzer])
            else:
                rows.append([analyzer,
                             context.analyzer_binaries.get(analyzer),
                             err_reason])

    if rows:
        print(output_formatters.twodim_to_str(args.output_format,
                                              header, rows))
def main(args):
    """
    Perform analysis on the given logfiles and store the results in a machine-
    readable format.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    if len(args.logfile) != 1:
        LOG.warning("Only one log file can be processed right now!")
        sys.exit(1)

    args.output_path = os.path.abspath(args.output_path)
    if os.path.exists(args.output_path) and \
            not os.path.isdir(args.output_path):
        # Lazy %-style args: the message is only formatted if emitted.
        LOG.error("The given output path is not a directory: %s",
                  args.output_path)
        sys.exit(1)

    if 'enable_all' in args:
        LOG.info("'--enable-all' was supplied for this analysis.")

    # We clear the output directory in the following cases.
    ctu_dir = os.path.join(args.output_path, 'ctu-dir')
    if 'ctu_phases' in args and args.ctu_phases[0] and \
            os.path.isdir(ctu_dir):
        # Clear the CTU-dir if the user turned on the collection phase.
        LOG.debug("Previous CTU contents have been deleted.")
        shutil.rmtree(ctu_dir)

    if 'clean' in args and os.path.isdir(args.output_path):
        LOG.info("Previous analysis results in '%s' have been removed, "
                 "overwriting with current result", args.output_path)
        shutil.rmtree(args.output_path)

    if not os.path.exists(args.output_path):
        os.makedirs(args.output_path)

    LOG.debug("args: %s", str(args))
    LOG.debug("Output will be stored to: '%s'", args.output_path)

    # Process the skip list if present.
    skip_handler = __get_skip_handler(args)

    # Enable alpha uniqueing by default if ctu analysis is used.
    if 'none' in args.compile_uniqueing and 'ctu_phases' in args:
        args.compile_uniqueing = "alpha"

    compiler_info_file = None
    if 'compiler_info_file' in args:
        LOG.debug("Compiler info is read from: %s", args.compiler_info_file)
        if not os.path.exists(args.compiler_info_file):
            LOG.error("Compiler info file %s does not exist",
                      args.compiler_info_file)
            sys.exit(1)
        compiler_info_file = args.compiler_info_file

    report_dir = args.output_path

    # Skip list is applied only in pre-analysis
    # if --ctu-collect or --stats-collect was called explicitly.
    pre_analysis_skip_handler = None
    if 'ctu_phases' in args:
        ctu_collect = args.ctu_phases[0]
        ctu_analyze = args.ctu_phases[1]
        if ((ctu_collect and not ctu_analyze)
                or ("stats_output" in args and args.stats_output)):
            pre_analysis_skip_handler = skip_handler

    # Parse the JSON CCDBs and retrieve the compile commands.
    actions = []
    for log_file in args.logfile:
        if not os.path.exists(log_file):
            LOG.error("The specified logfile '%s' does not exist!",
                      log_file)
            continue

        actions += log_parser.parse_unique_log(
            load_json_or_empty(log_file),
            report_dir,
            args.compile_uniqueing,
            compiler_info_file,
            args.keep_gcc_include_fixed,
            skip_handler,
            pre_analysis_skip_handler)

    if not actions:
        LOG.info("No analysis is required.\nThere were no compilation "
                 "commands in the provided compilation database or "
                 "all of them were skipped.")
        sys.exit(0)

    uniqued_compilation_db_file = os.path.join(
        args.output_path, "unique_compile_commands.json")
    with open(uniqued_compilation_db_file, 'w') as f:
        json.dump(actions, f, cls=log_parser.CompileCommandEncoder)

    context = analyzer_context.get_context()
    metadata = {'action_num': len(actions),
                'command': sys.argv,
                'versions': {
                    'codechecker': "{0} ({1})".format(
                        context.package_git_tag,
                        context.package_git_hash)},
                'working_directory': os.getcwd(),
                'output_path': args.output_path,
                'result_source_files': {}}

    if 'name' in args:
        metadata['name'] = args.name

    # Update metadata dictionary with old values.
    metadata_file = os.path.join(args.output_path, 'metadata.json')
    if os.path.exists(metadata_file):
        metadata_prev = load_json_or_empty(metadata_file)
        # ROBUSTNESS FIX: older/corrupt metadata files may lack this key;
        # fall back to the empty mapping already set above.
        metadata['result_source_files'] = \
            metadata_prev.get('result_source_files', {})

    analyzer.perform_analysis(args, skip_handler, context, actions, metadata)

    __update_skip_file(args)

    LOG.debug("Analysis metadata write to '%s'", metadata_file)
    with open(metadata_file, 'w') as metafile:
        json.dump(metadata, metafile)

    # WARN: store command will search for this file!!!!
    compile_cmd_json = os.path.join(args.output_path, 'compile_cmd.json')
    try:
        source = os.path.abspath(args.logfile[0])
        target = os.path.abspath(compile_cmd_json)

        if source != target:
            shutil.copyfile(source, target)
    except shutil.Error:
        LOG.debug("Compilation database JSON file is the same.")
    except Exception:
        LOG.debug("Copying compilation database JSON file failed.")

    # Best-effort usage-statistics collection; failures must never break
    # the analysis itself.
    try:
        from codechecker_analyzer import analyzer_statistics
        analyzer_statistics.collect(metadata, "analyze")
    except Exception:
        pass