def main(args):
    """
    List the analyzers' basic information supported by CodeChecker.

    With '--dump-config' only the chosen analyzer's configuration is
    emitted; otherwise a table of working (and optionally errored)
    analyzers is printed in the requested output format.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    context = package_context.get_context()
    working, errored = \
        analyzer_types.check_supported_analyzers(
            analyzer_types.supported_analyzers, context)

    if args.dump_config:
        binary = context.analyzer_binaries.get(args.dump_config)

        if args.dump_config == 'clang-tidy':
            subprocess.call([binary, '-dump-config', '-checks=*'])
        elif args.dump_config == 'clangsa':
            # TODO: Not supported by ClangSA yet!
            LOG.warning("'--dump-config clangsa' is not supported yet.")

        # Dumping a config replaces listing entirely.
        return

    # Machine-readable formats get lowercase snake_case headers.
    if args.output_format not in ['csv', 'json']:
        if 'details' not in args:
            header = ['Name']
        else:
            header = ['Name', 'Path', 'Version']
    else:
        if 'details' not in args:
            header = ['name']
        else:
            header = ['name', 'path', 'version_string']

    rows = []
    for analyzer in working:
        if 'details' not in args:
            rows.append([analyzer])
        else:
            binary = context.analyzer_binaries.get(analyzer)
            try:
                version = subprocess.check_output([binary, '--version'])
            except (subprocess.CalledProcessError, OSError):
                version = 'ERROR'
            rows.append([analyzer, binary, version])

    if 'all' in args:
        # '--all' also lists analyzers that failed the support check,
        # with the failure reason in the details column.
        for analyzer, err_reason in errored:
            if 'details' not in args:
                rows.append([analyzer])
            else:
                rows.append([analyzer,
                             context.analyzer_binaries.get(analyzer),
                             err_reason])

    # FIX: idiomatic truthiness test instead of len(rows) > 0.
    if rows:
        print(output_formatters.twodim_to_str(args.output_format,
                                              header, rows))
def get_argparser_ctor_args():
    """
    This method returns a dict containing the kwargs for constructing an
    argparse.ArgumentParser (either directly or as a subparser).
    """
    return {
        'prog': 'CodeChecker checkers',
        'formatter_class': argparse.ArgumentDefaultsHelpFormatter,

        # Description is shown when the command's help is queried directly.
        'description': "Get the list of checkers available and their enabled "
                       "status in the supported analyzers. Currently "
                       "supported analyzers are: " +
                       ', '.join(analyzer_types.supported_analyzers) + ".",

        # Epilogue is shown after the arguments when the help is queried
        # directly.
        # FIX: user-visible typo corrected ("enabled of disabled" ->
        # "enabled or disabled").
        'epilog': "The list of checkers that are enabled or disabled by "
                  "default can be edited by editing the file '" +
                  os.path.join(package_context.get_context().package_root,
                               'config', 'config.json') + "'.",

        # Help is shown when the "parent" CodeChecker command lists the
        # individual subcommands.
        'help': "List the checkers available for code analysis."
    }
def is_ctu_capable():
    """
    Detects if the current clang is CTU compatible.

    Capability is probed by running the function-map tool with '-version';
    any failure to execute it means CTU analysis is unavailable.
    """
    context = package_context.get_context()
    ctu_func_map_cmd = context.ctu_func_map_cmd
    try:
        subprocess.check_output([ctu_func_map_cmd, '-version'])
    except (subprocess.CalledProcessError, OSError):
        # Tool missing or broken -- no CTU support.
        # FIX: return the result directly instead of routing it through an
        # 'ERROR' string sentinel, which could be confused with a tool that
        # legitimately prints "ERROR".
        return False
    return True
def add_arguments_to_parser(parser):
    """
    Add the subcommand's arguments to the given argparse.ArgumentParser.
    """
    # Probe which analyzers are actually usable in this installation so
    # '--dump-config' only offers working choices.
    context = package_context.get_context()
    working, _ = analyzer_types.check_supported_analyzers(
        analyzer_types.supported_analyzers, context)

    # NOTE: argparse.SUPPRESS as default keeps the attribute out of the
    # parsed namespace unless the flag was given; main() relies on
    # "'all' in args"-style membership tests.
    parser.add_argument('--all',
                        dest="all",
                        action='store_true',
                        default=argparse.SUPPRESS,
                        required=False,
                        help="Show all supported analyzers, not just the "
                             "available ones.")

    parser.add_argument('--details',
                        dest="details",
                        action='store_true',
                        default=argparse.SUPPRESS,
                        required=False,
                        help="Show details about the analyzers, not just "
                             "their names.")

    parser.add_argument('--dump-config',
                        dest='dump_config',
                        required=False,
                        choices=list(working),
                        help="Dump the available checker options for the "
                             "given analyzer to the standard output. "
                             "Currently only clang-tidy supports this option. "
                             "The output can be redirected to a file named "
                             ".clang-tidy. If this file is placed to the "
                             "project directory then the options are applied "
                             "to the files under that directory. This config "
                             "file can also be provided via "
                             "'CodeChecker analyze' and 'CodeChecker check' "
                             "commands.")

    parser.add_argument('-o', '--output',
                        dest='output_format',
                        required=False,
                        default='rows',
                        choices=output_formatters.USER_FORMATS,
                        help="Specify the format of the output list.")

    logger.add_verbose_arguments(parser)
    parser.set_defaults(func=main)
def main(args):
    """
    Generates a build log by running the original build command. No
    analysis is done.
    """
    verbosity = args.verbose if 'verbose' in args else None
    logger.setup_logger(verbosity)

    # Always start from a fresh log file at the resolved location.
    log_path = os.path.realpath(args.logfile)
    args.logfile = log_path
    if os.path.exists(log_path):
        os.remove(log_path)

    context = package_context.get_context()
    build_manager.perform_build_command(log_path,
                                        args.command,
                                        context,
                                        'keep_link' in args,
                                        silent='quiet' in args)
def is_statistics_capable():
    """
    Detects if the current clang is Statistics compatible.

    Builds a ClangSA configuration, queries the analyzer's checker list
    and reports whether any statistics-collector checker is present.
    """
    context = package_context.get_context()

    # FIX: the original reused the name 'analyzer' for both the analyzer
    # name and the analyzer instance; use distinct names.
    analyzer_name = "clangsa"
    enabled_analyzers = [analyzer_name]
    cfg_handlers = analyzer_types.build_config_handlers({},
                                                        context,
                                                        enabled_analyzers)
    clangsa_cfg = cfg_handlers[analyzer_name]
    analyzer = analyzer_types.supported_analyzers[analyzer_name](clangsa_cfg,
                                                                 None)

    check_env = analyzer_env.get_check_env(context.path_env_extra,
                                           context.ld_lib_path_extra)

    checkers = analyzer.get_analyzer_checkers(clangsa_cfg, check_env)

    # FIX: any() over a generator instead of a manual search loop.
    stat_checkers_pattern = re.compile(r'.+statisticscollector.+')
    return any(stat_checkers_pattern.match(checker_name)
               for checker_name, _ in checkers)
def server_init_start(args):
    """
    Start or manage a CodeChecker report server.
    """
    # Instance-management sub-commands short-circuit: handle and leave.
    if 'list' in args or 'stop' in args or 'stop_all' in args:
        __instance_management(args)
        sys.exit(0)

    if 'reload' in args:
        __reload_config(args)
        sys.exit(0)

    # Actual server starting from this point.
    if not host_check.check_zlib():
        raise Exception("zlib is not available on the system!")

    # WARNING
    # In case of SQLite args.dbaddress default value is used
    # for which the is_localhost should return true.
    if util.is_localhost(args.dbaddress) and \
            not os.path.exists(args.config_directory):
        os.makedirs(args.config_directory)

    # Make sure the SQLite file can be created if it not exists.
    if 'sqlite' in args and \
            not os.path.isdir(os.path.dirname(args.sqlite)):
        os.makedirs(os.path.dirname(args.sqlite))

    if 'reset_root' in args:
        try:
            os.remove(os.path.join(args.config_directory, 'root.user'))
            LOG.info("Master superuser (root) credentials invalidated and "
                     "deleted. New ones will be generated...")
        except OSError:
            # File doesn't exist.
            pass

    if 'force_auth' in args:
        LOG.info("'--force-authentication' was passed as a command-line "
                 "option. The server will ask for users to authenticate!")

    context = package_context.get_context()
    context.codechecker_workspace = args.config_directory
    context.db_username = args.dbusername

    check_env = analyzer_env.get_check_env(context.path_env_extra,
                                           context.ld_lib_path_extra)

    cfg_sql_server = database.SQLServer.from_cmdline_args(
        vars(args), CONFIG_META, context.config_migration_root,
        interactive=True, env=check_env)

    LOG.info("Checking configuration database ...")
    db_status = cfg_sql_server.connect()
    db_status_msg = database_status.db_status_msg.get(db_status)
    LOG.info(db_status_msg)

    if db_status == DBStatus.SCHEMA_MISSING:
        # First run: create the configuration schema.
        LOG.debug("Config database schema is missing, initializing new.")
        db_status = cfg_sql_server.connect(init=True)
        if db_status != DBStatus.OK:
            LOG.error("Config database initialization failed!")
            LOG.error("Please check debug logs.")
            sys.exit(1)

    if db_status == DBStatus.SCHEMA_MISMATCH_NO:
        LOG.debug("Configuration database schema mismatch.")
        LOG.debug("No schema upgrade is possible.")
        sys.exit(1)

    force_upgrade = True if 'force_upgrade' in args else False

    if db_status == DBStatus.SCHEMA_MISMATCH_OK:
        LOG.debug("Configuration database schema mismatch.")
        LOG.debug("Schema upgrade is possible.")
        # NOTE(review): the adjacent literals "...can be used" and
        # "to start..." concatenate without a separating space in the
        # logged message -- looks like a missing trailing space.
        LOG.warning("Please note after migration only "
                    "newer CodeChecker versions can be used"
                    "to start the server")
        LOG.warning("It is advised to make a full backup of your "
                    "configuration database")

        LOG.warning(cfg_sql_server.get_db_location())

        question = 'Do you want to upgrade to the new schema?' \
                   ' Y(es)/n(o) '
        if force_upgrade or util.get_user_input(question):
            print("Upgrading schema ...")
            ret = cfg_sql_server.upgrade()
            msg = database_status.db_status_msg.get(
                ret, 'Unknown database status')
            print(msg)
            if ret != DBStatus.OK:
                LOG.error("Schema migration failed")
                sys.exit(ret)
        else:
            LOG.info("No schema migration was done.")
            sys.exit(0)

    if db_status == DBStatus.MISSING:
        LOG.error("Missing configuration database.")
        LOG.error("Server can not be started.")
        sys.exit(1)

    # Configuration database setup and check is needed before database
    # statuses can be checked.
    try:
        if args.status:
            ret = __db_status_check(cfg_sql_server, context, args.status)
            sys.exit(ret)
    except AttributeError:
        LOG.debug('Status was not in the arguments.')

    try:
        if args.product_to_upgrade:
            ret = __db_migration(cfg_sql_server, context,
                                 args.product_to_upgrade, force_upgrade)
            sys.exit(ret)
    except AttributeError:
        LOG.debug('Product upgrade was not in the arguments.')

    # Create the main database link from the arguments passed over the
    # command line.
    cfg_dir = os.path.abspath(args.config_directory)
    default_product_path = os.path.join(cfg_dir, 'Default.sqlite')
    create_default_product = 'sqlite' in args and \
        not os.path.exists(default_product_path)

    if create_default_product:
        # Create a default product and add it to the configuration database.
        LOG.debug("Create default product...")
        LOG.debug("Configuring schema and migration...")

        prod_server = database.SQLiteDatabase(
            default_product_path, RUN_META,
            context.run_migration_root, check_env)

        LOG.debug("Checking 'Default' product database.")
        db_status = prod_server.connect()
        if db_status != DBStatus.MISSING:
            db_status = prod_server.connect(init=True)
            LOG.debug(database_status.db_status_msg.get(db_status))
            if db_status != DBStatus.OK:
                LOG.error("Failed to configure default product")
                sys.exit(1)

        product_conn_string = prod_server.get_connection_string()

        server.add_initial_run_database(
            cfg_sql_server, product_conn_string)

        LOG.info("Product 'Default' at '{0}' created and set up."
                 .format(default_product_path))

    prod_statuses = check_product_db_status(cfg_sql_server, context)

    # Collect the products whose database can be migrated or initialized.
    upgrade_available = {}
    for k, v in prod_statuses.items():
        db_status, _, _, _ = v
        if db_status == DBStatus.SCHEMA_MISMATCH_OK or \
                db_status == DBStatus.SCHEMA_MISSING:
            upgrade_available[k] = v

    if upgrade_available:
        print_prod_status(prod_statuses)
        LOG.warning("Multiple products can be upgraded, make a backup!")
        __db_migration(cfg_sql_server, context, 'all', force_upgrade)

    # Re-check after any migration so the status report is current.
    prod_statuses = check_product_db_status(cfg_sql_server, context)
    print_prod_status(prod_statuses)

    non_ok_db = False
    for k, v in prod_statuses.items():
        db_status, _, _, _ = v
        if db_status != DBStatus.OK:
            non_ok_db = True
            break

    if non_ok_db:
        msg = "There are some database issues. " \
              "Do you want to start the " \
              "server? Y(es)/n(o) "
        if not util.get_user_input(msg):
            sys.exit(1)

    # Start database viewer.
    checker_md_docs = os.path.join(context.doc_root, 'checker_md_docs')
    checker_md_docs_map = os.path.join(checker_md_docs,
                                       'checker_doc_map.json')
    checker_md_docs_map = util.load_json_or_empty(checker_md_docs_map, {})

    package_data = {'www_root': context.www_root,
                    'doc_root': context.doc_root,
                    'checker_md_docs': checker_md_docs,
                    'checker_md_docs_map': checker_md_docs_map,
                    'version': context.package_git_tag}

    suppr_handler = suppress_handler. \
        GenericSuppressHandler(None, False)

    try:
        server.start_server(args.config_directory,
                            package_data,
                            args.view_port,
                            cfg_sql_server,
                            suppr_handler,
                            args.listen_address,
                            'force_auth' in args,
                            args.skip_db_cleanup,
                            context,
                            check_env)
    except socket.error as err:
        if err.errno == errno.EADDRINUSE:
            LOG.error("Server can't be started, maybe the given port number "
                      "({}) is already used. Check the connection "
                      "parameters.".format(args.view_port))
            sys.exit(1)
        else:
            raise
def main(args):
    """
    Entry point for parsing some analysis results and printing them to the
    stdout in a human-readable format.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    context = package_context.get_context()

    # To ensure the help message prints the default folder properly,
    # the 'default' for 'args.input' is a string, not a list.
    # But we need lists for the foreach here to work.
    if isinstance(args.input, str):
        args.input = [args.input]

    original_cwd = os.getcwd()

    suppr_handler = None
    if 'suppress' in args:
        __make_handler = False
        if not os.path.isfile(args.suppress):
            if 'create_suppress' in args:
                with open(args.suppress, 'w') as _:
                    # Just create the file.
                    __make_handler = True
                    LOG.info("Will write source-code suppressions to "
                             "suppress file.")
            else:
                LOG.warning("Suppress file '" + args.suppress + "' given, but "
                            "it does not exist -- will not suppress anything.")
        else:
            __make_handler = True

        if __make_handler:
            suppr_handler = suppress_handler.\
                GenericSuppressHandler(args.suppress,
                                       'create_suppress' in args)
    elif 'create_suppress' in args:
        # '--export-source-suppress' only makes sense with a suppress file.
        LOG.error("Can't use '--export-source-suppress' unless '--suppress "
                  "SUPPRESS_FILE' is also given.")
        sys.exit(2)

    # Shared across all inputs so duplicated reports are only shown once.
    processed_path_hashes = set()

    def skip_html_report_data_handler(report_hash, source_file, report_line,
                                      checker_name, diag, files):
        """
        Report handler which skips bugs which were suppressed by source code
        comments.
        """
        report = Report(None, diag['path'], files)
        path_hash = get_report_path_hash(report, files)
        if path_hash in processed_path_hashes:
            LOG.debug("Skip report because it is a deduplication of an "
                      "already processed report!")
            LOG.debug("Path hash: %s", path_hash)
            LOG.debug(diag)
            return True

        skip = plist_parser.skip_report(report_hash,
                                        source_file,
                                        report_line,
                                        checker_name,
                                        suppr_handler)
        if not skip:
            processed_path_hashes.add(path_hash)

        return skip

    skip_handler = None
    if 'skipfile' in args:
        with open(args.skipfile, 'r') as skip_file:
            skip_handler = SkipListHandler(skip_file.read())

    html_builder = None

    for input_path in args.input:
        input_path = os.path.abspath(input_path)
        # Reset the cwd; a previous iteration may have changed it.
        os.chdir(original_cwd)
        LOG.debug("Parsing input argument: '" + input_path + "'")

        export = args.export if 'export' in args else None
        if export is not None and export == 'html':
            output_path = os.path.abspath(args.output_path)

            # One HtmlBuilder instance is reused for every input.
            if not html_builder:
                html_builder = \
                    PlistToHtml.HtmlBuilder(context.path_plist_to_html_dist,
                                            context.severity_map)

            LOG.info("Generating html output files:")
            PlistToHtml.parse(input_path,
                              output_path,
                              context.path_plist_to_html_dist,
                              skip_html_report_data_handler,
                              html_builder)
            continue

        files = []
        metadata_dict = {}
        if os.path.isfile(input_path):
            files.append(input_path)
        elif os.path.isdir(input_path):
            metadata_file = os.path.join(input_path, "metadata.json")
            if os.path.exists(metadata_file):
                metadata_dict = util.load_json_or_empty(metadata_file)
                LOG.debug(metadata_dict)

                # Reports carry paths relative to the analysis run's cwd.
                if 'working_directory' in metadata_dict:
                    working_dir = metadata_dict['working_directory']
                    try:
                        os.chdir(working_dir)
                    except OSError as oerr:
                        LOG.debug(oerr)
                        LOG.error(
                            "Working directory %s is missing.\n"
                            "Can not parse reports safely.", working_dir)
                        sys.exit(1)

            _, _, file_names = next(os.walk(input_path), ([], [], []))
            files = [
                os.path.join(input_path, file_name)
                for file_name in file_names
            ]

        file_change = set()
        file_report_map = defaultdict(list)

        rh = plist_parser.PlistToPlaintextFormatter(suppr_handler,
                                                    skip_handler,
                                                    context.severity_map,
                                                    processed_path_hashes)
        rh.print_steps = 'print_steps' in args

        for file_path in files:
            f_change = parse(file_path, metadata_dict, rh, file_report_map)
            file_change = file_change.union(f_change)

        report_stats = rh.write(file_report_map)
        severity_stats = report_stats.get('severity')
        file_stats = report_stats.get('files')
        reports_stats = report_stats.get('reports')

        print("\n----==== Summary ====----")
        if file_stats:
            vals = [[os.path.basename(k), v] for k, v in
                    dict(file_stats).items()]
            keys = ['Filename', 'Report count']
            table = twodim_to_str('table', keys, vals, 1, True)
            print(table)

        if severity_stats:
            vals = [[k, v] for k, v in dict(severity_stats).items()]
            keys = ['Severity', 'Report count']
            table = twodim_to_str('table', keys, vals, 1, True)
            print(table)

        report_count = reports_stats.get("report_count", 0)
        print("----=================----")
        print("Total number of reports: {}".format(report_count))
        print("----=================----")

        if file_change:
            changed_files = '\n'.join([' - ' + f for f in file_change])
            LOG.warning("The following source file contents changed since the "
                        "latest analysis:\n{0}\nMultiple reports were not "
                        "shown and skipped from the statistics. Please "
                        "analyze your project again to update the "
                        "reports!".format(changed_files))

    os.chdir(original_cwd)

    # Create index.html for the generated html files.
    if html_builder:
        html_builder.create_index_html(args.output_path)
def main(args):
    """
    List the checkers available in the specified (or all supported) analyzers
    alongside with their description or enabled status in various formats.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    # If nothing is set, list checkers for all supported analyzers.
    analyzers = args.analyzers \
        if 'analyzers' in args \
        else analyzer_types.supported_analyzers

    context = package_context.get_context()
    working, errored = analyzer_types.check_supported_analyzers(
        analyzers, context)

    analyzer_environment = analyzer_env.get_check_env(
        context.path_env_extra, context.ld_lib_path_extra)

    analyzer_config_map = analyzer_types.build_config_handlers(
        args, context, working)

    # List available checker profiles.
    if 'profile' in args and args.profile == 'list':
        # Machine-readable formats get snake_case headers.
        if 'details' not in args:
            if args.output_format not in ['csv', 'json']:
                header = ['Profile name']
            else:
                header = ['profile_name']
        else:
            if args.output_format not in ['csv', 'json']:
                header = ['Profile name', 'Description']
            else:
                header = ['profile_name', 'description']

        rows = []
        for (profile, description) in context.available_profiles.items():
            if 'details' not in args:
                rows.append([profile])
            else:
                rows.append([profile, description])

        print(output_formatters.twodim_to_str(args.output_format,
                                              header, rows))
        return

    # Use good looking different headers based on format.
    if 'details' not in args:
        if args.output_format not in ['csv', 'json']:
            header = ['Name']
        else:
            header = ['name']
    else:
        if args.output_format not in ['csv', 'json']:
            header = ['', 'Name', 'Analyzer', 'Severity', 'Description']
        else:
            header = ['enabled', 'name', 'analyzer', 'severity',
                      'description']

    rows = []
    for analyzer in working:
        config_handler = analyzer_config_map.get(analyzer)
        analyzer_class = \
            analyzer_types.supported_analyzers[analyzer]

        checkers = analyzer_class.get_analyzer_checkers(config_handler,
                                                        analyzer_environment)
        default_checker_cfg = context.checker_config.get(
            analyzer + '_checkers')

        profile_checkers = None
        if 'profile' in args:
            if args.profile not in context.available_profiles:
                LOG.error("Checker profile '" + args.profile +
                          "' does not exist!")
                LOG.error("To list available profiles, use '--profile "
                          "list'.")
                return

            profile_checkers = [(args.profile, True)]

        config_handler.initialize_checkers(context.available_profiles,
                                           context.package_root,
                                           checkers,
                                           default_checker_cfg,
                                           profile_checkers)

        for checker_name, value in config_handler.checks().items():
            enabled, description = value

            # With '--profile' only the profile's enabled checkers matter.
            if not enabled and 'profile' in args:
                continue

            if enabled and 'only_disabled' in args:
                continue
            elif not enabled and 'only_enabled' in args:
                continue

            # JSON keeps the boolean; human formats show '+'/'-'.
            if args.output_format != 'json':
                enabled = '+' if enabled else '-'

            if 'details' not in args:
                rows.append([checker_name])
            else:
                severity = context.severity_map.get(checker_name)
                rows.append([enabled, checker_name, analyzer,
                             severity, description])

    # FIX: idiomatic truthiness test instead of len(rows) > 0.
    if rows:
        print(output_formatters.twodim_to_str(args.output_format,
                                              header, rows))

    for analyzer_binary, reason in errored:
        LOG.error("Failed to get checkers for '" + analyzer_binary +
                  "'! The error reason was: '" + reason + "'")
        LOG.error("Please check your installation and the "
                  "'config/package_layout.json' file!")
def add_arguments_to_parser(parser):
    """
    Add the subcommand's arguments to the given argparse.ArgumentParser.
    """
    # NOTE: argparse.SUPPRESS defaults keep unset options out of the parsed
    # namespace, so main() can test presence with "'opt' in args".
    parser.add_argument('logfile',
                        type=str,
                        nargs='+',
                        help="Path to the JSON compilation command database "
                             "files which were created during the build. "
                             "The analyzers will check only the files "
                             "registered in these build databases.")

    parser.add_argument('-j', '--jobs',
                        type=int,
                        dest="jobs",
                        required=False,
                        default=1,
                        help="Number of threads to use in analysis. More "
                             "threads mean faster analysis at the cost of "
                             "using more memory.")

    parser.add_argument('-i', '--ignore', '--skip',
                        dest="skipfile",
                        required=False,
                        default=argparse.SUPPRESS,
                        help="Path to the Skipfile dictating which project "
                             "files should be omitted from analysis. Please "
                             "consult the User guide on how a Skipfile "
                             "should be laid out.")

    parser.add_argument('-o', '--output',
                        dest="output_path",
                        required=True,
                        default=argparse.SUPPRESS,
                        help="Store the analysis output in the given folder.")

    parser.add_argument('--compiler-includes-file',
                        dest="compiler_includes_file",
                        required=False,
                        default=None,
                        help="DEPRECATED. Read the compiler includes from the "
                             "specified file rather than invoke the compiler "
                             "executable.")

    parser.add_argument('--compiler-target-file',
                        dest="compiler_target_file",
                        required=False,
                        default=None,
                        help="DEPRECATED. Read the compiler target from the "
                             "specified file rather than invoke the compiler "
                             "executable.")

    parser.add_argument('--compiler-info-file',
                        dest="compiler_info_file",
                        required=False,
                        default=None,
                        help="Read the compiler includes and target from the "
                             "specified file rather than invoke the compiler "
                             "executable.")

    parser.add_argument('-t', '--type', '--output-format',
                        dest="output_format",
                        required=False,
                        choices=['plist'],
                        default='plist',
                        help="Specify the format the analysis results should "
                             "use.")

    parser.add_argument('-q', '--quiet',
                        dest="quiet",
                        action='store_true',
                        default=argparse.SUPPRESS,
                        required=False,
                        help="Do not print the output or error of the "
                             "analyzers to the standard output of "
                             "CodeChecker.")

    parser.add_argument('-c', '--clean',
                        dest="clean",
                        required=False,
                        action='store_true',
                        default=argparse.SUPPRESS,
                        # FIX: help-text typo corrected ("were update by").
                        help="Delete analysis reports stored in the output "
                             "directory. (By default, CodeChecker would keep "
                             "reports and overwrites only those files that "
                             "were updated by the current build command).")

    parser.add_argument('--report-hash',
                        dest="report_hash",
                        default=argparse.SUPPRESS,
                        required=False,
                        choices=['context-free'],
                        help="EXPERIMENTAL feature. "
                             "Specify the hash calculation method for "
                             "reports. If this option is not set, the default "
                             "calculation method for Clang Static Analyzer "
                             "will be context sensitive and for Clang Tidy it "
                             "will be context insensitive. If this option is "
                             "set to 'context-free' bugs will be identified "
                             "with the CodeChecker generated context free "
                             "hash for every analyzers. USE WISELY AND AT "
                             "YOUR OWN RISK!")

    parser.add_argument('-n', '--name',
                        dest="name",
                        required=False,
                        default=argparse.SUPPRESS,
                        help="Annotate the run analysis with a custom name in "
                             "the created metadata file.")

    analyzer_opts = parser.add_argument_group("analyzer arguments")

    analyzer_opts.add_argument('--analyzers',
                               nargs='+',
                               dest='analyzers',
                               metavar='ANALYZER',
                               required=False,
                               choices=analyzer_types.supported_analyzers,
                               default=argparse.SUPPRESS,
                               help="Run analysis only with the analyzers "
                                    "specified. Currently supported analyzers "
                                    "are: " +
                                    ', '.join(
                                        analyzer_types.supported_analyzers) +
                                    ".")

    analyzer_opts.add_argument('--add-compiler-defaults',
                               action='store_true',
                               required=False,
                               default=argparse.SUPPRESS,
                               help="DEPRECATED. Always True. Retrieve "
                                    "compiler-specific configuration "
                                    "from the compilers themselves, and use "
                                    "them with Clang. This is used when the "
                                    "compiler on the system is special, e.g. "
                                    "when doing cross-compilation.")

    analyzer_opts.add_argument('--capture-analysis-output',
                               dest='capture_analysis_output',
                               action='store_true',
                               default=argparse.SUPPRESS,
                               required=False,
                               help="Store standard output and standard error "
                                    "of successful analyzer invocations "
                                    "into the '<OUTPUT_DIR>/success' "
                                    "directory.")

    analyzer_opts.add_argument('--saargs',
                               dest="clangsa_args_cfg_file",
                               required=False,
                               default=argparse.SUPPRESS,
                               help="File containing argument which will be "
                                    "forwarded verbatim for the Clang Static "
                                    "Analyzer.")

    analyzer_opts.add_argument('--tidyargs',
                               dest="tidy_args_cfg_file",
                               required=False,
                               default=argparse.SUPPRESS,
                               help="File containing argument which will be "
                                    "forwarded verbatim for Clang-Tidy.")

    analyzer_opts.add_argument('--tidy-config',
                               dest='tidy_config',
                               required=False,
                               default=argparse.SUPPRESS,
                               help="A file in YAML format containing the "
                                    "configuration of clang-tidy checkers. "
                                    "The file can be dumped by "
                                    "'CodeChecker analyzers --dump-config "
                                    "clang-tidy' command.")

    analyzer_opts.add_argument('--timeout',
                               type=int,
                               dest='timeout',
                               required=False,
                               default=argparse.SUPPRESS,
                               help="The amount of time (in seconds) that "
                                    "each analyzer can spend, individually, "
                                    "to analyze the project. If the analysis "
                                    "of a particular file takes longer than "
                                    "this time, the analyzer is killed and "
                                    "the analysis is considered as a failed "
                                    "one.")

    context = package_context.get_context()

    # CTU options are only offered when the installed clang supports CTU.
    if host_check.is_ctu_capable(context):
        ctu_opts = parser.add_argument_group(
            "cross translation unit analysis arguments",
            """
These arguments are only available if the Clang Static Analyzer supports
Cross-TU analysis. By default, no CTU analysis is run when
'CodeChecker analyze' is called.""")

        ctu_modes = ctu_opts.add_mutually_exclusive_group()

        # 'ctu_phases' is a [collect, analyze] pair of booleans.
        ctu_modes.add_argument('--ctu', '--ctu-all',
                               action='store_const',
                               const=[True, True],
                               dest='ctu_phases',
                               default=argparse.SUPPRESS,
                               help="Perform Cross Translation Unit (CTU) "
                                    "analysis, both 'collect' and 'analyze' "
                                    "phases. In this mode, the extra files "
                                    "created by 'collect' are cleaned up "
                                    "after the analysis.")

        ctu_modes.add_argument('--ctu-collect',
                               action='store_const',
                               const=[True, False],
                               dest='ctu_phases',
                               default=argparse.SUPPRESS,
                               help="Perform the first, 'collect' phase of "
                                    "Cross-TU analysis. This phase generates "
                                    "extra files needed by CTU analysis, and "
                                    "puts them into '<OUTPUT_DIR>/ctu-dir'. "
                                    "NOTE: If this argument is present, "
                                    "CodeChecker will NOT execute the "
                                    "analyzers!")

        ctu_modes.add_argument('--ctu-analyze',
                               action='store_const',
                               const=[False, True],
                               dest='ctu_phases',
                               default=argparse.SUPPRESS,
                               help="Perform the second, 'analyze' phase of "
                                    "Cross-TU analysis, using already "
                                    "available extra files in "
                                    "'<OUTPUT_DIR>/ctu-dir'. (These files "
                                    "will not be cleaned up in this mode.)")

        ctu_opts.add_argument('--ctu-reanalyze-on-failure',
                              action='store_true',
                              dest='ctu_reanalyze_on_failure',
                              default=argparse.SUPPRESS,
                              help="If Cross-TU analysis is enabled and fails "
                                   "for some reason, try to re analyze the "
                                   "same translation unit without "
                                   "Cross-TU enabled.")

    # Statistics options are only offered when the statistics collector
    # checkers are available in the installed clang.
    if host_check.is_statistics_capable(context):
        stat_opts = parser.add_argument_group(
            "EXPERIMENTAL statistics analysis feature arguments",
            """
These arguments are only available if the Clang Static Analyzer supports
Statistics-based analysis (e.g. statisticsCollector.ReturnValueCheck,
statisticsCollector.SpecialReturnValue checkers are available).""")

        # FIX: '--stats-collect' was listed twice as an option string in the
        # original add_argument() call; the redundant duplicate is removed.
        stat_opts.add_argument('--stats-collect',
                               action='store',
                               default=argparse.SUPPRESS,
                               dest='stats_output',
                               help="EXPERIMENTAL feature. "
                                    "Perform the first, 'collect' phase of "
                                    "Statistical analysis. This phase "
                                    "generates extra files needed by "
                                    "statistics analysis, and "
                                    "puts them into "
                                    "'<STATS_OUTPUT>'."
                                    " NOTE: If this argument is present, "
                                    "CodeChecker will NOT execute the "
                                    "analyzers!")

        # FIX: '--stats-use' was likewise duplicated; removed.
        stat_opts.add_argument('--stats-use',
                               action='store',
                               default=argparse.SUPPRESS,
                               dest='stats_dir',
                               help="EXPERIMENTAL feature. "
                                    "Use the previously generated statistics "
                                    "results for the analysis from the given "
                                    "'<STATS_DIR>'.")

        stat_opts.add_argument('--stats',
                               action='store_true',
                               default=argparse.SUPPRESS,
                               dest='stats_enabled',
                               help="EXPERIMENTAL feature. "
                                    "Perform both phases of "
                                    "Statistical analysis. This phase "
                                    "generates extra files needed by "
                                    "statistics analysis and enables "
                                    "the statistical checkers. "
                                    "No need to enable them explicitly.")

        # NOTE: argparse runs string defaults through 'type', so "10" and
        # "0.85" below become int/float values in the namespace.
        stat_opts.add_argument('--stats-min-sample-count',
                               action='store',
                               default="10",
                               type=int,
                               dest='stats_min_sample_count',
                               help="EXPERIMENTAL feature. "
                                    "Minimum number of samples (function call"
                                    " occurrences) to be collected"
                                    " for a statistics to be relevant "
                                    "'<MIN-SAMPLE-COUNT>'.")

        stat_opts.add_argument('--stats-relevance-threshold',
                               action='store',
                               default="0.85",
                               type=float,
                               dest='stats_relevance_threshold',
                               help="EXPERIMENTAL feature. "
                                    "The minimum ratio of calls of function "
                                    "f that must have a certain property "
                                    "property to consider it true for that "
                                    "function (calculated as calls "
                                    "with a property/all calls)."
                                    " CodeChecker will warn for"
                                    " calls of f do not have that property."
                                    "'<RELEVANCE_THRESHOLD>'.")

    # FIX below: "waning name" -> "warning name" (help-text typo).
    checkers_opts = parser.add_argument_group(
        "checker configuration",
        """
Checkers
------------------------------------------------
The analyzer performs checks that are categorized into families or
"checkers". See 'CodeChecker checkers' for the list of available checkers.
You can fine-tune which checkers to use in the analysis by setting the
enabled and disabled flags starting from the bigger groups and going inwards,
e.g. '-e core -d core.uninitialized -e core.uninitialized.Assign' will enable
every 'core' checker, but only 'core.uninitialized.Assign' from the
'core.uninitialized' group. Please consult the manual for details. Disabling
certain checkers - such as the 'core' group - is unsupported by the
LLVM/Clang community, and thus discouraged.

Compiler warnings
------------------------------------------------
Compiler warnings are diagnostic messages that report constructions that are
not inherently erroneous but that are risky or suggest there may have been an
error. Compiler warnings are named 'clang-diagnostic-<warning-option>', e.g.
Clang warning controlled by '-Wliteral-conversion' will be reported with
check name 'clang-diagnostic-literal-conversion'. You can fine-tune which
warnings to use in the analysis by setting the enabled and disabled flags
starting from the bigger groups and going inwards, e.g.
'-e Wunused -d Wno-unused-parameter' will enable every 'unused' warnings
except 'unused-parameter'. These flags should start with a capital 'W' or
'Wno-' prefix followed by the warning name (E.g.: '-e Wliteral-conversion',
'-d Wno-literal-conversion'). By default '-Wall' and '-Wextra' warnings are
enabled. For more information see:
https://clang.llvm.org/docs/DiagnosticsReference.html.""")

    checkers_opts.add_argument('-e', '--enable',
                               dest="enable",
                               metavar='checker/group/profile',
                               default=argparse.SUPPRESS,
                               action=OrderedCheckersAction,
                               help="Set a checker (or checker group) "
                                    "to BE USED in the analysis.")

    checkers_opts.add_argument('-d', '--disable',
                               dest="disable",
                               metavar='checker/group/profile',
                               default=argparse.SUPPRESS,
                               action=OrderedCheckersAction,
                               help="Set a checker (or checker group) "
                                    "to BE PROHIBITED from use in the "
                                    "analysis.")

    checkers_opts.add_argument('--enable-all',
                               dest="enable_all",
                               action='store_true',
                               required=False,
                               default=argparse.SUPPRESS,
                               help="Force the running analyzers to use "
                                    "almost every checker available. The "
                                    "checker groups 'alpha.', 'debug.' and "
                                    "'osx.' (on Linux) are NOT enabled "
                                    "automatically and must be EXPLICITLY "
                                    "specified. WARNING! Enabling all "
                                    "checkers might result in the analysis "
                                    "losing precision and stability, and "
                                    "could even result in a total failure of "
                                    "the analysis. USE WISELY AND AT YOUR "
                                    "OWN RISK!")

    logger.add_verbose_arguments(parser)
    parser.set_defaults(func=main)
def main(args):
    """
    Perform analysis on the given logfiles and store the results in a
    machine-readable format.

    :param args: argparse.Namespace produced by the 'analyze' subcommand
                 parser. Exits the process with status 1 on invalid input
                 (multiple logfiles, output path that is a file, or no
                 usable compile commands).
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    if len(args.logfile) != 1:
        LOG.warning("Only one log file can be processed right now!")
        sys.exit(1)

    args.output_path = os.path.abspath(args.output_path)
    if os.path.exists(args.output_path) and \
            not os.path.isdir(args.output_path):
        LOG.error("The given output path is not a directory: " +
                  args.output_path)
        sys.exit(1)

    if 'enable_all' in args:
        LOG.info("'--enable-all' was supplied for this analysis.")

    # We clear the output directory in the following cases.
    ctu_dir = os.path.join(args.output_path, 'ctu-dir')
    if 'ctu_phases' in args and args.ctu_phases[0] and \
            os.path.isdir(ctu_dir):
        # Clear the CTU-dir if the user turned on the collection phase.
        LOG.debug("Previous CTU contents have been deleted.")
        shutil.rmtree(ctu_dir)

    if 'clean' in args and os.path.isdir(args.output_path):
        LOG.info("Previous analysis results in '{0}' have been removed, "
                 "overwriting with current result".format(args.output_path))
        shutil.rmtree(args.output_path)

    if not os.path.exists(args.output_path):
        os.makedirs(args.output_path)

    LOG.debug("args: " + str(args))
    LOG.debug("Output will be stored to: '" + args.output_path + "'")

    # Process the skip list if present.
    skip_handler = __get_skip_handler(args)

    # Parse the JSON CCDBs and retrieve the compile commands.
    actions = []
    for log_file in args.logfile:
        if not os.path.exists(log_file):
            LOG.error("The specified logfile '" + log_file + "' does not "
                      "exist!")
            continue

        parse_log_options = ParseLogOptions(args)
        actions += log_parser.parse_log(log_file,
                                        parse_log_options,
                                        skip_handler)

    if not actions:
        LOG.info("None of the specified build log files contained "
                 "valid compilation commands. No analysis needed...")
        sys.exit(1)

    context = package_context.get_context()
    metadata = {'action_num': len(actions),
                'command': sys.argv,
                'versions': {
                    'codechecker': "{0} ({1})".format(
                        context.package_git_tag,
                        context.package_git_hash)},
                'working_directory': os.getcwd(),
                'output_path': args.output_path,
                'result_source_files': {}}

    if 'name' in args:
        metadata['name'] = args.name

    # Update metadata dictionary with old values.
    metadata_file = os.path.join(args.output_path, 'metadata.json')
    if os.path.exists(metadata_file):
        metadata_prev = load_json_or_empty(metadata_file)
        # Use .get() so that an old or partially written metadata file
        # (missing the 'result_source_files' key) cannot crash the run
        # with a KeyError.
        metadata['result_source_files'] = \
            metadata_prev.get('result_source_files', {})

    analyzer.perform_analysis(args, skip_handler, context, actions,
                              metadata)

    __update_skip_file(args)

    LOG.debug("Analysis metadata written to '" + metadata_file + "'")
    with open(metadata_file, 'w') as metafile:
        json.dump(metadata, metafile)

    # WARN: store command will search for this file!!!!
    compile_cmd_json = os.path.join(args.output_path, 'compile_cmd.json')
    try:
        source = os.path.abspath(args.logfile[0])
        target = os.path.abspath(compile_cmd_json)

        if source != target:
            shutil.copyfile(source, target)
    except shutil.Error:
        # copyfile raises shutil.Error when source and destination are the
        # same file; nothing to do in that case.
        LOG.debug("Compilation database JSON file is the same.")
    except Exception:
        # Best-effort copy: analysis results are already stored, so a
        # failed copy only degrades the later 'store' convenience.
        LOG.debug("Copying compilation database JSON file failed.")
def main(args):
    """
    Store the defect results in the specified input list as bug reports in
    the database.

    :param args: argparse.Namespace produced by the 'store' subcommand
                 parser. Exits with status 1 on permission/size/storage
                 errors and 2 when no run name could be determined.
    :raises Exception: if zlib is not available on the system.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    if not host_check.check_zlib():
        raise Exception("zlib is not available on the system!")

    # To ensure the help message prints the default folder properly,
    # the 'default' for 'args.input' is a string, not a list.
    # But we need lists for the foreach here to work.
    if isinstance(args.input, str):
        args.input = [args.input]

    if 'name' not in args:
        LOG.debug("Generating name for analysis...")
        generated = __get_run_name(args.input)
        if generated:
            setattr(args, 'name', generated)
        else:
            LOG.error("No suitable name was found in the inputs for the "
                      "analysis run. Please specify one by passing argument "
                      "--name run_name in the invocation.")
            sys.exit(2)  # argparse returns error code 2 for bad invocations.

    LOG.info("Storing analysis results for run '" + args.name + "'")

    if 'force' in args:
        LOG.info("argument --force was specified: the run with name '" +
                 args.name + "' will be deleted.")

    protocol, host, port, product_name = split_product_url(args.product_url)

    # Before any transmission happens, check if we have the PRODUCT_STORE
    # permission to prevent a possibly long ZIP operation only to get an
    # error later on.
    product_client = libclient.setup_product_client(protocol,
                                                    host, port, product_name)
    product_id = product_client.getCurrentProduct().id

    auth_client, _ = libclient.setup_auth_client(protocol, host, port)
    has_perm = libclient.check_permission(
        auth_client, Permission.PRODUCT_STORE, {'productID': product_id})
    if not has_perm:
        LOG.error("You are not authorised to store analysis results in "
                  "product '{0}'".format(product_name))
        sys.exit(1)

    # Setup connection to the remote server.
    client = libclient.setup_client(args.product_url, product_client=False)

    LOG.debug("Initializing client connecting to {0}:{1}/{2} done.".format(
        host, port, product_name))

    # BUGFIX: mkstemp() returns an open OS-level file descriptor in
    # addition to the path. The descriptor was previously discarded,
    # leaking one fd per store invocation; close it right away since the
    # path is all we need.
    zip_fd, zip_file = tempfile.mkstemp('.zip')
    os.close(zip_fd)
    LOG.debug("Will write mass store ZIP to '{0}'...".format(zip_file))

    try:
        assemble_zip(args.input, zip_file, client)

        if os.stat(zip_file).st_size > MAX_UPLOAD_SIZE:
            LOG.error("The result list to upload is too big (max: {}).".format(
                sizeof_fmt(MAX_UPLOAD_SIZE)))
            sys.exit(1)

        with open(zip_file, 'rb') as zf:
            b64zip = base64.b64encode(zf.read())

        context = package_context.get_context()

        trim_path_prefixes = args.trim_path_prefix if \
            'trim_path_prefix' in args else None

        client.massStoreRun(args.name,
                            args.tag if 'tag' in args else None,
                            str(context.version),
                            b64zip,
                            'force' in args,
                            trim_path_prefixes)

        # Storing analysis statistics if the server allows them.
        if client.allowsStoringAnalysisStatistics():
            storing_analysis_statistics(client, args.input, args.name)

        LOG.info("Storage finished successfully.")
    except RequestFailed as reqfail:
        if reqfail.errorCode == ErrorCode.SOURCE_FILE:
            header = ['File', 'Line', 'Checker name']
            table = twodim_to_str('table', header,
                                  [c.split('|') for c in reqfail.extraInfo])
            LOG.warning("Setting the review statuses for some reports failed "
                        "because of non valid source code comments: "
                        "{0}\n {1}".format(reqfail.message, table))
        sys.exit(1)
    except Exception as ex:
        LOG.info("Storage failed: " + str(ex))
        sys.exit(1)
    finally:
        # Always delete the temporary ZIP, whether the upload succeeded
        # or not.
        os.remove(zip_file)
def handle_diff_results(args):
    """
    Entry point of the 'diff' subcommand: compares two sets of analysis
    results (local report directory and/or remote run, chosen by whether
    args.basename / args.newname are existing directories), filters them
    by the configured severities/checkers/paths, and prints the resulting
    reports via print_reports().
    """
    init_logger(args.verbose if 'verbose' in args else None)

    check_deprecated_arg_usage(args)

    # Severity / checker-name / file-path filter values parsed from args;
    # the two discarded values are not used by this handler.
    f_severities, f_checkers, f_file_path, _, _ = check_filter_values(args)

    context = package_context.get_context()

    def skip_report_dir_result(report):
        """ Returns True if the report should be skipped from the results
        based on the given filter set. """
        if f_severities:
            severity_name = context.severity_map.get(report.main['check_name'])
            if severity_name.lower() not in map(str.lower, f_severities):
                return True

        if f_checkers:
            checker_name = report.main['check_name']
            # Filter values may contain '*' wildcards; translate them to a
            # regex and match case-insensitively against the checker name.
            if not any([re.match(r'^' + c.replace("*", ".*") + '$',
                                 checker_name, re.IGNORECASE)
                        for c in f_checkers]):
                return True

        if f_file_path:
            file_path = report.files[int(report.main['location']['file'])]
            if not any([re.match(r'^' + f.replace("*", ".*") + '$',
                                 file_path, re.IGNORECASE)
                        for f in f_file_path]):
                return True

        if 'checker_msg' in args:
            checker_msg = report.main['description']
            if not any([re.match(r'^' + c.replace("*", ".*") + '$',
                                 checker_msg, re.IGNORECASE)
                        for c in args.checker_msg]):
                return True

        return False

    def get_report_dir_results(reportdir):
        """
        Parse every .plist file in the given report directory and return
        the reports that survive deduplication (by report path hash) and
        the skip_report_dir_result() filters.
        """
        all_reports = []
        processed_path_hashes = set()
        for filename in os.listdir(reportdir):
            if filename.endswith(".plist"):
                file_path = os.path.join(reportdir, filename)
                LOG.debug("Parsing:" + file_path)
                try:
                    files, reports = plist_parser.parse_plist(file_path)
                    for report in reports:
                        path_hash = get_report_path_hash(report, files)
                        if path_hash in processed_path_hashes:
                            LOG.debug("Not showing report because it is a "
                                      "deduplication of an already processed "
                                      "report!")
                            LOG.debug("Path hash: %s", path_hash)
                            LOG.debug(report)
                            continue

                        if skip_report_dir_result(report):
                            continue

                        processed_path_hashes.add(path_hash)
                        # Resolve the numeric file index to the real file
                        # name so later code can use it directly.
                        report.main['location']['file_name'] = \
                            files[int(report.main['location']['file'])]
                        all_reports.append(report)
                except Exception as ex:
                    LOG.error('The generated plist is not valid!')
                    LOG.error(ex)
        return all_reports

    def get_line_from_file(filename, lineno):
        """ Return the 1-based line 'lineno' of the given file, or "" if
        the file is shorter than that. """
        with open(filename, 'r') as f:
            i = 1
            for line in f:
                if i == lineno:
                    return line
                i += 1
        return ""

    def get_diff_base_results(client, baseids, base_hashes,
                              suppressed_hashes):
        """
        Fetch all results from the given base runs whose report hash is in
        base_hashes + suppressed_hashes, paging through the server with
        MAX_QUERY_SIZE-sized requests.
        """
        base_results = []
        report_filter = ttypes.ReportFilter()
        add_filter_conditions(client, report_filter, args)

        sort_mode = [(ttypes.SortMode(
            ttypes.SortType.FILENAME,
            ttypes.Order.ASC))]
        limit = constants.MAX_QUERY_SIZE
        offset = 0

        report_filter.reportHash = base_hashes + suppressed_hashes
        results = client.getRunResults(baseids,
                                       limit,
                                       offset,
                                       sort_mode,
                                       report_filter,
                                       None)
        while results:
            base_results.extend(results)
            offset += limit
            results = client.getRunResults(baseids,
                                           limit,
                                           offset,
                                           sort_mode,
                                           report_filter,
                                           None)
        return base_results

    def get_suppressed_reports(reports):
        """
        Returns suppressed reports: the hashes of reports that have exactly
        one matching source-code suppression comment at the bug line.
        """
        suppressed_in_code = []
        for rep in reports:
            bughash = rep.report_hash
            source_file = rep.main['location']['file_name']
            bug_line = rep.main['location']['line']
            checker_name = rep.main['check_name']

            sc_handler = SourceCodeCommentHandler(source_file)
            src_comment_data = sc_handler.filter_source_line_comments(
                bug_line,
                checker_name)
            if len(src_comment_data) == 1:
                suppressed_in_code.append(bughash)
                LOG.debug("Bug " + bughash +
                          "is suppressed in code. file:" + source_file +
                          "Line " + str(bug_line))
            elif len(src_comment_data) > 1:
                # Ambiguous suppression comments: refuse to suppress.
                LOG.warning("Multiple source code comment can be found "
                            "for '{0}' checker in '{1}' at line {2}. "
                            "This bug will not be suppressed!".format(
                                checker_name, source_file, bug_line))
        return suppressed_in_code

    def get_diff_type():
        """
        Returns Thrift DiffType value by processing the arguments.
        """
        if 'new' in args:
            return ttypes.DiffType.NEW

        if 'unresolved' in args:
            return ttypes.DiffType.UNRESOLVED

        if 'resolved' in args:
            return ttypes.DiffType.RESOLVED

        return None

    def get_diff_local_dir_remote_run(client, report_dir, run_name):
        """
        Compares a local report directory with a remote run.
        The local directory is the baseline; returns the filtered reports
        and the matched remote run names.
        """
        filtered_reports = []
        report_dir_results = get_report_dir_results(
            os.path.abspath(report_dir))
        suppressed_in_code = get_suppressed_reports(report_dir_results)

        diff_type = get_diff_type()
        run_ids, run_names, _ = process_run_arg(run_name)
        local_report_hashes = set([r.report_hash for r in report_dir_results])

        if diff_type == ttypes.DiffType.NEW:
            # Get report hashes which can be found only in the remote runs.
            # (RESOLVED relative to the local hashes == present remotely
            # but not locally, i.e. "new" from the local baseline's view.)
            remote_hashes = \
                client.getDiffResultsHash(run_ids,
                                          local_report_hashes,
                                          ttypes.DiffType.RESOLVED)

            results = get_diff_base_results(client, run_ids, remote_hashes,
                                            suppressed_in_code)
            for result in results:
                filtered_reports.append(result)
        elif diff_type == ttypes.DiffType.UNRESOLVED:
            # Get remote hashes which can be found in the remote run and
            # in the local report directory.
            remote_hashes = \
                client.getDiffResultsHash(run_ids,
                                          local_report_hashes,
                                          ttypes.DiffType.UNRESOLVED)
            for result in report_dir_results:
                rep_h = result.report_hash
                if rep_h in remote_hashes and rep_h not in suppressed_in_code:
                    filtered_reports.append(result)
        elif diff_type == ttypes.DiffType.RESOLVED:
            # Get remote hashes which can be found in the remote run and
            # in the local report directory; local reports NOT in that set
            # are the resolved ones.
            remote_hashes = \
                client.getDiffResultsHash(run_ids,
                                          local_report_hashes,
                                          ttypes.DiffType.UNRESOLVED)
            for result in report_dir_results:
                if result.report_hash not in remote_hashes:
                    filtered_reports.append(result)

        return filtered_reports, run_names

    def get_diff_remote_run_local_dir(client, run_name, report_dir):
        """
        Compares a remote run with a local report directory.
        The remote run is the baseline; returns the filtered reports and
        the matched remote run names.
        """
        filtered_reports = []
        report_dir_results = get_report_dir_results(
            os.path.abspath(report_dir))
        suppressed_in_code = get_suppressed_reports(report_dir_results)

        diff_type = get_diff_type()
        run_ids, run_names, _ = process_run_arg(run_name)
        local_report_hashes = set([r.report_hash for r in report_dir_results])

        remote_hashes = client.getDiffResultsHash(run_ids,
                                                  local_report_hashes,
                                                  diff_type)

        if diff_type in [ttypes.DiffType.NEW, ttypes.DiffType.UNRESOLVED]:
            # Shows reports from the report dir which are not present in
            # the baseline (NEW reports) or appear in both side (UNRESOLVED
            # reports) and not suppressed in the code.
            for result in report_dir_results:
                rep_h = result.report_hash
                if rep_h in remote_hashes and rep_h not in suppressed_in_code:
                    filtered_reports.append(result)
        elif diff_type == ttypes.DiffType.RESOLVED:
            # Show bugs in the baseline (server) which are not present in
            # the report dir or suppressed.
            results = get_diff_base_results(client, run_ids,
                                            remote_hashes,
                                            suppressed_in_code)
            for result in results:
                filtered_reports.append(result)

        return filtered_reports, run_names

    def get_diff_remote_runs(client, basename, newname):
        """
        Compares two remote runs and returns the filtered results.
        """
        report_filter = ttypes.ReportFilter()
        add_filter_conditions(client, report_filter, args)

        base_ids, base_run_names, base_run_tags = process_run_arg(basename)
        report_filter.runTag = base_run_tags

        cmp_data = ttypes.CompareData()
        cmp_data.diffType = get_diff_type()

        new_ids, new_run_names, new_run_tags = process_run_arg(newname)
        cmp_data.runIds = new_ids
        cmp_data.runTag = new_run_tags

        # Do not show resolved bugs in compare mode new.
        if cmp_data.diffType == ttypes.DiffType.NEW:
            report_filter.detectionStatus = [
                ttypes.DetectionStatus.NEW,
                ttypes.DetectionStatus.UNRESOLVED,
                ttypes.DetectionStatus.REOPENED]

        sort_mode = [(ttypes.SortMode(
            ttypes.SortType.FILENAME,
            ttypes.Order.ASC))]
        limit = constants.MAX_QUERY_SIZE
        offset = 0

        all_results = []
        results = client.getRunResults(base_ids, limit, offset, sort_mode,
                                       report_filter, cmp_data)

        while results:
            all_results.extend(results)
            offset += limit
            results = client.getRunResults(base_ids, limit, offset,
                                           sort_mode, report_filter,
                                           cmp_data)
        return all_results, base_run_names, new_run_names

    def get_diff_local_dirs(basename, newname):
        """
        Compares two report directories and returns the filtered results.
        """
        filtered_reports = []
        base_results = get_report_dir_results(os.path.abspath(basename))
        new_results = get_report_dir_results(os.path.abspath(newname))

        base_hashes = set([res.report_hash for res in base_results])
        new_hashes = set([res.report_hash for res in new_results])

        diff_type = get_diff_type()
        if diff_type == ttypes.DiffType.NEW:
            for res in new_results:
                if res.report_hash not in base_hashes:
                    filtered_reports.append(res)
        if diff_type == ttypes.DiffType.UNRESOLVED:
            for res in new_results:
                if res.report_hash in base_hashes:
                    filtered_reports.append(res)
        elif diff_type == ttypes.DiffType.RESOLVED:
            for res in base_results:
                if res.report_hash not in new_hashes:
                    filtered_reports.append(res)

        return filtered_reports

    def cached_report_file_lookup(file_cache, file_id):
        """
        Get source file data for the given file and caches it in a file
        cache if file data is not found in the cache. Finally, it returns
        the source file data from the cache.
        """
        if file_id not in file_cache:
            source = client.getSourceFileData(file_id, True,
                                              ttypes.Encoding.BASE64)
            file_content = base64.b64decode(source.fileContent)
            file_cache[file_id] = {'id': file_id,
                                   'path': source.filePath,
                                   'content': file_content}

        return file_cache[file_id]

    def get_report_data(client, reports, file_cache):
        """
        Returns necessary report files and report data events for the HTML
        plist parser.
        """
        file_sources = {}
        report_data = []

        for report in reports:
            file_sources[report.fileId] = cached_report_file_lookup(
                file_cache, report.fileId)

            details = client.getReportDetails(report.reportId)
            events = []
            for index, event in enumerate(details.pathEvents):
                file_sources[event.fileId] = cached_report_file_lookup(
                    file_cache, event.fileId)

                events.append({'line': event.startLine,
                               'col': event.startCol,
                               'file': event.fileId,
                               'msg': event.msg,
                               'step': index + 1})
            report_data.append({
                'events': events,
                'path': report.checkedFile,
                'reportHash': report.bugHash,
                'checkerName': report.checkerId})

        return {'files': file_sources,
                'reports': report_data}

    def reports_to_report_data(reports):
        """
        Converts reports from Report class from one plist file
        to report data events for the HTML plist parser.
        """
        file_sources = {}
        fname_to_fid = {}
        report_data = []
        findex = 0

        for report in reports:
            # Not all report in this list may refer to the same files
            # thus we need to create a single file list with
            # all files from all reports.
            for f in report.files:
                if f not in fname_to_fid:
                    try:
                        with codecs.open(f, 'r', 'UTF-8',
                                         errors='replace') as source_data:
                            content = source_data.read()
                    except (OSError, IOError):
                        content = f + " NOT FOUND."
                    file_sources[findex] = {'id': findex,
                                            'path': f,
                                            'content': content}
                    fname_to_fid[f] = findex
                    findex += 1

            events = []
            pathElements = report.bug_path
            index = 1
            for element in pathElements:
                if element['kind'] == 'event':
                    fname = report.files[element['location']['file']]
                    new_fid = fname_to_fid[fname]

                    events.append({'line': element['location']['line'],
                                   'col': element['location']['col'],
                                   'file': new_fid,
                                   'msg': element['message'],
                                   'step': index})
                    index += 1

            report_hash = report.main['issue_hash_content_of_line_in_context']
            report_data.append({
                'events': events,
                'path': report.main['location']['file_name'],
                'reportHash': report_hash,
                'checkerName': report.main['check_name']})

        return {'files': file_sources,
                'reports': report_data}

    def report_to_html(client, reports, output_dir):
        """
        Generate HTML output files for the given reports in the given
        output directory by using the Plist To HTML parser.
        """
        html_builder = PlistToHtml.HtmlBuilder(context.path_plist_to_html_dist,
                                               context.severity_map)

        # Group reports by the file they were reported in.
        file_report_map = defaultdict(list)
        for report in reports:
            if isinstance(report, Report):
                # Report parsed from a local plist file.
                file_path = report.main['location']['file_name']
            else:
                # ReportData coming from the server.
                file_path = report.checkedFile
            file_report_map[file_path].append(report)

        file_cache = {}
        for file_path, file_reports in file_report_map.items():
            checked_file = file_path
            filename = os.path.basename(checked_file)
            # Short hash suffix keeps output file names unique for files
            # with the same basename in different directories.
            h = int(hashlib.md5(file_path).hexdigest(), 16) % (10 ** 8)

            if isinstance(file_reports[0], Report):
                report_data = reports_to_report_data(file_reports)
            else:
                report_data = get_report_data(client, file_reports,
                                              file_cache)

            output_path = os.path.join(output_dir,
                                       filename + '_' + str(h) + '.html')
            html_builder.create(output_path, report_data)
            print('Html file was generated for file://{0}: file://{1}'.format(
                checked_file, output_path))

        html_builder.create_index_html(output_dir)

    def print_reports(client, reports, output_format):
        """
        Print the given reports in the requested format: 'json', 'html',
        'plaintext' or a twodim table format.
        """
        output_dir = args.export_dir if 'export_dir' in args else None
        # NOTE(review): if 'clean' is given without --export-dir,
        # os.path.isdir(None) raises — presumably the arg parser couples
        # them; confirm against the subcommand definition.
        if 'clean' in args and os.path.isdir(output_dir):
            print("Previous analysis results in '{0}' have been removed, "
                  "overwriting with current results.".format(output_dir))
            shutil.rmtree(output_dir)

        if output_format == 'json':
            output = []
            for report in reports:
                if isinstance(report, Report):
                    output.append(report.main)
                else:
                    output.append(report)
            print(CmdLineOutputEncoder().encode(output))
            return

        if output_format == 'html':
            output_dir = args.export_dir
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)

            print("Generating HTML output files to file://{0} directory:\n".
                  format(output_dir))

            report_to_html(client, reports, output_dir)

            print('\nTo view the results in a browser run:\n'
                  ' $ firefox {0}'.format(os.path.join(args.export_dir,
                                                       'index.html')))
            return

        header = ['File', 'Checker', 'Severity', 'Msg', 'Source']
        rows = []

        # Collect which lines of which server-side files we will need, so
        # they can be requested in one batch.
        source_lines = defaultdict(set)
        for report in reports:
            if not isinstance(report, Report):
                source_lines[report.fileId].add(report.line)

        lines_in_files_requested = []
        for key in source_lines:
            lines_in_files_requested.append(
                ttypes.LinesInFilesRequested(fileId=key,
                                             lines=source_lines[key]))

        for report in reports:
            if isinstance(report, Report):
                # report is coming from a plist file.
                bug_line = report.main['location']['line']
                bug_col = report.main['location']['col']
                checked_file = report.main['location']['file_name']\
                    + ':' + str(bug_line) + ":" + str(bug_col)
                check_name = report.main['check_name']
                sev = context.severity_map.get(check_name)
                check_msg = report.main['description']
                source_line =\
                    get_line_from_file(report.main['location']['file_name'],
                                       bug_line)
            else:
                # NOTE(review): this server call uses only loop-invariant
                # arguments but runs once per report — looks hoistable out
                # of the loop; confirm before changing.
                source_line_contents = client.getLinesInSourceFileContents(
                    lines_in_files_requested, ttypes.Encoding.BASE64)
                # report is of ReportData type coming from CodeChecker
                # server.
                bug_line = report.line
                bug_col = report.column
                sev = ttypes.Severity._VALUES_TO_NAMES[report.severity]
                checked_file = report.checkedFile + ':' + str(bug_line) +\
                    ":" + str(bug_col)
                source_line = base64.b64decode(
                    source_line_contents[report.fileId][bug_line])
                check_name = report.checkerId
                check_msg = report.checkerMsg
            rows.append((sev, checked_file, check_msg, check_name,
                         source_line))

        if output_format == 'plaintext':
            for row in rows:
                print("[{0}] {1}: {2} [{3}]\n{4}\n".format(
                    row[0], row[1], row[2], row[3], row[4]))
        else:
            print(twodim_to_str(output_format, header, rows))

    def get_run_tag(client, run_ids, tag_name):
        """
        Returns run tag information for the given tag name in the given
        runs.
        """
        run_history_filter = ttypes.RunHistoryFilter()
        run_history_filter.tagNames = [tag_name]
        run_histories = client.getRunHistory(run_ids, None, None,
                                             run_history_filter)

        return run_histories[0] if len(run_histories) else None

    def process_run_arg(run_arg_with_tag):
        """
        Process the argument and returns run ids a run tag ids.
        The argument has the following format: <run_name>:<run_tag>
        """
        run_with_tag = run_arg_with_tag.split(':')
        run_name = run_with_tag[0]
        runs = get_runs(client, [run_name])

        # NOTE(review): map() here looks written for Python 2 (returns a
        # list). On Python 3 these would be one-shot iterators and the
        # 'if not run_ids' emptiness check below would never fire —
        # confirm the supported interpreter version.
        run_ids = map(lambda run: run.runId, runs)
        run_names = map(lambda run: run.name, runs)

        # Set base run tag if it is available.
        run_tag_name = run_with_tag[1] if len(run_with_tag) > 1 else None
        run_tags = None
        if run_tag_name:
            tag = get_run_tag(client, run_ids, run_tag_name)
            run_tags = [tag.id] if tag else None

        if not run_ids:
            LOG.warning(
                "No run names match the given pattern: " + run_arg_with_tag)
            sys.exit(1)

        LOG.info("Matching runs: %s",
                 ', '.join(map(lambda run: run.name, runs)))

        return run_ids, run_names, run_tags

    def print_diff_results(reports):
        """ Print the results. """
        if reports:
            print_reports(client, reports, args.output_format)
        else:
            LOG.info("No results.")

    client = None

    # We set up the client if we are not comparing two local report
    # directory.
    if not os.path.isdir(args.basename) or not os.path.isdir(args.newname):
        client = setup_client(args.product_url)

    # Dispatch on which side(s) are local report directories vs. remote
    # run names.
    if os.path.isdir(args.basename) and os.path.isdir(args.newname):
        reports = get_diff_local_dirs(args.basename, args.newname)
        print_diff_results(reports)
        LOG.info("Compared two local report directories %s and %s",
                 os.path.abspath(args.basename),
                 os.path.abspath(args.newname))
    elif os.path.isdir(args.newname):
        reports, base_run_names = \
            get_diff_remote_run_local_dir(client,
                                          args.basename,
                                          os.path.abspath(args.newname))
        print_diff_results(reports)
        LOG.info("Compared remote run(s) %s (matching: %s) and local report "
                 "directory %s",
                 args.basename,
                 ', '.join(base_run_names),
                 os.path.abspath(args.newname))
    elif os.path.isdir(args.basename):
        reports, new_run_names = \
            get_diff_local_dir_remote_run(client,
                                          os.path.abspath(args.basename),
                                          args.newname)
        print_diff_results(reports)
        LOG.info("Compared local report directory %s and remote run(s) %s "
                 "(matching: %s).",
                 os.path.abspath(args.basename),
                 args.newname,
                 ', '.join(new_run_names))
    else:
        reports, base_run_names, new_run_names = \
            get_diff_remote_runs(client, args.basename, args.newname)
        print_diff_results(reports)
        LOG.info("Compared multiple remote runs %s (matching: %s) and %s "
                 "(matching: %s)",
                 args.basename,
                 ', '.join(base_run_names),
                 args.newname,
                 ', '.join(new_run_names))