def construct_config_handler(cls, args, context):
    handler = config_handler_clangsa.ClangSAConfigHandler()
    handler.analyzer_plugins_dir = context.checker_plugin
    handler.analyzer_binary = context.analyzer_binaries.get(
        cls.ANALYZER_NAME)
    handler.compiler_resource_dir = \
        host_check.get_resource_dir(handler.analyzer_binary, context)
    handler.report_hash = args.report_hash \
        if 'report_hash' in args else None

    check_env = get_check_env(context.path_env_extra,
                              context.ld_lib_path_extra)

    if 'ctu_phases' in args:
        handler.ctu_dir = os.path.join(args.output_path,
                                       args.ctu_dir)

        handler.ctu_has_analyzer_display_ctu_progress = \
            host_check.has_analyzer_feature(
                context.analyzer_binaries.get(cls.ANALYZER_NAME),
                '-analyzer-display-ctu-progress',
                check_env)

    handler.log_file = args.logfile
    handler.path_env_extra = context.path_env_extra
    handler.ld_lib_path_extra = context.ld_lib_path_extra

    try:
        with open(args.clangsa_args_cfg_file, 'rb') as sa_cfg:
            handler.analyzer_extra_arguments = \
                re.sub(r'\$\((.*?)\)',
                       replace_env_var(args.clangsa_args_cfg_file),
                       sa_cfg.read().strip())
    except IOError as ioerr:
        LOG.debug_analyzer(ioerr)
    except AttributeError as aerr:
        # No clangsa arguments file was given in the command line.
        LOG.debug_analyzer(aerr)

    checkers = ClangSA.get_analyzer_checkers(handler, check_env)

    # Read clang-sa checkers from the config file.
    clang_sa_checkers = context.checker_config.get(cls.ANALYZER_NAME +
                                                   '_checkers')

    try:
        cmdline_checkers = args.ordered_checkers
    except AttributeError:
        LOG.debug_analyzer('No checkers were defined in '
                           'the command line for %s', cls.ANALYZER_NAME)
        cmdline_checkers = None

    handler.initialize_checkers(
        context.available_profiles,
        context.package_root,
        checkers,
        clang_sa_checkers,
        cmdline_checkers,
        'enable_all' in args and args.enable_all)

    return handler

def get_ctu_dir(self):
    """
    Returns the path of the ctu directory (containing the triple).
    """
    config = self.config_handler
    env = get_check_env(config.path_env_extra,
                        config.ld_lib_path_extra)
    triple_arch = ctu_triple_arch.get_triple_arch(self.buildaction,
                                                  self.source_file,
                                                  config, env)
    ctu_dir = os.path.join(config.ctu_dir, triple_arch)
    return ctu_dir

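# Illustration only (not part of the original module; the concrete values are
# hypothetical): with the handler configured in construct_config_handler()
# above, the directory returned by get_ctu_dir() is composed as
# <args.output_path>/<args.ctu_dir>/<triple_arch>, for example:
#
#   os.path.join('./reports', 'ctu-dir', 'x86_64')  # -> './reports/ctu-dir/x86_64'
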
def pre_analyze(params):
    action, context, analyzer_config_map, skip_handler, \
        ctu_data, statistics_data = params

    analyzer_environment = get_check_env(context.path_env_extra,
                                         context.ld_lib_path_extra)

    progress_checked_num.value += 1

    if skip_handler and skip_handler.should_skip(action.source):
        return
    if action.analyzer_type != ClangSA.ANALYZER_NAME:
        return

    _, source_filename = os.path.split(action.source)

    LOG.info("[%d/%d] %s",
             progress_checked_num.value,
             progress_actions.value, source_filename)

    config = analyzer_config_map.get(ClangSA.ANALYZER_NAME)

    try:
        if ctu_data:
            LOG.debug("running CTU pre analysis")
            ctu_temp_fnmap_folder = ctu_data.get('ctu_temp_fnmap_folder')

            triple_arch = \
                ctu_triple_arch.get_triple_arch(action, action.source,
                                                config,
                                                analyzer_environment)
            ctu_manager.generate_ast(triple_arch, action, action.source,
                                     config, analyzer_environment)
            ctu_manager.map_functions(triple_arch, action, action.source,
                                      config, analyzer_environment,
                                      context.ctu_func_map_cmd,
                                      ctu_temp_fnmap_folder)
    except Exception as ex:
        LOG.debug_analyzer(str(ex))
        traceback.print_exc(file=sys.stdout)
        raise

    try:
        if statistics_data:
            LOG.debug("running statistics pre analysis")
            collect_statistics(action,
                               action.source,
                               config,
                               analyzer_environment,
                               statistics_data)
    except Exception as ex:
        LOG.debug_analyzer(str(ex))
        traceback.print_exc(file=sys.stdout)
        raise

def check_product_db_status(cfg_sql_server, context):
    """
    Check the database status of every configured product.

    :returns: dictionary mapping product endpoints to their database
              statuses
    """
    migration_root = context.run_migration_root

    engine = cfg_sql_server.create_engine()
    config_session = sessionmaker(bind=engine)
    sess = config_session()
    try:
        products = sess.query(ORMProduct).all()
    except Exception as ex:
        LOG.debug(ex)
        LOG.error("Failed to get product configurations from the database.")
        LOG.error("Please check your command arguments.")
        sys.exit(1)

    package_schema = get_schema_version_from_package(migration_root)

    db_errors = [DBStatus.FAILED_TO_CONNECT,
                 DBStatus.MISSING,
                 DBStatus.SCHEMA_INIT_ERROR,
                 DBStatus.SCHEMA_MISSING]

    cc_env = get_check_env(context.path_env_extra,
                           context.ld_lib_path_extra)
    prod_status = {}
    for pd in products:
        db = database.SQLServer.from_connection_string(pd.connection,
                                                       RUN_META,
                                                       migration_root,
                                                       interactive=False,
                                                       env=cc_env)
        db_location = db.get_db_location()

        ret = db.connect()
        s_ver = db.get_schema_version()
        if s_ver in db_errors:
            s_ver = None

        prod_status[pd.endpoint] = (ret, s_ver, package_schema, db_location)

    sess.commit()
    sess.close()
    engine.dispose()

    return prod_status

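# Illustration only (endpoint, schema versions and path are hypothetical):
# the mapping returned by check_product_db_status() has the shape
#
#   {'Default': (DBStatus.OK,          # connection / schema status
#                'schema_rev_in_db',   # schema version found in the database
#                'schema_rev_in_pkg',  # schema version shipped with the package
#                '/path/to/Default.sqlite')}  # database location
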
def is_statistics_capable(context):
    """ Detects if the current clang is Statistics compatible. """
    # Resolve potentially missing binaries.
    check_supported_analyzers([ClangSA.ANALYZER_NAME], context)

    clangsa_cfg = ClangSA.construct_config_handler([], context)

    check_env = get_check_env(context.path_env_extra,
                              context.ld_lib_path_extra)

    checkers = ClangSA.get_analyzer_checkers(clangsa_cfg, check_env)

    stat_checkers_pattern = re.compile(r'.+statisticscollector.+')

    for checker_name, _ in checkers:
        if stat_checkers_pattern.match(checker_name):
            return True

    return False

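# Illustration only (the checker name is hypothetical): the pattern above
# matches any checker whose name contains "statisticscollector" with at
# least one character on each side, so a checker listed as e.g.
# "alpha.statisticscollector.ReturnValueCheck" would make this return True.
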
def __get_analyzer_version(context, analyzer_config_map):
    """ Get the path and the version of the analyzer binaries. """
    check_env = get_check_env(context.path_env_extra,
                              context.ld_lib_path_extra)

    # Get the analyzer binaries from the config_map which
    # contains only the checked and available analyzers.
    versions = {}
    for _, analyzer_cfg in analyzer_config_map.items():
        analyzer_bin = analyzer_cfg.analyzer_binary
        version = [analyzer_bin, u' --version']
        try:
            output = subprocess.check_output(shlex.split(' '.join(version)),
                                             env=check_env)
            versions[analyzer_bin] = output
        except (subprocess.CalledProcessError, OSError) as oerr:
            LOG.warning("Failed to get analyzer version: %s",
                        ' '.join(version))
            LOG.warning(oerr.strerror)

    return versions

def start_workers(actions_map, actions, context, analyzer_config_map,
                  jobs, output_path, skip_handler, metadata,
                  quiet_analyze, capture_analysis_output, timeout,
                  ctu_reanalyze_on_failure, statistics_data, manager):
    """
    Start the workers in the process pool.
    For every build action there is a worker which performs the analysis.
    """

    # Handle SIGINT to stop this script running.
    def signal_handler(*arg, **kwarg):
        try:
            pool.terminate()
            manager.shutdown()
        finally:
            sys.exit(1)

    signal.signal(signal.SIGINT, signal_handler)

    # Start checking in parallel.
    checked_var = multiprocessing.Value('i', 1)
    actions_num = multiprocessing.Value('i', len(actions))
    pool = multiprocessing.Pool(jobs,
                                initializer=init_worker,
                                initargs=(checked_var, actions_num))

    # Output of failed analyses is kept here to help debugging.
    failed_dir = os.path.join(output_path, "failed")
    if not os.path.exists(failed_dir):
        os.makedirs(failed_dir)

    # Output of successful analyses is collected here.
    success_dir = os.path.join(output_path, "success")
    if not os.path.exists(success_dir):
        os.makedirs(success_dir)

    output_dirs = {'success': success_dir,
                   'failed': failed_dir}

    # Construct analyzer env.
    analyzer_environment = get_check_env(context.path_env_extra,
                                         context.ld_lib_path_extra)

    actions, skipped_actions = skip_cpp(actions, skip_handler)

    analyzed_actions = [(actions_map,
                         build_action,
                         context,
                         analyzer_config_map,
                         output_path,
                         skip_handler,
                         quiet_analyze,
                         capture_analysis_output,
                         timeout,
                         analyzer_environment,
                         ctu_reanalyze_on_failure,
                         output_dirs,
                         statistics_data)
                        for build_action in actions]

    if analyzed_actions:
        try:
            # Workaround, equivalent of map.
            # The main script does not get the signal while the map or
            # map_async function is running. This is a Python bug; it does
            # not happen if a timeout is specified, in which case the
            # interrupt is received immediately.
            pool.map_async(check,
                           analyzed_actions,
                           1,
                           callback=lambda results: worker_result_handler(
                               results, metadata, output_path,
                               context.analyzer_binaries)
                           ).get(float('inf'))

            pool.close()
        except Exception:
            pool.terminate()
            raise
        finally:
            pool.join()
    else:
        LOG.info("----==== Summary ====----")

    for skp in skipped_actions:
        LOG.debug_analyzer("%s is skipped", skp.source)

    LOG.info("Total analyzed compilation commands: %d",
             len(analyzed_actions))

    LOG.info("----=================----")

    if not os.listdir(success_dir):
        shutil.rmtree(success_dir)

    if not os.listdir(failed_dir):
        shutil.rmtree(failed_dir)

def construct_config_handler(cls, args, context):
    handler = config_handler_clang_tidy.ClangTidyConfigHandler()
    handler.analyzer_binary = context.analyzer_binaries.get(
        cls.ANALYZER_NAME)

    # FIXME: We cannot get the resource dir from the clang-tidy binary,
    # therefore we use a clang binary that is a sibling of clang-tidy.
    # TODO: Support "clang-tidy -print-resource-dir".
    check_env = get_check_env(context.path_env_extra,
                              context.ld_lib_path_extra)
    # Overwrite PATH to contain only the parent of the clang binary.
    if os.path.isabs(handler.analyzer_binary):
        check_env['PATH'] = os.path.dirname(handler.analyzer_binary)
    clang_bin = ClangSA.resolve_missing_binary('clang', check_env)
    handler.compiler_resource_dir = \
        host_check.get_resource_dir(clang_bin, context)

    try:
        with open(args.tidy_args_cfg_file, 'rb') as tidy_cfg:
            handler.analyzer_extra_arguments = \
                re.sub(r'\$\((.*?)\)',
                       replace_env_var,
                       tidy_cfg.read().strip())
    except IOError as ioerr:
        LOG.debug_analyzer(ioerr)
    except AttributeError as aerr:
        # No clang tidy arguments file was given in the command line.
        LOG.debug_analyzer(aerr)

    try:
        # The config file dumped by clang-tidy contains "..." at the end.
        # This has to be omitted, otherwise the -config flag of clang-tidy
        # cannot consume it.
        with open(args.tidy_config, 'rb') as tidy_config:
            lines = tidy_config.readlines()
            lines = filter(lambda x: x != '...\n', lines)
            handler.checker_config = ''.join(lines)
    except IOError as ioerr:
        LOG.debug_analyzer(ioerr)
    except AttributeError as aerr:
        # No clang tidy config file was given in the command line.
        LOG.debug_analyzer(aerr)

    check_env = get_check_env(context.path_env_extra,
                              context.ld_lib_path_extra)

    checkers = ClangTidy.get_analyzer_checkers(handler, check_env)

    # Read clang-tidy checkers from the config file.
    clang_tidy_checkers = context.checker_config.get(cls.ANALYZER_NAME +
                                                     '_checkers')

    try:
        cmdline_checkers = args.ordered_checkers
    except AttributeError:
        LOG.debug_analyzer('No checkers were defined in '
                           'the command line for %s', cls.ANALYZER_NAME)
        cmdline_checkers = None

    handler.initialize_checkers(
        context.available_profiles,
        context.package_root,
        checkers,
        clang_tidy_checkers,
        cmdline_checkers,
        'enable_all' in args and args.enable_all)

    return handler

def main(args):
    """
    List the checkers available in the specified (or all supported)
    analyzers along with their description or enabled status in various
    formats.
    """

    logger.setup_logger(args.verbose if 'verbose' in args else None)

    # If nothing is set, list checkers for all supported analyzers.
    analyzers = args.analyzers \
        if 'analyzers' in args \
        else analyzer_types.supported_analyzers

    context = analyzer_context.get_context()
    working, errored = analyzer_types.check_supported_analyzers(
        analyzers, context)

    analyzer_environment = get_check_env(context.path_env_extra,
                                         context.ld_lib_path_extra)

    analyzer_config_map = analyzer_types.build_config_handlers(
        args, context, working)

    # List available checker profiles.
    if 'profile' in args and args.profile == 'list':
        if 'details' not in args:
            if args.output_format not in ['csv', 'json']:
                header = ['Profile name']
            else:
                header = ['profile_name']
        else:
            if args.output_format not in ['csv', 'json']:
                header = ['Profile name', 'Description']
            else:
                header = ['profile_name', 'description']

        rows = []
        for (profile, description) in context.available_profiles.items():
            if 'details' not in args:
                rows.append([profile])
            else:
                rows.append([profile, description])

        print(output_formatters.twodim_to_str(args.output_format,
                                              header, rows))
        return

    # Use human-friendly header names depending on the output format.
    if 'details' not in args:
        if args.output_format not in ['csv', 'json']:
            header = ['Name']
        else:
            header = ['name']
    else:
        if args.output_format not in ['csv', 'json']:
            header = ['', 'Name', 'Analyzer', 'Severity', 'Description']
        else:
            header = ['enabled', 'name', 'analyzer', 'severity',
                      'description']

    rows = []
    for analyzer in working:
        config_handler = analyzer_config_map.get(analyzer)
        analyzer_class = analyzer_types.supported_analyzers[analyzer]

        checkers = analyzer_class.get_analyzer_checkers(
            config_handler, analyzer_environment)
        default_checker_cfg = context.checker_config.get(
            analyzer + '_checkers')

        profile_checkers = None
        if 'profile' in args:
            if args.profile not in context.available_profiles:
                LOG.error("Checker profile '%s' does not exist!",
                          args.profile)
                LOG.error("To list available profiles, use "
                          "'--profile list'.")
                return

            profile_checkers = [(args.profile, True)]

        config_handler.initialize_checkers(context.available_profiles,
                                           context.package_root,
                                           checkers,
                                           default_checker_cfg,
                                           profile_checkers)

        for checker_name, value in config_handler.checks().items():
            enabled, description = value

            if not enabled and 'profile' in args:
                continue

            if enabled and 'only_disabled' in args:
                continue
            elif not enabled and 'only_enabled' in args:
                continue

            if args.output_format != 'json':
                enabled = '+' if enabled else '-'

            if 'details' not in args:
                rows.append([checker_name])
            else:
                severity = context.severity_map.get(checker_name)
                rows.append([enabled, checker_name, analyzer,
                             severity, description])

    if rows:
        print(output_formatters.twodim_to_str(args.output_format,
                                              header, rows))

    for analyzer_binary, reason in errored:
        LOG.error("Failed to get checkers for '%s'! "
                  "The error reason was: '%s'", analyzer_binary, reason)
        LOG.error("Please check your installation and the "
                  "'config/package_layout.json' file!")

def check_supported_analyzers(analyzers, context):
    """
    Checks the given analyzers in the current context for their
    executability and support in CodeChecker.

    This method also updates the given context.analyzer_binaries if the
    context's configuration is bogus but had been resolved.

    :return: (enabled, failed) where enabled is a set of analyzer names
             and failed is a set of (analyzer, reason) tuples.
    """

    check_env = get_check_env(context.path_env_extra,
                              context.ld_lib_path_extra)

    analyzer_binaries = context.analyzer_binaries

    enabled_analyzers = set()
    failed_analyzers = set()

    for analyzer_name in analyzers:
        if analyzer_name not in supported_analyzers:
            failed_analyzers.add((analyzer_name,
                                  "Analyzer unsupported by CodeChecker."))
            continue

        # Get the analyzer binary to check if it can run.
        available_analyzer = True
        analyzer_bin = analyzer_binaries.get(analyzer_name)
        if not analyzer_bin:
            failed_analyzers.add((analyzer_name,
                                  "Failed to detect analyzer binary."))
            available_analyzer = False
        elif not os.path.isabs(analyzer_bin):
            # If the analyzer is not given as an absolute path,
            # try to find it...
            found_bin = supported_analyzers[analyzer_name].\
                resolve_missing_binary(analyzer_bin, check_env)

            # found_bin is an absolute path, an executable in one of the
            # PATH folders.
            # If found_bin is the same as the original binary, i.e.
            # normally calling the binary without any search would have
            # resulted in the same binary being called, it's NOT a
            # "not found".
            if found_bin and os.path.basename(found_bin) != analyzer_bin:
                LOG.debug("Configured binary '%s' for analyzer '%s' was "
                          "not found, but environment PATH contains '%s'.",
                          analyzer_bin, analyzer_name, found_bin)
                context.analyzer_binaries[analyzer_name] = found_bin
                analyzer_bin = found_bin

        if not analyzer_bin or \
                not host_check.check_clang(analyzer_bin, check_env):
            # Analyzers unavailable under absolute paths are deliberately
            # treated as a configuration problem.
            failed_analyzers.add((analyzer_name,
                                  "Cannot execute analyzer binary."))
            available_analyzer = False

        if available_analyzer:
            enabled_analyzers.add(analyzer_name)

    return enabled_analyzers, failed_analyzers

def server_init_start(args):
    """
    Start or manage a CodeChecker report server.
    """

    if 'list' in args or 'stop' in args or 'stop_all' in args:
        __instance_management(args)
        sys.exit(0)

    if 'reload' in args:
        __reload_config(args)
        sys.exit(0)

    # Actual server starting from this point.
    if not host_check.check_zlib():
        raise Exception("zlib is not available on the system!")

    # WARNING
    # In the SQLite case the default value of args.dbaddress is used,
    # for which is_localhost() returns True.
    if util.is_localhost(args.dbaddress) and \
            not os.path.exists(args.config_directory):
        os.makedirs(args.config_directory)

    # Make sure the SQLite file can be created if it does not exist yet.
    if 'sqlite' in args and \
            not os.path.isdir(os.path.dirname(args.sqlite)):
        os.makedirs(os.path.dirname(args.sqlite))

    if 'reset_root' in args:
        try:
            os.remove(os.path.join(args.config_directory, 'root.user'))
            LOG.info("Master superuser (root) credentials invalidated and "
                     "deleted. New ones will be generated...")
        except OSError:
            # File doesn't exist.
            pass

    if 'force_auth' in args:
        LOG.info("'--force-authentication' was passed as a command-line "
                 "option. The server will ask for users to authenticate!")

    context = webserver_context.get_context()
    context.codechecker_workspace = args.config_directory
    context.db_username = args.dbusername

    check_env = get_check_env(context.path_env_extra,
                              context.ld_lib_path_extra)

    cfg_sql_server = database.SQLServer.from_cmdline_args(
        vars(args), CONFIG_META, context.config_migration_root,
        interactive=True, env=check_env)

    LOG.info("Checking configuration database ...")
    db_status = cfg_sql_server.connect()
    db_status_msg = database_status.db_status_msg.get(db_status)
    LOG.info(db_status_msg)

    if db_status == DBStatus.SCHEMA_MISSING:
        LOG.debug("Config database schema is missing, initializing new.")
        db_status = cfg_sql_server.connect(init=True)
        if db_status != DBStatus.OK:
            LOG.error("Config database initialization failed!")
            LOG.error("Please check debug logs.")
            sys.exit(1)

    if db_status == DBStatus.SCHEMA_MISMATCH_NO:
        LOG.debug("Configuration database schema mismatch.")
        LOG.debug("No schema upgrade is possible.")
        sys.exit(1)

    force_upgrade = 'force_upgrade' in args

    if db_status == DBStatus.SCHEMA_MISMATCH_OK:
        LOG.debug("Configuration database schema mismatch.")
        LOG.debug("Schema upgrade is possible.")
        LOG.warning("Please note that after the migration only newer "
                    "CodeChecker versions can be used to start the server.")
        LOG.warning("It is advised to make a full backup of your "
                    "configuration database.")
        LOG.warning(cfg_sql_server.get_db_location())

        question = 'Do you want to upgrade to the new schema?' \
                   ' Y(es)/n(o) '
        if force_upgrade or util.get_user_input(question):
            print("Upgrading schema ...")
            ret = cfg_sql_server.upgrade()
            msg = database_status.db_status_msg.get(
                ret, 'Unknown database status')
            print(msg)
            if ret != DBStatus.OK:
                LOG.error("Schema migration failed")
                sys.exit(ret)
        else:
            LOG.info("No schema migration was done.")
            sys.exit(0)

    if db_status == DBStatus.MISSING:
        LOG.error("Missing configuration database.")
        LOG.error("Server can not be started.")
        sys.exit(1)

    # Configuration database setup and check is needed before the product
    # database statuses can be checked.
    try:
        if args.status:
            ret = __db_status_check(cfg_sql_server, context, args.status)
            sys.exit(ret)
    except AttributeError:
        LOG.debug('Status was not in the arguments.')

    try:
        if args.product_to_upgrade:
            ret = __db_migration(cfg_sql_server, context,
                                 args.product_to_upgrade, force_upgrade)
            sys.exit(ret)
    except AttributeError:
        LOG.debug('Product upgrade was not in the arguments.')

    # Create the main database link from the arguments passed over the
    # command line.
    cfg_dir = os.path.abspath(args.config_directory)
    default_product_path = os.path.join(cfg_dir, 'Default.sqlite')
    create_default_product = 'sqlite' in args and \
        not os.path.exists(default_product_path)

    if create_default_product:
        # Create a default product and add it to the configuration database.
        LOG.debug("Create default product...")
        LOG.debug("Configuring schema and migration...")

        prod_server = database.SQLiteDatabase(
            default_product_path, RUN_META,
            context.run_migration_root, check_env)

        LOG.debug("Checking 'Default' product database.")
        db_status = prod_server.connect()
        if db_status != DBStatus.MISSING:
            db_status = prod_server.connect(init=True)
            LOG.debug(database_status.db_status_msg.get(db_status))
            if db_status != DBStatus.OK:
                LOG.error("Failed to configure default product")
                sys.exit(1)

        product_conn_string = prod_server.get_connection_string()

        server.add_initial_run_database(
            cfg_sql_server, product_conn_string)

        LOG.info("Product 'Default' at '%s' created and set up.",
                 default_product_path)

    prod_statuses = check_product_db_status(cfg_sql_server, context)

    upgrade_available = {}
    for k, v in prod_statuses.items():
        db_status, _, _, _ = v
        if db_status == DBStatus.SCHEMA_MISMATCH_OK or \
                db_status == DBStatus.SCHEMA_MISSING:
            upgrade_available[k] = v

    if upgrade_available:
        print_prod_status(prod_statuses)
        LOG.warning("Multiple products can be upgraded, make a backup!")
        __db_migration(cfg_sql_server, context, 'all', force_upgrade)

    prod_statuses = check_product_db_status(cfg_sql_server, context)
    print_prod_status(prod_statuses)

    non_ok_db = False
    for k, v in prod_statuses.items():
        db_status, _, _, _ = v
        if db_status != DBStatus.OK:
            non_ok_db = True
            break

    if non_ok_db:
        msg = "There are some database issues. " \
              "Do you want to start the " \
              "server? Y(es)/n(o) "
        if not util.get_user_input(msg):
            sys.exit(1)

    # Start database viewer.
    checker_md_docs = os.path.join(context.doc_root, 'checker_md_docs')
    checker_md_docs_map = os.path.join(checker_md_docs,
                                       'checker_doc_map.json')
    checker_md_docs_map = util.load_json_or_empty(checker_md_docs_map, {})

    package_data = {'www_root': context.www_root,
                    'doc_root': context.doc_root,
                    'checker_md_docs': checker_md_docs,
                    'checker_md_docs_map': checker_md_docs_map,
                    'version': context.package_git_tag}

    suppr_handler = suppress_handler.GenericSuppressHandler(None, False)

    try:
        server.start_server(args.config_directory,
                            package_data,
                            args.view_port,
                            cfg_sql_server,
                            suppr_handler,
                            args.listen_address,
                            'force_auth' in args,
                            args.skip_db_cleanup,
                            context,
                            check_env)
    except socket.error as err:
        if err.errno == errno.EADDRINUSE:
            LOG.error("Server can't be started, maybe the given port number "
                      "(%s) is already used. Check the connection "
                      "parameters.", args.view_port)
            sys.exit(1)
        else:
            raise

def __db_migration(cfg_sql_server, context, product_to_upgrade='all',
                   force_upgrade=False):
    """
    Handle database management: schema checking and migration.
    """
    LOG.info("Preparing schema upgrade for %s", str(product_to_upgrade))
    product_name = product_to_upgrade

    prod_statuses = check_product_db_status(cfg_sql_server, context)

    prod_to_upgrade = []

    if product_name != 'all':
        avail = prod_statuses.get(product_name)
        if not avail:
            LOG.error("No product was found with this endpoint: %s",
                      product_name)
            return 1

        prod_to_upgrade.append(product_name)
    else:
        prod_to_upgrade = list(prod_statuses.keys())

    migration_root = context.run_migration_root

    LOG.warning("Please note that after the migration only newer "
                "CodeChecker versions can be used to start the server.")
    LOG.warning("It is advised to make a full backup of your "
                "run databases.")

    cc_env = get_check_env(context.path_env_extra,
                           context.ld_lib_path_extra)

    for prod in prod_to_upgrade:
        LOG.info("========================")
        LOG.info("Checking: %s", prod)
        engine = cfg_sql_server.create_engine()
        config_session = sessionmaker(bind=engine)
        sess = config_session()

        product = sess.query(ORMProduct).filter(
            ORMProduct.endpoint == prod).first()

        db = database.SQLServer.from_connection_string(product.connection,
                                                       RUN_META,
                                                       migration_root,
                                                       interactive=False,
                                                       env=cc_env)

        db_status = db.connect()

        msg = database_status.db_status_msg.get(db_status,
                                                'Unknown database status')
        LOG.info(msg)

        if db_status == DBStatus.SCHEMA_MISSING:
            question = 'Do you want to initialize a new schema for ' \
                       + product.endpoint + '? Y(es)/n(o) '
            if force_upgrade or util.get_user_input(question):
                ret = db.connect(init=True)
                msg = database_status.db_status_msg.get(
                    ret, 'Unknown database status')
                LOG.info(msg)
            else:
                LOG.info("No schema initialization was done.")

        elif db_status == DBStatus.SCHEMA_MISMATCH_OK:
            question = 'Do you want to upgrade to the new schema for ' \
                       + product.endpoint + '? Y(es)/n(o) '
            if force_upgrade or util.get_user_input(question):
                LOG.info("Upgrading schema ...")
                ret = db.upgrade()
                LOG.info("Done.")
                msg = database_status.db_status_msg.get(
                    ret, 'Unknown database status')
                LOG.info(msg)
            else:
                LOG.info("No schema migration was done.")

        sess.commit()
        sess.close()
        engine.dispose()
        LOG.info("========================")

    return 0