def print_version(output_format=None):
    """ Print web server version information in the given format. """
    context = webserver_context.get_context()

    server_versions = ['{0}.{1}'.format(major, minor)
                       for major, minor in
                       version.SUPPORTED_VERSIONS.items()]
    if output_format != 'json':
        server_versions = ', '.join(server_versions)

    rows = [
        ("Base package version", context.version),
        ("Package build date", context.package_build_date),
        ("Git commit ID (hash)", context.package_git_hash),
        ("Git tag information", context.package_git_tag),
        ("Configuration schema", str(context.product_db_version_info)),
        ("Database schema", str(context.run_db_version_info)),
        ("Server supported API (Thrift)", server_versions),
        ("Client API (Thrift)", version.CLIENT_API)]

    if output_format != "json":
        print(output_formatters.twodim_to_str(output_format,
                                              ["Kind", "Version"], rows))
    else:
        # Use a special JSON format here, instead of
        # [ {"kind": "something", "version": "0.0.0"}, {"kind": "foo", ...} ]
        # do
        # { "something": "0.0.0", "foo": ... }
        print(json.dumps(dict(rows)))
def print_version(output_format=None):
    """ Print web server version information in the given format. """
    context = webserver_context.get_context()

    server_versions = ['{0}.{1}'.format(major, minor)
                       for major, minor in
                       version.SUPPORTED_VERSIONS.items()]
    if output_format != 'json':
        server_versions = ', '.join(server_versions)

    rows = [
        ("Base package version", context.version),
        ("Package build date", context.package_build_date),
        ("Git commit ID (hash)", context.package_git_hash),
        ("Git tag information", context.package_git_tag),
        ("Server supported API (Thrift)", server_versions),
        ("Client API (Thrift)", version.CLIENT_API)]

    if output_format != "json":
        print(output_formatters.twodim_to_str(output_format,
                                              ["Kind", "Version"], rows))
    else:
        # Use a special JSON format here, instead of
        # [ {"kind": "something", "version": "0.0.0"}, {"kind": "foo", ...} ]
        # do
        # { "something": "0.0.0", "foo": ... }
        print(json.dumps(dict(rows)))
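
# A usage sketch for the two output modes above; the concrete version
# strings are placeholders, and the JSON keys follow the first column of
# the 'rows' list built in print_version():
#
#     print_version('table')  # two-column "Kind"/"Version" table
#     print_version('json')   # flat object, e.g.
#                             # {"Base package version": "x.y.z", ...}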
def __init__(self):
    context = webserver_context.get_context()

    self.server_versions = [
        f'{major}.{minor}'
        for major, minor in version.SUPPORTED_VERSIONS.items()]

    self.version = context.version
    self.build_date = context.package_build_date
    self.git_hash = context.package_git_hash
    self.git_tag = context.package_git_tag
    self.client_api = version.CLIENT_API
def upgrade():
    op.add_column(
        'products',
        sa.Column('latest_storage_date', sa.DateTime(), nullable=True))
    op.add_column(
        'products',
        sa.Column('num_of_runs', sa.Integer(), server_default="0",
                  nullable=True))

    try:
        product_con = op.get_bind()
        products = product_con.execute(
            "SELECT id, connection from products").fetchall()

        # Fill the new columns from each product's own run database.
        context = webserver_context.get_context()
        for product_id, connection in products:
            sql_server = database.SQLServer.from_connection_string(
                connection, RUN_META, context.run_migration_root)

            engine = sa.create_engine(sql_server.get_connection_string())
            conn = engine.connect()

            run_info = \
                conn.execute("SELECT count(*), max(date) from runs") \
                    .fetchone()

            values = [f"num_of_runs={run_info[0]}"]
            if run_info[1]:
                values.append(f"latest_storage_date='{run_info[1]}'")

            product_con.execute(f"""
                UPDATE products
                SET {', '.join(values)}
                WHERE id={product_id}
            """)
    except Exception as ex:
        LOG.error("Failed to fill product detail columns (num_of_runs, "
                  "latest_storage_date): %s", ex)
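
# An Alembic migration of this shape is conventionally paired with a
# downgrade() reverting the schema change. A minimal sketch (hypothetical,
# not part of the original migration), assuming the aggregated values need
# no backup because they can be recomputed from the run databases:
def downgrade():
    # Drop the columns added in upgrade().
    op.drop_column('products', 'num_of_runs')
    op.drop_column('products', 'latest_storage_date')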
def server_init_start(args):
    """
    Start or manage a CodeChecker report server.
    """

    if 'list' in args or 'stop' in args or 'stop_all' in args:
        __instance_management(args)
        sys.exit(0)

    if 'reload' in args:
        __reload_config(args)
        sys.exit(0)

    # Actual server starting from this point.
    if not host_check.check_zlib():
        raise Exception("zlib is not available on the system!")

    # WARNING
    # In case of SQLite the args.dbaddress default value is used,
    # for which is_localhost should return true.
    if util.is_localhost(args.dbaddress) and \
            not os.path.exists(args.config_directory):
        os.makedirs(args.config_directory)

    # Make sure the SQLite file can be created if it does not exist.
    if 'sqlite' in args and \
            not os.path.isdir(os.path.dirname(args.sqlite)):
        os.makedirs(os.path.dirname(args.sqlite))

    if 'reset_root' in args:
        try:
            os.remove(os.path.join(args.config_directory, 'root.user'))
            LOG.info("Master superuser (root) credentials invalidated and "
                     "deleted. New ones will be generated...")
        except OSError:
            # File doesn't exist.
            pass

    if 'force_auth' in args:
        LOG.info("'--force-authentication' was passed as a command-line "
                 "option. The server will ask for users to authenticate!")

    context = webserver_context.get_context()
    context.codechecker_workspace = args.config_directory
    context.db_username = args.dbusername

    check_env = get_check_env(context.path_env_extra,
                              context.ld_lib_path_extra)

    cfg_sql_server = database.SQLServer.from_cmdline_args(
        vars(args), CONFIG_META, context.config_migration_root,
        interactive=True, env=check_env)

    LOG.info("Checking configuration database ...")
    db_status = cfg_sql_server.connect()
    db_status_msg = database_status.db_status_msg.get(db_status)
    LOG.info(db_status_msg)

    if db_status == DBStatus.SCHEMA_MISSING:
        LOG.debug("Config database schema is missing, initializing new.")
        db_status = cfg_sql_server.connect(init=True)
        if db_status != DBStatus.OK:
            LOG.error("Config database initialization failed!")
            LOG.error("Please check debug logs.")
            sys.exit(1)

    if db_status == DBStatus.SCHEMA_MISMATCH_NO:
        LOG.debug("Configuration database schema mismatch.")
        LOG.debug("No schema upgrade is possible.")
        sys.exit(1)

    force_upgrade = 'force_upgrade' in args

    if db_status == DBStatus.SCHEMA_MISMATCH_OK:
        LOG.debug("Configuration database schema mismatch.")
        LOG.debug("Schema upgrade is possible.")
        LOG.warning("Please note that after the migration only newer "
                    "CodeChecker versions can be used to start the server.")
        LOG.warning("It is advised to make a full backup of your "
                    "configuration database.")
        LOG.warning(cfg_sql_server.get_db_location())

        question = 'Do you want to upgrade to the new schema?' \
                   ' Y(es)/n(o) '
        if force_upgrade or util.get_user_input(question):
            print("Upgrading schema ...")
            ret = cfg_sql_server.upgrade()
            msg = database_status.db_status_msg.get(
                ret, 'Unknown database status')
            print(msg)
            if ret != DBStatus.OK:
                LOG.error("Schema migration failed")
                sys.exit(ret)
        else:
            LOG.info("No schema migration was done.")
            sys.exit(0)

    if db_status == DBStatus.MISSING:
        LOG.error("Missing configuration database.")
        LOG.error("Server can not be started.")
        sys.exit(1)

    # Configuration database setup and check is needed before the database
    # statuses can be checked.
    try:
        if args.status:
            ret = __db_status_check(cfg_sql_server, context, args.status)
            sys.exit(ret)
    except AttributeError:
        LOG.debug('Status was not in the arguments.')

    try:
        if args.product_to_upgrade:
            ret = __db_migration(cfg_sql_server, context,
                                 args.product_to_upgrade, force_upgrade)
            sys.exit(ret)
    except AttributeError:
        LOG.debug('Product upgrade was not in the arguments.')

    # Create the main database link from the arguments passed over the
    # command line.
    cfg_dir = os.path.abspath(args.config_directory)
    default_product_path = os.path.join(cfg_dir, 'Default.sqlite')
    create_default_product = 'sqlite' in args and \
        not os.path.exists(default_product_path)

    if create_default_product:
        # Create a default product and add it to the configuration database.
        LOG.debug("Create default product...")
        LOG.debug("Configuring schema and migration...")

        prod_server = database.SQLiteDatabase(
            default_product_path, RUN_META,
            context.run_migration_root, check_env)

        LOG.debug("Checking 'Default' product database.")
        db_status = prod_server.connect()
        if db_status != DBStatus.MISSING:
            db_status = prod_server.connect(init=True)

        LOG.debug(database_status.db_status_msg.get(db_status))
        if db_status != DBStatus.OK:
            LOG.error("Failed to configure default product")
            sys.exit(1)

        product_conn_string = prod_server.get_connection_string()

        server.add_initial_run_database(cfg_sql_server, product_conn_string)

        LOG.info("Product 'Default' at '%s' created and set up.",
                 default_product_path)

    prod_statuses = check_product_db_status(cfg_sql_server, context)

    upgrade_available = {}
    for k, v in prod_statuses.items():
        db_status, _, _, _ = v
        if db_status == DBStatus.SCHEMA_MISMATCH_OK or \
                db_status == DBStatus.SCHEMA_MISSING:
            upgrade_available[k] = v

    if upgrade_available:
        print_prod_status(prod_statuses)
        LOG.warning("Multiple products can be upgraded, make a backup!")
        __db_migration(cfg_sql_server, context, 'all', force_upgrade)

    prod_statuses = check_product_db_status(cfg_sql_server, context)
    print_prod_status(prod_statuses)

    non_ok_db = False
    for v in prod_statuses.values():
        db_status, _, _, _ = v
        if db_status != DBStatus.OK:
            non_ok_db = True
            break

    if non_ok_db:
        msg = "There are some database issues. " \
              "Do you want to start the server? Y(es)/n(o) "
        if not util.get_user_input(msg):
            sys.exit(1)

    # Start database viewer.
    checker_md_docs = os.path.join(context.doc_root, 'checker_md_docs')
    checker_md_docs_map = os.path.join(checker_md_docs,
                                       'checker_doc_map.json')
    checker_md_docs_map = util.load_json_or_empty(checker_md_docs_map, {})

    package_data = {'www_root': context.www_root,
                    'doc_root': context.doc_root,
                    'checker_md_docs': checker_md_docs,
                    'checker_md_docs_map': checker_md_docs_map,
                    'version': context.package_git_tag}

    suppr_handler = suppress_handler.GenericSuppressHandler(None, False)

    try:
        server.start_server(args.config_directory, package_data,
                            args.view_port, cfg_sql_server, suppr_handler,
                            args.listen_address, 'force_auth' in args,
                            args.skip_db_cleanup, context, check_env)
    except socket.error as err:
        if err.errno == errno.EADDRINUSE:
            LOG.error("Server can't be started, maybe the given port number "
                      "(%s) is already used. Check the connection "
                      "parameters.", args.view_port)
            sys.exit(1)
        else:
            raise
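
# A hypothetical minimal invocation of server_init_start(); in the real CLI
# these attributes are populated by the 'CodeChecker server' argument
# parser, and every value below is illustrative only. Missing optional
# attributes such as 'status' and 'product_to_upgrade' are handled by the
# AttributeError branches above.
import argparse

args = argparse.Namespace(
    config_directory='/home/user/.codechecker',
    sqlite='/home/user/.codechecker/config.sqlite',
    dbaddress='localhost',
    dbusername='codechecker',
    view_port=8001,
    listen_address='localhost',
    skip_db_cleanup=False)

server_init_start(args)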
def main(args):
    """
    Store the defect results in the specified input list as bug reports in
    the database.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    try:
        cmd_config.check_config_file(args)
    except FileNotFoundError as fnerr:
        LOG.error(fnerr)
        sys.exit(1)

    if not host_check.check_zlib():
        raise Exception("zlib is not available on the system!")

    # To ensure the help message prints the default folder properly,
    # the 'default' for 'args.input' is a string, not a list.
    # But we need lists for the foreach here to work.
    if isinstance(args.input, str):
        args.input = [args.input]

    if 'name' not in args:
        LOG.debug("Generating name for analysis...")
        generated = __get_run_name(args.input)
        if generated:
            setattr(args, 'name', generated)
        else:
            LOG.error("No suitable name was found in the inputs for the "
                      "analysis run. Please specify one by passing argument "
                      "--name run_name in the invocation.")
            sys.exit(2)  # argparse returns error code 2 for bad invocations.

    LOG.info("Storing analysis results for run '%s'", args.name)

    if 'force' in args:
        LOG.info("argument --force was specified: the run with name '%s' "
                 "will be deleted.", args.name)

    # Set up the connection to the remote server.
    client = libclient.setup_client(args.product_url)

    _, zip_file = tempfile.mkstemp('.zip')
    LOG.debug("Will write mass store ZIP to '%s'...", zip_file)

    try:
        LOG.debug("Assembling zip file.")
        try:
            assemble_zip(args.input, zip_file, client)
        except Exception as ex:
            print(ex)
            import traceback
            # Print the exception traceback, not just the current stack.
            traceback.print_exc()

        zip_size = os.stat(zip_file).st_size
        LOG.debug("Assembling zip done, size is %s", sizeof_fmt(zip_size))

        if zip_size > MAX_UPLOAD_SIZE:
            LOG.error("The result list to upload is too big (max: %s).",
                      sizeof_fmt(MAX_UPLOAD_SIZE))
            sys.exit(1)

        with open(zip_file, 'rb') as zf:
            b64zip = base64.b64encode(zf.read()).decode("utf-8")

        context = webserver_context.get_context()

        trim_path_prefixes = args.trim_path_prefix if \
            'trim_path_prefix' in args else None

        description = args.description if 'description' in args else None

        LOG.info("Storing results to the server...")
        client.massStoreRun(args.name,
                            args.tag if 'tag' in args else None,
                            str(context.version),
                            b64zip,
                            'force' in args,
                            trim_path_prefixes,
                            description)

        # Store analysis statistics if the server allows them.
        if client.allowsStoringAnalysisStatistics():
            storing_analysis_statistics(client, args.input, args.name)

        LOG.info("Storage finished successfully.")
    except RequestFailed as reqfail:
        if reqfail.errorCode == ErrorCode.SOURCE_FILE:
            header = ['File', 'Line', 'Checker name']
            table = twodim_to_str('table', header,
                                  [c.split('|') for c in reqfail.extraInfo])
            LOG.warning("Setting the review statuses for some reports failed "
                        "because of invalid source code comments: "
                        "%s\n %s", reqfail.message, table)
        sys.exit(1)
    except Exception as ex:
        import traceback
        traceback.print_exc()
        LOG.info("Storage failed: %s", str(ex))
        sys.exit(1)
    finally:
        os.remove(zip_file)
def main(args):
    """
    Store the defect results in the specified input list as bug reports in
    the database.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    if not host_check.check_zlib():
        raise Exception("zlib is not available on the system!")

    # To ensure the help message prints the default folder properly,
    # the 'default' for 'args.input' is a string, not a list.
    # But we need lists for the foreach here to work.
    if isinstance(args.input, str):
        args.input = [args.input]

    if 'name' not in args:
        LOG.debug("Generating name for analysis...")
        generated = __get_run_name(args.input)
        if generated:
            setattr(args, 'name', generated)
        else:
            LOG.error("No suitable name was found in the inputs for the "
                      "analysis run. Please specify one by passing argument "
                      "--name run_name in the invocation.")
            sys.exit(2)  # argparse returns error code 2 for bad invocations.

    LOG.info("Storing analysis results for run '%s'", args.name)

    if 'force' in args:
        LOG.info("argument --force was specified: the run with name '%s' "
                 "will be deleted.", args.name)

    protocol, host, port, product_name = split_product_url(args.product_url)

    # Before any transmission happens, check if we have the PRODUCT_STORE
    # permission to prevent a possibly long ZIP operation only to get an
    # error later on.
    product_client = libclient.setup_product_client(protocol, host, port,
                                                    product_name)
    product_id = product_client.getCurrentProduct().id

    auth_client, _ = libclient.setup_auth_client(protocol, host, port)
    has_perm = libclient.check_permission(
        auth_client, Permission.PRODUCT_STORE, {'productID': product_id})
    if not has_perm:
        LOG.error("You are not authorised to store analysis results in "
                  "product '%s'", product_name)
        sys.exit(1)

    # Set up the connection to the remote server.
    client = libclient.setup_client(args.product_url, product_client=False)
    LOG.debug("Initializing client connecting to %s:%d/%s done.",
              host, port, product_name)

    _, zip_file = tempfile.mkstemp('.zip')
    LOG.debug("Will write mass store ZIP to '%s'...", zip_file)

    try:
        assemble_zip(args.input, zip_file, client)

        if os.stat(zip_file).st_size > MAX_UPLOAD_SIZE:
            LOG.error("The result list to upload is too big (max: %s).",
                      sizeof_fmt(MAX_UPLOAD_SIZE))
            sys.exit(1)

        with open(zip_file, 'rb') as zf:
            b64zip = base64.b64encode(zf.read()).decode("utf-8")

        context = webserver_context.get_context()

        trim_path_prefixes = args.trim_path_prefix if \
            'trim_path_prefix' in args else None

        client.massStoreRun(args.name,
                            args.tag if 'tag' in args else None,
                            str(context.version),
                            b64zip,
                            'force' in args,
                            trim_path_prefixes)

        # Store analysis statistics if the server allows them.
        if client.allowsStoringAnalysisStatistics():
            storing_analysis_statistics(client, args.input, args.name)

        LOG.info("Storage finished successfully.")
    except RequestFailed as reqfail:
        if reqfail.errorCode == ErrorCode.SOURCE_FILE:
            header = ['File', 'Line', 'Checker name']
            table = twodim_to_str('table', header,
                                  [c.split('|') for c in reqfail.extraInfo])
            LOG.warning("Setting the review statuses for some reports failed "
                        "because of invalid source code comments: "
                        "%s\n %s", reqfail.message, table)
        sys.exit(1)
    except Exception as ex:
        LOG.info("Storage failed: %s", str(ex))
        sys.exit(1)
    finally:
        os.remove(zip_file)
def handle_diff_results(args):

    init_logger(args.verbose if 'verbose' in args else None)
    check_deprecated_arg_usage(args)

    f_severities, f_checkers, f_file_path, _, _ = check_filter_values(args)

    context = webserver_context.get_context()

    def skip_report_dir_result(report):
        """
        Returns True if the report should be skipped from the results
        based on the given filter set.
        """
        if f_severities:
            severity_name = context.severity_map.get(
                report.main['check_name'])
            if severity_name.lower() not in map(str.lower, f_severities):
                return True

        if f_checkers:
            checker_name = report.main['check_name']
            if not any(re.match(r'^' + c.replace("*", ".*") + '$',
                                checker_name, re.IGNORECASE)
                       for c in f_checkers):
                return True

        if f_file_path:
            file_path = report.files[int(report.main['location']['file'])]
            if not any(re.match(r'^' + f.replace("*", ".*") + '$',
                                file_path, re.IGNORECASE)
                       for f in f_file_path):
                return True

        if 'checker_msg' in args:
            checker_msg = report.main['description']
            if not any(re.match(r'^' + c.replace("*", ".*") + '$',
                                checker_msg, re.IGNORECASE)
                       for c in args.checker_msg):
                return True

        return False

    def get_report_dir_results(reportdir):
        all_reports = []
        processed_path_hashes = set()
        for filename in os.listdir(reportdir):
            if filename.endswith(".plist"):
                file_path = os.path.join(reportdir, filename)
                LOG.debug("Parsing: %s", file_path)
                try:
                    files, reports = plist_parser.parse_plist(file_path)
                    for report in reports:
                        path_hash = get_report_path_hash(report, files)
                        if path_hash in processed_path_hashes:
                            LOG.debug("Not showing report because it is a "
                                      "deduplication of an already "
                                      "processed report!")
                            LOG.debug("Path hash: %s", path_hash)
                            LOG.debug(report)
                            continue

                        if skip_report_dir_result(report):
                            continue

                        processed_path_hashes.add(path_hash)
                        report.main['location']['file_name'] = \
                            files[int(report.main['location']['file'])]
                        all_reports.append(report)
                except Exception as ex:
                    LOG.error('The generated plist is not valid!')
                    LOG.error(ex)
        return all_reports

    def get_line_from_file(filename, lineno):
        with open(filename, 'r') as f:
            for i, line in enumerate(f, 1):
                if i == lineno:
                    return line
        return ""

    def get_diff_base_results(client, baseids, base_hashes,
                              suppressed_hashes):
        base_results = []
        report_filter = ttypes.ReportFilter()
        add_filter_conditions(client, report_filter, args)

        sort_mode = [ttypes.SortMode(ttypes.SortType.FILENAME,
                                     ttypes.Order.ASC)]
        limit = constants.MAX_QUERY_SIZE
        offset = 0

        report_filter.reportHash = base_hashes + suppressed_hashes
        results = client.getRunResults(baseids, limit, offset, sort_mode,
                                       report_filter, None)
        while results:
            base_results.extend(results)
            offset += limit
            results = client.getRunResults(baseids, limit, offset,
                                           sort_mode, report_filter, None)
        return base_results

    def get_suppressed_reports(reports):
        """ Returns suppressed reports. """
        suppressed_in_code = []
        for rep in reports:
            bughash = rep.report_hash
            source_file = rep.main['location']['file_name']
            bug_line = rep.main['location']['line']
            checker_name = rep.main['check_name']

            sc_handler = SourceCodeCommentHandler(source_file)
            src_comment_data = sc_handler.filter_source_line_comments(
                bug_line, checker_name)

            if len(src_comment_data) == 1:
                suppressed_in_code.append(bughash)
                LOG.debug("Bug %s is suppressed in code. file: %s Line %s",
                          bughash, source_file, bug_line)
            elif len(src_comment_data) > 1:
                LOG.warning("Multiple source code comments can be found "
                            "for '%s' checker in '%s' at line %s. "
                            "This bug will not be suppressed!",
                            checker_name, source_file, bug_line)
        return suppressed_in_code

    def get_diff_type():
        """
        Returns the Thrift DiffType value by processing the arguments.
        """
        if 'new' in args:
            return ttypes.DiffType.NEW
        if 'unresolved' in args:
            return ttypes.DiffType.UNRESOLVED
        if 'resolved' in args:
            return ttypes.DiffType.RESOLVED
        return None

    def get_diff_local_dir_remote_run(client, report_dir, run_name):
        """ Compares a local report directory with a remote run. """
        filtered_reports = []
        report_dir_results = get_report_dir_results(
            os.path.abspath(report_dir))
        suppressed_in_code = get_suppressed_reports(report_dir_results)

        diff_type = get_diff_type()
        run_ids, run_names, _ = process_run_arg(run_name)
        local_report_hashes = set(r.report_hash
                                  for r in report_dir_results)

        if diff_type == ttypes.DiffType.NEW:
            # Get report hashes which can be found only in the remote runs.
            remote_hashes = \
                client.getDiffResultsHash(run_ids, local_report_hashes,
                                          ttypes.DiffType.RESOLVED)

            results = get_diff_base_results(client, run_ids, remote_hashes,
                                            suppressed_in_code)
            for result in results:
                filtered_reports.append(result)
        elif diff_type == ttypes.DiffType.UNRESOLVED:
            # Get remote hashes which can be found in the remote run and
            # in the local report directory.
            remote_hashes = \
                client.getDiffResultsHash(run_ids, local_report_hashes,
                                          ttypes.DiffType.UNRESOLVED)
            for result in report_dir_results:
                rep_h = result.report_hash
                if rep_h in remote_hashes and \
                        rep_h not in suppressed_in_code:
                    filtered_reports.append(result)
        elif diff_type == ttypes.DiffType.RESOLVED:
            # Get remote hashes which can be found in the remote run and
            # in the local report directory.
            remote_hashes = \
                client.getDiffResultsHash(run_ids, local_report_hashes,
                                          ttypes.DiffType.UNRESOLVED)
            for result in report_dir_results:
                if result.report_hash not in remote_hashes:
                    filtered_reports.append(result)

        return filtered_reports, run_names

    def get_diff_remote_run_local_dir(client, run_name, report_dir):
        """ Compares a remote run with a local report directory. """
        filtered_reports = []
        report_dir_results = get_report_dir_results(
            os.path.abspath(report_dir))
        suppressed_in_code = get_suppressed_reports(report_dir_results)

        diff_type = get_diff_type()
        run_ids, run_names, _ = process_run_arg(run_name)
        local_report_hashes = set(r.report_hash
                                  for r in report_dir_results)

        remote_hashes = client.getDiffResultsHash(run_ids,
                                                  local_report_hashes,
                                                  diff_type)

        if diff_type in [ttypes.DiffType.NEW, ttypes.DiffType.UNRESOLVED]:
            # Show reports from the report dir which are not present in
            # the baseline (NEW reports) or appear on both sides
            # (UNRESOLVED reports) and are not suppressed in the code.
            for result in report_dir_results:
                rep_h = result.report_hash
                if rep_h in remote_hashes and \
                        rep_h not in suppressed_in_code:
                    filtered_reports.append(result)
        elif diff_type == ttypes.DiffType.RESOLVED:
            # Show bugs in the baseline (server) which are not present in
            # the report dir or are suppressed.
            results = get_diff_base_results(client, run_ids, remote_hashes,
                                            suppressed_in_code)
            for result in results:
                filtered_reports.append(result)

        return filtered_reports, run_names

    def get_diff_remote_runs(client, basename, newname):
        """
        Compares two remote runs and returns the filtered results.
        """
        report_filter = ttypes.ReportFilter()
        add_filter_conditions(client, report_filter, args)

        base_ids, base_run_names, base_run_tags = process_run_arg(basename)
        report_filter.runTag = base_run_tags

        cmp_data = ttypes.CompareData()
        cmp_data.diffType = get_diff_type()

        new_ids, new_run_names, new_run_tags = process_run_arg(newname)
        cmp_data.runIds = new_ids
        cmp_data.runTag = new_run_tags

        # Do not show resolved bugs in compare mode new.
        if cmp_data.diffType == ttypes.DiffType.NEW:
            report_filter.detectionStatus = [
                ttypes.DetectionStatus.NEW,
                ttypes.DetectionStatus.UNRESOLVED,
                ttypes.DetectionStatus.REOPENED]

        sort_mode = [ttypes.SortMode(ttypes.SortType.FILENAME,
                                     ttypes.Order.ASC)]
        limit = constants.MAX_QUERY_SIZE
        offset = 0

        all_results = []
        results = client.getRunResults(base_ids, limit, offset, sort_mode,
                                       report_filter, cmp_data)
        while results:
            all_results.extend(results)
            offset += limit
            results = client.getRunResults(base_ids, limit, offset,
                                           sort_mode, report_filter,
                                           cmp_data)
        return all_results, base_run_names, new_run_names

    def get_diff_local_dirs(basename, newname):
        """
        Compares two report directories and returns the filtered results.
        """
        filtered_reports = []
        base_results = get_report_dir_results(os.path.abspath(basename))
        new_results = get_report_dir_results(os.path.abspath(newname))

        base_hashes = set(res.report_hash for res in base_results)
        new_hashes = set(res.report_hash for res in new_results)

        diff_type = get_diff_type()
        if diff_type == ttypes.DiffType.NEW:
            for res in new_results:
                if res.report_hash not in base_hashes:
                    filtered_reports.append(res)
        elif diff_type == ttypes.DiffType.UNRESOLVED:
            for res in new_results:
                if res.report_hash in base_hashes:
                    filtered_reports.append(res)
        elif diff_type == ttypes.DiffType.RESOLVED:
            for res in base_results:
                if res.report_hash not in new_hashes:
                    filtered_reports.append(res)

        return filtered_reports

    def cached_report_file_lookup(file_cache, file_id):
        """
        Get source file data for the given file and cache it in a file
        cache if the file data is not found in the cache. Finally, it
        returns the source file data from the cache.
        """
        if file_id not in file_cache:
            source = client.getSourceFileData(file_id, True,
                                              ttypes.Encoding.BASE64)
            file_content = base64.b64decode(source.fileContent)
            file_cache[file_id] = {'id': file_id,
                                   'path': source.filePath,
                                   'content': file_content}
        return file_cache[file_id]

    def get_report_data(client, reports, file_cache):
        """
        Returns the necessary report files and report data events for the
        HTML plist parser.
        """
        file_sources = {}
        report_data = []

        for report in reports:
            file_sources[report.fileId] = cached_report_file_lookup(
                file_cache, report.fileId)

            details = client.getReportDetails(report.reportId)
            events = []
            for event in details.pathEvents:
                file_sources[event.fileId] = cached_report_file_lookup(
                    file_cache, event.fileId)

                location = {'line': event.startLine,
                            'col': event.startCol,
                            'file': event.fileId}

                events.append({'location': location,
                               'message': event.msg})

            # Get extended data.
            macros = []
            notes = []
            for extended_data in details.extendedData:
                file_sources[extended_data.fileId] = \
                    cached_report_file_lookup(file_cache,
                                              extended_data.fileId)

                location = {'line': extended_data.startLine,
                            'col': extended_data.startCol,
                            'file': extended_data.fileId}

                # Use the extended data's own message, not the last
                # path event's.
                if extended_data.type == \
                        ttypes.ExtendedReportDataType.MACRO:
                    macros.append({'location': location,
                                   'expansion': extended_data.msg})
                elif extended_data.type == \
                        ttypes.ExtendedReportDataType.NOTE:
                    notes.append({'location': location,
                                  'message': extended_data.msg})

            report_data.append({
                'events': events,
                'macros': macros,
                'notes': notes,
                'path': report.checkedFile,
                'reportHash': report.bugHash,
                'checkerName': report.checkerId})

        return {'files': file_sources,
                'reports': report_data}

    def reports_to_report_data(reports):
        """
        Converts reports from the Report class from one plist file to
        report data events for the HTML plist parser.
        """
        file_sources = {}
        report_data = []

        for report in reports:
            # Not every report in this list refers to the same files,
            # thus we need to create a single file list with all files
            # from all reports.
            for file_index, file_path in enumerate(report.files):
                if file_index not in file_sources:
                    try:
                        with io.open(file_path, 'r', encoding='UTF-8',
                                     errors='ignore') as source_data:
                            content = source_data.read()
                    except (OSError, IOError):
                        content = file_path + " NOT FOUND."

                    file_sources[file_index] = {'id': file_index,
                                                'path': file_path,
                                                'content': content}

            events = []
            for element in report.bug_path:
                kind = element['kind']
                if kind == 'event':
                    events.append({'location': element['location'],
                                   'message': element['message']})

            macros = []
            for macro in report.macro_expansions:
                macros.append({'location': macro['location'],
                               'expansion': macro['expansion'],
                               'name': macro['name']})

            notes = []
            for note in report.notes:
                notes.append({'location': note['location'],
                              'message': note['message']})

            report_hash = \
                report.main['issue_hash_content_of_line_in_context']
            report_data.append({
                'events': events,
                'macros': macros,
                'notes': notes,
                'path': report.main['location']['file_name'],
                'reportHash': report_hash,
                'checkerName': report.main['check_name']})

        return {'files': file_sources,
                'reports': report_data}

    def report_to_html(client, reports, output_dir):
        """
        Generate HTML output files for the given reports in the given
        output directory by using the Plist To HTML parser.
        """
        html_builder = PlistToHtml.HtmlBuilder(
            context.path_plist_to_html_dist,
            context.severity_map)

        file_report_map = defaultdict(list)
        for report in reports:
            if isinstance(report, Report):
                file_path = report.main['location']['file_name']
            else:
                file_path = report.checkedFile
            file_report_map[file_path].append(report)

        file_cache = {}
        for file_path, file_reports in file_report_map.items():
            checked_file = file_path
            filename = os.path.basename(checked_file)
            # hashlib needs bytes, so encode the path before hashing.
            h = int(hashlib.md5(
                file_path.encode('utf-8')).hexdigest(), 16) % (10 ** 8)

            if isinstance(file_reports[0], Report):
                report_data = reports_to_report_data(file_reports)
            else:
                report_data = get_report_data(client, file_reports,
                                              file_cache)

            output_path = os.path.join(output_dir,
                                       filename + '_' + str(h) + '.html')
            html_builder.create(output_path, report_data)
            print('Html file was generated for file://{0}: file://{1}'
                  .format(checked_file, output_path))

        html_builder.create_index_html(output_dir)

    def print_reports(client, reports, output_format):
        output_dir = args.export_dir if 'export_dir' in args else None
        if 'clean' in args and output_dir and os.path.isdir(output_dir):
            print("Previous analysis results in '{0}' have been removed, "
                  "overwriting with current results.".format(output_dir))
            shutil.rmtree(output_dir)

        if output_format == 'json':
            output = []
            for report in reports:
                if isinstance(report, Report):
                    output.append(report.main)
                else:
                    output.append(report)
            print(CmdLineOutputEncoder().encode(output))
            return

        if output_format == 'html':
            output_dir = args.export_dir
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)

            print("Generating HTML output files to file://{0} directory:\n"
                  .format(output_dir))
            report_to_html(client, reports, output_dir)

            print('\nTo view the results in a browser run:\n'
                  '  $ firefox {0}'.format(os.path.join(args.export_dir,
                                                        'index.html')))
            return

        # Column order matches the row tuples built below.
        header = ['Severity', 'File', 'Msg', 'Checker', 'Source']
        rows = []

        source_lines = defaultdict(set)
        for report in reports:
            if not isinstance(report, Report) and report.line is not None:
                source_lines[report.fileId].add(report.line)

        lines_in_files_requested = []
        for key in source_lines:
            lines_in_files_requested.append(
                ttypes.LinesInFilesRequested(fileId=key,
                                             lines=source_lines[key]))

        for report in reports:
            source_line = ''
            if isinstance(report, Report):
                # The report comes from a plist file.
                bug_line = report.main['location']['line']
                bug_col = report.main['location']['col']
                checked_file = report.main['location']['file_name'] \
                    + ':' + str(bug_line) + ":" + str(bug_col)
                check_name = report.main['check_name']
                sev = context.severity_map.get(check_name)
                check_msg = report.main['description']
                source_line = \
                    get_line_from_file(
                        report.main['location']['file_name'], bug_line)
            else:
                # The report is of ReportData type coming from the
                # CodeChecker server.
                bug_line = report.line
                bug_col = report.column
                sev = ttypes.Severity._VALUES_TO_NAMES[report.severity]
                checked_file = report.checkedFile
                if bug_line is not None:
                    checked_file += ':' + str(bug_line) + \
                        ":" + str(bug_col)
                check_name = report.checkerId
                check_msg = report.checkerMsg

                if lines_in_files_requested:
                    source_line_contents = \
                        client.getLinesInSourceFileContents(
                            lines_in_files_requested,
                            ttypes.Encoding.BASE64)
                    source_line = base64.b64decode(
                        source_line_contents[report.fileId][bug_line])
            rows.append((sev, checked_file, check_msg, check_name,
                         source_line))

        if output_format == 'plaintext':
            for row in rows:
                print("[{0}] {1}: {2} [{3}]\n{4}\n".format(
                    row[0], row[1], row[2], row[3], row[4]))
        else:
            print(twodim_to_str(output_format, header, rows))

    def get_run_tag(client, run_ids, tag_name):
        """
        Returns run tag information for the given tag name in the given
        runs.
        """
        run_history_filter = ttypes.RunHistoryFilter()
        run_history_filter.tagNames = [tag_name]
        run_histories = client.getRunHistory(run_ids, None, None,
                                             run_history_filter)
        return run_histories[0] if run_histories else None

    def process_run_arg(run_arg_with_tag):
        """
        Process the argument and return the run ids, run names and run tag
        ids. The argument has the following format: <run_name>:<run_tag>
        """
        run_with_tag = run_arg_with_tag.split(':')
        run_name = run_with_tag[0]
        runs = get_runs(client, [run_name])

        # Materialize the ids and names into lists so they can be reused
        # and tested for emptiness (a 'map' object would be single-use and
        # always truthy in Python 3).
        run_ids = [run.runId for run in runs]
        run_names = [run.name for run in runs]

        if not run_ids:
            LOG.warning("No run names match the given pattern: %s",
                        run_arg_with_tag)
            sys.exit(1)

        # Set the base run tag if it is available.
        run_tag_name = run_with_tag[1] if len(run_with_tag) > 1 else None
        run_tags = None
        if run_tag_name:
            tag = get_run_tag(client, run_ids, run_tag_name)
            run_tags = [tag.id] if tag else None

        LOG.info("Matching runs: %s", ', '.join(run_names))

        return run_ids, run_names, run_tags

    def print_diff_results(reports):
        """ Print the results. """
        if reports:
            print_reports(client, reports, args.output_format)
        else:
            LOG.info("No results.")

    client = None

    # We set up the client only if we are not comparing two local report
    # directories.
    if not os.path.isdir(args.basename) or not os.path.isdir(args.newname):
        client = setup_client(args.product_url)

    if os.path.isdir(args.basename) and os.path.isdir(args.newname):
        reports = get_diff_local_dirs(args.basename, args.newname)
        print_diff_results(reports)
        LOG.info("Compared two local report directories %s and %s",
                 os.path.abspath(args.basename),
                 os.path.abspath(args.newname))
    elif os.path.isdir(args.newname):
        reports, base_run_names = \
            get_diff_remote_run_local_dir(client, args.basename,
                                          os.path.abspath(args.newname))
        print_diff_results(reports)
        LOG.info("Compared remote run(s) %s (matching: %s) and local "
                 "report directory %s",
                 args.basename, ', '.join(base_run_names),
                 os.path.abspath(args.newname))
    elif os.path.isdir(args.basename):
        reports, new_run_names = \
            get_diff_local_dir_remote_run(client,
                                          os.path.abspath(args.basename),
                                          args.newname)
        print_diff_results(reports)
        LOG.info("Compared local report directory %s and remote run(s) %s "
                 "(matching: %s).",
                 os.path.abspath(args.basename), args.newname,
                 ', '.join(new_run_names))
    else:
        reports, base_run_names, new_run_names = \
            get_diff_remote_runs(client, args.basename, args.newname)
        print_diff_results(reports)
        LOG.info("Compared multiple remote runs %s (matching: %s) and %s "
                 "(matching: %s)",
                 args.basename, ', '.join(base_run_names),
                 args.newname, ', '.join(new_run_names))
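
# get_diff_base_results() and get_diff_remote_runs() above page through
# getRunResults() with the same limit/offset loop. A hypothetical generator
# factoring out that pattern could look like this (the helper name and
# wiring are illustrative, not part of the source):
def iter_run_results(client, run_ids, report_filter, cmp_data=None):
    """ Yield run results page by page until the server returns an empty
    list. """
    limit = constants.MAX_QUERY_SIZE
    offset = 0
    sort_mode = [ttypes.SortMode(ttypes.SortType.FILENAME,
                                 ttypes.Order.ASC)]
    while True:
        results = client.getRunResults(run_ids, limit, offset, sort_mode,
                                       report_filter, cmp_data)
        if not results:
            break
        yield from results
        offset += limit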
def setup_class(cls):
    ctx = webserver_context.get_context()
    cls.__git_commit_urls = ctx.git_commit_urls