def print_version(output_format=None):
    """
    Print web server version information in the given format.

    :param output_format: a twodim output format name (e.g. 'table', 'csv',
        'rows') or 'json'. Defaults to the twodim default when None.
    """
    context = webserver_context.get_context()

    server_versions = ['{0}.{1}'.format(major, minor)
                       for major, minor in
                       version.SUPPORTED_VERSIONS.items()]

    if output_format != 'json':
        # Human readable formats render the supported API versions as a
        # single comma separated cell; JSON keeps them as a list.
        server_versions = ', '.join(server_versions)

    rows = [("Base package version", context.version),
            ("Package build date", context.package_build_date),
            ("Git commit ID (hash)", context.package_git_hash),
            ("Git tag information", context.package_git_tag),
            ("Server supported API (Thrift)", server_versions),
            ("Client API (Thrift)", version.CLIENT_API)]

    if output_format == "json":
        # Use a special JSON format here, instead of
        # [ {"kind": "something", "version": "0.0.0"}, {"kind": "foo", ... } ]
        # do
        # { "something": "0.0.0", "foo": ... }
        print(json.dumps(dict(rows)))
    else:
        print(twodim.to_str(output_format, ["Kind", "Version"], rows))
def __print_guidelines(args: argparse.Namespace, cl: CheckerLabels):
    """
    Print guidelines according to the command line arguments to the standard
    output.
    """
    # 'custom' output is rendered by the 'rows' formatter.
    if args.output_format == 'custom':
        args.output_format = 'rows'

    # Map each guideline name to the set of rules occurring under it.
    result = {}

    for guideline in cl.get_description('guideline'):
        result[guideline] = set(cl.occurring_values(guideline))

    header = ['Guideline', 'Rules']
    if args.output_format in ['csv', 'json']:
        header = list(map(__uglify, header))

    if args.output_format == 'json':
        # JSON keeps the rules as a list; other formats join them to a string.
        # (sorted() accepts any iterable, no intermediate list needed.)
        rows = [(g, sorted(r)) for g, r in result.items()]
    else:
        rows = [(g, ', '.join(sorted(r))) for g, r in result.items()]

    if args.output_format == 'rows':
        for row in rows:
            print('Guideline: {}'.format(row[0]))
            print('Rules: {}'.format(row[1]))
    else:
        print(twodim.to_str(args.output_format, header, rows))
def handle_list_components(args):
    """
    List the source components stored on the server in the requested
    output format.
    """
    # Any machine readable output format gets the log lines on stderr so
    # they do not pollute the printed data.
    stream = 'stderr' if 'output_format' in args and \
        args.output_format != 'table' else None
    init_logger(args.verbose if 'verbose' in args else None, stream)

    client = setup_client(args.product_url)
    components = client.getSourceComponents(None)

    if args.output_format == 'json':
        print(CmdLineOutputEncoder().encode(components))
        return

    # plaintext, csv
    header = ['Name', 'Value', 'Description']
    rows = []
    for component in components:
        if component.name == GEN_OTHER_COMPONENT_NAME:
            component.value = ''

        # Multi-line component values become one table row per line; the
        # name and description are shown only next to the first line.
        for idx, value in enumerate(component.value.split('\n')):
            first_line = idx == 0
            rows.append((
                component.name if first_line else '',
                value,
                component.description
                if first_line and component.description else ''))

    print(twodim.to_str(args.output_format, header, rows))
def handle_list_tokens(args):
    """
    List personal access tokens of the currently logged in user.
    """
    # Any machine readable output format gets the log lines on stderr so
    # they do not pollute the printed data.
    stream = 'stderr' if 'output_format' in args and \
        args.output_format != 'table' else None
    init_logger(args.verbose if 'verbose' in args else None, stream)

    protocol, host, port = split_server_url(args.server_url)
    client = init_auth_client(protocol, host, port)
    tokens = client.getTokens()

    if args.output_format == 'json':
        print(CmdLineOutputEncoder().encode(tokens))
        return

    # plaintext, csv
    header = ['Token', 'Description', 'Last access']
    rows = [(token.token,
             token.description if token.description else '',
             token.lastAccess)
            for token in tokens]

    print(twodim.to_str(args.output_format, header, rows))
def print(self, output_format: str):
    """ Print analyzer version information in the given format. """
    if output_format == "json":
        # Note: the builtin print is reachable here, the method name only
        # shadows it as a class attribute.
        print(json.dumps(self.to_dict()))
        return

    LOG.info("CodeChecker analyzer version:")
    print(twodim.to_str(
        output_format, ["Kind", "Version"], self.to_list()))
def __print_checker_config(args: argparse.Namespace):
    """
    Print checker config options according to the command line arguments to
    the standard output. The set of config options comes from the analyzers.

    Exits with status 1 if any working analyzer fails to report its checker
    configuration options.
    """
    # 'custom' output is rendered by the 'rows' formatter.
    if args.output_format == 'custom':
        args.output_format = 'rows'

    context = analyzer_context.get_context()

    working_analyzers, errored = analyzer_types.check_supported_analyzers(
        args.analyzers, context)
    analyzer_types.check_available_analyzers(working_analyzers, errored)

    analyzer_environment = env.extend(context.path_env_extra,
                                      context.ld_lib_path_extra)

    analyzer_config_map = analyzer_types.build_config_handlers(
        args, context, working_analyzers)

    if 'details' in args:
        header = ['Option', 'Description']
    else:
        header = ['Option']

    # Machine readable formats get lowercase, underscore-separated headers.
    if args.output_format in ['csv', 'json']:
        header = list(map(__uglify, header))

    rows = []
    analyzer_failures = []
    for analyzer in working_analyzers:
        config_handler = analyzer_config_map.get(analyzer)
        analyzer_class = analyzer_types.supported_analyzers[analyzer]

        configs = analyzer_class.get_checker_config(config_handler,
                                                    analyzer_environment)
        if not configs:
            # An empty result presumably means the underlying analyzer
            # binary is too old to support this query — collected and
            # reported after the loop.
            analyzer_failures.append(analyzer)
            continue

        # Each option is prefixed with its analyzer name ("analyzer:option");
        # the description column is only emitted with --details.
        rows.extend((':'.join((analyzer, c[0])), c[1])
                    if 'details' in args
                    else (':'.join((analyzer, c[0])), )
                    for c in configs)

    if rows:
        print(twodim.to_str(args.output_format, header, rows))

    analyzer_types.print_unsupported_analyzers(errored)

    if analyzer_failures:
        LOG.error(
            "Failed to get checker configuration options for '%s' "
            "analyzer(s)! Please try to upgrade your analyzer "
            "version to use this feature.", ', '.join(analyzer_failures))
        sys.exit(1)
def __print_checkers(args: argparse.Namespace, cl: CheckerLabels):
    """
    Print checkers according to the command line arguments to the standard
    output.
    """
    # Collect the label filters coming from the --label/--profile/
    # --guideline/--severity flags; each entry is a "name:value" string.
    labels = [args.label] if 'label' in args else []

    if 'profile' in args:
        labels.append(f'profile:{args.profile}')

    if 'guideline' in args:
        labels.append(__guideline_to_label(args, cl))

    if 'severity' in args:
        labels.append(f'severity:{args.severity}')

    checker_info = __get_detailed_checker_info(args, cl)

    # Keep only the checkers matching every requested label, per analyzer.
    result = []
    for analyzer in args.analyzers:
        if labels:
            checkers = cl.checkers_by_labels(labels, analyzer)
            result.extend(
                filter(lambda x: x[1] in checkers, checker_info[analyzer]))
        else:
            result.extend(checker_info[analyzer])

    __post_process_result(result)

    # 'custom' and 'json' have dedicated renderers; both return early.
    if args.output_format == 'custom':
        if result:
            __print_checkers_custom_format(result)
        else:
            LOG.info('No checkers with the given label found.')
        return

    if args.output_format == 'json':
        __print_checkers_json_format(result, 'details' in args)
        return

    if 'details' in args:
        header = ['Status', 'Name', 'Analyzer', 'Description', 'Labels']
        rows = list(map(__format_row, result))
    else:
        header = ['Name']
        # result items presumably are tuples where index 1 is the checker
        # name — matches the filter above.
        rows = [[r[1]] for r in result]

    if args.output_format in ['csv', 'json']:
        header = list(map(__uglify, header))

    if rows:
        print(twodim.to_str(args.output_format, header, rows))
    else:
        LOG.info('No checkers with the given label found.')
def __instance_management(args): """Handles the instance-manager commands --list/--stop/--stop-all.""" # TODO: The server stopping and listing must be revised on its invocation # once "workspace", as a concept, is removed. # QUESTION: What is the bestest way here to identify a server for the user? if 'list' in args: instances = instance_manager.get_instances() instances_on_multiple_hosts = any(True for inst in instances if inst['hostname'] != socket.gethostname()) if not instances_on_multiple_hosts: head = ['Workspace', 'View port'] else: head = ['Workspace', 'Computer host', 'View port'] rows = [] for instance in instances: if not instances_on_multiple_hosts: rows.append((instance['workspace'], str(instance['port']))) else: rows.append((instance['workspace'], instance['hostname'] if instance['hostname'] != socket.gethostname() else '', str(instance['port']))) print("Your running CodeChecker servers:") print(twodim.to_str('table', head, rows)) elif 'stop' in args or 'stop_all' in args: for i in instance_manager.get_instances(): if i['hostname'] != socket.gethostname(): continue # A STOP only stops the server associated with the given workspace # and view-port. if 'stop' in args and \ not (i['port'] == args.view_port and os.path.abspath(i['workspace']) == os.path.abspath(args.config_directory)): continue try: kill_process_tree(i['pid']) LOG.info("Stopped CodeChecker server running on port %s " "in workspace %s (PID: %s)", i['port'], i['workspace'], i['pid']) except Exception: # Let the exception come out if the commands fail LOG.error("Couldn't stop process PID #%s", str(i['pid'])) raise
def __print_labels(args: argparse.Namespace, cl: CheckerLabels):
    """
    Print labels according to the command line arguments to the standard
    output.
    """
    # 'custom' output is rendered by the 'rows' formatter.
    if args.output_format == 'custom':
        args.output_format = 'rows'

    header = ['Label']
    if args.output_format in ['csv', 'json']:
        header = [__uglify(column) for column in header]

    rows = [(label, ) for label in cl.labels()]
    print(twodim.to_str(args.output_format, header, rows))
def __print_severities(args: argparse.Namespace, cl: CheckerLabels):
    """
    Print checker severities according to the command line arguments to the
    standard output.
    """
    # 'custom' output is rendered by the 'rows' formatter.
    if args.output_format == 'custom':
        args.output_format = 'rows'

    severities = cl.get_description('severity')

    if 'details' in args:
        header = ['Severity', 'Description']
        rows = severities.items()
    else:
        header = ['Severity']
        rows = [(severity, ) for severity in severities]

    if args.output_format in ['csv', 'json']:
        header = [__uglify(column) for column in header]

    print(twodim.to_str(args.output_format, header, rows))
def print_version(output_format=None):
    """ Print analyzer version information in the given format. """
    context = analyzer_context.get_context()

    rows = [
        ("Base package version", context.version),
        ("Package build date", context.package_build_date),
        ("Git commit ID (hash)", context.package_git_hash),
        ("Git tag information", context.package_git_tag)]

    if output_format != "json":
        print(twodim.to_str(output_format, ["Kind", "Version"], rows))
        return

    # Use a special JSON format here, instead of
    # [ {"kind": "something", "version": "0.0.0"}, {"kind": "foo", ... } ]
    # do
    # { "something": "0.0.0", "foo": ... }
    print(json.dumps(dict(rows)))
def __print_label_values(args: argparse.Namespace, cl: CheckerLabels):
    """
    If --label flag is given an argument which doesn't contain a colon (:)
    then we assume that the user intends to see the available values of that
    label:

    CodeChecker checkers --label severity
    """
    # 'custom' output is rendered by the 'rows' formatter.
    if args.output_format == 'custom':
        args.output_format = 'rows'

    header = ['Value']

    rows = [(value, ) for value in cl.occurring_values(args.label)]

    if rows:
        print(twodim.to_str(args.output_format, header, rows))
    else:
        LOG.info(
            'Label "%s" doesn\'t exist. Use "CodeChecker checkers --label" '
            'command to list available labels.', args.label)
def handle_list_products(args):
    """
    List the products configured on the server in the requested output
    format.
    """
    # Any machine readable output format gets the log lines on stderr so
    # they do not pollute the printed data.
    stream = 'stderr' if 'output_format' in args and \
        args.output_format != 'table' else None
    init_logger(args.verbose if 'verbose' in args else None, stream)

    protocol, host, port = split_server_url(args.server_url)
    client = setup_product_client(protocol, host, port)
    products = client.getProducts(None, None)

    if args.output_format == 'json':
        results = [{product.endpoint: product} for product in products]
        print(CmdLineOutputEncoder().encode(results))
        return

    # plaintext, csv
    header = ['Database status', 'Endpoint', 'Name', 'Description']
    rows = []
    for product in products:
        name = convert.from_b64(product.displayedName_b64) \
            if product.displayedName_b64 else ''
        description = convert.from_b64(product.description_b64) \
            if product.description_b64 else ''

        if product.accessible:
            db_status_msg = database_status.db_status_msg.get(
                product.databaseStatus, 'Unknown database status')
        else:
            db_status_msg = 'No access.'

        rows.append((db_status_msg, product.endpoint, name, description))

    print(twodim.to_str(args.output_format, header, rows))
def print_prod_status(prod_status):
    """ Print the database statuses for each of the products. """
    header = ['Product endpoint', 'Database status', 'Database location',
              'Schema version in the database',
              'Schema version in the package']

    rows = []
    for endpoint, (db_status, schema_ver, package_ver, db_location) \
            in prod_status.items():
        status_msg = database_status.db_status_msg.get(db_status)
        if schema_ver == package_ver:
            # Mark products whose database schema matches the package schema.
            schema_ver += " (up to date)"
        rows.append(
            [endpoint, status_msg, db_location, str(schema_ver), package_ver])

    table = twodim.to_str('table', header, rows, sort_by_column_number=0)
    LOG.info('Status of products:\n%s', table)
def main(args):
    """
    Store the defect results in the specified input list as bug reports in
    the database.
    """
    logger.setup_logger(args.verbose if 'verbose' in args else None)

    try:
        cmd_config.check_config_file(args)
    except FileNotFoundError as fnerr:
        LOG.error(fnerr)
        sys.exit(1)

    if not host_check.check_zlib():
        raise Exception("zlib is not available on the system!")

    # To ensure the help message prints the default folder properly,
    # the 'default' for 'args.input' is a string, not a list.
    # But we need lists for the foreach here to work.
    if isinstance(args.input, str):
        args.input = [args.input]

    args.input = [os.path.abspath(i) for i in args.input]

    for input_path in args.input:
        if not os.path.exists(input_path):
            LOG.error("Input path '%s' does not exist!", input_path)
            sys.exit(1)

    if 'name' not in args:
        LOG.debug("Generating name for analysis...")
        generated = __get_run_name(args.input)
        if generated:
            setattr(args, 'name', generated)
        else:
            LOG.error("No suitable name was found in the inputs for the "
                      "analysis run. Please specify one by passing argument "
                      "--name run_name in the invocation.")
            sys.exit(2)  # argparse returns error code 2 for bad invocations.

    LOG.info("Storing analysis results for run '%s'", args.name)

    if 'force' in args:
        LOG.info(
            "argument --force was specified: the run with name '%s' will "
            "be deleted", args.name)

    # Setup connection to the remote server.
    client = libclient.setup_client(args.product_url)

    zip_file_handle, zip_file = tempfile.mkstemp('.zip')
    LOG.debug("Will write mass store ZIP to '%s'...", zip_file)

    try:
        context = webserver_context.get_context()

        LOG.debug("Assembling zip file.")
        try:
            assemble_zip(args.input, zip_file, client,
                         context.checker_labels)
        except Exception as ex:
            # Log the failure and the offending exception; the stray
            # print()/traceback debug calls were removed in favor of the
            # logger.
            LOG.error("Failed to assemble zip file: %s", ex)
            sys.exit(1)

        zip_size = os.stat(zip_file).st_size
        if zip_size > MAX_UPLOAD_SIZE:
            LOG.error("The result list to upload is too big (max: %s): %s.",
                      sizeof_fmt(MAX_UPLOAD_SIZE), sizeof_fmt(zip_size))
            sys.exit(1)

        b64zip = ""
        with open(zip_file, 'rb') as zf:
            b64zip = base64.b64encode(zf.read()).decode("utf-8")

        if len(b64zip) == 0:
            LOG.info("Zip content is empty, nothing to store!")
            sys.exit(1)

        trim_path_prefixes = args.trim_path_prefix if \
            'trim_path_prefix' in args else None

        description = args.description if 'description' in args else None

        LOG.info("Storing results to the server...")
        client.massStoreRun(args.name,
                            args.tag if 'tag' in args else None,
                            str(context.version),
                            b64zip,
                            'force' in args,
                            trim_path_prefixes,
                            description)

        # Storing analysis statistics if the server allows them.
        if client.allowsStoringAnalysisStatistics():
            storing_analysis_statistics(client, args.input, args.name)

        LOG.info("Storage finished successfully.")
    except RequestFailed as reqfail:
        if reqfail.errorCode == ErrorCode.SOURCE_FILE:
            header = ['File', 'Line', 'Checker name']
            table = twodim.to_str(
                'table', header, [c.split('|') for c in reqfail.extraInfo])
            LOG.warning(
                "Setting the review statuses for some reports failed "
                "because of non valid source code comments: "
                "%s\n %s", reqfail.message, table)
        sys.exit(1)
    except Exception as ex:
        # A storage failure is an error, not an informational event.
        LOG.error("Storage failed: %s", str(ex))
        sys.exit(1)
    finally:
        # Always remove the temporary ZIP, even when storage failed.
        os.close(zip_file_handle)
        os.remove(zip_file)
def main(args):
    """
    List the analyzers' basic information supported by CodeChecker.
    """
    # If the given output format is not 'table', redirect logger's output to
    # the stderr.
    stream = None
    if 'output_format' in args and args.output_format != 'table':
        stream = 'stderr'
    logger.setup_logger(args.verbose if 'verbose' in args else None, stream)

    context = analyzer_context.get_context()
    working_analyzers, errored = \
        analyzer_types.check_supported_analyzers(
            analyzer_types.supported_analyzers,
            context)

    if args.dump_config:
        binary = context.analyzer_binaries.get(args.dump_config)

        if args.dump_config == 'clang-tidy':
            subprocess.call([binary, '-dump-config', '-checks=*'],
                            encoding="utf-8", errors="ignore")
        elif args.dump_config == 'clangsa':
            ret = subprocess.call([binary,
                                   '-cc1',
                                   '-analyzer-checker-option-help',
                                   '-analyzer-checker-option-help-alpha'],
                                  stderr=subprocess.PIPE,
                                  encoding="utf-8", errors="ignore")

            if ret:
                # This flag is supported from Clang 9.
                LOG.warning("'--dump-config clangsa' is not supported yet. "
                            "Please make sure that you are using Clang 9 or "
                            "newer.")

        # --dump-config handled; nothing else to list.
        return

    analyzer_environment = env.extend(context.path_env_extra,
                                      context.ld_lib_path_extra)
    analyzer_config_map = analyzer_types.build_config_handlers(
        args, context, working_analyzers)

    def uglify(text):
        """
        csv and json format output contain this non human readable header
        string: no CamelCase and no space.
        """
        return text.lower().replace(' ', '_')

    if 'analyzer_config' in args:
        # List the analyzer-level (not per-checker) config options of one
        # analyzer, then return.
        if 'details' in args:
            header = ['Option', 'Description']
        else:
            header = ['Option']

        if args.output_format in ['csv', 'json']:
            header = list(map(uglify, header))

        analyzer = args.analyzer_config
        config_handler = analyzer_config_map.get(analyzer)
        analyzer_class = analyzer_types.supported_analyzers[analyzer]

        configs = analyzer_class.get_analyzer_config(config_handler,
                                                     analyzer_environment)
        if not configs:
            LOG.error("Failed to get analyzer configuration options for '%s' "
                      "analyzer! Please try to upgrade your analyzer version "
                      "to use this feature.", analyzer)
            sys.exit(1)

        # Options are prefixed with the analyzer name ("analyzer:option").
        rows = [(':'.join((analyzer, c[0])), c[1]) if 'details' in args
                else (':'.join((analyzer, c[0])),) for c in configs]

        print(twodim.to_str(args.output_format, header, rows))
        return

    if 'details' in args:
        header = ['Name', 'Path', 'Version']
    else:
        header = ['Name']

    if args.output_format in ['csv', 'json']:
        header = list(map(uglify, header))

    rows = []
    for analyzer in working_analyzers:
        if 'details' not in args:
            rows.append([analyzer])
        else:
            # Query each binary for its version string; any call failure is
            # reported as 'ERROR' rather than aborting the listing.
            binary = context.analyzer_binaries.get(analyzer)
            try:
                version = subprocess.check_output(
                    [binary, '--version'], encoding="utf-8", errors="ignore")
            except (subprocess.CalledProcessError, OSError):
                version = 'ERROR'

            rows.append([analyzer, binary, version])

    if 'all' in args:
        # With --all, also list the analyzers that failed the support check,
        # with the failure reason in the version column.
        for analyzer, err_reason in errored:
            if 'details' not in args:
                rows.append([analyzer])
            else:
                rows.append([analyzer,
                             context.analyzer_binaries.get(analyzer),
                             err_reason])

    if rows:
        print(twodim.to_str(args.output_format, header, rows))