def run():
    """Parse CLI args, resolve pluggable components, and dispatch the command."""
    args = parse_args()
    configure_logging(args.logconf)

    cluster_config = config.get_cluster_config(
        args.cluster_type,
        args.cluster_name,
        args.discovery_base_path,
    )

    # Each pluggable component falls back to its default unless the user
    # supplied a custom import path. Note only the group parser is
    # instantiated here; the measurer and balancer are passed as classes.
    rg_parser = (
        dynamic_import(args.group_parser, ReplicationGroupParser)()
        if args.group_parser
        else DefaultReplicationGroupParser()
    )
    partition_measurer = (
        dynamic_import(args.partition_measurer, PartitionMeasurer)
        if args.partition_measurer
        else UniformPartitionMeasurer
    )
    cluster_balancer = (
        dynamic_import(args.cluster_balancer, ClusterBalancer)
        if args.cluster_balancer
        else PartitionCountBalancer
    )

    args.command(
        cluster_config,
        rg_parser,
        partition_measurer,
        cluster_balancer,
        args,
    )
def run():
    """Entry point: restart the cluster's brokers one at a time."""
    opts = parse_opts()
    logging.basicConfig(level=logging.DEBUG if opts.verbose else logging.WARN)

    cluster_config = config.get_cluster_config(
        opts.cluster_type,
        opts.cluster_name,
        opts.discovery_base_path,
    )
    brokers = get_broker_list(cluster_config)

    # A truthy return from validate_opts signals invalid options.
    if validate_opts(opts, len(brokers)):
        sys.exit(1)

    print_brokers(cluster_config, brokers[opts.skip:])
    if not (opts.no_confirm or ask_confirmation()):
        return
    print("Execute restart")
    try:
        execute_rolling_restart(
            brokers,
            opts.jolokia_port,
            opts.jolokia_prefix,
            opts.check_interval,
            opts.check_count,
            opts.unhealthy_time_limit,
            opts.skip,
            opts.verbose,
        )
    except WaitTimeoutException:
        print("ERROR: cluster is still unhealthy, exiting")
        sys.exit(1)
def run():
    """Entry point: rolling restart of the cluster's brokers."""
    opts = parse_opts()
    log_level = logging.DEBUG if opts.verbose else logging.WARN
    logging.basicConfig(level=log_level)

    cluster_config = config.get_cluster_config(
        opts.cluster_type,
        opts.cluster_name,
        opts.discovery_base_path,
    )
    brokers = get_broker_list(cluster_config)

    # validate_opts returns truthy on invalid options.
    if validate_opts(opts, len(brokers)):
        sys.exit(1)

    print_brokers(cluster_config, brokers[opts.skip:])
    if opts.no_confirm or ask_confirmation():
        print("Execute restart")
        try:
            execute_rolling_restart(
                brokers,
                opts.jolokia_port,
                opts.jolokia_prefix,
                opts.check_interval,
                opts.check_count,
                opts.unhealthy_time_limit,
                opts.skip,
                opts.verbose,
            )
        except WaitTimeoutException:
            print("ERROR: cluster is still unhealthy, exiting")
            sys.exit(1)
def run():
    """Validate arguments and launch the cluster check."""
    args = parse_args()
    logging.basicConfig(level=logging.DEBUG if args.verbose else logging.WARN)

    try:
        cluster = config.get_cluster_config(
            args.cluster_type,
            args.cluster_name,
            args.discovery_base_path,
        )
    except ConfigurationError as error:
        print(error, file=sys.stderr)
        sys.exit(1)

    if not validate_args(args):
        sys.exit(1)

    check_cluster(
        cluster,
        args.data_path,
        args.java_home,
        args.check_replicas,
        args.batch_size,
        args.minutes,
        args.start_time,
        args.end_time,
    )
def run():
    """Resolve the cluster config, validate args, then run the check."""
    args = parse_args()
    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.WARN)

    try:
        cluster = config.get_cluster_config(
            args.cluster_type,
            args.cluster_name,
            args.discovery_base_path,
        )
    except ConfigurationError as exc:
        # Bad/missing cluster configuration is fatal for this tool.
        print(exc, file=sys.stderr)
        sys.exit(1)

    if not validate_args(args):
        sys.exit(1)

    check_cluster(
        cluster,
        args.data_path,
        args.java_home,
        args.check_replicas,
        args.batch_size,
        args.minutes,
        args.start_time,
        args.end_time,
    )
def run():
    """Configure logging, resolve the cluster, and dispatch the subcommand."""
    logging.basicConfig(level=logging.ERROR)
    args = parse_args()
    try:
        cluster_conf = get_cluster_config(
            args.cluster_type,
            args.cluster_name,
            args.discovery_base_path,
        )
    except ConfigurationError as error:
        print(error, file=sys.stderr)
        sys.exit(1)
    else:
        args.command(args, cluster_conf)
def run():
    """CLI entry point: load the cluster config and run the chosen command."""
    logging.basicConfig(level=logging.ERROR)
    args = parse_args()
    try:
        cluster = get_cluster_config(
            args.cluster_type,
            args.cluster_name,
            args.discovery_base_path,
        )
    except ConfigurationError as exc:
        # Report configuration problems on stderr and fail fast.
        print(exc, file=sys.stderr)
        sys.exit(1)
    args.command(args, cluster)
def run():
    """Verify command-line arguments and run commands"""
    args = parse_args()
    logging.basicConfig(level=logging.WARN)
    # Silence the kafka client logger to prevent flooding for sensu-check.
    logging.getLogger('kafka').setLevel(logging.CRITICAL)

    def _resolve_broker_id():
        # Per-broker checks need a broker id; -1 means "discover it from
        # the local data path". Both failure modes terminate with WARNING.
        if args.broker_id is None:
            terminate(
                status_code.WARNING,
                prepare_terminate_message("broker_id is not specified"),
                args.json,
            )
        elif args.broker_id == -1:
            try:
                args.broker_id = get_broker_id(args.data_path)
            except Exception as exc:
                terminate(
                    status_code.WARNING,
                    prepare_terminate_message("{}".format(exc)),
                    args.json,
                )

    if args.controller_only and args.first_broker_only:
        terminate(
            status_code.WARNING,
            prepare_terminate_message(
                "Only one of controller_only and first_broker_only should be used",
            ),
            args.json,
        )

    if args.controller_only or args.first_broker_only:
        _resolve_broker_id()

    try:
        cluster_config = config.get_cluster_config(
            args.cluster_type,
            args.cluster_name,
            args.discovery_base_path,
        )
        code, msg = args.command(cluster_config, args)
    except ConfigurationError as exc:
        terminate(
            status_code.CRITICAL,
            prepare_terminate_message("ConfigurationError {0}".format(exc)),
            args.json,
        )
    # NOTE(review): reachable with code/msg unbound only if terminate()
    # returns after the handler above — presumably it exits the process.
    terminate(code, msg, args.json)
def run():
    """Set up logging, load the cluster config, and invoke the subcommand."""
    args = parse_args()
    configure_logging(args.logconf)
    cluster_config = config.get_cluster_config(
        args.cluster_type,
        args.cluster_name,
        args.discovery_base_path,
    )
    # Use the user-supplied replication-group parser when given,
    # otherwise fall back to the default implementation.
    rg_parser = (
        dynamic_import_group_parser(args.group_parser)
        if args.group_parser
        else DefaultReplicationGroupParser()
    )
    args.command(cluster_config, rg_parser, args)
def run():
    """Entry point: choose a replication-group parser and run the command."""
    args = parse_args()
    configure_logging(args.logconf)
    cluster_config = config.get_cluster_config(
        args.cluster_type,
        args.cluster_name,
        args.discovery_base_path,
    )
    if not args.group_parser:
        # No custom parser requested — use the default.
        group_parser = DefaultReplicationGroupParser()
    else:
        group_parser = dynamic_import_group_parser(args.group_parser)
    args.command(cluster_config, group_parser, args)
def run():
    """Rolling restart of brokers, optionally filtered and with pre/post tasks."""
    opts = parse_opts()
    logging.basicConfig(level=logging.DEBUG if opts.verbose else logging.WARN)

    cluster_config = config.get_cluster_config(
        opts.cluster_type,
        opts.cluster_name,
        opts.discovery_base_path,
    )
    brokers = get_broker_list(cluster_config)

    if opts.broker_ids:
        # Restrict the restart to an explicit subset of broker ids.
        known_ids = [broker_id for broker_id, _host in brokers]
        if not validate_broker_ids_subset(known_ids, opts.broker_ids):
            sys.exit(1)
        brokers = filter_broker_list(brokers, opts.broker_ids)

    if validate_opts(opts, len(brokers)):
        sys.exit(1)

    pre_stop_tasks, post_stop_tasks = [], []
    if opts.task:
        pre_stop_tasks, post_stop_tasks = get_task_class(opts.task, opts.task_args)

    print_brokers(cluster_config, brokers[opts.skip:])
    if not (opts.no_confirm or ask_confirmation()):
        return
    print("Execute restart")
    try:
        execute_rolling_restart(
            brokers,
            opts.jolokia_port,
            opts.jolokia_prefix,
            opts.check_interval,
            opts.check_count,
            opts.unhealthy_time_limit,
            opts.skip,
            opts.verbose,
            pre_stop_tasks,
            post_stop_tasks,
            opts.start_command,
            opts.stop_command,
            opts.ssh_password
        )
    except TaskFailedException:
        print("ERROR: pre/post tasks failed, exiting")
        sys.exit(1)
    except WaitTimeoutException:
        print("ERROR: cluster is still unhealthy, exiting")
        sys.exit(1)
def run():
    """Verify command-line arguments and run commands"""
    args = parse_args()
    try:
        cluster_config = config.get_cluster_config(
            args.cluster_type,
            args.cluster_name,
            args.discovery_base_path,
        )
        code, msg = args.command(cluster_config, args)
    except ConfigurationError as error:
        terminate(
            status_code.CRITICAL,
            "ConfigurationError {0}".format(error),
        )
    except Exception as error:
        # Broad catch: any unexpected failure is reported as CRITICAL
        # instead of leaking a raw traceback to the monitoring check.
        terminate(
            status_code.CRITICAL,
            "Got Exception: {0}".format(error),
        )
    # NOTE(review): reachable with code/msg unbound only if terminate()
    # returns after a handler above — presumably it exits the process.
    terminate(code, msg)
def run():
    """Initialise logging at the requested verbosity and run the subcommand."""
    logging.basicConfig(level=logging.ERROR)
    args = parse_args()

    # Flags escalate verbosity; when several are set, the most verbose wins.
    root_logger = logging.getLogger()
    if args.warn_verbose:
        root_logger.setLevel(logging.WARNING)
    if args.info_verbose:
        root_logger.setLevel(logging.INFO)
    if args.debug_verbose:
        root_logger.setLevel(logging.DEBUG)

    try:
        conf = get_cluster_config(
            args.cluster_type,
            args.cluster_name,
            args.discovery_base_path,
        )
    except ConfigurationError as error:
        print(error, file=sys.stderr)
        sys.exit(1)
    args.command(args, conf)
def run():
    """Show, apply, or clear replication throttles on all brokers.

    Exits with status 1 when option validation fails. With --read-only the
    current throttles are printed and nothing is changed.
    """
    opts = parse_opts()
    logging.basicConfig(level=logging.DEBUG if opts.verbose else logging.WARN)

    if not validate_opts(opts):
        sys.exit(1)

    cluster_config = config.get_cluster_config(
        opts.cluster_type,
        opts.cluster_name,
        opts.discovery_base_path,
    )

    with ZK(cluster_config) as zk:
        brokers = zk.get_brokers(names_only=True)
        print_throttles(zk, brokers)
        if opts.read_only:
            return
        # Fix: the old messages always claimed throttles were being
        # applied, even on the --clear path; branch the messages instead.
        if opts.clear:
            print("Clearing replication throttles")
            clear_throttles(zk, brokers)
            print("Replication throttles cleared.")
        else:
            print("Applying new replication throttles")
            apply_throttles(
                zk,
                brokers,
                opts.leader_throttle,
                opts.follower_throttle,
            )
            print("New replication throttles applied.")
        print_throttles(zk, brokers)
        if not opts.clear:
            print(
                "NOTE: Do not forget to --clear throttles once the reassignment plan completes."
            )
def run():
    """Verify command-line arguments and run commands"""
    args = parse_args()
    logging.basicConfig(level=logging.WARN)
    # Silence the kafka client logger to prevent flooding for sensu-check.
    logging.getLogger('kafka').setLevel(logging.CRITICAL)
    try:
        cluster_config = config.get_cluster_config(
            args.cluster_type,
            args.cluster_name,
            args.discovery_base_path,
        )
        code, msg = args.command(cluster_config, args)
    except ConfigurationError as error:
        terminate(
            status_code.CRITICAL,
            "ConfigurationError {0}".format(error),
        )
    # NOTE(review): reachable with code/msg unbound only if terminate()
    # returns after the handler above — presumably it exits the process.
    terminate(code, msg)
def run():
    """Set logging verbosity from flags, load the cluster, run the command."""
    logging.basicConfig(level=logging.ERROR)
    args = parse_args()

    # Most-verbose flag wins when more than one is supplied.
    for flag, level in (
        (args.warn_verbose, logging.WARNING),
        (args.info_verbose, logging.INFO),
        (args.debug_verbose, logging.DEBUG),
    ):
        if flag:
            logging.getLogger().setLevel(level)

    try:
        conf = get_cluster_config(
            args.cluster_type,
            args.cluster_name,
            args.discovery_base_path,
        )
    except ConfigurationError as exc:
        print(exc, file=sys.stderr)
        sys.exit(1)
    args.command(args, conf)
def run():
    """Verify command-line arguments and run commands"""
    args = parse_args()
    logging.basicConfig(level=logging.WARN)
    # Keep the kafka client quiet to prevent flooding for sensu-check.
    logging.getLogger('kafka').setLevel(logging.CRITICAL)
    try:
        cluster_config = config.get_cluster_config(
            args.cluster_type,
            args.cluster_name,
            args.discovery_base_path,
        )
        code, msg = args.command(cluster_config, args)
    except ConfigurationError as exc:
        terminate(status_code.CRITICAL, "ConfigurationError {0}".format(exc))
    # NOTE(review): on the exception path code/msg are unbound; this line
    # presumably is never reached because terminate() exits — confirm.
    terminate(code, msg)
def run():
    """Verify command-line arguments and run commands"""
    args = parse_args()
    logging.basicConfig(level=logging.DEBUG if args.verbose else logging.WARN)
    try:
        cluster_config = config.get_cluster_config(
            args.cluster_type,
            args.cluster_name,
            args.discovery_base_path,
        )
        code, msg = args.command(cluster_config, args)
    except ConfigurationError as error:
        terminate(
            status_code.CRITICAL,
            "ConfigurationError {0}".format(error),
        )
    # NOTE(review): on the exception path code/msg are unbound; this line
    # presumably is never reached because terminate() exits — confirm.
    terminate(code, msg)
def run():
    """Rolling restart of brokers via jmxproxy, with optional pre/post tasks."""
    opts = parse_opts()
    if opts.verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.WARN)

    cluster_config = config.get_cluster_config(
        opts.cluster_type,
        opts.cluster_name,
        opts.discovery_base_path,
    )
    brokers = get_broker_list(cluster_config)

    if opts.broker_ids:
        # Restrict the restart to an explicit subset of broker ids.
        all_ids = [broker_id for broker_id, _host in brokers]
        if not validate_broker_ids_subset(all_ids, opts.broker_ids):
            sys.exit(1)
        brokers = filter_broker_list(brokers, opts.broker_ids)

    if validate_opts(opts, len(brokers)):
        sys.exit(1)

    pre_stop_tasks, post_stop_tasks = [], []
    if opts.task:
        pre_stop_tasks, post_stop_tasks = get_task_class(
            opts.task,
            opts.task_args,
        )

    print_brokers(cluster_config, brokers[opts.skip:])
    if not (opts.no_confirm or ask_confirmation()):
        return
    print("Execute restart")
    try:
        execute_rolling_restart(
            brokers,
            opts.jmxproxy_port,
            opts.jmxproxy_prefix,
            opts.check_interval,
            opts.check_count,
            opts.unhealthy_time_limit,
            opts.skip,
            opts.verbose,
            pre_stop_tasks,
            post_stop_tasks,
            opts.start_command,
            opts.stop_command,
            opts.ssh_password,
        )
    except TaskFailedException:
        print("ERROR: pre/post tasks failed, exiting")
        sys.exit(1)
    except WaitTimeoutException:
        print("ERROR: cluster is still unhealthy, exiting")
        sys.exit(1)
def run():
    """Wire up pluggable components and hand control to the selected command."""
    args = parse_args()
    configure_logging(args.logconf)
    cluster_config = config.get_cluster_config(
        args.cluster_type,
        args.cluster_name,
        args.discovery_base_path,
    )

    def _load(path, base, default):
        # Import a user-supplied class path, or fall back to the default
        # class (returned uninstantiated, matching the original behavior).
        return dynamic_import(path, base) if path else default

    # Only the replication-group parser is instantiated here.
    if args.group_parser:
        rg_parser = dynamic_import(args.group_parser, ReplicationGroupParser)()
    else:
        rg_parser = DefaultReplicationGroupParser()

    partition_measurer = _load(
        args.partition_measurer,
        PartitionMeasurer,
        UniformPartitionMeasurer,
    )
    cluster_balancer = _load(
        args.cluster_balancer,
        ClusterBalancer,
        PartitionCountBalancer,
    )

    args.command(
        cluster_config,
        rg_parser,
        partition_measurer,
        cluster_balancer,
        args,
    )
def get_cluster_config():
    """Return the cluster configuration used by the acceptance tests."""
    cluster_type = 'test'
    cluster_name = 'test_cluster'
    discovery_base_path = 'tests/acceptance/config'
    return config.get_cluster_config(
        cluster_type,
        cluster_name,
        discovery_base_path,
    )
def get_cluster_config():
    """Load the fixed acceptance-test cluster configuration."""
    # Hard-coded fixture values: type, name, and discovery base path.
    return config.get_cluster_config(
        'test',
        'test_cluster',
        'tests/acceptance/config',
    )