def rolling_update(args):
  if not args.job:
    Log.print_critical(
        "You must specify the job name to do rolling update")
  _get_fds_service_config(args)
  job_name = args.job[0]

  if not args.skip_confirm:
    deploy_utils.confirm_action(args, "rolling_update")

  Log.print_info("Rolling updating %s" % job_name)
  hosts = args.fds_config.jobs[job_name].hosts
  wait_time = 0

  args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)
  for host_id in args.task_map.keys() or hosts.iterkeys():
    for instance_id in args.task_map.get(host_id) or range(
        hosts[host_id].instance_num):
      instance_id = -1 if not deploy_utils.is_multiple_instances(
          host_id, hosts) else instance_id
      deploy_utils.confirm_rolling_update(host_id, instance_id, wait_time)
      stop_job(args, hosts[host_id].ip, job_name, instance_id)
      deploy_utils.wait_for_job_stopping("fds",
          args.fds_config.cluster.name, job_name, hosts[host_id].ip,
          instance_id)
      start_job(args, hosts[host_id].ip, job_name, host_id, instance_id)
      deploy_utils.wait_for_job_starting("fds",
          args.fds_config.cluster.name, job_name, hosts[host_id].ip,
          instance_id)
      wait_time = args.time_interval
  Log.print_success("Rolling updating %s success" % job_name)

def rolling_update(args):
  if not args.job:
    Log.print_critical(
        "You must specify the job name to do rolling update")
  get_yarn_service_config(args)
  job_name = args.job[0]

  if not args.skip_confirm:
    deploy_utils.confirm_action(args, "rolling_update")

  Log.print_info("Rolling updating %s" % job_name)
  hosts = args.yarn_config.jobs[job_name].hosts
  wait_time = 0
  for id in hosts.iterkeys():
    deploy_utils.confirm_rolling_update(id, wait_time)
    stop_job(args, hosts[id], job_name)
    deploy_utils.wait_for_job_stopping("yarn",
        args.yarn_config.cluster.name, job_name, hosts[id])
    start_job(args, hosts[id], job_name)
    deploy_utils.wait_for_job_starting("yarn",
        args.yarn_config.cluster.name, job_name, hosts[id])
    wait_time = args.time_interval
  Log.print_success("Rolling updating %s success" % job_name)

def rolling_update(args):
  if not args.job:
    Log.print_critical(
        "You must specify the job name to do rolling update")
  get_hdfs_service_config(args)
  job_name = args.job[0]

  if not args.skip_confirm:
    deploy_utils.confirm_action(args, "rolling_update")

  Log.print_info("Rolling updating %s" % job_name)
  hosts = args.hdfs_config.jobs[job_name].hosts
  wait_time = 0

  args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)
  for host_id in args.task_map.keys() or hosts.iterkeys():
    for instance_id in args.task_map.get(host_id) or range(
        hosts[host_id].instance_num):
      instance_id = -1 if not deploy_utils.is_multiple_instances(
          host_id, hosts) else instance_id
      deploy_utils.confirm_rolling_update(host_id, instance_id, wait_time)
      stop_job(args, hosts[host_id].ip, job_name, instance_id)
      deploy_utils.wait_for_job_stopping("hdfs",
          args.hdfs_config.cluster.name, job_name, hosts[host_id].ip,
          instance_id)
      start_job(args, hosts[host_id].ip, job_name, host_id, instance_id)
      deploy_utils.wait_for_job_starting("hdfs",
          args.hdfs_config.cluster.name, job_name, hosts[host_id].ip,
          instance_id)
      wait_time = args.time_interval
  Log.print_success("Rolling updating %s success" % job_name)

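# Note: a quick standalone illustration (not part of the deploy scripts) of the
# `args.task_map.keys() or hosts.iterkeys()` fallback used above. When no
# host/task filter was parsed, task_map is empty and therefore falsy, so the
# loop falls back to iterating over every configured host; the same `x or y`
# idiom picks range(instance_num) when no specific instances were requested.
# The dict values below are made-up sample data.
task_map = {}                              # no hosts/tasks given on the command line
hosts = {0: "10.0.0.1", 1: "10.0.0.2"}     # host_id -> host, hypothetical values

for host_id in list(task_map.keys()) or list(hosts.keys()):
    print(host_id)                         # prints 0 and 1: falls back to all hosts
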
def rolling_update(args):
  if not args.job:
    Log.print_critical(
        "You must specify the job name to do rolling update")

  if not args.skip_confirm:
    deploy_utils.confirm_action(args, "rolling_update")

  get_hbase_service_config(args)
  job_name = args.job[0]

  if job_name != 'regionserver':
    args.vacate_rs = False

  if args.vacate_rs:
    balance_switch(args, False)

  Log.print_info("Rolling updating %s" % job_name)
  hosts = args.hbase_config.jobs[job_name].hosts
  wait_time = 0

  args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)
  for host_id in args.task_map.keys() or hosts.iterkeys():
    for instance_id in args.task_map.get(host_id) or range(
        hosts[host_id].instance_num):
      instance_id = -1 if not deploy_utils.is_multiple_instances(
          host_id, hosts) else instance_id
      if not args.skip_confirm:
        deploy_utils.confirm_rolling_update(host_id, instance_id, wait_time)

      port = deploy_utils.get_base_port(
          args.hbase_config.jobs[job_name].base_port, instance_id)
      if args.vacate_rs:
        vacate_region_server(args, hosts[host_id].ip, port)

      stop_job(args, hosts[host_id].ip, job_name, instance_id)
      deploy_utils.wait_for_job_stopping("hbase",
          args.hbase_config.cluster.name, job_name, hosts[host_id].ip,
          instance_id)
      start_job(args, hosts[host_id].ip, job_name, host_id, instance_id)
      deploy_utils.wait_for_job_starting("hbase",
          args.hbase_config.cluster.name, job_name, hosts[host_id].ip,
          instance_id)

      if args.vacate_rs:
        recover_region_server(args, hosts[host_id].ip, port)
      wait_time = args.time_interval

  if args.vacate_rs:
    balance_switch(args, True)
  Log.print_success("Rolling updating %s success" % job_name)

def rolling_update(args):
  get_zk_service_config(args)
  job_name = "zookeeper"

  if not args.skip_confirm:
    deploy_utils.confirm_action(args, "rolling_update")

  Log.print_info("Rolling updating %s" % job_name)
  hosts = args.zookeeper_config.jobs[job_name].hosts
  wait_time = 0
  for id in hosts.iterkeys():
    deploy_utils.confirm_rolling_update(id, wait_time)
    stop_job(args, hosts[id], job_name)
    deploy_utils.wait_for_job_stopping("zookeeper",
        args.zookeeper_config.cluster.name, job_name, hosts[id])
    start_job(args, hosts[id], job_name)
    deploy_utils.wait_for_job_starting("zookeeper",
        args.zookeeper_config.cluster.name, job_name, hosts[id])
    wait_time = args.time_interval
  Log.print_success("Rolling updating %s success" % job_name)

def rolling_update(args):
  if not args.job:
    Log.print_critical(
        "You must specify the job name to do rolling update")

  if not args.skip_confirm:
    deploy_utils.confirm_action(args, "rolling_update")

  get_hbase_service_config(args)
  job_name = args.job[0]

  if job_name != 'regionserver':
    args.vacate_rs = False

  if args.vacate_rs:
    balance_switch(args, False)

  Log.print_info("Rolling updating %s" % job_name)
  hosts = args.hbase_config.jobs[job_name].hosts
  wait_time = 0
  for id in hosts.iterkeys():
    if not args.skip_confirm:
      deploy_utils.confirm_rolling_update(id, wait_time)

    if args.vacate_rs:
      vacate_region_server(args, hosts[id])

    stop_job(args, hosts[id], job_name)
    deploy_utils.wait_for_job_stopping("hbase",
        args.hbase_config.cluster.name, job_name, hosts[id])
    start_job(args, hosts[id], job_name)
    deploy_utils.wait_for_job_starting("hbase",
        args.hbase_config.cluster.name, job_name, hosts[id])

    if args.vacate_rs:
      recover_region_server(args, hosts[id])
    wait_time = args.time_interval

  if args.vacate_rs:
    balance_switch(args, True)
  Log.print_success("Rolling updating %s success" % job_name)

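# The service-specific functions above all share the same per-host loop:
# confirm, stop the job, wait for it to stop, start it, wait for it to start,
# then apply args.time_interval before moving on to the next host. A minimal
# sketch of that shared shape, assuming the service-specific steps are passed
# in as callables (rolling_update_hosts, stop_one, start_one, and confirm are
# hypothetical names, not part of deploy_utils):
import time

def rolling_update_hosts(hosts, stop_one, start_one, time_interval, confirm=None):
  # Restart each host in sequence; wait_time starts at 0 so the first host is
  # handled immediately, and later hosts are separated by time_interval seconds.
  wait_time = 0
  for host_id, host in hosts.items():
    if confirm:
      confirm(host_id, wait_time)    # interactive confirmation hook
    else:
      time.sleep(wait_time)          # otherwise just pause between hosts
    stop_one(host)                   # e.g. stop_job + wait_for_job_stopping
    start_one(host)                  # e.g. start_job + wait_for_job_starting
    wait_time = time_interval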