def restart(args):
  if not args.skip_confirm:
    deploy_utils.confirm_restart(args)
  _get_fds_service_config(args)

  # First pass: stop every selected job instance.
  for job_name in args.job or ALL_JOBS:
    hosts = args.fds_config.jobs[job_name].hosts
    args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)
    for host_id in args.task_map.keys() or hosts.keys():
      for instance_id in args.task_map.get(host_id) or range(
          hosts[host_id].instance_num):
        # Hosts running a single instance use -1 as the instance id.
        instance_id = -1 if not deploy_utils.is_multiple_instances(
            host_id, hosts) else instance_id
        stop_job(args, hosts[host_id].ip, job_name, instance_id)

  # Second pass: wait for each instance to stop, then start it again.
  for job_name in args.job or ALL_JOBS:
    hosts = args.fds_config.jobs[job_name].hosts
    args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)
    for host_id in args.task_map.keys() or hosts.keys():
      for instance_id in args.task_map.get(host_id) or range(
          hosts[host_id].instance_num):
        instance_id = -1 if not deploy_utils.is_multiple_instances(
            host_id, hosts) else instance_id
        deploy_utils.wait_for_job_stopping("fds",
            args.fds_config.cluster.name, job_name, hosts[host_id].ip,
            instance_id)
        start_job(args, hosts[host_id].ip, job_name, host_id, instance_id)

def restart(args):
  if not args.skip_confirm:
    deploy_utils.confirm_restart(args)
  get_impala_service_config(args)

  # Stop the selected jobs using parallel deploy threads.
  for job_name in args.job or ALL_JOBS:
    hosts = args.impala_config.jobs[job_name].hosts
    task_list = deploy_utils.schedule_task_for_threads(args, hosts,
        job_name, "stop")
    parallel_deploy.start_deploy_threads(stop_job, task_list)

  # Start them again, also using parallel deploy threads.
  for job_name in args.job or ALL_JOBS:
    hosts = args.impala_config.jobs[job_name].hosts
    task_list = deploy_utils.schedule_task_for_threads(args, hosts,
        job_name, "start", is_wait=True)
    parallel_deploy.start_deploy_threads(start_job, task_list)

def restart(args):
  if not args.skip_confirm:
    deploy_utils.confirm_restart(args)
  get_zk_service_config(args)

  hosts = args.zookeeper_config.jobs["zookeeper"].hosts
  if args.host is not None:
    args.task = deploy_utils.get_task_by_hostname(hosts, args.host)

  # Stop every selected zookeeper task first.
  for id in args.task or hosts.iterkeys():
    stop_job(args, hosts[id], "zookeeper")

  # Wait for each task to stop, then start it again.
  for id in args.task or hosts.iterkeys():
    deploy_utils.wait_for_job_stopping("zookeeper",
        args.zookeeper_config.cluster.name, "zookeeper", hosts[id])
    start_job(args, hosts[id], "zookeeper")

def restart(args):
  if not args.skip_confirm:
    deploy_utils.confirm_restart(args)
  get_yarn_service_config(args)

  for job_name in args.job or ALL_JOBS:
    hosts = args.yarn_config.jobs[job_name].hosts
    task_list = deploy_utils.schedule_task_for_threads(args, hosts,
        job_name, 'stop')
    parallel_deploy.start_deploy_threads(stop_job, task_list)

  for job_name in args.job or ALL_JOBS:
    hosts = args.yarn_config.jobs[job_name].hosts
    task_list = deploy_utils.schedule_task_for_threads(args, hosts,
        job_name, 'start', is_wait=True)
    parallel_deploy.start_deploy_threads(start_job, task_list)

def restart(args):
  if not args.skip_confirm:
    deploy_utils.confirm_restart(args)
  get_hbase_service_config(args)

  # Stop jobs in the reverse of the order used to start them.
  for job_name in args.job or reversed(ALL_JOBS):
    hosts = args.hbase_config.jobs[job_name].hosts
    task_list = deploy_utils.schedule_task_for_threads(args, hosts,
        job_name, 'stop')
    parallel_deploy.start_deploy_threads(stop_job, task_list)

  for job_name in args.job or ALL_JOBS:
    hosts = args.hbase_config.jobs[job_name].hosts
    task_list = deploy_utils.schedule_task_for_threads(args, hosts,
        job_name, 'start', is_wait=True)
    parallel_deploy.start_deploy_threads(start_job, task_list)

def restart(args): if not args.skip_confirm: deploy_utils.confirm_restart(args) get_zk_service_config(args) hosts = args.zookeeper_config.jobs["zookeeper"].hosts args.task_map = deploy_utils.parse_args_host_and_task(args, hosts) for host_id in args.task_map.keys() or hosts.keys(): for instance_id in args.task_map.get(host_id) or range(hosts[host_id].instance_num): instance_id = -1 if not deploy_utils.is_multiple_instances(host_id, hosts) else instance_id stop_job(args, hosts[host_id].ip, "zookeeper", instance_id) for host_id in args.task_map.keys() or hosts.keys(): for instance_id in args.task_map.get(host_id) or range(hosts[host_id].instance_num): instance_id = -1 if not deploy_utils.is_multiple_instances(host_id, hosts) else instance_id deploy_utils.wait_for_job_stopping("zookeeper", args.zookeeper_config.cluster.name, "zookeeper", hosts[host_id].ip, instance_id) start_job(args, hosts[host_id].ip, "zookeeper", host_id, instance_id)
def restart(args):
  if not args.skip_confirm:
    deploy_utils.confirm_restart(args)
  get_hbase_service_config(args)

  # Stop jobs in the reverse of the order used to start them.
  for job_name in args.job or reversed(ALL_JOBS):
    hosts = args.hbase_config.jobs[job_name].hosts
    if args.host is not None:
      args.task = deploy_utils.get_task_by_hostname(hosts, args.host)
    for id in args.task or hosts.iterkeys():
      stop_job(args, hosts[id], job_name)

  # Wait for each job to stop, then start it again.
  for job_name in args.job or ALL_JOBS:
    hosts = args.hbase_config.jobs[job_name].hosts
    for id in args.task or hosts.iterkeys():
      deploy_utils.wait_for_job_stopping("hbase",
          args.hbase_config.cluster.name, job_name, hosts[id])
      start_job(args, hosts[id], job_name)

def restart(args):
  if not args.skip_confirm:
    deploy_utils.confirm_restart(args)
  get_impala_service_config(args)

  for job_name in args.job or ALL_JOBS:
    hosts = args.impala_config.jobs[job_name].hosts
    if args.host is not None:
      args.task = deploy_utils.get_task_by_hostname(hosts, args.host)
    for id in args.task or hosts.iterkeys():
      stop_job(args, hosts[id], job_name)

  for job_name in args.job or ALL_JOBS:
    hosts = args.impala_config.jobs[job_name].hosts
    for id in args.task or hosts.iterkeys():
      deploy_utils.wait_for_job_stopping("impala",
          args.impala_config.cluster.name, job_name, hosts[id])
      start_job(args, hosts[id], job_name)

def restart(args):
  if not args.skip_confirm:
    deploy_utils.confirm_restart(args)
  get_hdfs_service_config(args)

  for job_name in args.job or ALL_JOBS:
    hosts = args.hdfs_config.jobs[job_name].hosts
    args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)
    for host_id in args.task_map.keys() or hosts.keys():
      for instance_id in args.task_map.get(host_id) or range(
          hosts[host_id].instance_num):
        instance_id = -1 if not deploy_utils.is_multiple_instances(
            host_id, hosts) else instance_id
        stop_job(args, hosts[host_id].ip, job_name, instance_id)

  for job_name in args.job or ALL_JOBS:
    hosts = args.hdfs_config.jobs[job_name].hosts
    args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)
    for host_id in args.task_map.keys() or hosts.keys():
      for instance_id in args.task_map.get(host_id) or range(
          hosts[host_id].instance_num):
        instance_id = -1 if not deploy_utils.is_multiple_instances(
            host_id, hosts) else instance_id
        deploy_utils.wait_for_job_stopping("hdfs",
            args.hdfs_config.cluster.name, job_name, hosts[host_id].ip,
            instance_id)
        start_job(args, hosts[host_id].ip, job_name, host_id, instance_id)