def restart(args):
    """Restart the selected Impala jobs.

    Runs a full stop pass over every selected job, then a start pass;
    the start pass schedules with is_wait=True.
    """
    if not args.skip_confirm:
        deploy_utils.confirm_restart(args)
    get_impala_service_config(args)

    selected = args.job or ALL_JOBS

    # Pass 1: stop every selected job.
    for name in selected:
        hosts = args.impala_config.jobs[name].hosts
        tasks = deploy_utils.schedule_task_for_threads(
            args, hosts, name, "stop")
        parallel_deploy.start_deploy_threads(stop_job, tasks)

    # Pass 2: start them back up, waiting on each job.
    for name in selected:
        hosts = args.impala_config.jobs[name].hosts
        tasks = deploy_utils.schedule_task_for_threads(
            args, hosts, name, "start", is_wait=True)
        parallel_deploy.start_deploy_threads(start_job, tasks)
def restart(args):
    """Restart the selected YARN jobs: one stop pass, then one start pass."""
    if not args.skip_confirm:
        deploy_utils.confirm_restart(args)
    get_yarn_service_config(args)

    def run_pass(action, worker, **extra):
        # Schedule `action` for every selected job and run the tasks threaded.
        for job_name in args.job or ALL_JOBS:
            hosts = args.yarn_config.jobs[job_name].hosts
            tasks = deploy_utils.schedule_task_for_threads(
                args, hosts, job_name, action, **extra)
            parallel_deploy.start_deploy_threads(worker, tasks)

    run_pass('stop', stop_job)
    # The start pass waits for each job to come up.
    run_pass('start', start_job, is_wait=True)
def restart(args):
    """Restart the selected HBase jobs.

    When no explicit job list is given, jobs are stopped in reverse
    ALL_JOBS order and started again in forward order.
    """
    if not args.skip_confirm:
        deploy_utils.confirm_restart(args)
    get_hbase_service_config(args)

    # Stop pass: reverse of the canonical job order by default.
    for name in args.job or reversed(ALL_JOBS):
        hosts = args.hbase_config.jobs[name].hosts
        parallel_deploy.start_deploy_threads(
            stop_job,
            deploy_utils.schedule_task_for_threads(args, hosts, name, 'stop'))

    # Start pass: forward order, waiting for each job.
    for name in args.job or ALL_JOBS:
        hosts = args.hbase_config.jobs[name].hosts
        parallel_deploy.start_deploy_threads(
            start_job,
            deploy_utils.schedule_task_for_threads(
                args, hosts, name, 'start', is_wait=True))
def show(args):
    """Run the 'show' action for every selected YARN job."""
    get_yarn_service_config(args)
    for name in args.job or ALL_JOBS:
        members = args.yarn_config.jobs[name].hosts
        batch = deploy_utils.schedule_task_for_threads(
            args, members, name, 'show')
        parallel_deploy.start_deploy_threads(show_job, batch)
def stop(args):
    """Stop the selected YARN jobs, confirming first unless --skip_confirm."""
    if not args.skip_confirm:
        deploy_utils.confirm_stop(args)
    get_yarn_service_config(args)
    for name in args.job or ALL_JOBS:
        members = args.yarn_config.jobs[name].hosts
        parallel_deploy.start_deploy_threads(
            stop_job,
            deploy_utils.schedule_task_for_threads(args, members, name, 'stop'))
def bootstrap(args):
    """Bootstrap every selected YARN job after the confirmation prompt."""
    get_yarn_service_config(args)
    # confirm_bootstrap returns a token passed through to each bootstrap task.
    token = deploy_utils.confirm_bootstrap("yarn", args.yarn_config)
    for name in args.job or ALL_JOBS:
        hosts = args.yarn_config.jobs[name].hosts
        tasks = deploy_utils.schedule_task_for_threads(
            args, hosts, name, 'bootstrap', cleanup_token=token)
        parallel_deploy.start_deploy_threads(bootstrap_job, tasks)
def start(args):
    """Start the selected Storm jobs, confirming first unless --skip_confirm."""
    if not args.skip_confirm:
        deploy_utils.confirm_start(args)
    _get_storm_service_config(args)
    for name in args.job or ALL_JOBS:
        batch = deploy_utils.schedule_task_for_threads(
            args, args.storm_config.jobs[name].hosts, name, 'start')
        parallel_deploy.start_deploy_threads(start_job, batch)
def cleanup(args):
    """Run the 'cleanup' action on every selected Kafka job after confirmation."""
    _get_kafka_service_config(args)
    # The confirmation step yields a token the cleanup tasks require.
    token = deploy_utils.confirm_cleanup(args, "kafka", args.kafka_config)
    for name in args.job or ALL_JOBS:
        hosts = args.kafka_config.jobs[name].hosts
        parallel_deploy.start_deploy_threads(
            cleanup_job,
            deploy_utils.schedule_task_for_threads(
                args, hosts, name, 'cleanup', cleanup_token=token))
def cleanup(args):
    """Run the 'cleanup' action on every selected Storm job after confirmation."""
    _get_storm_service_config(args)
    token = deploy_utils.confirm_cleanup(args, "storm", args.storm_config)
    for name in args.job or ALL_JOBS:
        members = args.storm_config.jobs[name].hosts
        batch = deploy_utils.schedule_task_for_threads(
            args, members, name, 'cleanup', cleanup_token=token)
        parallel_deploy.start_deploy_threads(cleanup_job, batch)
def stop(args):
    """Stop the selected HBase jobs.

    With no explicit --job list, jobs are taken in reverse ALL_JOBS order.
    """
    if not args.skip_confirm:
        deploy_utils.confirm_stop(args)
    get_hbase_service_config(args)
    for name in args.job or reversed(ALL_JOBS):
        members = args.hbase_config.jobs[name].hosts
        batch = deploy_utils.schedule_task_for_threads(
            args, members, name, 'stop')
        parallel_deploy.start_deploy_threads(stop_job, batch)
def bootstrap(args):
    """Bootstrap the selected HDFS jobs.

    Before bootstrapping the namenode job, blocks until
    check_journalnode_all_started() reports success, polling every
    2 seconds.
    """
    get_hdfs_service_config(args)
    token = deploy_utils.confirm_bootstrap("hdfs", args.hdfs_config)
    for name in args.job or ALL_JOBS:
        hosts = args.hdfs_config.jobs[name].hosts
        if name == "namenode":
            # Namenode bootstrap requires the journalnodes to be up first.
            while not check_journalnode_all_started(args):
                Log.print_warning("Wait for journalnode starting")
                time.sleep(2)
        tasks = deploy_utils.schedule_task_for_threads(
            args, hosts, name, 'bootstrap', cleanup_token=token)
        parallel_deploy.start_deploy_threads(bootstrap_job, tasks)
def bootstrap(args):
    """Bootstrap the selected HDFS jobs.

    The namenode job is only bootstrapped once every journalnode has
    started; until then this polls with a 2-second sleep.
    """
    get_hdfs_service_config(args)
    cleanup_token = deploy_utils.confirm_bootstrap("hdfs", args.hdfs_config)
    for job_name in args.job or ALL_JOBS:
        if job_name == "namenode":
            # Wait for all journalnodes before touching the namenode.
            while not check_journalnode_all_started(args):
                Log.print_warning("Wait for journalnode starting")
                time.sleep(2)
        job_hosts = args.hdfs_config.jobs[job_name].hosts
        parallel_deploy.start_deploy_threads(
            bootstrap_job,
            deploy_utils.schedule_task_for_threads(
                args, job_hosts, job_name, 'bootstrap',
                cleanup_token=cleanup_token))