def cleanup_job(args, host, job_name, host_id, instance_id, cleanup_token,
    active):
  # Only zkfc needs a job-specific cleanup script; every other hdfs job
  # goes through the generic cleanup path.
  cleanup_script = ""
  if job_name == "zkfc":
    cleanup_script = generate_cleanup_script(args, host, job_name, host_id,
        instance_id, active)
  deploy_utils.cleanup_job("hdfs", args.hdfs_config, host, job_name,
      instance_id, cleanup_token, cleanup_script)
def cleanup(args):
  get_zk_service_config(args)
  # Confirm with the user before destroying data; the returned token
  # authorizes each per-host cleanup.
  cleanup_token = deploy_utils.confirm_cleanup(args, "zookeeper",
      args.zookeeper_config)
  hosts = args.zookeeper_config.jobs["zookeeper"].hosts
  for host_id, host in hosts.iteritems():
    deploy_utils.cleanup_job("zookeeper", args.zookeeper_config, host,
        "zookeeper", cleanup_token)
def cleanup(args):
  get_impala_service_config(args)
  cleanup_token = deploy_utils.confirm_cleanup(args, "impala",
      args.impala_config)
  for job_name in args.job or ALL_JOBS:
    hosts = args.impala_config.jobs[job_name].hosts
    # Fall back to every configured host when no tasks were specified.
    for host_id in args.task or hosts.iterkeys():
      deploy_utils.cleanup_job("impala", args.impala_config, hosts[host_id],
          job_name, cleanup_token)
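# Standalone sketch of the "x or default" idiom used in the impala cleanup
# above; "selected" and "defaults" are illustrative names, not deploy_utils
# values. An empty list (or None) on the left is falsy, so iteration falls
# back to the full default set.
selected = []                  # e.g. no --task arguments were given
defaults = ["host0", "host1"]  # e.g. every configured host id
for task in selected or defaults:
  print(task)                  # visits host0, then host1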
def cleanup(args):
  get_zk_service_config(args)
  cleanup_token = deploy_utils.confirm_cleanup(args, "zookeeper",
      args.zookeeper_config)
  hosts = args.zookeeper_config.jobs["zookeeper"].hosts
  for host_id in hosts.keys():
    for instance_id in range(hosts[host_id].instance_num):
      # Single-instance hosts are normalized to instance_id -1.
      instance_id = -1 if not deploy_utils.is_multiple_instances(
          host_id, hosts) else instance_id
      deploy_utils.cleanup_job("zookeeper", args.zookeeper_config,
          hosts[host_id].ip, "zookeeper", instance_id, cleanup_token)
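# Sketch of the instance-id normalization used above, under the assumed
# convention that -1 marks a single-instance host so downstream naming
# carries no instance suffix. normalize_instance_id is a hypothetical
# helper, not part of deploy_utils.
def normalize_instance_id(instance_id, is_multiple_instances):
  return instance_id if is_multiple_instances else -1

assert normalize_instance_id(0, False) == -1
assert normalize_instance_id(2, True) == 2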
def cleanup(args):
  _get_kafka_service_config(args)
  cleanup_token = deploy_utils.confirm_cleanup(args, "kafka",
      args.kafka_config)
  for job_name in args.job or ALL_JOBS:
    hosts = args.kafka_config.jobs[job_name].hosts
    args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)
    # An empty task map means "clean up every instance on every host".
    for host_id in args.task_map.keys() or hosts.keys():
      for instance_id in args.task_map.get(host_id) or range(
          hosts[host_id].instance_num):
        instance_id = -1 if not deploy_utils.is_multiple_instances(
            host_id, hosts) else instance_id
        deploy_utils.cleanup_job("kafka", args.kafka_config,
            hosts[host_id].ip, job_name, instance_id, cleanup_token)
def cleanup(args):
  get_yarn_service_config(args)
  cleanup_token = deploy_utils.confirm_cleanup(args, "yarn",
      args.yarn_config)
  for job_name in args.job or ALL_JOBS:
    hosts = args.yarn_config.jobs[job_name].hosts
    args.task_map = deploy_utils.parse_args_host_and_task(args, hosts)
    for host_id in args.task_map.keys() or hosts.keys():
      for instance_id in args.task_map.get(host_id) or range(
          hosts[host_id].instance_num):
        instance_id = -1 if not deploy_utils.is_multiple_instances(
            host_id, hosts) else instance_id
        deploy_utils.cleanup_job("yarn", args.yarn_config,
            hosts[host_id].ip, job_name, instance_id, cleanup_token)
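# Standalone sketch of the host/instance selection pattern shared by the
# kafka and yarn cleanups above, with plain dicts standing in for the
# parsed arguments ("task_map" and "instance_counts" are illustrative):
task_map = {}                   # empty: no hosts/tasks were selected
instance_counts = {0: 2, 1: 1}  # host_id -> instance_num
for host_id in task_map.keys() or instance_counts.keys():
  # a missing or empty entry falls back to every instance on the host
  for instance_id in task_map.get(host_id) or range(instance_counts[host_id]):
    print(host_id, instance_id)  # (0, 0), (0, 1), (1, 0)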
def cleanup_job(args, host, job_name, host_id, instance_id, active,
    cleanup_token):
  cleanup_script = ""
  if job_name == "zkfc":
    cleanup_script = generate_cleanup_script(args, host, job_name, host_id,
        instance_id, active)
  deploy_utils.cleanup_job("hdfs", args.hdfs_config, host, job_name,
      instance_id, cleanup_token, cleanup_script)
def cleanup_job(args, host, job_name, host_id, instance_id, cleanup_token,
    active):
  deploy_utils.cleanup_job("yarn", args.yarn_config, host, job_name,
      instance_id, cleanup_token)
def cleanup_job(args, host, job_name, host_id, instance_id, cleanup_token,
    active):
  # Only the supervisor job needs an extra cleanup script.
  cleanup_script = ""
  if job_name == "supervisor":
    cleanup_script = generate_cleanup_script(args, job_name)
  deploy_utils.cleanup_job("storm", args.storm_config, host, job_name,
      instance_id, cleanup_token, cleanup_script)
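# Purely hypothetical sketch: the real generate_cleanup_script is not shown
# in this listing, and the path below is a placeholder. It only conveys the
# shape of the extra shell snippet a job-specific cleanup might hand to
# deploy_utils.cleanup_job.
def generate_cleanup_script_sketch(job_name):
  return "#!/bin/sh\nrm -rf /path/to/%s/local-data\n" % job_name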