def pack(args):
  """Assemble the hbase client-utilities package.

  Loads the hbase service config, stages a package directory containing the
  generated client configuration for the cluster's hbase version, and
  (unless --skip_tarball was given) tars the staged directory up.
  """
  get_hbase_service_config(args)
  artifact = "hbase"
  cluster_version = args.hbase_config.cluster.version
  deploy_utils.make_package_dir(args, artifact, cluster_version)
  generate_client_config(args, artifact, cluster_version)
  # Tarball creation is optional so repeated local runs can stay fast.
  if not args.skip_tarball:
    deploy_utils.pack_package(args, artifact, cluster_version)
  Log.print_success("Pack client utilities for hbase success!\n")
def pack(args):
  """Assemble the zookeeper client-utilities package.

  Loads the zookeeper service config, stages a package directory with the
  generated client configuration, and (unless --skip_tarball) tars it up.
  """
  get_zk_service_config(args)
  version = args.zookeeper_config.cluster.version
  deploy_utils.make_package_dir(args, "zookeeper", version)
  generate_client_config(args, "zookeeper", version)
  if not args.skip_tarball:
    deploy_utils.pack_package(args, "zookeeper", version)
  # Trailing newline added for consistency with the other services' pack()
  # success messages.
  Log.print_success("Pack client utilities for zookeeper success!\n")
def pack(args):
  """Assemble the impala-shell client package.

  Loads the impala service config, stages the impala-shell package
  directory, and (unless --skip_tarball) tars it up.
  """
  get_impala_service_config(args)
  version = args.impala_config.cluster.version
  deploy_utils.make_package_dir(args, "impala-shell", version)
  if not args.skip_tarball:
    # Use the already-computed local `version` instead of re-reading it
    # from args, so the staged dir and the tarball always agree.
    deploy_utils.pack_package(args, "impala-shell", version)
  # Fixed copy-paste bug: the message previously reported "hadoop".
  Log.print_success("Pack client utilities for impala-shell success!\n")
def pack(args):
  """Assemble the hadoop (yarn) client-utilities package.

  Loads the yarn service config, stages the hadoop package directory with
  both the hdfs-generated and yarn-generated client configurations, and
  (unless --skip_tarball) tars it up.
  """
  get_yarn_service_config(args)
  version = args.yarn_config.cluster.version
  deploy_utils.make_package_dir(args, "hadoop", version)
  deploy_hdfs.generate_client_config(args, "hadoop", version)
  generate_client_config(args, "hadoop", version)
  if not args.skip_tarball:
    # Fixed: previously packed with args.hdfs_config.cluster.version, which
    # could differ from the yarn version used to stage the directory above,
    # producing a tarball for a directory that was never created.
    deploy_utils.pack_package(args, "hadoop", version)
  Log.print_success("Pack client utilities for hadoop success!\n")
def rolling_update(args):
  """Rolling-restart every zookeeper host, one at a time.

  For each host: confirm (unless --skip_confirm), stop the job, wait until
  it is fully stopped, start it again, and wait until it is running before
  moving on. Hosts after the first are delayed by args.time_interval.
  """
  get_zk_service_config(args)
  job_name = "zookeeper"

  if not args.skip_confirm:
    deploy_utils.confirm_action(args, "rolling_update")

  Log.print_info("Rolling updating %s" % job_name)
  hosts = args.zookeeper_config.jobs[job_name].hosts
  wait_time = 0

  for id in hosts.iterkeys():
    # Fixed: honor --skip_confirm for the per-host prompt as well, matching
    # the hbase rolling_update behavior.
    if not args.skip_confirm:
      deploy_utils.confirm_rolling_update(id, wait_time)
    stop_job(args, hosts[id], job_name)
    deploy_utils.wait_for_job_stopping("zookeeper",
        args.zookeeper_config.cluster.name, job_name, hosts[id])
    start_job(args, hosts[id], job_name)
    deploy_utils.wait_for_job_starting("zookeeper",
        args.zookeeper_config.cluster.name, job_name, hosts[id])
    wait_time = args.time_interval

  Log.print_success("Rolling updating %s success" % job_name)
def rolling_update(args):
  """Rolling-restart the hosts of one hbase job, one host at a time.

  Requires args.job to name the job. When the job is 'regionserver' and
  --vacate_rs is set, the balancer is switched off, regions are drained off
  each regionserver before its restart and recovered afterwards, and the
  balancer is switched back on at the end. For any other job, vacating is
  forced off.
  """
  if not args.job:
    Log.print_critical("You must specify the job name to do rolling update")
  if not args.skip_confirm:
    deploy_utils.confirm_action(args, "rolling_update")

  get_hbase_service_config(args)
  job_name = args.job[0]

  # Region vacating only makes sense for regionservers.
  if job_name != 'regionserver':
    args.vacate_rs = False
  if args.vacate_rs:
    balance_switch(args, False)

  Log.print_info("Rolling updating %s" % job_name)
  hosts = args.hbase_config.jobs[job_name].hosts
  cluster_name = args.hbase_config.cluster.name
  delay = 0

  for host_id in hosts.iterkeys():
    host = hosts[host_id]
    if not args.skip_confirm:
      deploy_utils.confirm_rolling_update(host_id, delay)
    if args.vacate_rs:
      vacate_region_server(args, host)
    stop_job(args, host, job_name)
    deploy_utils.wait_for_job_stopping(
        "hbase", cluster_name, job_name, host)
    start_job(args, host, job_name)
    deploy_utils.wait_for_job_starting(
        "hbase", cluster_name, job_name, host)
    if args.vacate_rs:
      recover_region_server(args, host)
    # Every host after the first waits args.time_interval at the prompt.
    delay = args.time_interval

  if args.vacate_rs:
    balance_switch(args, True)
  Log.print_success("Rolling updating %s success" % job_name)
def rolling_update(args):
  """Rolling-restart the hosts of one yarn job, one host at a time.

  Requires args.job to name the job. For each host: confirm (unless
  --skip_confirm), stop the job, wait until it is fully stopped, start it
  again, and wait until it is running before moving on. Hosts after the
  first are delayed by args.time_interval.
  """
  if not args.job:
    Log.print_critical("You must specify the job name to do rolling update")

  get_yarn_service_config(args)
  job_name = args.job[0]

  if not args.skip_confirm:
    deploy_utils.confirm_action(args, "rolling_update")

  Log.print_info("Rolling updating %s" % job_name)
  hosts = args.yarn_config.jobs[job_name].hosts
  wait_time = 0

  for id in hosts.iterkeys():
    # Fixed: honor --skip_confirm for the per-host prompt as well, matching
    # the hbase rolling_update behavior.
    if not args.skip_confirm:
      deploy_utils.confirm_rolling_update(id, wait_time)
    stop_job(args, hosts[id], job_name)
    deploy_utils.wait_for_job_stopping("yarn",
        args.yarn_config.cluster.name, job_name, hosts[id])
    start_job(args, hosts[id], job_name)
    deploy_utils.wait_for_job_starting("yarn",
        args.yarn_config.cluster.name, job_name, hosts[id])
    wait_time = args.time_interval

  Log.print_success("Rolling updating %s success" % job_name)