def get_hdfs_service_config(args):
  """Load the HDFS service config and attach it to ``args.hdfs_config``.

  Aborts via Log.print_critical when the cluster does not declare a
  ZooKeeper dependency. The zkfc job is co-located with the namenodes,
  so its host list is mirrored from the namenode job. Also forces
  config-file generation by clearing ``args.skip_gen_config_files``.
  """
  hdfs_config = deploy_utils.get_service_config(args)
  args.hdfs_config = hdfs_config
  if not hdfs_config.cluster.zk_cluster:
    Log.print_critical(
        "hdfs cluster must depends on a zookeeper clusters: %s" %
        hdfs_config.cluster.name)
  # zkfc runs alongside every namenode; reuse (a copy of) its host list.
  hdfs_config.jobs["zkfc"].hosts = hdfs_config.jobs["namenode"].hosts.copy()
  args.skip_gen_config_files = False
def _get_storm_service_config(args):
  """Load the Storm service config and attach it to ``args.storm_config``.

  Aborts via Log.print_critical when the cluster does not declare a
  ZooKeeper dependency. The ui job is co-located with nimbus and the
  logviewer job with the supervisors, so their host lists are mirrored
  from those jobs.
  """
  storm_config = deploy_utils.get_service_config(args)
  args.storm_config = storm_config
  if not storm_config.cluster.zk_cluster:
    Log.print_critical(
        "storm cluster must depends on a zookeeper clusters: %s" %
        storm_config.cluster.name)
  jobs = storm_config.jobs
  # Derived host lists: ui follows nimbus, logviewer follows supervisor.
  jobs["ui"].hosts = jobs["nimbus"].hosts.copy()
  jobs["logviewer"].hosts = jobs["supervisor"].hosts.copy()
def __init__(self, options, config, name):
  """Build a service descriptor from config section *name*.

  Reads the whitespace-separated ``jobs`` and ``clusters`` options,
  loads each declared cluster's full service config via
  deploy_utils.get_service_config, and records the ``metric_url``.
  ``options`` is accepted but unused here.
  """
  self.name = name
  self.jobs = config.get(name, "jobs").split()
  self.clusters = {}
  for cluster in config.get(name, "clusters").split():
    # deploy_utils expects an args-like namespace with service/cluster set.
    ns = argparse.Namespace()
    ns.service = self.name
    ns.cluster = cluster
    self.clusters[cluster] = deploy_utils.get_service_config(ns)
  self.metric_url = config.get(name, "metric_url")
def __init__(self, options, config, name):
  """Parse the service section *name* and load each cluster's config.

  ``jobs`` and ``clusters`` are whitespace-separated option values;
  every cluster name is resolved to a full service config through
  deploy_utils.get_service_config. ``options`` is accepted but unused.
  """
  self.name = name
  self.jobs = config.get(name, "jobs").split()
  # One parsed service config per declared cluster name.
  self.clusters = {
      cluster_name: deploy_utils.get_service_config(
          argparse.Namespace(service=name, cluster=cluster_name))
      for cluster_name in config.get(name, "clusters").split()
  }
  self.metric_url = config.get(name, "metric_url")
def __init__(self, options, config, name):
  """Parse the service section *name*: jobs, clusters, metric url, flags.

  Loads each declared cluster's config via
  deploy_utils.get_service_config and reads the optional boolean
  ``need_analyze`` option (defaults to True when absent).
  ``options`` is accepted but unused here.
  """
  self.name = name
  self.jobs = config.get(name, "jobs").split()
  self.clusters = {}
  for cluster_name in config.get(name, "clusters").split():
    ns = argparse.Namespace(service=name, cluster=cluster_name)
    self.clusters[cluster_name] = deploy_utils.get_service_config(ns)
  self.metric_url = config.get(name, "metric_url")
  # Analysis is on by default unless the section explicitly disables it.
  if config.has_option(name, "need_analyze"):
    self.need_analyze = config.getboolean(name, "need_analyze")
  else:
    self.need_analyze = True
def _get_fds_service_config(args):
  """Resolve the FDS service config and stash it on ``args.fds_config``."""
  fds_config = deploy_utils.get_service_config(args)
  args.fds_config = fds_config
def _get_chronos_service_config(args):
  """Resolve the Chronos service config and stash it on ``args.chronos_config``."""
  chronos_config = deploy_utils.get_service_config(args)
  args.chronos_config = chronos_config
def _get_kafka_service_config(args):
  """Resolve the Kafka service config and stash it on ``args.kafka_config``."""
  kafka_config = deploy_utils.get_service_config(args)
  args.kafka_config = kafka_config
def get_yarn_service_config(args):
  """Load the YARN service config and attach it to ``args.yarn_config``.

  Aborts via Log.print_critical when the cluster does not declare a
  ZooKeeper dependency.
  """
  yarn_config = deploy_utils.get_service_config(args)
  args.yarn_config = yarn_config
  if not yarn_config.cluster.zk_cluster:
    Log.print_critical(
        "yarn cluster must depends on a zookeeper clusters: %s" %
        yarn_config.cluster.name)
def get_zk_service_config(args):
  """Load the ZooKeeper service config onto ``args.zookeeper_config``.

  Aborts via Log.print_critical when the cluster itself declares a
  ZooKeeper dependency — a zookeeper cluster must be dependency-free.
  """
  zookeeper_config = deploy_utils.get_service_config(args)
  args.zookeeper_config = zookeeper_config
  if zookeeper_config.cluster.zk_cluster:
    Log.print_critical(
        "zookeeper cluster can't depends on other clusters: %s" %
        zookeeper_config.cluster.name)
def get_impala_service_config(args):
  """Resolve the Impala service config and stash it on ``args.impala_config``."""
  impala_config = deploy_utils.get_service_config(args)
  args.impala_config = impala_config
def get_hbase_service_config(args):
  """Load the HBase service config and attach it to ``args.hbase_config``.

  Aborts via Log.print_critical when the cluster does not declare the
  required ZooKeeper dependency.
  """
  args.hbase_config = deploy_utils.get_service_config(args)
  if not args.hbase_config.cluster.zk_cluster:
    # Bug fix: the message previously said "hdfs cluster" (copy-paste
    # from get_hdfs_service_config); this check is for hbase.
    Log.print_critical(
        "hbase cluster must depends on a zookeeper clusters: %s" %
        args.hbase_config.cluster.name)
def get_mapreduce_service_config(args):
  """Resolve the MapReduce service config and stash it on ``args.mapreduce_config``."""
  mapreduce_config = deploy_utils.get_service_config(args)
  args.mapreduce_config = mapreduce_config