def test_substitute_vars(self):
  """Table-driven check of substitute_vars.

  Keys prefixed 'pass.' must have their ${...} references resolved against
  raw_config; keys prefixed 'fail.' (unknown names, empty/whitespace refs,
  digit-containing names, unresolvable sequences, self-reference) must be
  left with the offending ${...} text intact.
  """
  raw_config = {
    'val.intvar': '42',
    'pass.intvar': '${val.intvar}',
    'fail.unknown': 'a${unknown}b',
    'fail.empty': '${}',
    'fail.space': '${ my.int}',
    'val.0': 'will_fail',
    'fail.digit': '${val.0}',
    'val.file': 'hello',
    'val.suffix': '.txt',
    'pass.seq.depth': '${val.file}${val.suffix}${pass.intvar}',
    'fail.seq.depth.a': '${val.file}${unknown}${pass.intvar}',
    'fail.seq.depth.b': '${val.file}${fail.seq.depth.a}${pass.intvar}',
    'val.name': 'val.intvar',
    'pass.name.as.param': '${${val.name}}',
    'fail.inf.loop': '${fail.inf.loop}'
  }
  expected_config = {
    'val.intvar': '42',
    'pass.intvar': '42',
    'fail.unknown': 'a${unknown}b',
    'fail.empty': '${}',
    'fail.space': '${ my.int}',
    'val.0': 'will_fail',
    'fail.digit': '${val.0}',
    'val.file': 'hello',
    'val.suffix': '.txt',
    'pass.seq.depth': 'hello.txt42',
    'fail.seq.depth.a': 'hello${unknown}${pass.intvar}',
    'fail.seq.depth.b': 'hellohello${unknown}${pass.intvar}${pass.intvar}',
    'val.name': 'val.intvar',
    'pass.name.as.param': '42',
    'fail.inf.loop': '${fail.inf.loop}'
  }
  # Iterate pairs directly instead of re-indexing raw_config by key.
  for key, raw_value in raw_config.items():
    self.assertEqual(substitute_vars(raw_value, raw_config),
                     expected_config[key])
def test_substitute_vars(self):
  """Exercise substitute_vars against a raw -> expected table.

  'pass.*' entries resolve their ${...} references from raw_config;
  'fail.*' entries (unknown/empty/whitespace/digit names, partially
  resolvable sequences, self-reference) keep the literal ${...} text.
  """
  raw_config = {
    'val.intvar': '42',
    'pass.intvar': '${val.intvar}',
    'fail.unknown': 'a${unknown}b',
    'fail.empty': '${}',
    'fail.space': '${ my.int}',
    'val.0': 'will_fail',
    'fail.digit': '${val.0}',
    'val.file': 'hello',
    'val.suffix': '.txt',
    'pass.seq.depth': '${val.file}${val.suffix}${pass.intvar}',
    'fail.seq.depth.a': '${val.file}${unknown}${pass.intvar}',
    'fail.seq.depth.b': '${val.file}${fail.seq.depth.a}${pass.intvar}',
    'val.name': 'val.intvar',
    'pass.name.as.param': '${${val.name}}',
    'fail.inf.loop': '${fail.inf.loop}'
  }
  expected_config = {
    'val.intvar': '42',
    'pass.intvar': '42',
    'fail.unknown': 'a${unknown}b',
    'fail.empty': '${}',
    'fail.space': '${ my.int}',
    'val.0': 'will_fail',
    'fail.digit': '${val.0}',
    'val.file': 'hello',
    'val.suffix': '.txt',
    'pass.seq.depth': 'hello.txt42',
    'fail.seq.depth.a': 'hello${unknown}${pass.intvar}',
    'fail.seq.depth.b': 'hellohello${unknown}${pass.intvar}${pass.intvar}',
    'val.name': 'val.intvar',
    'pass.name.as.param': '42',
    'fail.inf.loop': '${fail.inf.loop}'
  }
  # Drive the loop from the expectation table; key sets are identical.
  for key, expected_value in expected_config.items():
    actual_value = substitute_vars(raw_config[key], raw_config)
    self.assertEqual(actual_value, expected_value)
# Cache the two most frequently accessed config sub-dicts.
_configurations = config['configurations']
_ams_hbase_site = _configurations['ams-hbase-site']

cluster_zookeeper_quorum_hosts = ",".join(config['clusterHostInfo']['zookeeper_hosts'])

# Cluster ZK client port; standard port 2181 unless zoo.cfg overrides it.
cluster_zookeeper_clientPort = '2181'
if 'zoo.cfg' in _configurations and 'clientPort' in _configurations['zoo.cfg']:
  cluster_zookeeper_clientPort = _configurations['zoo.cfg']['clientPort']

# Distributed AMS HBase uses the cluster ZK quorum; embedded mode runs its
# own ZK on this host on port 61181.
if is_hbase_distributed:
  zookeeper_quorum_hosts = cluster_zookeeper_quorum_hosts
  zookeeper_clientPort = cluster_zookeeper_clientPort
else:
  zookeeper_quorum_hosts = hostname
  zookeeper_clientPort = '61181'

ams_checkpoint_dir = _configurations['ams-site']['timeline.metrics.aggregator.checkpoint.dir']

# Raw values may contain ${...} references into ams-hbase-site; resolve them.
_hbase_tmp_dir = _ams_hbase_site['hbase.tmp.dir']
hbase_tmp_dir = substitute_vars(_hbase_tmp_dir, _ams_hbase_site)
_zookeeper_data_dir = _ams_hbase_site['hbase.zookeeper.property.dataDir']
zookeeper_data_dir = substitute_vars(_zookeeper_data_dir, _ams_hbase_site)
# TODO UPGRADE default, update site during upgrade
_local_dir_conf = default('/configurations/ams-hbase-site/hbase.local.dir', "${hbase.tmp.dir}/local")
local_dir = substitute_vars(_local_dir_conf, _ams_hbase_site)

phoenix_max_global_mem_percent = default('/configurations/ams-site/phoenix.query.maxGlobalMemoryPercentage', '20')
phoenix_client_spool_dir = default('/configurations/ams-site/phoenix.spool.directory', '/tmp')
phoenix_server_spool_dir = default('/configurations/ams-hbase-site/phoenix.spool.directory', '/tmp')
# Substitute vars if present
phoenix_client_spool_dir = substitute_vars(phoenix_client_spool_dir, _ams_hbase_site)
phoenix_server_spool_dir = substitute_vars(phoenix_server_spool_dir, _ams_hbase_site)

client_jaas_config_file = format("{hbase_conf_dir}/hbase_client_jaas.conf")
master_jaas_config_file = format("{hbase_conf_dir}/hbase_master_jaas.conf")
# JVM and directory settings for HBase master/regionserver, sourced from
# hbase-env / hbase-site. NOTE(review): block is truncated in this view at
# the final `if`; its body lies outside this chunk.
java64_home = config['hostLevelParams']['java_home']
log_dir = config['configurations']['hbase-env']['hbase_log_dir']
master_heapsize = config['configurations']['hbase-env']['hbase_master_heapsize']
regionserver_heapsize = config['configurations']['hbase-env']['hbase_regionserver_heapsize']
regionserver_xmn_max = config['configurations']['hbase-env']['hbase_regionserver_xmn_max']
regionserver_xmn_percent = config['configurations']['hbase-env']['hbase_regionserver_xmn_ratio']
# Young-gen size derived from heap size and ratio, presumably capped by
# regionserver_xmn_max — semantics live in calc_xmn_from_xms (not visible here).
regionserver_xmn_size = calc_xmn_from_xms(regionserver_heapsize, regionserver_xmn_percent, regionserver_xmn_max)
pid_dir = status_params.pid_dir
tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
# TODO UPGRADE default, update site during upgrade
# hbase.local.dir may reference ${hbase.tmp.dir}; resolve against hbase-site.
_local_dir_conf = default('/configurations/hbase-site/hbase.local.dir', "${hbase.tmp.dir}/local")
local_dir = substitute_vars(_local_dir_conf, config['configurations']['hbase-site'])

# JAAS config file paths under the HBase conf dir (Ambari format()).
client_jaas_config_file = format("{hbase_conf_dir}/hbase_client_jaas.conf")
master_jaas_config_file = format("{hbase_conf_dir}/hbase_master_jaas.conf")
regionserver_jaas_config_file = format("{hbase_conf_dir}/hbase_regionserver_jaas.conf")
queryserver_jaas_config_file = format("{hbase_conf_dir}/hbase_queryserver_jaas.conf")

ganglia_server_hosts = default('/clusterHostInfo/ganglia_server_host', [])  # is not passed when ganglia is not present
ganglia_server_host = '' if len(ganglia_server_hosts) == 0 else ganglia_server_hosts[0]

# AMS metrics collector endpoint, if the service is deployed.
ams_collector_hosts = default("/clusterHostInfo/metrics_collector_hosts", [])
has_metric_collector = not len(ams_collector_hosts) == 0
if has_metric_collector:
  metric_collector_host = ams_collector_hosts[0]
  metric_collector_port = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:6188")
  # Webapp address may be host:port; body of this branch continues past this chunk.
  if metric_collector_port and metric_collector_port.find(':') != -1:
ams_hbase_conf_dir = "/etc/ams-hbase/conf"

# Defaults, overridden below when the corresponding config types are present.
# NOTE(review): the "******" literal looks like redacted text — confirm the
# real default against the upstream file before relying on it.
ams_user = "******"
ams_user_group = "hadoop"
if ('configurations' in config and 'ams-env' in config['configurations'] and
    'ambari_metrics_user' in config['configurations']['ams-env']):
  ams_user = config['configurations']['ams-env']['ambari_metrics_user']
if ('configurations' in config and 'cluster-env' in config['configurations'] and
    'user_group' in config['configurations']['cluster-env']):
  ams_user_group = config['configurations']['cluster-env']["user_group"]

ams_phoenix_max_global_mem_percent = default(
  '/configurations/ams-site/phoenix.query.maxGlobalMemoryPercentage', '20')
ams_phoenix_client_spool_dir = default(
  '/configurations/ams-site/phoenix.spool.directory', '/tmp')
# Spool dir value may contain ${...} references; resolve against ams-hbase-site.
ams_phoenix_client_spool_dir = substitute_vars(
  ams_phoenix_client_spool_dir, config['configurations']['ams-hbase-site'])

# Activity analyzer JVM settings; hard-coded fallbacks when activity-env
# does not supply them.
analyzer_jvm_heap = 8192
analyzer_jvm_opts = ""
if 'activity-env' in config['configurations'] and 'analyzer_jvm_heap' in config[
    'configurations']['activity-env']:
  analyzer_jvm_heap = config['configurations']['activity-env'][
    'analyzer_jvm_heap']
if 'activity-env' in config['configurations'] and 'analyzer_jvm_opts' in config[
    'configurations']['activity-env']:
  analyzer_jvm_opts = config['configurations']['activity-env'][
    'analyzer_jvm_opts']

# OS user executing this script.
configuredUser = getpass.getuser()
# NOTE(review): body of this branch continues past this chunk.
if ('role' in config):