proxyuser_group = config['configurations']['hadoop-env']['proxyuser_group'] #hadoop params hdfs_log_dir_prefix = config['configurations']['hadoop-env'][ 'hdfs_log_dir_prefix'] hadoop_root_logger = config['configurations']['hadoop-env'][ 'hadoop_root_logger'] nfs_file_dump_dir = config['configurations']['hdfs-site']['nfs.file.dump.dir'] dfs_domain_socket_path = config['configurations']['hdfs-site'][ 'dfs.domain.socket.path'] dfs_domain_socket_dir = os.path.dirname(dfs_domain_socket_path) hdfs_site = config['configurations']['hdfs-site'] if namenode_federation_enabled(hdfs_site): jn_edits_dirs = get_properties_for_all_nameservices( hdfs_site, 'dfs.journalnode.edits.dir').values() else: jn_edits_dirs = [ config['configurations']['hdfs-site']['dfs.journalnode.edits.dir'] ] dfs_name_dir = config['configurations']['hdfs-site']['dfs.namenode.name.dir'] hdfs_log_dir = format("{hdfs_log_dir_prefix}/{hdfs_user}") namenode_dirs_created_stub_dir = hdfs_log_dir namenode_dirs_stub_filename = "namenode_dirs_created" smoke_hdfs_user_dir = format("/user/{smoke_user}") smoke_hdfs_user_mode = 0770 hdfs_namenode_format_disabled = default(
# NOTE(review): this chunk opened with the orphaned argument fragment
# `StackFeature.SECURE_ZOOKEEPER, version_for_stack_feature_checks)`; it is
# restored here as the `check_stack_feature` call it belongs to, exactly as
# the overlapping duplicate chunk below spells it out — confirm.
stack_supports_zk_security = check_stack_feature(
    StackFeature.SECURE_ZOOKEEPER, version_for_stack_feature_checks)

hostname = config['agentLevelParams']['hostname']
hdfs_site = config['configurations']['hdfs-site']

# HDFS High Availability properties.
dfs_ha_enabled = False
# Prefer the internal nameservices list; fall back to the public one.
dfs_ha_nameservices = default(
    '/configurations/hdfs-site/dfs.internal.nameservices', None)
if dfs_ha_nameservices is None:
  dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.nameservices',
                                None)

# On stacks without any filesystem there is no hdfs-site, so guard the lookup.
dfs_ha_namenode_ids_all_ns = get_properties_for_all_nameservices(
    hdfs_site,
    'dfs.ha.namenodes') if 'hdfs-site' in config['configurations'] else {}
dfs_ha_automatic_failover_enabled = default(
    "/configurations/hdfs-site/dfs.ha.automatic-failover.enabled", False)

# Values for the current Host.
namenode_id = None
namenode_rpc = None
dfs_ha_namemodes_ids_list = []
other_namenode_id = None

# `iteritems()` is Python 2 dict iteration; this file targets Python 2.
for ns, dfs_ha_namenode_ids in dfs_ha_namenode_ids_all_ns.iteritems():
  found = False
  if not is_empty(dfs_ha_namenode_ids):
    dfs_ha_namemodes_ids_list = dfs_ha_namenode_ids.split(",")
    # NOTE(review): the loop body is truncated at the end of this chunk;
    # the remainder is outside the visible source.
# True when at least one Zeppelin master host is assigned.
# (Rewritten from `not len(...) == 0` to idiomatic truthiness; identical result.)
has_zeppelin_master = bool(zeppelin_master_hosts)

stack_supports_zk_security = check_stack_feature(
    StackFeature.SECURE_ZOOKEEPER, version_for_stack_feature_checks)

hostname = config['agentLevelParams']['hostname']
hdfs_site = config['configurations']['hdfs-site']

# HDFS High Availability properties.
# NOTE(review): this chunk largely duplicates the block above (overlapping
# extraction windows over the same source file); content is preserved as-is
# apart from formatting — deduplicate once the full file is in view. Unlike
# the sibling block, this variant does NOT guard the nameservices lookup with
# an `'hdfs-site' in config['configurations']` check — confirm which is current.
dfs_ha_enabled = False
dfs_ha_nameservices = default(
    '/configurations/hdfs-site/dfs.internal.nameservices', None)
if dfs_ha_nameservices is None:
  dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.nameservices',
                                None)

dfs_ha_namenode_ids_all_ns = get_properties_for_all_nameservices(
    hdfs_site, 'dfs.ha.namenodes')
dfs_ha_automatic_failover_enabled = default(
    "/configurations/hdfs-site/dfs.ha.automatic-failover.enabled", False)

# Values for the current Host.
namenode_id = None
namenode_rpc = None
dfs_ha_namemodes_ids_list = []
other_namenode_id = None

# Python 2 dict iteration (`iteritems`).
for ns, dfs_ha_namenode_ids in dfs_ha_namenode_ids_all_ns.iteritems():
  found = False
  if not is_empty(dfs_ha_namenode_ids):
    dfs_ha_namemodes_ids_list = dfs_ha_namenode_ids.split(",")
    dfs_ha_namenode_ids_array_len = len(dfs_ha_namemodes_ids_list)
    # NOTE(review): the loop body is truncated at the end of this chunk;
    # the remainder is outside the visible source.