def prepare_scaling_kerberized_cluster(cluster, cloudera_utils, instances):
    """Set up Kerberos clients and HDFS keytabs for newly added instances.

    Does nothing when Kerberos security is not enabled on the cluster.
    """
    if not kerberos.is_kerberos_security_enabled(cluster):
        return
    # With an existing (external) KDC there is no in-cluster server to
    # point the clients at, so None is passed through.
    kdc_server = (None if kerberos.using_existing_kdc(cluster)
                  else cloudera_utils.pu.get_manager(cluster))
    kerberos.setup_clients(cluster, kdc_server)
    kerberos.prepare_policy_files(cluster)
    # manager can correctly handle updating configs
    cloudera_utils.push_kerberos_configs(cluster)
    hdfs_instances = cloudera_utils.pu.get_hdfs_nodes(cluster, instances)
    kerberos.create_keytabs_for_map(cluster, {'hdfs': hdfs_instances})
def setup_kerberos_for_cluster(cluster, cloudera_utils):
    """Deploy Kerberos infrastructure and enable security on the cluster.

    Does nothing when Kerberos security is not enabled on the cluster.
    """
    if not kerberos.is_kerberos_security_enabled(cluster):
        return
    manager = cloudera_utils.pu.get_manager(cluster)
    kerberos.deploy_infrastructure(cluster, manager)
    # NOTE(review): the config push is bracketed by a full stop/start —
    # presumably the Kerberos configs only take effect across a restart.
    cloudera_utils.full_cluster_stop(cluster)
    kerberos.prepare_policy_files(cluster)
    cloudera_utils.push_kerberos_configs(cluster)
    cloudera_utils.full_cluster_start(cluster)
    service_map = {
        'hdfs': cloudera_utils.pu.get_hdfs_nodes(cluster),
        'spark': [cloudera_utils.pu.get_spark_historyserver(cluster)],
    }
    kerberos.create_keytabs_for_map(cluster, service_map)
def deploy_kerberos_principals(cluster, instances=None):
    """Create Kerberos keytabs for service instances of the cluster.

    When *instances* is None, all instances of the cluster are considered.
    Does nothing when Kerberos security is not enabled on the cluster.
    """
    if not kerberos.is_kerberos_security_enabled(cluster):
        return
    if instances is None:
        instances = plugin_utils.get_instances(cluster)

    def _hosting(services):
        # Instances from the given set that run any of these services.
        return plugin_utils.instances_with_services(instances, services)

    keytab_map = {
        'hdfs': _hosting([
            p_common.SECONDARY_NAMENODE,
            p_common.NAMENODE,
            p_common.DATANODE,
            p_common.JOURNAL_NODE,
        ]),
        'spark': _hosting([p_common.SPARK_JOBHISTORYSERVER]),
        'oozie': _hosting([p_common.OOZIE_SERVER]),
    }
    kerberos.create_keytabs_for_map(cluster, keytab_map)