def init_ha_active(datanode, cluster):
    """
    Do initial HA setup on the leader.
    """
    local_hostname = hookenv.local_unit().replace('/', '-')
    hadoop = get_hadoop_base()
    hdfs = HDFS(hadoop)
    hdfs.stop_namenode()
    remove_state('namenode.started')
    # initial cluster is us (active) plus the first peer as standby
    set_cluster_nodes([local_hostname, cluster.nodes()[0]])
    update_ha_config(datanode)
    hdfs.init_sharededits()
    hdfs.start_namenode()
    leadership.leader_set({'ha-initialized': 'true'})
    set_state('namenode.started')
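

# How init_ha_active() gets invoked is not shown in this excerpt.  Below is
# a minimal sketch of one plausible reactive wiring; the relation and flag
# names ('datanode.joined', 'namenode-cluster.joined') are assumptions, not
# taken from this module.  'leadership.set.ha-initialized' follows the
# leadership layer's convention for the leader_set() call above.
#
# @when('datanode.joined', 'namenode-cluster.joined')
# @when('leadership.is_leader')
# @when_not('leadership.set.ha-initialized')
# def enable_ha(datanode, cluster):
#     if cluster.nodes():  # need at least one peer to act as standby
#         init_ha_active(datanode, cluster)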


def check_cluster_nodes(cluster, datanode):
    """
    Check to see if any of the chosen cluster nodes have gone away and
    been replaced by viable replacements.

    Note that we only remove a chosen node if it is no longer part of
    the peer relation *and* has been replaced by a working node.  This
    ensures that reboots and intermittent node losses don't cause
    superfluous updates.
    """
    local_hostname = hookenv.local_unit().replace('/', '-')

    manage_cluster_hosts(cluster)  # ensure /etc/hosts is up-to-date

    chosen_nodes = set(get_cluster_nodes())
    current_nodes = set([local_hostname] + cluster.nodes())
    remaining_nodes = chosen_nodes & current_nodes
    added_nodes = current_nodes - chosen_nodes

    if len(remaining_nodes) < 2 and added_nodes:
        chosen_nodes = (sorted(remaining_nodes) + sorted(added_nodes))[:2]
        set_cluster_nodes(chosen_nodes)
        update_ha_config(datanode)  # ensure new config gets written
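

# The set arithmetic above is easiest to see on a concrete example.  The
# helper below is hypothetical (added for illustration, never called by
# the charm) and restates the selection rule as a pure function.
def _choose_cluster_nodes(chosen, current):
    """
    Pure illustration of the selection rule in check_cluster_nodes().

    >>> _choose_cluster_nodes({'nn-0', 'nn-1'}, {'nn-0', 'nn-2'})
    ['nn-0', 'nn-2']
    >>> _choose_cluster_nodes({'nn-0', 'nn-1'}, {'nn-0'})
    ['nn-0', 'nn-1']
    """
    remaining = chosen & current
    added = current - chosen
    if len(remaining) < 2 and added:
        # prefer surviving nodes, then fill the pair from new arrivals
        return (sorted(remaining) + sorted(added))[:2]
    # no viable replacement yet: keep the departed node to avoid churn
    return sorted(chosen)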


def init_cluster_nodes():
    """
    Bootstrap the chosen cluster with just this unit; peers are folded
    in later by check_cluster_nodes() as they join.
    """
    local_hostname = hookenv.local_unit().replace('/', '-')
    set_cluster_nodes([local_hostname])
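

# get_cluster_nodes() / set_cluster_nodes() are referenced above but are
# defined elsewhere in this module.  A minimal sketch of one possible
# shape, assuming the chosen pair is persisted in the charm's local kv
# store (charmhelpers.core.unitdata); the key name is an assumption.
#
# from charmhelpers.core import unitdata
#
# def get_cluster_nodes():
#     return unitdata.kv().get('hdfs.cluster-nodes', [])
#
# def set_cluster_nodes(nodes):
#     unitdata.kv().set('hdfs.cluster-nodes', sorted(nodes))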