def configure_ha(cluster, datanode):
    """Push HA-related configuration out to this NameNode.

    Re-reads the current peer and JournalNode topology and, only when any
    of it has changed since the last invocation, updates /etc/hosts,
    re-registers the JournalNodes, restarts the NameNode, and advertises
    the NameNode set to the DataNodes.  Shared-edits initialization is a
    one-shot step guarded by the 'namenode.shared-edits.init' state flag.

    :param cluster: peer relation endpoint providing NameNode hosts
    :param datanode: relation endpoint providing JournalNode hosts/port
    """
    base = get_hadoop_base()
    service = HDFS(base)
    nn_hosts = cluster.nodes()
    journal_hosts = datanode.nodes()
    journal_port = datanode.jn_port()

    # Only reconfigure when the topology actually changed; avoids
    # needless NameNode restarts on every hook invocation.
    if data_changed('namenode.ha', [nn_hosts, journal_hosts, journal_port]):
        utils.update_kv_hosts(cluster.hosts_map())
        utils.manage_etc_hosts()
        service.register_journalnodes(journal_hosts, journal_port)
        service.restart_namenode()
        datanode.send_namenodes(nn_hosts)
        # Shared-edits dir must be initialized exactly once per cluster.
        if not is_state('namenode.shared-edits.init'):
            service.init_sharededits()
            set_state('namenode.shared-edits.init')
def init_ha_active(datanode, cluster):
    """Perform initial HA bootstrap on the leader (active) NameNode.

    Stops the local NameNode, records the initial two-node cluster
    (this unit as active plus the first peer as standby), pushes the HA
    configuration, initializes the shared-edits directory, restarts the
    NameNode, and finally marks HA as initialized via leadership data.

    :param datanode: relation endpoint used to update HA config
    :param cluster: peer relation endpoint; its first node becomes standby
    """
    # Juju unit names use '/', hostnames use '-'.
    local_hostname = hookenv.local_unit().replace('/', '-')
    service = HDFS(get_hadoop_base())

    service.stop_namenode()
    remove_state('namenode.started')

    # Initial cluster is us (active) plus a single standby peer.
    set_cluster_nodes([local_hostname, cluster.nodes()[0]])
    update_ha_config(datanode)
    service.init_sharededits()
    service.start_namenode()

    # Record completion so other units / future hooks skip this step.
    leadership.leader_set({'ha-initialized': 'true'})
    set_state('namenode.started')