from charms.reactive import is_state, set_state
from charms.reactive.helpers import data_changed
from jujubigdata import utils
from jujubigdata.handlers import HDFS

# Note: get_hadoop_base() and get_cluster_nodes() are charm-local helpers;
# their import path depends on the layer this module lives in.


def configure_ha(cluster, datanode):
    hadoop = get_hadoop_base()
    hdfs = HDFS(hadoop)
    cluster_nodes = cluster.nodes()
    jn_nodes = datanode.nodes()
    jn_port = datanode.jn_port()
    # Only rewrite hosts, re-register journalnodes, and restart the NameNode
    # when the HA topology actually changed.
    if data_changed('namenode.ha', [cluster_nodes, jn_nodes, jn_port]):
        utils.update_kv_hosts(cluster.hosts_map())
        utils.manage_etc_hosts()
        hdfs.register_journalnodes(jn_nodes, jn_port)
        hdfs.restart_namenode()
        datanode.send_namenodes(cluster_nodes)
    # Shared-edits initialization must only ever run once.
    if not is_state('namenode.shared-edits.init'):
        hdfs.init_sharededits()
        set_state('namenode.shared-edits.init')
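# data_changed() (from charms.reactive.helpers) is what keeps these handlers
# idempotent: it stores a hash of the given value under the given key in the
# unit's local kv store and returns True only when the value differs from the
# previously recorded one (or on the first call). A minimal, self-contained
# sketch of that guard pattern; the key name, nodes, and port below are
# illustrative only, not taken from this charm.
def _example_jn_guard():
    jn_nodes, jn_port = ['jn-0', 'jn-1'], 8485
    if data_changed('example.jn.config', (jn_nodes, jn_port)):
        # First call, or the journalnode set / port changed since last call.
        return True  # caller would reconfigure and restart here
    return False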
def update_ha_config(datanode):
    cluster_nodes = get_cluster_nodes()
    jn_nodes = sorted(datanode.nodes())
    jn_port = datanode.jn_port()
    started = is_state('namenode.started')
    new_cluster_config = data_changed('namenode.cluster-nodes', cluster_nodes)
    new_jn_config = data_changed('namenode.jn.config', (jn_nodes, jn_port))

    hadoop = get_hadoop_base()
    hdfs = HDFS(hadoop)
    # Always (re)write the config; only restart or reload when something
    # changed and the NameNode is already running.
    hdfs.configure_namenode(cluster_nodes)
    hdfs.register_journalnodes(jn_nodes, jn_port)

    if started and new_cluster_config:
        hdfs.restart_namenode()
    elif started and new_jn_config:
        hdfs.reload_slaves()  # is this actually necessary?
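# Handlers like the two above are normally registered with charms.reactive's
# @when decorator, which invokes them with the relation instances whose states
# matched. A sketch of that wiring; the state and relation names here are
# assumptions for illustration, not taken from this charm.
from charms.reactive import when


@when('namenode.started', 'namenode-cluster.joined',
      'datanode.journalnode.joined')
def _example_ha_handler(cluster, datanode):
    # Delegates to the functions above once all the example states are set.
    configure_ha(cluster, datanode)
    update_ha_config(datanode)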