def decommission_dn(nn, inst_to_be_deleted, survived_inst):
    with remote.get_remote(nn) as r:
        # Add the instances being removed to the datanode exclude file and
        # tell the namenode to re-read it.
        r.write_file_to("/etc/hadoop/dn.excl",
                        utils.generate_fqdn_host_names(inst_to_be_deleted))
        run.refresh_nodes(remote.get_remote(nn), "dfsadmin")
        context.sleep(3)

        # Poll 'hadoop dfsadmin -report' until every excluded datanode
        # reports "Decommissioned".
        att_amount = 100
        while att_amount:
            cmd = r.execute_command(
                "sudo su -c 'hadoop dfsadmin -report' hadoop")
            all_found = True
            datanodes_info = parse_dfs_report(cmd[1])
            for i in inst_to_be_deleted:
                for dn in datanodes_info:
                    if (dn["Name"].startswith(i.internal_ip)) and (
                            dn["Decommission Status"] != "Decommissioned"):
                        all_found = False
                        break

            if all_found:
                # Decommission finished: shrink the include file and clear
                # the exclude file.
                r.write_files_to(
                    {"/etc/hadoop/dn.incl":
                        utils.generate_fqdn_host_names(survived_inst),
                     "/etc/hadoop/dn.excl": ""})
                break
            context.sleep(3)
            att_amount -= 1

        if not att_amount:
            raise Exception("Cannot finish decommission")
def decommission_dn(nn, inst_to_be_deleted, survived_inst):
    with remote.get_remote(nn) as r:
        r.write_file_to('/etc/hadoop/dn.excl',
                        utils.generate_fqdn_host_names(inst_to_be_deleted))
        run.refresh_nodes(remote.get_remote(nn), "dfsadmin")
        context.sleep(3)

        att_amount = 10
        while att_amount:
            cmd = r.execute_command(
                "sudo su -c 'hadoop dfsadmin -report' hadoop")
            all_found = True
            datanodes_info = parse_dfs_report(cmd[1])
            for i in inst_to_be_deleted:
                for dn in datanodes_info:
                    if (dn["Name"].startswith(i.internal_ip)) and (
                            dn["Decommission Status"] != "Decommissioned"):
                        all_found = False
                        break

            if all_found:
                r.write_files_to({
                    '/etc/hadoop/dn.incl':
                        utils.generate_fqdn_host_names(survived_inst),
                    '/etc/hadoop/dn.excl': "",
                })
                break
            context.sleep(3)
            att_amount -= 1

        if not att_amount:
            raise Exception("Cannot finish decommission")
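# NOTE: parse_dfs_report() is referenced by decommission_dn() but is not
# shown in this section. Below is a minimal sketch of such a helper,
# assuming the standard 'hadoop dfsadmin -report' layout: a cluster-wide
# summary, a "Datanodes available" line, then blank-line-separated
# per-node blocks of "Key: value" (or "Key : value") pairs such as
# "Name" and "Decommission Status". The real helper may differ.
def parse_dfs_report(cmd_output):
    res = []
    datanode_info = {}
    started = False
    for line in cmd_output.splitlines():
        if not started:
            # Skip the summary header; per-node data starts after this line.
            started = line.startswith("Datanodes available")
            continue
        line = line.strip()
        if not line:
            # A blank line closes the current datanode block.
            if datanode_info:
                res.append(datanode_info)
                datanode_info = {}
        elif ":" in line:
            key, _, value = line.partition(":")
            datanode_info[key.strip()] = value.strip()
    if datanode_info:
        res.append(datanode_info)
    return res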
def decommission_tt(jt, inst_to_be_deleted, survived_inst):
    with remote.get_remote(jt) as r:
        # Exclude the instances being removed, refresh the jobtracker, then
        # rewrite the tasktracker include/exclude files for the remaining
        # nodes.
        r.write_file_to("/etc/hadoop/tt.excl",
                        utils.generate_fqdn_host_names(inst_to_be_deleted))
        run.refresh_nodes(remote.get_remote(jt), "mradmin")
        context.sleep(3)
        r.write_files_to(
            {"/etc/hadoop/tt.incl":
                utils.generate_fqdn_host_names(survived_inst),
             "/etc/hadoop/tt.excl": ""})
def scale_cluster(self, cluster, instances):
    # Configure the newly added instances, make the namenode (and the
    # jobtracker, if present) re-read their node lists, then start the
    # datanode/tasktracker processes on the new nodes.
    self._setup_instances(cluster, instances)

    run.refresh_nodes(remote.get_remote(utils.get_namenode(cluster)),
                      "dfsadmin")
    jt = utils.get_jobtracker(cluster)
    if jt:
        run.refresh_nodes(remote.get_remote(jt), "mradmin")

    self._start_tt_dn_processes(instances)
def scale_cluster(self, cluster, instances):
    self._setup_instances(cluster, instances)

    run.refresh_nodes(remote.get_remote(
        utils.get_namenode(cluster)), "dfsadmin")
    jt = utils.get_jobtracker(cluster)
    if jt:
        run.refresh_nodes(remote.get_remote(jt), "mradmin")

    self._start_tt_dn_processes(instances)
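# NOTE: run.refresh_nodes() is not shown here. The "dfsadmin" / "mradmin"
# arguments correspond to the Hadoop 1.x commands
# 'hadoop dfsadmin -refreshNodes' and 'hadoop mradmin -refreshNodes', which
# make the namenode / jobtracker re-read their include and exclude files.
# A minimal sketch, assuming the same remote object API used above; the
# real helper may differ.
def refresh_nodes(r, service):
    r.execute_command(
        "sudo su -c 'hadoop %s -refreshNodes' hadoop" % service)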
def decommission_tt(jt, inst_to_be_deleted, survived_inst):
    with jt.remote as r:
        r.write_file_to('/etc/hadoop/tt.excl',
                        utils.generate_fqdn_host_names(inst_to_be_deleted))
        run.refresh_nodes(jt.remote, "mradmin")
        context.sleep(3)
        r.write_files_to({
            '/etc/hadoop/tt.incl':
                utils.generate_fqdn_host_names(survived_inst),
            '/etc/hadoop/tt.excl': "",
        })
def decommission_tt(jt, inst_to_be_deleted, survived_inst):
    with remote.get_remote(jt) as r:
        r.write_file_to('/etc/hadoop/tt.excl',
                        utils.generate_fqdn_host_names(inst_to_be_deleted))
        run.refresh_nodes(remote.get_remote(jt), "mradmin")
        context.sleep(3)
        r.write_files_to({
            '/etc/hadoop/tt.incl':
                utils.generate_fqdn_host_names(survived_inst),
            '/etc/hadoop/tt.excl': "",
        })
def scale_cluster(self, cluster, instances):
    # Push configs and the hadoop user's SSH keys to the new instances,
    # refresh the namenode/jobtracker node lists, then start the processes
    # each new node group requires.
    self._push_configs_to_nodes(cluster, instances=instances)
    self._write_hadoop_user_keys(cluster.private_key, instances)
    run.refresh_nodes(remote.get_remote(utils.get_namenode(cluster)),
                      "dfsadmin")
    jt = utils.get_jobtracker(cluster)
    if jt:
        run.refresh_nodes(remote.get_remote(jt), "mradmin")
    for i in instances:
        with remote.get_remote(i) as r:
            if "datanode" in i.node_group.node_processes:
                run.start_process(r, "datanode")
            if "tasktracker" in i.node_group.node_processes:
                run.start_process(r, "tasktracker")
def scale_cluster(self, cluster, instances):
    self._extract_configs(cluster)
    self._push_configs_to_nodes(cluster, instances=instances)
    self._write_hadoop_user_keys(cluster.private_key, instances)
    run.refresh_nodes(utils.get_namenode(cluster).remote, "dfsadmin")
    jt = utils.get_jobtracker(cluster)
    if jt:
        run.refresh_nodes(jt.remote, "mradmin")
    for i in instances:
        with i.remote as remote:
            if "datanode" in i.node_group.node_processes:
                run.start_process(remote, "datanode")
            if "tasktracker" in i.node_group.node_processes:
                run.start_process(remote, "tasktracker")
def scale_cluster(self, cluster, instances):
    self._push_configs_to_nodes(cluster, instances=instances)
    self._write_hadoop_user_keys(instances,
                                 cluster.management_private_key,
                                 cluster.management_public_key)
    run.refresh_nodes(remote.get_remote(
        utils.get_namenode(cluster)), "dfsadmin")
    jt = utils.get_jobtracker(cluster)
    if jt:
        run.refresh_nodes(remote.get_remote(jt), "mradmin")
    for i in instances:
        with remote.get_remote(i) as r:
            if "datanode" in i.node_group.node_processes:
                run.start_process(r, "datanode")
            if "tasktracker" in i.node_group.node_processes:
                run.start_process(r, "tasktracker")
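# NOTE: run.start_process() is not shown here either. A minimal sketch,
# assuming the Hadoop daemons are installed as system services named
# "hadoop-<process>" (the exact service names depend on the distribution
# and package layout); the real helper may differ.
def start_process(r, process):
    r.execute_command("sudo service hadoop-%s start" % process)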