Example #1
def _update_exclude_files(cluster, instances):
    datanodes = _get_instances_with_service(instances, "datanode")
    nodemanagers = _get_instances_with_service(instances, "nodemanager")
    dn_hosts = u.generate_fqdn_host_names(datanodes)
    nm_hosts = u.generate_fqdn_host_names(nodemanagers)
    for instance in u.get_instances(cluster):
        with instance.remote() as r:
            r.execute_command("sudo su - -c \"echo '%s' > %s/dn-exclude\" hadoop" % (dn_hosts, HADOOP_CONF_DIR))
            r.execute_command("sudo su - -c \"echo '%s' > %s/nm-exclude\" hadoop" % (nm_hosts, HADOOP_CONF_DIR))
Example #2
def decommission_tt(jt, inst_to_be_deleted, survived_inst):
    with remote.get_remote(jt) as r:
        r.write_file_to('/etc/hadoop/tt.excl',
                        utils.generate_fqdn_host_names(
                            inst_to_be_deleted))
        run.refresh_nodes(remote.get_remote(jt), "mradmin")
        context.sleep(3)
        r.write_files_to({'/etc/hadoop/tt.incl':
                          utils.generate_fqdn_host_names(survived_inst),
                          '/etc/hadoop/tt.excl': ""})
Example #3
def decommission_tt(jt, inst_to_be_deleted, survived_inst):
    with remote.get_remote(jt) as r:
        r.write_file_to('/etc/hadoop/tt.excl',
                        utils.generate_fqdn_host_names(inst_to_be_deleted))
        run.refresh_nodes(remote.get_remote(jt), "mradmin")
        context.sleep(3)
        r.write_files_to({
            '/etc/hadoop/tt.incl':
            utils.generate_fqdn_host_names(survived_inst),
            '/etc/hadoop/tt.excl':
            "",
        })
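Both decommission helpers above write the exclude file, call run.refresh_nodes with "mradmin" (or "dfsadmin" for HDFS in the later examples), and only then rewrite the include/exclude pair. The refresh_nodes helper itself is not part of this listing; as a rough sketch of what such a helper could do (the exact command line and the hadoop user are assumptions, not taken from these examples):

def refresh_nodes(remote, service):
    # Hypothetical sketch: ask the JobTracker/NameNode to re-read its
    # include/exclude files. service is "mradmin" or "dfsadmin", matching
    # the calls in the examples above.
    remote.execute_command(
        "sudo su - -c 'hadoop %s -refreshNodes' hadoop" % service)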
Example #4
def _update_exclude_files(cluster, instances):
    datanodes = _get_instances_with_service(instances, 'datanode')
    nodemanagers = _get_instances_with_service(instances, 'nodemanager')
    dn_hosts = utils.generate_fqdn_host_names(datanodes)
    nm_hosts = utils.generate_fqdn_host_names(nodemanagers)
    for instance in utils.get_instances(cluster):
        with instance.remote() as r:
            r.execute_command(
                'sudo su - -c "echo \'%s\' > %s/dn-exclude" hadoop' %
                (dn_hosts, HADOOP_CONF_DIR))
            r.execute_command(
                'sudo su - -c "echo \'%s\' > %s/nm-exclude" hadoop' %
                (nm_hosts, HADOOP_CONF_DIR))
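Examples #1 and #4 filter the affected instances with _get_instances_with_service before generating the host lists. That helper is not reproduced here; a plausible sketch, assuming each instance exposes the processes of its node group:

def _get_instances_with_service(instances, service):
    # Hypothetical sketch: keep only the instances whose node group runs
    # the given process, e.g. "datanode" or "nodemanager".
    return [instance for instance in instances
            if service in instance.node_group.node_processes]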
Example #5
def _update_include_files(cluster, dec_instances=None):
    dec_instances = dec_instances or []
    dec_instances_ids = [instance.id for instance in dec_instances]

    instances = u.get_instances(cluster)

    inst_filter = lambda inst: inst.id not in dec_instances_ids

    datanodes = filter(inst_filter, vu.get_datanodes(cluster))
    nodemanagers = filter(inst_filter, vu.get_nodemanagers(cluster))
    dn_hosts = u.generate_fqdn_host_names(datanodes)
    nm_hosts = u.generate_fqdn_host_names(nodemanagers)
    for instance in instances:
        with instance.remote() as r:
            r.execute_command("sudo su - -c \"echo '%s' > %s/dn-include\" hadoop" % (dn_hosts, HADOOP_CONF_DIR))
            r.execute_command("sudo su - -c \"echo '%s' > %s/nm-include\" hadoop" % (nm_hosts, HADOOP_CONF_DIR))
Example #6
def decommission_dn(nn, inst_to_be_deleted, survived_inst):
    with remote.get_remote(nn) as r:
        r.write_file_to('/etc/hadoop/dn.excl',
                        utils.generate_fqdn_host_names(
                            inst_to_be_deleted))
        run.refresh_nodes(remote.get_remote(nn), "dfsadmin")
        context.sleep(3)

        poll_utils.plugin_option_poll(
            nn.cluster, is_decommissioned,
            config_helper.DECOMMISSIONING_TIMEOUT,
            _("Decommission %s") % "DataNodes", 3,
            {'r': r, 'inst_to_be_deleted': inst_to_be_deleted})

        r.write_files_to({'/etc/hadoop/dn.incl':
                          utils.generate_fqdn_host_names(survived_inst),
                          '/etc/hadoop/dn.excl': ""})
Example #7
def _update_include_files(cluster, dec_instances=None):
    dec_instances = dec_instances or []
    dec_instances_ids = [instance.id for instance in dec_instances]

    instances = utils.get_instances(cluster)

    inst_filter = lambda inst: inst.id not in dec_instances_ids

    datanodes = filter(inst_filter, vu.get_datanodes(cluster))
    nodemanagers = filter(inst_filter, vu.get_nodemanagers(cluster))
    dn_hosts = utils.generate_fqdn_host_names(datanodes)
    nm_hosts = utils.generate_fqdn_host_names(nodemanagers)
    for instance in instances:
        with instance.remote() as r:
            r.execute_command(
                'sudo su - -c "echo \'%s\' > %s/dn-include" hadoop' %
                (dn_hosts, HADOOP_CONF_DIR))
            r.execute_command(
                'sudo su - -c "echo \'%s\' > %s/nm-include" hadoop' %
                (nm_hosts, HADOOP_CONF_DIR))
Example #8
def decommission_dn(nn, inst_to_be_deleted, survived_inst):
    with utils.get_remote(nn) as r:
        r.write_file_to('/etc/hadoop/dn.excl',
                        utils.generate_fqdn_host_names(inst_to_be_deleted))
        run.refresh_nodes(utils.get_remote(nn), "dfsadmin")
        context.sleep(3)

        utils.plugin_option_poll(nn.cluster, _is_decommissioned,
                                 c_helper.DECOMMISSIONING_TIMEOUT,
                                 _("Decommission %s") % "DataNodes", 3, {
                                     'r': r,
                                     'inst_to_be_deleted': inst_to_be_deleted
                                 })

        r.write_files_to({
            '/etc/hadoop/dn.incl':
            utils.generate_fqdn_host_names(survived_inst),
            '/etc/hadoop/dn.excl':
            ""
        })
Example #9
def decommission_dn(nn, inst_to_be_deleted, survived_inst):
    with remote.get_remote(nn) as r:
        r.write_file_to('/etc/hadoop/dn.excl',
                        utils.generate_fqdn_host_names(inst_to_be_deleted))
        run.refresh_nodes(remote.get_remote(nn), "dfsadmin")
        context.sleep(3)

        timeout = config_helper.get_decommissioning_timeout(
            nn.node_group.cluster)
        s_time = timeutils.utcnow()
        all_found = False

        while timeutils.delta_seconds(s_time, timeutils.utcnow()) < timeout:
            cmd = r.execute_command(
                "sudo su -c 'hadoop dfsadmin -report' hadoop")
            all_found = True
            datanodes_info = parse_dfs_report(cmd[1])
            for i in inst_to_be_deleted:
                for dn in datanodes_info:
                    if (dn["Name"].startswith(i.internal_ip)) and (
                            dn["Decommission Status"] != "Decommissioned"):
                        all_found = False
                        break

            if all_found:
                r.write_files_to({
                    '/etc/hadoop/dn.incl':
                    utils.generate_fqdn_host_names(survived_inst),
                    '/etc/hadoop/dn.excl':
                    "",
                })
                break
            context.sleep(3)

        if not all_found:
            raise ex.DecommissionError(
                _("Cannot finish decommission of cluster %(cluster)s in "
                  "%(seconds)d seconds") % {
                      "cluster": nn.node_group.cluster,
                      "seconds": timeout
                  })
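Examples #9 and #10 depend on parse_dfs_report to turn the output of hadoop dfsadmin -report into a list of per-DataNode dictionaries with keys such as "Name" and "Decommission Status". The parser is not shown in this listing; a minimal sketch, assuming the report consists of blank-line-separated blocks of "key: value" lines (the exact layout varies between Hadoop versions):

def parse_dfs_report(raw):
    # Hypothetical sketch: one dict per DataNode block in the report.
    nodes = []
    for block in raw.split("\n\n"):
        info = {}
        for line in block.splitlines():
            if ":" not in line:
                continue
            key, _sep, value = line.partition(":")
            info[key.strip()] = value.strip()
        # Blocks without a "Name" entry (e.g. the cluster summary) are skipped.
        if "Name" in info:
            nodes.append(info)
    return nodes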
Example #10
def decommission_dn(nn, inst_to_be_deleted, survived_inst):
    with remote.get_remote(nn) as r:
        r.write_file_to('/etc/hadoop/dn.excl',
                        utils.generate_fqdn_host_names(
                            inst_to_be_deleted))
        run.refresh_nodes(remote.get_remote(nn), "dfsadmin")
        context.sleep(3)

        timeout = c_helper.get_decommissioning_timeout(
            nn.node_group.cluster)
        s_time = timeutils.utcnow()
        all_found = False

        while timeutils.delta_seconds(s_time, timeutils.utcnow()) < timeout:
            cmd = r.execute_command(
                "sudo -u hdfs hadoop dfsadmin -report")
            all_found = True
            datanodes_info = parse_dfs_report(cmd[1])
            for i in inst_to_be_deleted:
                for dn in datanodes_info:
                    if (dn["Name"].startswith(i.internal_ip)) and (
                            dn["Decommission Status"] != "Decommissioned"):
                        all_found = False
                        break

            if all_found:
                r.write_files_to({
                    '/etc/hadoop/dn.incl':
                    utils.generate_fqdn_host_names(survived_inst),
                    '/etc/hadoop/dn.excl': "",
                })
                break
            context.sleep(3)

        if not all_found:
            raise ex.DecommissionError(
                _("Cannot finish decommission of cluster %(cluster)s in "
                  "%(seconds)d seconds") %
                {"cluster": nn.node_group.cluster,
                 "seconds": timeout})
Example #11
    def test_generate_fqdn_host_names(self):
        node = mock.Mock()
        node.fqdn = mock.Mock(return_value="fqdn")

        res = pu.generate_fqdn_host_names([node, node])
        self.assertEqual("fqdn\nfqdn", res)
Example #12
    def _push_jobtracker_configs(self, cluster, r):
        r.write_file_to(
            '/etc/hadoop/tt.incl',
            utils.generate_fqdn_host_names(vu.get_tasktrackers(cluster)))
Example #14
    def _push_namenode_configs(self, cluster, r):
        r.write_file_to(
            '/etc/hadoop/dn.incl',
            utils.generate_fqdn_host_names(vu.get_datanodes(cluster)))
Example #15
    def _push_namenode_configs(self, cluster, r):
        r.write_file_to(
            '/etc/hadoop/dn.incl',
            utils.generate_fqdn_host_names(
                utils.get_instances(cluster, "datanode")))
        r.write_file_to('/etc/hadoop/dn.excl', '')
Example #16
    def _push_jobtracker_configs(self, cluster, r):
        r.write_file_to(
            "/etc/hadoop/tt.incl",
            utils.generate_fqdn_host_names(vu.get_tasktrackers(cluster)))
Example #17
    def _push_namenode_configs(self, cluster, r):
        r.write_file_to(
            "/etc/hadoop/dn.incl",
            utils.generate_fqdn_host_names(vu.get_datanodes(cluster)))
Example #18
    def _push_namenode_configs(self, cluster, r):
        r.write_file_to('/etc/hadoop/dn.incl',
                        utils.generate_fqdn_host_names(
                            utils.get_instances(cluster, "datanode")))
        r.write_file_to('/etc/hadoop/dn.excl', '')