Example #1
0
    def ru_downgrade_state(cls):
        '''
        Downgrades Namenode.

        Issues `hadoop namenode -rollingUpgrade downgrade` as the hdfs user,
        abandoning the rolling upgrade (state may be converted back to the
        previous version, or is already compatible).
        NOTE: this command will not return until namenode shuts down.
        '''
        command = "sudo su - -c 'hadoop namenode -rollingUpgrade downgrade' hdfs"

        def run_downgrade(host):
            # Run the downgrade command on *host* as the admin user and
            # assert that it succeeded.
            exitcode, _ = Machine.runas(Machine.getAdminUser(), command, host,
                                        None, None, "True",
                                        Machine.getAdminPasswd())
            ruAssert(
                "HDFS", exitcode == 0,
                "[NNDowngrade] hadoop namenode -rollingUpgrade downgrade command failed"
            )

        if not HDFS.isHAEnabled():
            # Non-HA: stop the single namenode, then downgrade it.
            HDFS.stopNamenode()
            run_downgrade(HDFS.getNamenode())
            return

        # HA: handle the standby first, then the active namenode.
        for host in (HDFS.getNamenodeByState('standby'),
                     HDFS.getNamenodeByState('active')):
            HDFS.resetNamenode('stop', host=host)
            run_downgrade(host)
Example #2
0
 def modifyConfigs(cls, changes):
     '''
     Apply *changes* to the WebHCat (hcatalog) configuration on the namenode.

     A fresh config directory is created under the machine temp dir, named
     with a millisecond timestamp so repeated calls never collide, and the
     modified config is pushed to the namenode host only.

     :param changes: config modifications understood by
                     configUtils.modifyConfig
     :return: path of the directory holding the updated config
     '''
     #  get the new config location
     name_node = HDFS.getNamenode()
     node_list = [name_node]
     HCAT_CONF_DIR = Config.get('hcatalog', 'HCATALOG_CONF',
                                '/etc/hive-webhcat/conf')
     # Unique destination, e.g. <tmp>/hcatalogConf_<epoch-millis>.
     # (Removed a redundant single-argument os.path.join wrapper around
     # this expression -- os.path.join(x) returns x unchanged.)
     updatedConf = os.path.join(Machine.getTempDir(), 'hcatalogConf_') + \
         str(int(round(time.time() * 1000)))
     configUtils.modifyConfig(changes, HCAT_CONF_DIR, updatedConf,
                              node_list)
     return updatedConf
import logging
import re

from beaver.machine import Machine
from beaver.component.hadoop import HDFS

logger = logging.getLogger(__name__)

# Probe script: touch a file on several mount points of the namenode and
# every datanode to detect hosts whose filesystems have gone read-only.
nn = HDFS.getNamenode()
logger.info("NN = %s", nn)

#list of DNs
dns = HDFS.getDatanodes()
logger.info("DNs = %s", dns)

# Deduplicated host list: namenode first, then each datanode once.
nodes = [nn]
for dn in dns:
    if dn not in nodes:
        nodes.append(dn)

# Candidate mount prefixes; the trailing "" probes the root filesystem's /tmp.
paths = ["/grid/0", "/grid/1", "/grid/2", "/grid/3", "/grid/4", "/grid/5", ""]
logger.info("nodes=%s", nodes)
logger.info("paths=%s", paths)
suspiciousNodes = []
for node in nodes:
    logger.info("node=%s", node)
    for path in paths:
        fname = path + "/tmp/touchtest"
        #logger.info("testing %s at %s" % (fname, node))
        (exit_code, stdout) = Machine.touchRemote(node, fname)
        # NOTE(review): the body of this check is missing -- the source
        # appears truncated here (presumably the node is recorded in
        # suspiciousNodes); confirm against the full file.
        if re.search("Read-only file system", stdout) != None:
Example #4
0
    def downgrade_master(cls, version, config=None):
        '''
        Downgrade HDFS Master services to *version*.

        HA clusters: stop/flip/start each namenode (standby first, then
        active) along with its ZKFC, waiting for the NN to leave safemode
        before moving on, then roll the journal nodes one at a time.
        Non-HA clusters: flip the namenode and the secondary namenode.

        :param version: Version to be downgraded to
        :param config: Configuration location (passed to the start calls)
        '''
        from beaver.component.rollingupgrade.ruCommon import hdpSelect
        from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
        if HDFS.isHAEnabled():
            UpgradePerNode.reportProgress(
                "[INFO][HDFS][Downgrade] HA Namenode Downgrade Started ")
            nodes = []
            # Standby first, so the active NN keeps serving while the
            # standby is flipped.
            nodes.append(HDFS.getNamenodeByState('standby'))
            nodes.append(HDFS.getNamenodeByState('active'))
            for node in nodes:
                HDFS.resetNamenode('stop', host=node)
                HDFS.resetZkfc('stop', hosts=node.split())
                # BUG-25534: There is no package for zkfc. So just updating the NN is enough.
                hdpSelect.changeVersion("hadoop-hdfs-namenode", version, node)
                HDFS.resetNamenode('start', config=config, host=node)
                HDFS.resetZkfc('start', hosts=node.split())
                # lets make sure the NN is out of safemode before we proceed to the next namenode
                HDFS.waitForNNOutOfSafemode(options='-fs hdfs://%s:8020' %
                                            node)

            UpgradePerNode.reportProgress(
                "[INFO][HDFS][Downgrade] HA Namenode Downgrade Finished ")
            jn_nodes = HDFS.getJournalNodes()
            if len(jn_nodes) < 3:
                UpgradePerNode.reportProgress(
                    "[INFO][HDFS][Downgrade] Less than three Journal Nodes. Not going to do Downgrade "
                )
                return

            hdfs_user = Config.get('hadoop', 'HDFS_USER')
            # Fixed progress message: this is the downgrade path (it
            # previously reported "Upgrade Started", inconsistent with the
            # "Downgrade Finished" message below).
            UpgradePerNode.reportProgress(
                "[INFO][HDFS][Downgrade] HA Journal Node Downgrade Started")
            cls.ensure_all_jns_are_up(jn_nodes)
            # Loop through all the JNs and stop-flip-start one at a time so
            # a quorum of journal nodes stays available throughout.
            for node in jn_nodes:
                Hadoop.resetService(hdfs_user,
                                    node,
                                    "journalnode",
                                    'stop',
                                    binFolder="sbin")
                hdpSelect.changeVersion("hadoop-hdfs-journalnode", version,
                                        node)
                Hadoop.resetService(hdfs_user,
                                    node,
                                    "journalnode",
                                    'start',
                                    binFolder="sbin")
                time.sleep(5)
                cls.ensure_all_jns_are_up(jn_nodes)

            cls.ensure_nn_is_active()
            UpgradePerNode.reportProgress(
                "[INFO][HDFS][Downgrade] HA Journal Node Downgrade Finished ")
        else:
            # Non-HA: downgrade the namenode ...
            UpgradePerNode.reportProgress(
                "[INFO][HDFS][Downgrade] Namenode Downgrade Started ")
            node = HDFS.getNamenode()
            HDFS.stopNamenode()
            hdpSelect.changeVersion("hadoop-hdfs-namenode", version, node)
            HDFS.startNamenode(config=config)
            UpgradePerNode.reportProgress(
                "[INFO][HDFS][Downgrade] Namenode Downgrade Finished ")
            # ... then downgrade the secondary namenode
            UpgradePerNode.reportProgress(
                "[INFO][HDFS][Downgrade]Secondary Namenode Downgrade Started ")
            node = HDFS.getSecondaryNamenode()
            HDFS.stopSecondaryNamenode()
            hdpSelect.changeVersion("hadoop-hdfs-secondarynamenode", version,
                                    node)
            HDFS.startSecondaryNamenode(config=config)
            UpgradePerNode.reportProgress(
                "[INFO][HDFS][Downgrade] Secondary Namenode Downgrade Finished"
            )