Example #1
    def upgrade_master(cls, version, config=None):
        '''
        Upgrades YARN master services: JHS, ATS (timeline server) and RM
        :param version: Version to be upgraded to
        :param config: Config location
        '''
        from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
        UpgradePerNode.reportProgress(
            "[INFO][YARN][Upgrade] Job history server upgrade started ")
        logger.info("**** Begin history server upgrade ****")
        MAPRED.stopHistoryserver()
        historyNode = MAPRED.getHistoryserver()
        # TODO use hdp-select script to upgrade JHS
        from beaver.component.rollingupgrade.ruCommon import hdpSelect
        hdpSelect.changeVersion("hadoop-mapreduce-historyserver", version,
                                historyNode)
        MAPRED.startHistoryserver(config=config)
        time.sleep(5)
        logger.info(
            "**** End history server upgrade, begin timeline server upgrade ****"
        )
        UpgradePerNode.reportProgress(
            "[INFO][YARN][Upgrade]Job history server upgrade finished ")
        UpgradePerNode.reportProgress(
            "[INFO][YARN][Upgrade] Timeline server upgrade started  ")

        ytsNode = YARN.getATSHost()
        YARN.stopATSServer(ytsNode)
        hdpSelect.changeVersion("hadoop-yarn-timelineserver", version, ytsNode)
        YARN.startATSServer(config=config)
        time.sleep(5)

        logger.info("**** End timeline server upgrade, begin rm upgrade ****")
        UpgradePerNode.reportProgress(
            "[INFO][YARN][Upgrade] Timeline server upgrade finished  ")
        UpgradePerNode.reportProgress(
            "[INFO][YARN][Upgrade] Resource manager upgrade started  ")
        if YARN.isHAEnabled():
            # upgrade the standby RM first, then the active one
            nodes = [
                YARN.getRMHostByState('standby'),
                YARN.getRMHostByState('active')
            ]
            for node in nodes:
                YARN.resetResourceManager('stop', config=config, host=node)
                time.sleep(2)
                hdpSelect.changeVersion("hadoop-yarn-resourcemanager", version,
                                        node)
                YARN.resetResourceManager('start', config=config, host=node)
                time.sleep(5)
        else:
            node = MAPRED.getJobtracker()
            MAPRED.stopJobtracker()
            time.sleep(2)
            # TODO use hdp-select script to upgrade the RM
            hdpSelect.changeVersion("hadoop-yarn-resourcemanager", version,
                                    node)
            MAPRED.startJobtracker(config=config)
            time.sleep(5)
        logger.info("*** Ending RM upgrade ****")
        UpgradePerNode.reportProgress(
            "[INFO][YARN][Upgrade] Resource manager Upgrade Finished")
Example #2
    def updateJobProperties(cls,
                            propFile,
                            properties=None,
                            haEnabled=False,
                            debug=False):
        fileSystemName = Hadoop.getFSDefaultValue()
        jobTrackerIP = MAPRED.getJobtrackerAddress()
        jobTracker = jobTrackerIP[0] + ":" + jobTrackerIP[1]

        if not properties:
            properties = {}
        if 'nameNode' not in properties:
            properties['nameNode'] = fileSystemName
        if 'jobTracker' not in properties:
            properties['jobTracker'] = jobTracker

        if "hcatalog" in propFile:
            if Hadoop.isSecure():
                kerberosPrincipal = Hive.getConfigValue(
                    "hive.metastore.kerberos.principal")
                properties[
                    'hive.metastore.kerberos.principal'] = kerberosPrincipal

            logger.info("Updating for hcatalog workflow")
            hcatNode = Hive.getConfigValue("hive.metastore.uris").replace(
                'thrift', 'hcat')
            logger.info("Hcat node is " + hcatNode)
            properties['hcatNode'] = hcatNode

        if Hadoop.isSecure():
            # determine the namenode and the jobtracker principal
            nnPrincipal = None
            if haEnabled:
                nnPrincipal = HDFS.getNameNodePrincipal().replace(
                    '_HOST', HDFS.getNamenodeByState('active'))
            else:
                nnPrincipal = HDFS.getNameNodePrincipal().replace(
                    '_HOST',
                    HDFS.getNamenodeHttpAddress()[0])
            jtPrincipal = MAPRED.getMasterPrincipal().replace(
                '_HOST', jobTrackerIP[0])
            properties['dfs.namenode.kerberos.principal'] = nnPrincipal
            properties['mapreduce.jobtracker.kerberos.principal'] = jtPrincipal

        wfPath = util.getPropertyValueFromFile(propFile,
                                               "oozie.wf.application.path")
        if wfPath is not None and "hdfs://localhost:9000" in wfPath:
            wfPath = wfPath.replace("hdfs://localhost:9000", fileSystemName)
            logger.info("Value of replaced oozie.wf.application.path is " +
                        wfPath)
            properties['oozie.wf.application.path'] = wfPath

        util.writePropertiesToFile(propFile, propFile, properties)

        if debug:
            logger.info('Content of properties file %s' % propFile)
            # print the file to the console
            with open(propFile, 'r') as f:
                logger.info(f.read())
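
updateJobProperties delegates the file I/O to util.getPropertyValueFromFile and util.writePropertiesToFile. For readers without the beaver tree, a simplified sketch of what such Java-style .properties helpers might look like (no escaping or multi-line values handled):

    def read_properties(path):
        # Parse simple key=value lines, ignoring blanks and # comments.
        props = {}
        with open(path) as f:
            for line in f:
                line = line.strip()
                if not line or line.startswith('#'):
                    continue
                key, _, value = line.partition('=')
                props[key.strip()] = value.strip()
        return props

    def write_properties(path, props):
        # Write the merged properties back, one key=value pair per line.
        with open(path, 'w') as f:
            for key in sorted(props):
                f.write("%s=%s\n" % (key, props[key]))
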
Example #3
    def checkClasspathVersion(cls, Version_Num, config=None):
        Local_Test_dir = os.path.join(Config.getEnv("WORKSPACE"), "tests",
                                      "rolling_upgrade", "yarn")
        Multi_Version_App_Dir = os.path.join(Local_Test_dir, "data")
        Mapper = "data/versionVerifyMapper.py"
        Reducer = "data/versionVerifyReducer.py"
        Verify_File_Name = "test.txt"
        Verify_Test_File = os.path.join(Multi_Version_App_Dir,
                                        Verify_File_Name)
        # Set up env
        mapred_app_path = MAPRED.getConfigValue(
            "mapreduce.application.framework.path", None)
        mapred_classpath = MAPRED.getConfigValue(
            "mapreduce.application.classpath", None)
        env = {
            "mapreduce.application.framework.path": mapred_app_path,
            "mapreduce.application.classpath": mapred_classpath
        }
        verifyInput = cls._hdfs_input + "/verify"
        HDFS.createDirectory(verifyInput, None, "777", False)
        # Copy template files for the verifier streaming job
        with open(Verify_Test_File, 'w') as templateFile:
            templateFile.write(Version_Num)
        HDFS.copyFromLocal(Verify_Test_File,
                           verifyInput,
                           user=Config.get('hadoop', 'HADOOPQA_USER'))
        # Submit the special streaming job
        shortStreamingId = HadoopJobHelper.runStreamJob(
            Mapper,
            Reducer,
            verifyInput,
            cls._hdfs_output_verify,
            files=Multi_Version_App_Dir,
            config=config,
            extraJobArg=cls._jobArgs,
            env=env,
            proposedJobName=cls._shortStreamingName)
        MAPRED.waitForJobDoneOrTimeout(shortStreamingId, timeoutInSec=180)
        # Make sure task succeeded
        #assert YARN.getAppFinalStateFromID(appId) == 'SUCCEEDED'

        # Check result content
        retVal, checkContent = HDFS.cat(cls._hdfs_output_verify +
                                        '/part-00000')
        logger.info("CHECK CLASSPATH VERSION OUTPUT")
        logger.info(retVal)
        logger.info(checkContent)
        ruAssert("YARN", retVal == 0)
        ruAssert("YARN", 'True' in checkContent,
                 "[VersionVerify] Stream job returns false: " + checkContent)
        #assert retVal == 0
        #assert 'True' in checkContent, "Stream job returns false: " + checkContent
        #assert 'False' not in checkContent, "Stream job returns false: " + checkContent
        HDFS.deleteDirectory(cls._hdfs_output_verify,
                             user=Config.get('hadoop', 'HADOOPQA_USER'))
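
The mapper and reducer scripts (data/versionVerifyMapper.py, data/versionVerifyReducer.py) are not shown on this page. The idea is that each task compares the expected version from the input file against what it actually sees at runtime; a hypothetical mapper in that spirit:

    #!/usr/bin/env python
    # Hypothetical verifier mapper: emit True if the expected version
    # (read from the input records) appears in the task's classpath.
    # Assumes the streaming runtime exposes the job classpath via the
    # CLASSPATH environment variable.
    import os
    import sys

    for line in sys.stdin:
        expected_version = line.strip()
        if not expected_version:
            continue
        classpath = os.environ.get('CLASSPATH', '')
        print(str(expected_version in classpath))

The job's output then contains True/False lines, which the ruAssert on 'True' in checkContent above inspects.
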
Example #4
def resetYarn(skip_check=False):
    # updates for Hadoop 2
    if YARN.isHAEnabled():
        logger.info("Resetting YARN...")
        # only do this on nano, as these services are unreliable there
        tasktrackers = MAPRED.getTasktrackers()
        if skip_check or (Hadoop.isHadoop2() and Machine.isLinux()
                          and Machine.isNano()):
            YARN.restartHARMNodes()
            # add sleep to give RM enough time to register all the nodes
            # and be ready
            MAPRED.waitForNMToRegister(len(tasktrackers))

        logger.info("Resetting YARN Completed.")
Example #5
 def MAPRED_getJobSummaryLogFile(cls, jtHost=None, logoutput=True):
     if not jtHost:
         jtHost = cls.MAPRED_getJobtracker()
     try:
         file_paths = [
             Config.get('hadoop', 'YARN_LOG_DIR'),
             Config.get('hadoop', 'HADOOP_LOG_DIR')
         ]
         file_name = 'hadoop-mapreduce.jobsummary.log'
         if Machine.isWindows():
             file_name = 'rm-appsummary.log'
         for file_path in file_paths:
             lines = Machine.find(user=Machine.getAdminUser(),
                                  host=jtHost,
                                  filepath=file_path,
                                  searchstr=file_name,
                                  passwd=Machine.getAdminPasswd())
             lines = util.prune_output(lines, Machine.STRINGS_TO_IGNORE)
             if lines and lines[0]:
                 return lines[0]
         # otherwise return the RM/jobtracker log
         from beaver.component.hadoop import MAPRED
         return MAPRED.getJobTrackerLogFile()
     except Exception:
         if logoutput:
             logger.error(
                 "Exception occured during MAPRED_getJobSummaryLogFile() call"
             )
             logger.error(traceback.format_exc())
         return None
Example #6
 def run_client_smoketest(cls, config=None, env=None):
     '''
     Run Smoke test after upgrading Client
     :param config: Configuration location
     :param env: Set Environment variables
     '''
     from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
     UpgradePerNode.reportProgress(
         "###  Starting  cli smoke job for tez  ####")
     logger.info(
         "***** Starting MRRSleep job in Tez with setting env ******")
     numNodes = len(MAPRED.getTasktrackers())
     # Post a sleep job
     (exit_code, stdout) = Tez.runTezExampleJar(
         'mrrsleep "-Dmapreduce.framework.name=yarn-tez" "-Dtez.queue.name=%s" -m %d -r %d -ir 3 -irs 5 -mt 10 -rt 10'
         % (cls._queue, numNodes, numNodes),
         env=env)
     # Make sure succeeded
     if exit_code == 0:
         UpgradePerNode.reportProgress(
             "### [PASSED][Tez][ClientSmoke] cli smoke job for tez passed ####"
         )
     else:
         UpgradePerNode.reportProgress(
             "### [FAILED][Tez][ClientSmoke] cli smoke job for tez failed ####"
         )
     time.sleep(10)
     logger.info("***** Completed MRRSleep job in Tez ******")
     UpgradePerNode.reportProgress(
         "### cli smoke job for tez finished ####")
     cls.validate_correct_tar_used(stdout)
Example #7
 def MAPRED_getJTLogDir(cls, logoutput=True):
     try:
         from beaver.component.hadoop import MAPRED
         return MAPRED.getJTLogDir()
     except Exception:
         if logoutput:
             logger.error(
                 "Exception occured during MAPRED_getJTLogDir() call")
             logger.error(traceback.format_exc())
         return None
Example #8
    def upgrade_slave(cls, version, node, config=None):
        '''
        Upgrades YARN slave services: NM
        :param version: Version to be upgraded to
        :param node: Slave Node
        :param config: Config location
        :return:
        '''
        from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
        UpgradePerNode.reportProgress(
            "[INFO][YARN][Upgrade] Nodemanager Upgrade for %s started " % node)
        logger.info("**** Beginning upgrade for nodemanager on %s ****" % node)
        MAPRED.stopTasktrackers(nodes=[node])
        from beaver.component.rollingupgrade.ruCommon import hdpSelect
        hdpSelect.changeVersion("hadoop-yarn-nodemanager", version, node)

        # BUG-27328
        # make sure we set the linux container executor permissions
        if Hadoop.isSecure() and not Machine.isWindows():
            container_exec_cfg = os.path.join(
                Config.get('hadoop', 'HADOOP_CONF'), 'container-executor.cfg')
            container_exec = os.path.join(
                Config.get('hadoop',
                           'YARN_HOME').replace("client", "nodemanager"),
                'bin', 'container-executor')
            cmd="chown root:hadoop %s %s ; chmod 400 %s ; chmod 6050 %s" % \
                (container_exec_cfg, container_exec, container_exec_cfg, container_exec)
            Machine.runas(Machine.getAdminUser(),
                          cmd,
                          host=node,
                          cwd=None,
                          env=None,
                          logoutput=True,
                          passwd=Machine.getAdminPasswd())

        MAPRED.startTasktrackers(config, nodes=[node])
        logger.info("**** Ending upgrade for nodemanager on %s ****" % node)
        UpgradePerNode.reportProgress(
            "[INFO][YARN][Upgrade] Nodemanager Upgrade on %s Finished " % node)
Example #9
 def run_client_smoketest(cls, config=None, env=None):
     '''
     Run sleep Job passing env variables
     :param config: Configuration location
     :param env: Set Environment variables
     '''
     from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
     UpgradePerNode.reportProgress(
         "[INFO][YARN][SmokeClient]  Starting CLI test for Yarn ")
     jobCmd = "jar " + MAPRED.sleepJobJar(
     ) + " sleep -Dmapred.job.queue.name=%s  -m 1 -r 1 -mt 10 -rt 10 " % (
         cls._queue)
     exit_code, stdout = Hadoop.run(jobCmd, env=env)
     ruAssert("YARN", exit_code == 0, "[SmokeClient] Yarn smoketest failed")
     UpgradePerNode.reportProgress(
         "[INFO][YARN][SmokeClient] CLI test for Yarn finished ")
Example #10
    def launchMultipleSleepJobs(cls,
                                numJobs,
                                mapSleepTime=1000,
                                reduceSleepTime=1000,
                                config=None):
        '''
        Launch multiple sleep jobs
        :param numJobs: number of sleep jobs to run
        :param mapSleepTime: map sleep time
        :param reduceSleepTime: reduce sleep time
        :param config: expected configuration location
        :return: jobIds
        '''
        jobIds = []
        # Create jobs
        for _ in range(numJobs):
            jobclientFile = os.path.join(Config.getEnv('ARTIFACTS_DIR'),
                                         "JobClient_output.log")
            HadoopJobHelper.runSleepJob(numOfMaps=1,
                                        numOfReduce=1,
                                        mapSleepTime=mapSleepTime,
                                        reduceSleepTime=reduceSleepTime,
                                        extraJobArg=cls._jobArgs,
                                        runInBackground=False,
                                        config=config,
                                        directoutput=True,
                                        outputFile=jobclientFile)
            with open(jobclientFile) as f:
                text = f.read()
            currJobId = YARN.getAppAndJobIdsFromConsole(text)[1][0]
            jobIds.append(currJobId)
        # Verify every job succeeded
        for job in jobIds:
            ruAssert("YARN", MAPRED.isJobSucceed(job))
        return jobIds
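
A hypothetical call site, assuming it runs inside the same class with config pointing at the upgraded configuration:

    # Launch three short sleep jobs and collect their job ids; success of
    # each job is already asserted inside launchMultipleSleepJobs.
    jobIds = cls.launchMultipleSleepJobs(numJobs=3,
                                         mapSleepTime=500,
                                         reduceSleepTime=500,
                                         config=config)
    assert len(jobIds) == 3
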
Example #11
 def run_smoke_test(cls, smoketestnumber, config=None):
     '''
     Run smoke test for yarn
     :param smoketestnumber: Used for unique output log location
     :param config: expected configuration location
     '''
     from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
     UpgradePerNode.reportProgress(
         "[INFO][YARN][Smoke]  Starting smoke test for Yarn")
     numNodes = len(MAPRED.getTasktrackers())
     # run sleep job
     (exitCode,
      stdout) = HadoopJobHelper.runSleepJob(numOfMaps=numNodes,
                                            numOfReduce=numNodes,
                                            mapSleepTime=10,
                                            reduceSleepTime=10,
                                            extraJobArg=cls._jobArgs,
                                            runInBackground=False,
                                            config=config)
     # Make sure succeeded
     ruAssert("YARN", exitCode == 0, "[Smoke] YARN smoke failed")
     UpgradePerNode.reportProgress(
         "[INFO][YARN][Smoke] Smoke test for Yarn finished ")
     cls.run_rest_apis_test()
Example #12
 def run_smoke_test(cls, smoketestnumber, config=None):
     '''
     Run MRRSleep tez job
     :param smoketestnumber: Used for unique output log location
     '''
     logger.info("***** Starting MRRSleep job in Tez ******")
     from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
     UpgradePerNode.reportProgress("###  Starting smoke job for tez  ####")
     numNodes = len(MAPRED.getTasktrackers())
     # Post a sleep job
     (exit_code, stdout) = Tez.runTezExampleJar(
         'mrrsleep "-Dmapreduce.framework.name=yarn-tez" "-Dtez.queue.name=%s" -m %d -r %d -ir 3 -irs 5 -mt 10 -rt 10'
         % (cls._queue, numNodes, numNodes))
     # Make sure succeeded
     if exit_code == 0:
         UpgradePerNode.reportProgress(
             "### [PASSED][Tez][Smoke] smoke job for tez passed  ####")
     else:
         UpgradePerNode.reportProgress(
             "### [FAILED][Tez][Smoke] smoke job for tez failed  ####")
     time.sleep(10)
     logger.info("***** Completed MRRSleep job in Tez ******")
     UpgradePerNode.reportProgress("###  smoke test finished for tez  ####")
     cls.validate_correct_tar_used(stdout)
Example #13
    def getEnvironmentVariables(cls, directory):

        # Need to set the below for the oozie-regression test
        OOZIE_HDFS_LOG_DIR = str(
            Hadoop.getFSDefaultValue(True) + '/user/' +
            Config.get("oozie", "OOZIE_USER") + '/oozie-logs/')
        if Machine.type() == "Windows":
            sep = ";"
            OOZIE_HDFS_LOG_DIR = OOZIE_HDFS_LOG_DIR + os.environ.get(
                "COMPUTERNAME")
        else:
            sep = ":"
            OOZIE_HDFS_LOG_DIR = OOZIE_HDFS_LOG_DIR + Oozie.getOozieServers(
            )[0]

        OOZIE_QA_REG_DIR = cls.getOozieRegressionFolder()
        path = os.path.join(
            OOZIE_QA_REG_DIR, "lib",
            "yoozietest-qa-1.0.0-SNAPSHOT.jar") + sep + os.path.join(
                OOZIE_QA_REG_DIR, "lib",
                "original-yoozietest-qa-1.0.0-SNAPSHOT.jar")
        Config.setEnv("LOCAL_CP", path)

        oozie_server = cls.getOozieUrl()

        if HDFS.isASV() or HDFS.isCabo():
            if HDFS.isCabo():
                # Cabo gets a fully-qualified URI so the scheme can differentiate it from other filesystems
                HIT_NN = str(Hadoop.getConfigValue("fs.defaultFS", None))
            else:  # is ASV
                HIT_NN = str(Hadoop.getFSDefaultValue(False))

            return {
                "JAVA_HOME": Config.get('machine', 'JAVA_HOME'),
                "HADOOP_HOME": Config.get('hadoop', 'HADOOP_HOME'),
                "HADOOP_CONF_DIR": Config.get('hadoop', 'HADOOP_CONF'),
                "HIT_NN": HIT_NN,
                "HIT_JT": str(MAPRED.getJobtrackerAddress()[0]),
                "HIT_JT_PORT": str(MAPRED.getJobtrackerAddress()[1]),

                # TODO: change the following 3 for secure setups
                "OOZIE_QA_AUTH": "simple",
                "OOZIE_NN_KRB_PRINCIPAL": "Blah",
                "OOZIE_JT_KRB_PRINCIPAL": "Blah",
                "OOZIE_TEST_SUITE": "testngRegressionSuiteDebug",
                "OOZIE_HOME": Config.get("oozie", "OOZIE_HOME"),
                "OOZIE_PORT": Config.get("oozie", "OOZIE_PORT"),
                "OOZIE_SECURE_HOSTNAME": Machine.getfqdn(),
                "OOZIE_FOLDER": Config.get("oozie", "OOZIE_USER"),
                "OOZIE_USER": Config.get("oozie", "OOZIE_USER"),
                "FIREFOX_PATH": Config.get("firefox", "FIREFOX_PATH"),
                "FIREFOX_DISPLAY": Config.get("firefox", "FIREFOX_DISPLAY"),
                "OOZIE_QA_REG_DIR": OOZIE_QA_REG_DIR,
                "OOZIE_QA_HADOOP_QUEUE": "default",
                "OOZIE_URL": str(oozie_server),
                "HIT_OOZIE": ((str(oozie_server)).split(":")[2]).split("/")[0],
                "LOCAL_CP": path,
                "HIT_HDFS_STORAGE_DIR": directory,
                "OOZIE_HDFS_LOG_DIR": OOZIE_HDFS_LOG_DIR
            }

        else:
            if Hadoop.isSecure():
                oozie_qa_auth = "kerberos"
                nnPrincipal = HDFS.getNameNodePrincipal()
                jtPrincipal = MAPRED.getMasterPrincipal()
                user = Config.getEnv('USER')
                kerbTicket = Machine.getKerberosTicket(user)
            else:
                oozie_qa_auth = "simple"
                nnPrincipal = "blah"
                jtPrincipal = "blah"
                kerbTicket = "blah"

            return {
                "JAVA_HOME": Config.get('machine', 'JAVA_HOME'),
                "HADOOP_HOME": Config.get('hadoop', 'HADOOP_HOME'),
                "HADOOP_CONF_DIR": Config.get('hadoop', 'HADOOP_CONF'),
                "HIT_NN": str(Hadoop.getFSDefaultValue(False)),
                "HIT_NN_PORT":
                str(Hadoop.getFSDefaultValue(True)).split(":")[2],
                "HIT_JT": str(MAPRED.getJobtrackerAddress()[0]),
                "HIT_JT_PORT": str(MAPRED.getJobtrackerAddress()[1]),
                "OOZIE_QA_AUTH": oozie_qa_auth,
                "OOZIE_NN_KRB_PRINCIPAL": nnPrincipal,
                "OOZIE_JT_KRB_PRINCIPAL": jtPrincipal,
                "OOZIE_TEST_SUITE": "testngRegressionSuiteDebug",
                "OOZIE_HOME": Config.get("oozie", "OOZIE_HOME"),
                "OOZIE_PORT": Config.get("oozie", "OOZIE_PORT"),
                "OOZIE_FOLDER": Config.get("oozie", "OOZIE_USER"),
                "OOZIE_USER": Config.get("oozie", "OOZIE_USER"),
                "FIREFOX_PATH": Config.get("firefox", "FIREFOX_PATH"),
                "FIREFOX_DISPLAY": Config.get("firefox", "FIREFOX_DISPLAY"),
                "OOZIE_QA_REG_DIR": OOZIE_QA_REG_DIR,
                "OOZIE_QA_HADOOP_QUEUE": "default",
                "OOZIE_URL": str(oozie_server),
                "HIT_OOZIE": ((str(oozie_server)).split(":")[2]).split("/")[0],
                "LOCAL_CP": path,
                "HIT_HDFS_STORAGE_DIR": directory,
                "KRB5CCNAME": kerbTicket,
                "OOZIE_HDFS_LOG_DIR": OOZIE_HDFS_LOG_DIR
            }
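
The returned dictionary is meant to be merged into the environment of the regression-test process. A minimal sketch of a caller, assuming the same class context; the mvn command and HDFS directory are illustrative, not the documented runner:

    import os
    import subprocess

    env = os.environ.copy()
    env.update(cls.getEnvironmentVariables(directory='/user/qa/oozie-regression'))
    # Run the oozie regression suite with the assembled environment.
    subprocess.check_call(['mvn', 'test'], cwd=env['OOZIE_QA_REG_DIR'], env=env)
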
Example #14
    def run_rest_apis_test(self):
        '''
        Run checks to make sure the REST interfaces for the RM, NM, JHS and TimelineServer are up
        :return:
        '''
        from beaver.component.rollingupgrade.ruUpgrade import UpgradePerNode
        UpgradePerNode.reportProgress(
            "[INFO][YARN][RestTest] Testing REST interfaces for RM, NM, JHS and TimelineServer "
        )
        logger.info(
            "**** Testing REST interfaces for RM, NM, JHS and TimelineServer ****"
        )
        hostUrlMap = {}
        hostUrlExpectedStatusCode = {}
        rmAddress = YARN.getResourceManagerWebappAddress()
        nmPort = None
        jhsAddress = None
        atsAddress = YARN.get_ats_web_app_address()
        scheme = "http"

        if YARN.isHttpsEnabled():
            scheme = "https"
            if rmAddress.startswith("https://"):
                rmAddress = rmAddress[len("https://"):]
            nmPort = YARN.getNodeManagerWebappHttpsPort()
            jhsAddress = MAPRED.getHistoryServerWebappHttpsAddress()
            if jhsAddress.startswith("https://"):
                jhsAddress = jhsAddress[len("https://"):]
            if atsAddress.startswith("https://"):
                atsAddress = atsAddress[len("https://"):]
        else:
            if rmAddress.startswith("http://"):
                rmAddress = rmAddress[len("http://"):]
            nmPort = YARN.getNodeManagerWebappPort()
            jhsAddress = MAPRED.getHistoryServerWebappAddress()
            if jhsAddress.startswith("http://"):
                jhsAddress = jhsAddress[len("http://"):]
            if atsAddress.startswith("http://"):
                atsAddress = atsAddress[len("http://"):]

        rmPort = rmAddress.split(":")[1]
        hostUrlMap[rmAddress] = ["/ws/v1/cluster/info"]
        hostUrlExpectedStatusCode[rmAddress] = 200

        for nm in MAPRED.getTasktrackers():
            host = "%s:%s" % (nm, nmPort)
            hostUrlMap[host] = ["/ws/v1/node/info"]
            hostUrlExpectedStatusCode[host] = 200
        hostUrlMap[jhsAddress] = ["/ws/v1/history/info"]
        hostUrlExpectedStatusCode[jhsAddress] = 200
        hostUrlMap[atsAddress] = ["/ws/v1/timeline"]
        hostUrlExpectedStatusCode[atsAddress] = 200

        for host, urls in hostUrlMap.items():
            for url in urls:
                fetch_url = scheme + "://" + host + url
                (return_code, data, headers) = util.query_yarn_web_service(
                    fetch_url,
                    Config.get('hadoop', 'HADOOPQA_USER'),
                    also_check_modified_config_for_spnego=False)
                if int(return_code) == hostUrlExpectedStatusCode[host]:
                    UpgradePerNode.reportProgress(
                        "[PASSED][YARN][RestTest] Got %s status code from url %s. Passed "
                        % (return_code, fetch_url))
                else:
                    UpgradePerNode.reportProgress(
                        "[FAILED][YARN][RestTest]Got %s status code from url %s. Failed "
                        % (return_code, fetch_url))
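
The status-code check above can be reproduced outside the test framework with just the standard library. A minimal standalone sketch (host and port are illustrative; SPNEGO-secured clusters would need an authenticated client instead):

    try:
        from urllib.request import urlopen  # Python 3
    except ImportError:
        from urllib2 import urlopen  # Python 2

    def check_rest_endpoint(url, expected_status=200):
        # Return True if the web service answers with the expected code.
        try:
            return urlopen(url, timeout=10).getcode() == expected_status
        except Exception:
            return False

    assert check_rest_endpoint("http://rm-host:8088/ws/v1/cluster/info")
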
def test_CleanUpOfFilesAfterJobCompletionForFilesWithSymLink():
    testCaseDescription="test_CleanUpOfFilesAfterJobCompletionForFilesWithSymLink"
    testCaseId = "cleanup04"
    util.displayTestCaseMessage(testCaseDescription,testCaseId)
    fileCreated = "mysymlink.txt"    
    
    out = insertFileIntoHdfs(CREATE_FILE_2)    
    assert out[0] == 0
    time.sleep(15)
    
    logger.info("Try to get Job Tracker")
    JOBTRACKER = MAPRED.getJobTracker()
    assert JOBTRACKER is not None
    
    localFilePath = getFullPathOfFile(CREATE_FILE_2_PATH_IN_LOCAL)
    
    hadoopStreamingCmdFormat = 'jar %s -files %s -input %s -output %s -mapper "python %s" -reducer NONE'
    jobJarHadoopStreamingCmd = hadoopStreamingCmdFormat % (HADOOP_STREAMING_JAR, localFilePath, CREATE_FILE_2_PATH_IN_HADOOP, OUT_PATH_IN_HADOOP, CREATE_FILE_2)
    logger.info(jobJarHadoopStreamingCmd)    
    out = Hadoop.runInBackground(jobJarHadoopStreamingCmd)    
    time.sleep(15)
    
    logger.info("Try to get job id.....")    
    for i in range(1, 5):
        jobId = MAPRED.getJobID()        
        if (validateJobId(jobId)):            
            break
        time.sleep(10)
    assert jobId.startswith('job_') == True
    logger.info(" Get JobId: " + jobId + " successfully")
    
    logger.info("Try to get Attempt ID....")   
    attemptId = MAPRED.getAttemptIdsForJobId(jobId)                
    assert attemptId.startswith("attempt_") == True
    
    logger.info("Try to get Task Tracker...")    
    taskTrackersList = Hadoop.getTasktrackers()
    taskTracker = taskTrackersList[0].rstrip("\n")
    logger.info(" Task Tracker running the map task is " + taskTracker)
    time.sleep(40)
    
    logFileDirList = getLocalDirInfo(taskTracker)
    logger.info("Log file list: " + logFileDirList)
    logFileDirList = logFileDirList.split(',')
    isExistedTempFile = False
    for logFileDir in logFileDirList:
        logger.info("Directory of log file: " + logFileDir)
        isExistedTempFile = checkJobCreatedTempFileInTT(logFileDir, HADOOPQA_USER, jobId, attemptId, fileCreated, taskTracker)
        if isExistedTempFile:
            break
    assert isExistedTempFile
    
    logger.info("Check job is completed or not")
    for i in range(1, 10):
        isJobCompleted = MAPRED.checkForJobCompletion(jobId)
        if isJobCompleted == True:
            break
        time.sleep(20)
    assert isJobCompleted == True
    logger.info("Job is completed!")
    
    # Now check that the file is cleared off after the job completes
    isExistedTempFile = checkJobCreatedTempFileInTT(logFileDir, HADOOPQA_USER, jobId, attemptId, fileCreated, taskTracker)
    if isExistedTempFile:
        logger.info("The check for files being cleared after job completion failed")
        logger.info("The file created by the job still exists even after the job completed successfully")
    assert not isExistedTempFile
    
    
def test_CleanUpOfFilesAfterFailedJob():
    testCaseDescription="testCleanUpOfFilesAfterFailedJob"
    testCaseId = "cleanup03"
    util.displayTestCaseMessage(testCaseDescription,testCaseId)
    fileCreated="FileCreatedByJob.log"
    out=insertFileIntoHdfs(CREATE_FILE)
    assert out[0] == 0
        
    logger.info("Try to get Job Tracker")
    JOBTRACKER = MAPRED.getJobTracker()
    assert JOBTRACKER is not None
    
    logger.info( "Submitting a streaming job that will create a file ")
    localFilePath = getFullPathOfFile(CREATE_FILE_PATH_IN_LOCAL)
    
    hadoopStreamingCmdFormat = 'jar %s -files %s -input %s -output %s -mapper "python %s" -reducer NONE'
    jobJarHadoopStreamingCmd = hadoopStreamingCmdFormat % (HADOOP_STREAMING_JAR, localFilePath, CREATE_FILE_PATH_IN_HADOOP, OUT_PATH_IN_HADOOP, CREATE_FILE)
    logger.info(jobJarHadoopStreamingCmd)    
    out = Hadoop.runInBackground(jobJarHadoopStreamingCmd)    
    time.sleep(20)
    
    logger.info("Try to get job id.....")    
    for i in range(1, 5):
        jobId = MAPRED.getJobID()        
        if (validateJobId(jobId)):            
            break
        time.sleep(10)
    assert jobId.startswith('job_') == True
    logger.info(" Get JobId: " + jobId + " successfully")
    
    logger.info("Try to get Attempt ID....")   
    attemptId = MAPRED.getAttemptIdsForJobId(jobId)                
    assert attemptId.startswith("attempt_") == True        
    
    logger.info("Try to get Task Tracker...")    
    taskTrackersList = Hadoop.getTasktrackers()
    taskTracker = taskTrackersList[0].rstrip("\n")
    logger.info(" Task Tracker running the map task is " + taskTracker)
    time.sleep(20)
    
    logFileDirList = getLocalDirInfo(taskTracker)
    logger.info("Log file list: " + logFileDirList)
    logFileDirList = logFileDirList.split(',')
    isExistedTempFile = False
    for logFileDir in logFileDirList:
        logger.info("Directory of log file: " + logFileDir)
        isExistedTempFile = checkJobCreatedTempFileInTT(logFileDir, HADOOPQA_USER, jobId, attemptId, fileCreated, taskTracker)
        if isExistedTempFile:
            break
    assert isExistedTempFile
    # Now fail the job
    getAttemptIdsForJobIdAndStoreInFile(jobId)
    attemptIds = MAPRED.checkForNewAttemptIds(jobId)
    assert len(attemptIds) != 0
    while len(attemptIds) != 0:
        logger.info("Attempt ids found; proceeding to fail them")
        MAPRED.failAttempts(attemptIds)
        attemptIds = MAPRED.checkForNewAttemptIds(jobId)
        
    logger.info("Check job status")    
    isJobFailed=MAPRED.isJobFailed(jobId)
    if isJobFailed==False:
        logger.info( " The job could not be failed successfully and unable to proceed with the tests ")
    assert isJobFailed==True
    
    isExistedTempFile = checkJobCreatedTempFileInTT(logFileDir, HADOOPQA_USER, jobId, attemptId, fileCreated, taskTracker)
    if isExistedTempFile:
        logger.info("The check for files being cleared after failing a job failed")
        logger.info("The file created by the job still exists even after the job was failed")
    assert not isExistedTempFile
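
Both cleanup tests poll in hand-rolled retry loops (getting the job id, waiting for completion, draining attempt ids). A small generic helper could factor that pattern out; a sketch with hypothetical names:

    import time

    def wait_until(predicate, attempts=10, interval_secs=10):
        # Re-evaluate predicate until it returns a truthy value or the
        # attempts are exhausted; return the last value either way.
        result = None
        for _ in range(attempts):
            result = predicate()
            if result:
                return result
            time.sleep(interval_secs)
        return result

    # e.g. replacing the completion loop above:
    #     assert wait_until(lambda: MAPRED.checkForJobCompletion(jobId),
    #                       attempts=10, interval_secs=20)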